diff --git a/.hgtags-top-repo b/.hgtags-top-repo
index ecdc65b6c89..fe0f260a5a3 100644
--- a/.hgtags-top-repo
+++ b/.hgtags-top-repo
@@ -305,3 +305,4 @@ f25ee9f62427a9ba27418e5531a89754791a305b jdk9-b57
9fa2185bee17462d1014538bff60af6e6f0b01e7 jdk9-b60
ea38728b4f4bdd8fd0d7a89b18069f521cf05013 jdk9-b61
105d045a69174d870b69bfe471b3f2d05a9f8ecc jdk9-b62
+0b32ed628fa60e4ab99fb0b5866d648e16231f17 jdk9-b63
diff --git a/common/autoconf/basics_windows.m4 b/common/autoconf/basics_windows.m4
index 210cc455047..1233c168d74 100644
--- a/common/autoconf/basics_windows.m4
+++ b/common/autoconf/basics_windows.m4
@@ -320,8 +320,8 @@ AC_DEFUN([BASIC_CHECK_PATHS_WINDOWS],
WINDOWS_ENV_VENDOR='cygwin'
WINDOWS_ENV_VERSION="$CYGWIN_VERSION"
- CYGWIN_VERSION_OK=`$ECHO $CYGWIN_VERSION | $GREP ^1.7.`
- if test "x$CYGWIN_VERSION_OK" = x; then
+ CYGWIN_VERSION_OLD=`$ECHO $CYGWIN_VERSION | $GREP -e '^1\.[0-6]'`
+ if test "x$CYGWIN_VERSION_OLD" != x; then
AC_MSG_NOTICE([Your cygwin is too old. You are running $CYGWIN_VERSION, but at least cygwin 1.7 is required. Please upgrade.])
AC_MSG_ERROR([Cannot continue])
fi
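The new check inverts the old logic: `^1.7.` matched only 1.7.x version strings, so CYGWIN_VERSION_OK came back empty on Cygwin 2.0 and configure aborted with "too old"; the replacement pattern instead matches only the genuinely old 1.0-1.6 series. A minimal standalone sketch of the new gate (plain sh, with the configure tool variables like $ECHO and $GREP inlined):

```sh
#!/bin/sh
# Sketch of the rewritten gate: only Cygwin 1.0-1.6 is rejected.
for CYGWIN_VERSION in 1.5.25 1.7.35 2.0.1; do
  CYGWIN_VERSION_OLD=`echo $CYGWIN_VERSION | grep -e '^1\.[0-6]'`
  if test "x$CYGWIN_VERSION_OLD" != x; then
    echo "$CYGWIN_VERSION: too old, at least cygwin 1.7 is required"
  else
    echo "$CYGWIN_VERSION: accepted"   # 1.7.35 and 2.0.1 both pass now
  fi
done
```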
diff --git a/common/autoconf/boot-jdk.m4 b/common/autoconf/boot-jdk.m4
index c0f0c07b4c8..a387def6474 100644
--- a/common/autoconf/boot-jdk.m4
+++ b/common/autoconf/boot-jdk.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -306,7 +306,6 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK],
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAC, javac)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAH, javah)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAR, jar)
- BOOTJDK_CHECK_TOOL_IN_BOOTJDK(NATIVE2ASCII, native2ascii)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JARSIGNER, jarsigner)
# Finally, set some other options...
diff --git a/common/autoconf/bootcycle-spec.gmk.in b/common/autoconf/bootcycle-spec.gmk.in
index 6e68c3a0b34..5c51176271b 100644
--- a/common/autoconf/bootcycle-spec.gmk.in
+++ b/common/autoconf/bootcycle-spec.gmk.in
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -57,6 +57,5 @@ JAVA_CMD:=$(BOOT_JDK)/bin/java
JAVAC_CMD:=$(BOOT_JDK)/bin/javac
JAVAH_CMD:=$(BOOT_JDK)/bin/javah
JAR_CMD:=$(BOOT_JDK)/bin/jar
-NATIVE2ASCII_CMD:=$(BOOT_JDK)/bin/native2ascii
JARSIGNER_CMD:=$(BOOT_JDK)/bin/jarsigner
SJAVAC_SERVER_JAVA_CMD:=$(JAVA_CMD)
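With native2ascii dropped here and in boot-jdk.m4 above, the set of boot JDK tools the build insists on shrinks to java, javac, javah, jar and jarsigner. A hypothetical pre-flight check along these lines (not part of the patch; the BOOT_JDK path is an assumption) mirrors what configure now verifies:

```sh
#!/bin/sh
# Hypothetical check: confirm the boot JDK provides every tool still required.
BOOT_JDK=${BOOT_JDK:-/opt/jdk8}   # assumption: adjust to your boot JDK
for tool in java javac javah jar jarsigner; do
  test -x "$BOOT_JDK/bin/$tool" || echo "missing boot JDK tool: $tool" >&2
done
```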
diff --git a/common/autoconf/flags.m4 b/common/autoconf/flags.m4
index 84df8837c7e..307fcfc0e1b 100644
--- a/common/autoconf/flags.m4
+++ b/common/autoconf/flags.m4
@@ -65,12 +65,6 @@ AC_DEFUN_ONCE([FLAGS_SETUP_INIT_FLAGS],
fi
AC_SUBST(POST_STRIP_CMD)
- if test "x$OPENJDK_TARGET_OS" = xsolaris; then
- # FIXME: break out into MCSFLAGS
- POST_MCS_CMD="$MCS -d -a \"JDK $FULL_VERSION\""
- fi
- AC_SUBST(POST_MCS_CMD)
-
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CC_OUT_OPTION=-Fo
EXE_OUT_OPTION=-out:
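For context: the removed POST_MCS_CMD invoked the Solaris mcs(1) utility on freshly linked binaries, where -d deletes the contents of the existing .comment section and -a appends a replacement string. Per binary, the build used to run something like the following (illustrative invocation only; the exact $FULL_VERSION value and file name are build-dependent):

```sh
# Illustrative only: what the removed POST_MCS_CMD expanded to on Solaris.
# -d drops the existing .comment section contents, -a appends a version tag.
mcs -d -a "JDK 1.9.0-internal" libexample.so
```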
diff --git a/common/autoconf/generated-configure.sh b/common/autoconf/generated-configure.sh
index bc5063b9662..dbe3f079f9a 100644
--- a/common/autoconf/generated-configure.sh
+++ b/common/autoconf/generated-configure.sh
@@ -737,7 +737,6 @@ AR_OUT_OPTION
LD_OUT_OPTION
EXE_OUT_OPTION
CC_OUT_OPTION
-POST_MCS_CMD
POST_STRIP_CMD
ARFLAGS
COMPILER_TARGET_BITS_FLAG
@@ -759,7 +758,6 @@ ac_ct_OBJCOPY
OBJCOPY
ac_ct_NM
ac_ct_STRIP
-MCS
GNM
NM
STRIP
@@ -820,7 +818,6 @@ LANGTOOLS_TOPDIR
JAVAC_FLAGS
BOOT_JDK_SOURCETARGET
JARSIGNER
-NATIVE2ASCII
JAR
JAVAH
JAVAC
@@ -1189,7 +1186,6 @@ JAVA
JAVAC
JAVAH
JAR
-NATIVE2ASCII
JARSIGNER
CC
CFLAGS
@@ -1207,7 +1203,6 @@ LIPO
STRIP
NM
GNM
-MCS
OBJCOPY
OBJDUMP
BUILD_CC
@@ -2070,8 +2065,6 @@ Some influential environment variables:
JAVAC Override default value for JAVAC
JAVAH Override default value for JAVAH
JAR Override default value for JAR
- NATIVE2ASCII
- Override default value for NATIVE2ASCII
JARSIGNER Override default value for JARSIGNER
CC C compiler command
CFLAGS C compiler flags
@@ -2090,7 +2083,6 @@ Some influential environment variables:
STRIP Override default value for STRIP
NM Override default value for NM
GNM Override default value for GNM
- MCS Override default value for MCS
OBJCOPY Override default value for OBJCOPY
OBJDUMP Override default value for OBJDUMP
BUILD_CC Override default value for BUILD_CC
@@ -3599,7 +3591,7 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
# ... then the rest
#
-# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -4367,7 +4359,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1430331133
+DATE_WHEN_GENERATED=1430918902
###############################################################################
#
@@ -14052,8 +14044,8 @@ $as_echo "$CYGWIN_VERSION" >&6; }
WINDOWS_ENV_VENDOR='cygwin'
WINDOWS_ENV_VERSION="$CYGWIN_VERSION"
- CYGWIN_VERSION_OK=`$ECHO $CYGWIN_VERSION | $GREP ^1.7.`
- if test "x$CYGWIN_VERSION_OK" = x; then
+ CYGWIN_VERSION_OLD=`$ECHO $CYGWIN_VERSION | $GREP -e '^1\.[0-6]'`
+ if test "x$CYGWIN_VERSION_OLD" != x; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: Your cygwin is too old. You are running $CYGWIN_VERSION, but at least cygwin 1.7 is required. Please upgrade." >&5
$as_echo "$as_me: Your cygwin is too old. You are running $CYGWIN_VERSION, but at least cygwin 1.7 is required. Please upgrade." >&6;}
as_fn_error $? "Cannot continue" "$LINENO" 5
@@ -26054,133 +26046,6 @@ $as_echo "$tool_specified" >&6; }
- # Use user overridden value if available, otherwise locate tool in the Boot JDK.
-
- # Publish this variable in the help.
-
-
- if test "x$NATIVE2ASCII" = x; then
- # The variable is not set by user, try to locate tool using the code snippet
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for native2ascii in Boot JDK" >&5
-$as_echo_n "checking for native2ascii in Boot JDK... " >&6; }
- NATIVE2ASCII=$BOOT_JDK/bin/native2ascii
- if test ! -x $NATIVE2ASCII; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5
-$as_echo "not found" >&6; }
- { $as_echo "$as_me:${as_lineno-$LINENO}: Your Boot JDK seems broken. This might be fixed by explicitely setting --with-boot-jdk" >&5
-$as_echo "$as_me: Your Boot JDK seems broken. This might be fixed by explicitely setting --with-boot-jdk" >&6;}
- as_fn_error $? "Could not find native2ascii in the Boot JDK" "$LINENO" 5
- fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5
-$as_echo "ok" >&6; }
-
-
- else
- # The variable is set, but is it from the command line or the environment?
-
- # Try to remove the string !NATIVE2ASCII! from our list.
- try_remove_var=${CONFIGURE_OVERRIDDEN_VARIABLES//!NATIVE2ASCII!/}
- if test "x$try_remove_var" = "x$CONFIGURE_OVERRIDDEN_VARIABLES"; then
- # If it failed, the variable was not from the command line. Ignore it,
- # but warn the user (except for BASH, which is always set by the calling BASH).
- if test "xNATIVE2ASCII" != xBASH; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Ignoring value of NATIVE2ASCII from the environment. Use command line variables instead." >&5
-$as_echo "$as_me: WARNING: Ignoring value of NATIVE2ASCII from the environment. Use command line variables instead." >&2;}
- fi
- # Try to locate tool using the code snippet
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for native2ascii in Boot JDK" >&5
-$as_echo_n "checking for native2ascii in Boot JDK... " >&6; }
- NATIVE2ASCII=$BOOT_JDK/bin/native2ascii
- if test ! -x $NATIVE2ASCII; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5
-$as_echo "not found" >&6; }
- { $as_echo "$as_me:${as_lineno-$LINENO}: Your Boot JDK seems broken. This might be fixed by explicitely setting --with-boot-jdk" >&5
-$as_echo "$as_me: Your Boot JDK seems broken. This might be fixed by explicitely setting --with-boot-jdk" >&6;}
- as_fn_error $? "Could not find native2ascii in the Boot JDK" "$LINENO" 5
- fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5
-$as_echo "ok" >&6; }
-
-
- else
- # If it succeeded, then it was overridden by the user. We will use it
- # for the tool.
-
- # First remove it from the list of overridden variables, so we can test
- # for unknown variables in the end.
- CONFIGURE_OVERRIDDEN_VARIABLES="$try_remove_var"
-
- # Check if the provided tool contains a complete path.
- tool_specified="$NATIVE2ASCII"
- tool_basename="${tool_specified##*/}"
- if test "x$tool_basename" = "x$tool_specified"; then
- # A command without a complete path is provided, search $PATH.
- { $as_echo "$as_me:${as_lineno-$LINENO}: Will search for user supplied tool NATIVE2ASCII=$tool_basename" >&5
-$as_echo "$as_me: Will search for user supplied tool NATIVE2ASCII=$tool_basename" >&6;}
- # Extract the first word of "$tool_basename", so it can be a program name with args.
-set dummy $tool_basename; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if ${ac_cv_path_NATIVE2ASCII+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- case $NATIVE2ASCII in
- [\\/]* | ?:[\\/]*)
- ac_cv_path_NATIVE2ASCII="$NATIVE2ASCII" # Let the user override the test with a path.
- ;;
- *)
- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
- IFS=$as_save_IFS
- test -z "$as_dir" && as_dir=.
- for ac_exec_ext in '' $ac_executable_extensions; do
- if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
- ac_cv_path_NATIVE2ASCII="$as_dir/$ac_word$ac_exec_ext"
- $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
- break 2
- fi
-done
- done
-IFS=$as_save_IFS
-
- ;;
-esac
-fi
-NATIVE2ASCII=$ac_cv_path_NATIVE2ASCII
-if test -n "$NATIVE2ASCII"; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NATIVE2ASCII" >&5
-$as_echo "$NATIVE2ASCII" >&6; }
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
- if test "x$NATIVE2ASCII" = x; then
- as_fn_error $? "User supplied tool $tool_basename could not be found" "$LINENO" 5
- fi
- else
- # Otherwise we believe it is a complete path. Use it as it is.
- { $as_echo "$as_me:${as_lineno-$LINENO}: Will use user supplied tool NATIVE2ASCII=$tool_specified" >&5
-$as_echo "$as_me: Will use user supplied tool NATIVE2ASCII=$tool_specified" >&6;}
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for NATIVE2ASCII" >&5
-$as_echo_n "checking for NATIVE2ASCII... " >&6; }
- if test ! -x "$tool_specified"; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5
-$as_echo "not found" >&6; }
- as_fn_error $? "User supplied tool NATIVE2ASCII=$tool_specified does not exist or is not executable" "$LINENO" 5
- fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $tool_specified" >&5
-$as_echo "$tool_specified" >&6; }
- fi
- fi
- fi
-
-
-
# Use user overridden value if available, otherwise locate tool in the Boot JDK.
# Publish this variable in the help.
@@ -28232,7 +28097,7 @@ $as_echo "$as_me: or run \"bash.exe -l\" from a VS command prompt and then run c
# For solaris we really need solaris tools, and not the GNU equivalent.
# The build tools on Solaris reside in /usr/ccs (C Compilation System),
# so add that to path before starting to probe.
- # FIXME: This was originally only done for AS,NM,GNM,STRIP,MCS,OBJCOPY,OBJDUMP.
+ # FIXME: This was originally only done for AS,NM,GNM,STRIP,OBJCOPY,OBJDUMP.
if test "x$OPENJDK_BUILD_OS" = xsolaris; then
PATH="/usr/ccs/bin:$PATH"
fi
@@ -36672,479 +36537,6 @@ $as_echo "$as_me: Rewriting GNM to \"$new_complete\"" >&6;}
fi
fi
-
-
-
- # Publish this variable in the help.
-
-
- if test "x$MCS" = x; then
- # The variable is not set by user, try to locate tool using the code snippet
- for ac_prog in mcs
-do
- # Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if ${ac_cv_path_MCS+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- case $MCS in
- [\\/]* | ?:[\\/]*)
- ac_cv_path_MCS="$MCS" # Let the user override the test with a path.
- ;;
- *)
- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
- IFS=$as_save_IFS
- test -z "$as_dir" && as_dir=.
- for ac_exec_ext in '' $ac_executable_extensions; do
- if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
- ac_cv_path_MCS="$as_dir/$ac_word$ac_exec_ext"
- $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
- break 2
- fi
-done
- done
-IFS=$as_save_IFS
-
- ;;
-esac
-fi
-MCS=$ac_cv_path_MCS
-if test -n "$MCS"; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MCS" >&5
-$as_echo "$MCS" >&6; }
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
- test -n "$MCS" && break
-done
-
- else
- # The variable is set, but is it from the command line or the environment?
-
- # Try to remove the string !MCS! from our list.
- try_remove_var=${CONFIGURE_OVERRIDDEN_VARIABLES//!MCS!/}
- if test "x$try_remove_var" = "x$CONFIGURE_OVERRIDDEN_VARIABLES"; then
- # If it failed, the variable was not from the command line. Ignore it,
- # but warn the user (except for BASH, which is always set by the calling BASH).
- if test "xMCS" != xBASH; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Ignoring value of MCS from the environment. Use command line variables instead." >&5
-$as_echo "$as_me: WARNING: Ignoring value of MCS from the environment. Use command line variables instead." >&2;}
- fi
- # Try to locate tool using the code snippet
- for ac_prog in mcs
-do
- # Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if ${ac_cv_path_MCS+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- case $MCS in
- [\\/]* | ?:[\\/]*)
- ac_cv_path_MCS="$MCS" # Let the user override the test with a path.
- ;;
- *)
- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
- IFS=$as_save_IFS
- test -z "$as_dir" && as_dir=.
- for ac_exec_ext in '' $ac_executable_extensions; do
- if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
- ac_cv_path_MCS="$as_dir/$ac_word$ac_exec_ext"
- $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
- break 2
- fi
-done
- done
-IFS=$as_save_IFS
-
- ;;
-esac
-fi
-MCS=$ac_cv_path_MCS
-if test -n "$MCS"; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MCS" >&5
-$as_echo "$MCS" >&6; }
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
- test -n "$MCS" && break
-done
-
- else
- # If it succeeded, then it was overridden by the user. We will use it
- # for the tool.
-
- # First remove it from the list of overridden variables, so we can test
- # for unknown variables in the end.
- CONFIGURE_OVERRIDDEN_VARIABLES="$try_remove_var"
-
- # Check if the provided tool contains a complete path.
- tool_specified="$MCS"
- tool_basename="${tool_specified##*/}"
- if test "x$tool_basename" = "x$tool_specified"; then
- # A command without a complete path is provided, search $PATH.
- { $as_echo "$as_me:${as_lineno-$LINENO}: Will search for user supplied tool MCS=$tool_basename" >&5
-$as_echo "$as_me: Will search for user supplied tool MCS=$tool_basename" >&6;}
- # Extract the first word of "$tool_basename", so it can be a program name with args.
-set dummy $tool_basename; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if ${ac_cv_path_MCS+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- case $MCS in
- [\\/]* | ?:[\\/]*)
- ac_cv_path_MCS="$MCS" # Let the user override the test with a path.
- ;;
- *)
- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
- IFS=$as_save_IFS
- test -z "$as_dir" && as_dir=.
- for ac_exec_ext in '' $ac_executable_extensions; do
- if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
- ac_cv_path_MCS="$as_dir/$ac_word$ac_exec_ext"
- $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
- break 2
- fi
-done
- done
-IFS=$as_save_IFS
-
- ;;
-esac
-fi
-MCS=$ac_cv_path_MCS
-if test -n "$MCS"; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MCS" >&5
-$as_echo "$MCS" >&6; }
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
- if test "x$MCS" = x; then
- as_fn_error $? "User supplied tool $tool_basename could not be found" "$LINENO" 5
- fi
- else
- # Otherwise we believe it is a complete path. Use it as it is.
- { $as_echo "$as_me:${as_lineno-$LINENO}: Will use user supplied tool MCS=$tool_specified" >&5
-$as_echo "$as_me: Will use user supplied tool MCS=$tool_specified" >&6;}
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MCS" >&5
-$as_echo_n "checking for MCS... " >&6; }
- if test ! -x "$tool_specified"; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5
-$as_echo "not found" >&6; }
- as_fn_error $? "User supplied tool MCS=$tool_specified does not exist or is not executable" "$LINENO" 5
- fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $tool_specified" >&5
-$as_echo "$tool_specified" >&6; }
- fi
- fi
- fi
-
-
-
- # Only process if variable expands to non-empty
-
- if test "x$MCS" != x; then
- if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.cygwin"; then
-
- # First separate the path from the arguments. This will split at the first
- # space.
- complete="$MCS"
- path="${complete%% *}"
- tmp="$complete EOL"
- arguments="${tmp#* }"
-
- # Input might be given as Windows format, start by converting to
- # unix format.
- new_path=`$CYGPATH -u "$path"`
-
- # Now try to locate executable using which
- new_path=`$WHICH "$new_path" 2> /dev/null`
- # bat and cmd files are not always considered executable in cygwin causing which
- # to not find them
- if test "x$new_path" = x \
- && test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
- && test "x`$LS \"$path\" 2>/dev/null`" != x; then
- new_path=`$CYGPATH -u "$path"`
- fi
- if test "x$new_path" = x; then
- # Oops. Which didn't find the executable.
- # The splitting of arguments from the executable at a space might have been incorrect,
- # since paths with space are more likely in Windows. Give it another try with the whole
- # argument.
- path="$complete"
- arguments="EOL"
- new_path=`$CYGPATH -u "$path"`
- new_path=`$WHICH "$new_path" 2> /dev/null`
- # bat and cmd files are not always considered executable in cygwin causing which
- # to not find them
- if test "x$new_path" = x \
- && test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
- && test "x`$LS \"$path\" 2>/dev/null`" != x; then
- new_path=`$CYGPATH -u "$path"`
- fi
- if test "x$new_path" = x; then
- # It's still not found. Now this is an unrecoverable error.
- { $as_echo "$as_me:${as_lineno-$LINENO}: The path of MCS, which resolves as \"$complete\", is not found." >&5
-$as_echo "$as_me: The path of MCS, which resolves as \"$complete\", is not found." >&6;}
- has_space=`$ECHO "$complete" | $GREP " "`
- if test "x$has_space" != x; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: You might be mixing spaces in the path and extra arguments, which is not allowed." >&5
-$as_echo "$as_me: You might be mixing spaces in the path and extra arguments, which is not allowed." >&6;}
- fi
- as_fn_error $? "Cannot locate the the path of MCS" "$LINENO" 5
- fi
- fi
-
- # Cygwin tries to hide some aspects of the Windows file system, such that binaries are
- # named .exe but called without that suffix. Therefore, "foo" and "foo.exe" are considered
- # the same file, most of the time (as in "test -f"). But not when running cygpath -s, then
- # "foo.exe" is OK but "foo" is an error.
- #
- # This test is therefore slightly more accurate than "test -f" to check for file presence.
- # It is also a way to make sure we got the proper file name for the real test later on.
- test_shortpath=`$CYGPATH -s -m "$new_path" 2> /dev/null`
- if test "x$test_shortpath" = x; then
- # Short path failed, file does not exist as specified.
- # Try adding .exe or .cmd
- if test -f "${new_path}.exe"; then
- input_to_shortpath="${new_path}.exe"
- elif test -f "${new_path}.cmd"; then
- input_to_shortpath="${new_path}.cmd"
- else
- { $as_echo "$as_me:${as_lineno-$LINENO}: The path of MCS, which resolves as \"$new_path\", is invalid." >&5
-$as_echo "$as_me: The path of MCS, which resolves as \"$new_path\", is invalid." >&6;}
- { $as_echo "$as_me:${as_lineno-$LINENO}: Neither \"$new_path\" nor \"$new_path.exe/cmd\" can be found" >&5
-$as_echo "$as_me: Neither \"$new_path\" nor \"$new_path.exe/cmd\" can be found" >&6;}
- as_fn_error $? "Cannot locate the the path of MCS" "$LINENO" 5
- fi
- else
- input_to_shortpath="$new_path"
- fi
-
- # Call helper function which possibly converts this using DOS-style short mode.
- # If so, the updated path is stored in $new_path.
- new_path="$input_to_shortpath"
-
- input_path="$input_to_shortpath"
- # Check if we need to convert this using DOS-style short mode. If the path
- # contains just simple characters, use it. Otherwise (spaces, weird characters),
- # take no chances and rewrite it.
- # Note: m4 eats our [], so we need to use [ and ] instead.
- has_forbidden_chars=`$ECHO "$input_path" | $GREP [^-._/a-zA-Z0-9]`
- if test "x$has_forbidden_chars" != x; then
- # Now convert it to mixed DOS-style, short mode (no spaces, and / instead of \)
- shortmode_path=`$CYGPATH -s -m -a "$input_path"`
- path_after_shortmode=`$CYGPATH -u "$shortmode_path"`
- if test "x$path_after_shortmode" != "x$input_to_shortpath"; then
- # Going to short mode and back again did indeed matter. Since short mode is
- # case insensitive, let's make it lowercase to improve readability.
- shortmode_path=`$ECHO "$shortmode_path" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- # Now convert it back to Unix-style (cygpath)
- input_path=`$CYGPATH -u "$shortmode_path"`
- new_path="$input_path"
- fi
- fi
-
- test_cygdrive_prefix=`$ECHO $input_path | $GREP ^/cygdrive/`
- if test "x$test_cygdrive_prefix" = x; then
- # As a simple fix, exclude /usr/bin since it's not a real path.
- if test "x`$ECHO $input_to_shortpath | $GREP ^/usr/bin/`" = x; then
- # The path is in a Cygwin special directory (e.g. /home). We need this converted to
- # a path prefixed by /cygdrive for fixpath to work.
- new_path="$CYGWIN_ROOT_PATH$input_path"
- fi
- fi
-
- # remove trailing .exe if any
- new_path="${new_path/%.exe/}"
-
- elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
-
- # First separate the path from the arguments. This will split at the first
- # space.
- complete="$MCS"
- path="${complete%% *}"
- tmp="$complete EOL"
- arguments="${tmp#* }"
-
- # Input might be given as Windows format, start by converting to
- # unix format.
- new_path="$path"
-
- windows_path="$new_path"
- if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.cygwin"; then
- unix_path=`$CYGPATH -u "$windows_path"`
- new_path="$unix_path"
- elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
- unix_path=`$ECHO "$windows_path" | $SED -e 's,^\\(.\\):,/\\1,g' -e 's,\\\\,/,g'`
- new_path="$unix_path"
- fi
-
-
- # Now try to locate executable using which
- new_path=`$WHICH "$new_path" 2> /dev/null`
-
- if test "x$new_path" = x; then
- # Oops. Which didn't find the executable.
- # The splitting of arguments from the executable at a space might have been incorrect,
- # since paths with space are more likely in Windows. Give it another try with the whole
- # argument.
- path="$complete"
- arguments="EOL"
- new_path="$path"
-
- windows_path="$new_path"
- if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.cygwin"; then
- unix_path=`$CYGPATH -u "$windows_path"`
- new_path="$unix_path"
- elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
- unix_path=`$ECHO "$windows_path" | $SED -e 's,^\\(.\\):,/\\1,g' -e 's,\\\\,/,g'`
- new_path="$unix_path"
- fi
-
-
- new_path=`$WHICH "$new_path" 2> /dev/null`
- # bat and cmd files are not always considered executable in MSYS causing which
- # to not find them
- if test "x$new_path" = x \
- && test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
- && test "x`$LS \"$path\" 2>/dev/null`" != x; then
- new_path="$path"
-
- windows_path="$new_path"
- if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.cygwin"; then
- unix_path=`$CYGPATH -u "$windows_path"`
- new_path="$unix_path"
- elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
- unix_path=`$ECHO "$windows_path" | $SED -e 's,^\\(.\\):,/\\1,g' -e 's,\\\\,/,g'`
- new_path="$unix_path"
- fi
-
- fi
-
- if test "x$new_path" = x; then
- # It's still not found. Now this is an unrecoverable error.
- { $as_echo "$as_me:${as_lineno-$LINENO}: The path of MCS, which resolves as \"$complete\", is not found." >&5
-$as_echo "$as_me: The path of MCS, which resolves as \"$complete\", is not found." >&6;}
- has_space=`$ECHO "$complete" | $GREP " "`
- if test "x$has_space" != x; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: You might be mixing spaces in the path and extra arguments, which is not allowed." >&5
-$as_echo "$as_me: You might be mixing spaces in the path and extra arguments, which is not allowed." >&6;}
- fi
- as_fn_error $? "Cannot locate the the path of MCS" "$LINENO" 5
- fi
- fi
-
- # Now new_path has a complete unix path to the binary
- if test "x`$ECHO $new_path | $GREP ^/bin/`" != x; then
- # Keep paths in /bin as-is, but remove trailing .exe if any
- new_path="${new_path/%.exe/}"
- # Do not save /bin paths to all_fixpath_prefixes!
- else
- # Not in mixed or Windows style, start by that.
- new_path=`cmd //c echo $new_path`
-
- input_path="$new_path"
- # Check if we need to convert this using DOS-style short mode. If the path
- # contains just simple characters, use it. Otherwise (spaces, weird characters),
- # take no chances and rewrite it.
- # Note: m4 eats our [], so we need to use [ and ] instead.
- has_forbidden_chars=`$ECHO "$input_path" | $GREP [^-_/:a-zA-Z0-9]`
- if test "x$has_forbidden_chars" != x; then
- # Now convert it to mixed DOS-style, short mode (no spaces, and / instead of \)
- new_path=`cmd /c "for %A in (\"$input_path\") do @echo %~sA"|$TR \\\\\\\\ / | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- fi
-
- # Output is in $new_path
-
- windows_path="$new_path"
- if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.cygwin"; then
- unix_path=`$CYGPATH -u "$windows_path"`
- new_path="$unix_path"
- elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
- unix_path=`$ECHO "$windows_path" | $SED -e 's,^\\(.\\):,/\\1,g' -e 's,\\\\,/,g'`
- new_path="$unix_path"
- fi
-
- # remove trailing .exe if any
- new_path="${new_path/%.exe/}"
-
- # Save the first 10 bytes of this path to the storage, so fixpath can work.
- all_fixpath_prefixes=("${all_fixpath_prefixes[@]}" "${new_path:0:10}")
- fi
-
- else
- # We're on a unix platform. Hooray! :)
- # First separate the path from the arguments. This will split at the first
- # space.
- complete="$MCS"
- path="${complete%% *}"
- tmp="$complete EOL"
- arguments="${tmp#* }"
-
- # Cannot rely on the command "which" here since it doesn't always work.
- is_absolute_path=`$ECHO "$path" | $GREP ^/`
- if test -z "$is_absolute_path"; then
- # Path to executable is not absolute. Find it.
- IFS_save="$IFS"
- IFS=:
- for p in $PATH; do
- if test -f "$p/$path" && test -x "$p/$path"; then
- new_path="$p/$path"
- break
- fi
- done
- IFS="$IFS_save"
- else
- # This is an absolute path, we can use it without further modifications.
- new_path="$path"
- fi
-
- if test "x$new_path" = x; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: The path of MCS, which resolves as \"$complete\", is not found." >&5
-$as_echo "$as_me: The path of MCS, which resolves as \"$complete\", is not found." >&6;}
- has_space=`$ECHO "$complete" | $GREP " "`
- if test "x$has_space" != x; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: This might be caused by spaces in the path, which is not allowed." >&5
-$as_echo "$as_me: This might be caused by spaces in the path, which is not allowed." >&6;}
- fi
- as_fn_error $? "Cannot locate the the path of MCS" "$LINENO" 5
- fi
- fi
-
- # Now join together the path and the arguments once again
- if test "x$arguments" != xEOL; then
- new_complete="$new_path ${arguments% *}"
- else
- new_complete="$new_path"
- fi
-
- if test "x$complete" != "x$new_complete"; then
- MCS="$new_complete"
- { $as_echo "$as_me:${as_lineno-$LINENO}: Rewriting MCS to \"$new_complete\"" >&5
-$as_echo "$as_me: Rewriting MCS to \"$new_complete\"" >&6;}
- fi
- fi
-
elif test "x$OPENJDK_TARGET_OS" != xwindows; then
# FIXME: we should unify this with the solaris case above.
@@ -41632,12 +41024,6 @@ $as_echo "$tool_specified" >&6; }
fi
- if test "x$OPENJDK_TARGET_OS" = xsolaris; then
- # FIXME: break out into MCSFLAGS
- POST_MCS_CMD="$MCS -d -a \"JDK $FULL_VERSION\""
- fi
-
-
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CC_OUT_OPTION=-Fo
EXE_OUT_OPTION=-out:
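The two large removals above are just the generated expansions of the BOOTJDK_CHECK_TOOL_IN_BOOTJDK and BASIC_PATH_PROGS/BASIC_FIXUP_EXECUTABLE macros for NATIVE2ASCII and MCS. Stripped of the autoconf caching and logging plumbing, the PATH walk those blocks perform reduces to roughly this sketch (locate_tool is a made-up helper name):

```sh
#!/bin/sh
# Condensed sketch of the removed locator boilerplate: walk $PATH and keep
# the first executable hit, as the expanded ac_cv_path_* code did.
locate_tool() {
  tool=$1; found=
  save_IFS=$IFS; IFS=:
  for dir in $PATH; do
    test -z "$dir" && dir=.
    if test -x "$dir/$tool"; then
      found="$dir/$tool"
      break
    fi
  done
  IFS=$save_IFS
  echo "${found:-not found}"
}

locate_tool mcs    # previously probed for Solaris; no longer needed
```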
diff --git a/common/autoconf/spec.gmk.in b/common/autoconf/spec.gmk.in
index d9c172dc964..b603ae89cff 100644
--- a/common/autoconf/spec.gmk.in
+++ b/common/autoconf/spec.gmk.in
@@ -391,7 +391,6 @@ ARFLAGS:=@ARFLAGS@
NM:=@NM@
GNM:=@GNM@
STRIP:=@STRIP@
-MCS:=@MCS@
LIPO:=@LIPO@
@@ -440,7 +439,6 @@ EXE_SUFFIX:=@EXE_SUFFIX@
OBJ_SUFFIX:=@OBJ_SUFFIX@
POST_STRIP_CMD:=@POST_STRIP_CMD@
-POST_MCS_CMD:=@POST_MCS_CMD@
JAVA_FLAGS:=@JAVA_FLAGS@
JAVA_FLAGS_BIG:=@JAVA_FLAGS_BIG@
@@ -455,7 +453,6 @@ JAVA_CMD:=@JAVA@
JAVAC_CMD:=@JAVAC@
JAVAH_CMD:=@JAVAH@
JAR_CMD:=@JAR@
-NATIVE2ASCII_CMD:=@NATIVE2ASCII@
JARSIGNER_CMD:=@JARSIGNER@
SJAVAC_SERVER_JAVA_CMD:=@SJAVAC_SERVER_JAVA@
# These variables are meant to be used. They are defined with = instead of := to make
@@ -465,7 +462,6 @@ JAVA_SMALL=@FIXPATH@ $(JAVA_CMD) $(JAVA_FLAGS_SMALL) $(JAVA_FLAGS)
JAVAC=@FIXPATH@ $(JAVAC_CMD)
JAVAH=@FIXPATH@ $(JAVAH_CMD)
JAR=@FIXPATH@ $(JAR_CMD)
-NATIVE2ASCII=@FIXPATH@ $(NATIVE2ASCII_CMD) $(JAVA_TOOL_FLAGS_SMALL)
JARSIGNER=@FIXPATH@ $(JARSIGNER_CMD)
# A specific java binary with specific options can be used to run
# the long running background sjavac servers and other long running tasks.
@@ -692,10 +688,10 @@ SYMBOLS_IMAGE_SUBDIR:=symbols
SYMBOLS_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(SYMBOLS_IMAGE_SUBDIR)
# Macosx bundles directory definitions
-JDK_BUNDLE_SUBDIR=jdk-bundle/jdk$(JDK_VERSION).jdk/Contents
-JRE_BUNDLE_SUBDIR=jre-bundle/jre$(JDK_VERSION).jre/Contents
-JDK_BUNDLE_DIR=$(IMAGES_OUTPUTDIR)/$(JDK_BUNDLE_SUBDIR)
-JRE_BUNDLE_DIR=$(IMAGES_OUTPUTDIR)/$(JRE_BUNDLE_SUBDIR)
+JDK_MACOSX_BUNDLE_SUBDIR=jdk-bundle/jdk$(JDK_VERSION).jdk/Contents
+JRE_MACOSX_BUNDLE_SUBDIR=jre-bundle/jre$(JDK_VERSION).jre/Contents
+JDK_MACOSX_BUNDLE_DIR=$(IMAGES_OUTPUTDIR)/$(JDK_MACOSX_BUNDLE_SUBDIR)
+JRE_MACOSX_BUNDLE_DIR=$(IMAGES_OUTPUTDIR)/$(JRE_MACOSX_BUNDLE_SUBDIR)
# This macro is called to allow inclusion of closed source counterparts.
# Unless overridden in closed sources, it expands to nothing.
diff --git a/common/autoconf/toolchain.m4 b/common/autoconf/toolchain.m4
index 4a3c6426273..553db0380d9 100644
--- a/common/autoconf/toolchain.m4
+++ b/common/autoconf/toolchain.m4
@@ -210,7 +210,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_PRE_DETECTION],
# For solaris we really need solaris tools, and not the GNU equivalent.
# The build tools on Solaris reside in /usr/ccs (C Compilation System),
# so add that to path before starting to probe.
- # FIXME: This was originally only done for AS,NM,GNM,STRIP,MCS,OBJCOPY,OBJDUMP.
+ # FIXME: This was originally only done for AS,NM,GNM,STRIP,OBJCOPY,OBJDUMP.
if test "x$OPENJDK_BUILD_OS" = xsolaris; then
PATH="/usr/ccs/bin:$PATH"
fi
@@ -569,9 +569,6 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_EXTRA],
BASIC_FIXUP_EXECUTABLE(NM)
BASIC_PATH_PROGS(GNM, gnm)
BASIC_FIXUP_EXECUTABLE(GNM)
-
- BASIC_PATH_PROGS(MCS, mcs)
- BASIC_FIXUP_EXECUTABLE(MCS)
elif test "x$OPENJDK_TARGET_OS" != xwindows; then
# FIXME: we should unify this with the solaris case above.
BASIC_CHECK_TOOLS(STRIP, strip)
diff --git a/common/bin/unshuffle_list.txt b/common/bin/unshuffle_list.txt
index 0553eb826fc..c076cd3aeb7 100644
--- a/common/bin/unshuffle_list.txt
+++ b/common/bin/unshuffle_list.txt
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -1156,7 +1156,6 @@ jdk/src/java.management/share/classes/com/sun/jmx/defaults : jdk/src/share/class
jdk/src/java.management/share/classes/com/sun/jmx/interceptor : jdk/src/share/classes/com/sun/jmx/interceptor
jdk/src/java.management/share/classes/com/sun/jmx/mbeanserver : jdk/src/share/classes/com/sun/jmx/mbeanserver
jdk/src/java.management/share/classes/com/sun/jmx/remote : jdk/src/share/classes/com/sun/jmx/remote
-jdk/src/java.management/share/classes/com/sun/management : jdk/src/share/classes/com/sun/management
jdk/src/java.management/share/classes/java/lang/management : jdk/src/share/classes/java/lang/management
jdk/src/java.management/share/classes/javax/management : jdk/src/share/classes/javax/management
jdk/src/java.management/share/classes/mgmt-overview.html : jdk/src/share/classes/com/sun/management/mgmt-overview.html
@@ -1429,6 +1428,7 @@ jdk/src/jdk.localedata/share/classes/sun/util/resources/tr : jdk/src/share/class
jdk/src/jdk.localedata/share/classes/sun/util/resources/uk : jdk/src/share/classes/sun/util/resources/uk
jdk/src/jdk.localedata/share/classes/sun/util/resources/vi : jdk/src/share/classes/sun/util/resources/vi
jdk/src/jdk.localedata/share/classes/sun/util/resources/zh : jdk/src/share/classes/sun/util/resources/zh
+jdk/src/jdk.management/share/classes/com/sun/management : jdk/src/share/classes/com/sun/management
jdk/src/jdk.naming.dns/share/classes/com/sun/jndi/dns : jdk/src/share/classes/com/sun/jndi/dns
jdk/src/jdk.naming.dns/share/classes/com/sun/jndi/url/dns : jdk/src/share/classes/com/sun/jndi/url/dns
jdk/src/jdk.naming.dns/share/classes/META-INF/services : jdk/src/share/classes/sun/net/spi/nameservice/dns/META-INF/services
diff --git a/corba/.hgtags b/corba/.hgtags
index b1d9426e786..ee596409fdf 100644
--- a/corba/.hgtags
+++ b/corba/.hgtags
@@ -305,3 +305,4 @@ cda6ae062f85fac5555f4e1318885b0ecd998bd1 jdk9-b59
caa330b275f39282793466529f6864766b31d9fd jdk9-b60
d690f489ca0bb95a6157d996da2fa72bcbcf02ea jdk9-b61
d27f7e0a7aca129969de23e9934408a31b4abf4c jdk9-b62
+0acac6937de7a0868f8c6f88b7d036d780abeee6 jdk9-b63
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/corba/AnyImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/corba/AnyImpl.java
index 0f3428ac029..f4af63423c0 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/corba/AnyImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/corba/AnyImpl.java
@@ -222,7 +222,7 @@ public class AnyImpl extends Any
/**
* returns the type of the element contained in the Any.
*
- * @result the TypeCode for the element in the Any
+ * @return the TypeCode for the element in the Any
*/
public TypeCode type() {
return typeCode;
@@ -267,7 +267,7 @@ public class AnyImpl extends Any
* checks for equality between Anys.
*
* @param otherAny the Any to be compared with.
- * @result true if the Anys are equal, false otherwise.
+ * @return true if the Anys are equal, false otherwise.
*/
public boolean equal(Any otherAny)
{
@@ -508,7 +508,7 @@ public class AnyImpl extends Any
/**
* returns an output stream that an Any value can be marshaled into.
*
- * @result the OutputStream to marshal value of Any into
+ * @return the OutputStream to marshal value of Any into
*/
public org.omg.CORBA.portable.OutputStream create_output_stream()
{
@@ -525,7 +525,7 @@ public class AnyImpl extends Any
/**
* returns an input stream that an Any value can be marshaled out of.
*
- * @result the InputStream to marshal value of Any out of.
+ * @return the InputStream to marshal value of Any out of.
*/
public org.omg.CORBA.portable.InputStream create_input_stream()
{
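These @result to @return corrections, together with the @param renames and {@code ...} escapes in the files that follow, are the kind of cleanup javadoc's doclint enforces: @result is not a recognized tag, @param names must match the declared parameters, and a bare & in a doc comment is rejected as a malformed HTML entity. A strict run along these lines (hypothetical command; doclint is enabled by default since JDK 8) would have flagged the old sources:

```sh
# Hypothetical strict javadoc run over the touched sources.
javadoc -Xdoclint:all -d /tmp/apidocs AnyImpl.java
# e.g. "error: unknown tag: result" on the old @result lines
```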
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/encoding/CDROutputObject.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/encoding/CDROutputObject.java
index b4769023e30..4c91de2ef23 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/encoding/CDROutputObject.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/encoding/CDROutputObject.java
@@ -166,7 +166,7 @@ public class CDROutputObject extends CorbaOutputObject
* Write the contents of the CDROutputStream to the specified
* output stream. Has the side-effect of pushing any current
* Message onto the Message list.
- * @param s The output stream to write to.
+ * @param connection The output stream to write to.
*/
public void writeTo(CorbaConnection connection)
throws java.io.IOException
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/encoding/CodeSetConversion.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/encoding/CodeSetConversion.java
index 83f41352c71..3247d9fee4b 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/encoding/CodeSetConversion.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/encoding/CodeSetConversion.java
@@ -54,8 +54,9 @@ import com.sun.corba.se.spi.logging.CORBALogDomains;
* use NIO ByteBuffer and NIO CharBuffer, the interaction
* and interface between this class and the CDR streams
* should be looked at more closely for optimizations to
- * avoid unnecessary copying of data between char[] &
- * CharBuffer and byte[] & ByteBuffer, especially
+ * avoid unnecessary copying of data between
+ * {@code char[] & CharBuffer} and
+ * {@code byte[] & ByteBuffer}, especially
* DirectByteBuffers.
*
*/
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/io/ValueHandlerImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/io/ValueHandlerImpl.java
index fdf35018275..9a46afd1b88 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/io/ValueHandlerImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/io/ValueHandlerImpl.java
@@ -171,9 +171,9 @@ public final class ValueHandlerImpl implements javax.rmi.CORBA.ValueHandlerMulti
* @param out The stream to write the value to
* @param value The value to be written to the stream
**/
- public void writeValue(org.omg.CORBA.portable.OutputStream _out,
+ public void writeValue(org.omg.CORBA.portable.OutputStream out,
java.io.Serializable value) {
- writeValueWithVersion(_out, value, STREAM_FORMAT_VERSION_1);
+ writeValueWithVersion(out, value, STREAM_FORMAT_VERSION_1);
}
private void writeValueWithVersion(org.omg.CORBA.portable.OutputStream _out,
@@ -240,25 +240,25 @@ public final class ValueHandlerImpl implements javax.rmi.CORBA.ValueHandlerMulti
* Reads a value from the stream using java semantics.
* @param in The stream to read the value from
* @param clazz The type of the value to be read in
- * @param sender The sending context runtime
+ * @param rt The sending context runtime
**/
- public java.io.Serializable readValue(org.omg.CORBA.portable.InputStream _in,
+ public java.io.Serializable readValue(org.omg.CORBA.portable.InputStream in,
int offset,
java.lang.Class clazz,
String repositoryID,
- org.omg.SendingContext.RunTime _sender)
+ org.omg.SendingContext.RunTime rt)
{
// Must use narrow rather than a direct cast to a com.sun
// class. Fix for bug 4379539.
- CodeBase sender = CodeBaseHelper.narrow(_sender);
+ CodeBase sender = CodeBaseHelper.narrow(rt);
- org.omg.CORBA_2_3.portable.InputStream in =
- (org.omg.CORBA_2_3.portable.InputStream) _in;
+ org.omg.CORBA_2_3.portable.InputStream inStream =
+ (org.omg.CORBA_2_3.portable.InputStream) in;
if (!useHashtables) {
if (inputStreamBridge == null) {
inputStreamBridge = createInputStream();
- inputStreamBridge.setOrbStream(in);
+ inputStreamBridge.setOrbStream(inStream);
inputStreamBridge.setSender(sender); //d11638
// backward compatability 4365188
inputStreamBridge.setValueHandler(this);
@@ -269,7 +269,7 @@ public final class ValueHandlerImpl implements javax.rmi.CORBA.ValueHandlerMulti
try {
inputStreamBridge.increaseRecursionDepth();
- result = (java.io.Serializable) readValueInternal(inputStreamBridge, in, offset, clazz, repositoryID, sender);
+ result = (java.io.Serializable) readValueInternal(inputStreamBridge, inStream, offset, clazz, repositoryID, sender);
} finally {
@@ -287,16 +287,16 @@ public final class ValueHandlerImpl implements javax.rmi.CORBA.ValueHandlerMulti
if (inputStreamPairs == null)
inputStreamPairs = new Hashtable();
- jdkToOrbInputStreamBridge = (IIOPInputStream)inputStreamPairs.get(_in);
+ jdkToOrbInputStreamBridge = (IIOPInputStream)inputStreamPairs.get(in);
if (jdkToOrbInputStreamBridge == null) {
jdkToOrbInputStreamBridge = createInputStream();
- jdkToOrbInputStreamBridge.setOrbStream(in);
+ jdkToOrbInputStreamBridge.setOrbStream(inStream);
jdkToOrbInputStreamBridge.setSender(sender); //d11638
// backward compatability 4365188
jdkToOrbInputStreamBridge.setValueHandler(this);
- inputStreamPairs.put(_in, jdkToOrbInputStreamBridge);
+ inputStreamPairs.put(in, jdkToOrbInputStreamBridge);
}
java.io.Serializable result = null;
@@ -304,12 +304,12 @@ public final class ValueHandlerImpl implements javax.rmi.CORBA.ValueHandlerMulti
try {
jdkToOrbInputStreamBridge.increaseRecursionDepth();
- result = (java.io.Serializable) readValueInternal(jdkToOrbInputStreamBridge, in, offset, clazz, repositoryID, sender);
+ result = (java.io.Serializable) readValueInternal(jdkToOrbInputStreamBridge, inStream, offset, clazz, repositoryID, sender);
} finally {
if (jdkToOrbInputStreamBridge.decreaseRecursionDepth() == 0) {
- inputStreamPairs.remove(_in);
+ inputStreamPairs.remove(in);
}
}
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericIdentifiable.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericIdentifiable.java
index 1715b4fa964..1226d9289fc 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericIdentifiable.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericIdentifiable.java
@@ -33,7 +33,6 @@ import org.omg.CORBA_2_3.portable.OutputStream;
import com.sun.corba.se.spi.ior.Identifiable ;
/**
- * @author
* This is used for unknown components and profiles. A TAG_MULTICOMPONENT_PROFILE will be represented this way.
*/
public abstract class GenericIdentifiable implements Identifiable
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericTaggedComponent.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericTaggedComponent.java
index dbb8547810e..b6d946b1d75 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericTaggedComponent.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericTaggedComponent.java
@@ -34,9 +34,6 @@ import com.sun.corba.se.spi.ior.iiop.GIOPVersion ;
import com.sun.corba.se.spi.ior.TaggedComponent ;
-/**
- * @author
- */
public class GenericTaggedComponent extends GenericIdentifiable
implements TaggedComponent
{
@@ -52,8 +49,6 @@ public class GenericTaggedComponent extends GenericIdentifiable
/**
* @return org.omg.IOP.TaggedComponent
- * @exception
- * @author
*/
public org.omg.IOP.TaggedComponent getIOPComponent( ORB orb )
{
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericTaggedProfile.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericTaggedProfile.java
index b715dce5a39..20269e901d4 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericTaggedProfile.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/GenericTaggedProfile.java
@@ -39,9 +39,6 @@ import com.sun.corba.se.spi.ior.iiop.GIOPVersion ;
import com.sun.corba.se.impl.encoding.EncapsOutputStream ;
-/**
- * @author
- */
public class GenericTaggedProfile extends GenericIdentifiable implements TaggedProfile
{
private ORB orb ;
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/IORTemplateImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/IORTemplateImpl.java
index 791c1386daa..2b99c887330 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/IORTemplateImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/IORTemplateImpl.java
@@ -47,7 +47,6 @@ import com.sun.corba.se.spi.orb.ORB ;
/**
* This class is a container of TaggedProfileTemplates.
- * @author
*/
public class IORTemplateImpl extends IdentifiableContainerBase implements IORTemplate
{
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectAdapterIdNumber.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectAdapterIdNumber.java
index 6c57923d8a4..4fc98527107 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectAdapterIdNumber.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectAdapterIdNumber.java
@@ -31,7 +31,7 @@ import org.omg.CORBA_2_3.portable.OutputStream ;
/** ObjectAdapterIdNumber is used to represent pre-JDK 1.4 POA adapter
* IDs. The POA ID was simply represented as a single integer, which was
* mapped to the actual POA instance. Here, we just represent these
- * internally as arrays of the form { "OldRootPOA", "" },
+ * internally as arrays of the form {@code { "OldRootPOA", "" }},
* and provide an extra method to get the number back.
*/
public class ObjectAdapterIdNumber extends ObjectAdapterIdArray {
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectIdImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectIdImpl.java
index 1c1d3c25fae..200f045063d 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectIdImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectIdImpl.java
@@ -29,9 +29,6 @@ import java.util.Arrays ;
import com.sun.corba.se.spi.ior.ObjectId ;
import org.omg.CORBA_2_3.portable.OutputStream ;
-/**
- * @author
- */
public final class ObjectIdImpl implements ObjectId
{
private byte[] id;
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectKeyImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectKeyImpl.java
index ad2019a9559..ddf84a311e4 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectKeyImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectKeyImpl.java
@@ -37,9 +37,6 @@ import com.sun.corba.se.spi.ior.ObjectKeyTemplate ;
import com.sun.corba.se.impl.encoding.EncapsOutputStream ;
-/**
- * @author
- */
public class ObjectKeyImpl implements ObjectKey
{
private ObjectKeyTemplate oktemp;
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectKeyTemplateBase.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectKeyTemplateBase.java
index 845de9a4313..6f0cfbe5914 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectKeyTemplateBase.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/ObjectKeyTemplateBase.java
@@ -47,9 +47,6 @@ import com.sun.corba.se.impl.encoding.EncapsOutputStream ;
import com.sun.corba.se.impl.logging.IORSystemException ;
-/**
- * @author
- */
public abstract class ObjectKeyTemplateBase implements ObjectKeyTemplate
{
// Fixed constants for Java IDL object key template forms
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/POAObjectKeyTemplate.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/POAObjectKeyTemplate.java
index ddb7d822001..3a8eac05e2b 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/POAObjectKeyTemplate.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/POAObjectKeyTemplate.java
@@ -42,9 +42,6 @@ import com.sun.corba.se.spi.ior.ObjectAdapterId ;
import com.sun.corba.se.impl.ior.ObjectKeyFactoryImpl ;
-/**
- * @author
- */
public final class POAObjectKeyTemplate extends NewObjectKeyTemplateBase
{
/** This constructor reads the template ONLY from the stream.
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/StubIORImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/StubIORImpl.java
index b495436d7de..3c455e3aca8 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/StubIORImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/StubIORImpl.java
@@ -245,7 +245,7 @@ public class StubIORImpl
/**
* Returns a string representation of this stub. Returns the same string
* for all stubs that represent the same remote object.
- * "SimpleIORImpl[,[]data, ...]"
+ * {@code "SimpleIORImpl[,[]data, ...]"}
* @return a string representation of this stub.
*/
public String toString()
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/TaggedProfileFactoryFinderImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/TaggedProfileFactoryFinderImpl.java
index e234f5af333..da5b1cbf283 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/TaggedProfileFactoryFinderImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/TaggedProfileFactoryFinderImpl.java
@@ -33,9 +33,6 @@ import com.sun.corba.se.impl.ior.IdentifiableFactoryFinderBase ;
import org.omg.CORBA_2_3.portable.InputStream ;
-/**
- * @author
- */
public class TaggedProfileFactoryFinderImpl extends
IdentifiableFactoryFinderBase
{
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/TaggedProfileTemplateFactoryFinderImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/TaggedProfileTemplateFactoryFinderImpl.java
index 8dba5d3fabb..ed09d58bf52 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/TaggedProfileTemplateFactoryFinderImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/TaggedProfileTemplateFactoryFinderImpl.java
@@ -35,9 +35,6 @@ import org.omg.CORBA_2_3.portable.InputStream ;
import org.omg.CORBA.INTERNAL ;
-/**
- * @author
- */
public class TaggedProfileTemplateFactoryFinderImpl extends
IdentifiableFactoryFinderBase
{
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/CodeSetsComponentImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/CodeSetsComponentImpl.java
index 12b8401ef84..1bc03c61f92 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/CodeSetsComponentImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/CodeSetsComponentImpl.java
@@ -38,9 +38,6 @@ import com.sun.corba.se.impl.encoding.CodeSetComponentInfo ;
import com.sun.corba.se.impl.encoding.MarshalOutputStream ;
import com.sun.corba.se.impl.encoding.MarshalInputStream ;
-/**
- * @author
- */
public class CodeSetsComponentImpl extends TaggedComponentBase
implements CodeSetsComponent
{
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressBase.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressBase.java
index a019defa3b7..cc91839e0ec 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressBase.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressBase.java
@@ -32,9 +32,6 @@ import org.omg.CORBA_2_3.portable.OutputStream ;
import com.sun.corba.se.spi.ior.iiop.IIOPAddress ;
-/**
- * @author
- */
abstract class IIOPAddressBase implements IIOPAddress
{
// Ports are marshalled as shorts on the wire. The IDL
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressClosureImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressClosureImpl.java
index f8e9dfd5263..80ecfac09b9 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressClosureImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressClosureImpl.java
@@ -32,9 +32,6 @@ import org.omg.CORBA_2_3.portable.OutputStream ;
import com.sun.corba.se.spi.orbutil.closure.Closure ;
-/**
- * @author
- */
public final class IIOPAddressClosureImpl extends IIOPAddressBase
{
private Closure host;
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressImpl.java
index 79c31cffaf3..255f20aff82 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPAddressImpl.java
@@ -36,9 +36,6 @@ import com.sun.corba.se.spi.logging.CORBALogDomains ;
import com.sun.corba.se.impl.logging.IORSystemException ;
-/**
- * @author
- */
public final class IIOPAddressImpl extends IIOPAddressBase
{
private ORB orb ;
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPProfileImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPProfileImpl.java
index 060dd3f338e..bb30cc86b9d 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPProfileImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPProfileImpl.java
@@ -76,9 +76,6 @@ import com.sun.corba.se.impl.util.JDKBridge;
import com.sun.corba.se.impl.logging.IORSystemException;
-/**
- * @author
- */
public class IIOPProfileImpl extends IdentifiableBase implements IIOPProfile
{
private ORB orb ;
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPProfileTemplateImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPProfileTemplateImpl.java
index 32a7d66d665..4d3dbe5ee63 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPProfileTemplateImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/IIOPProfileTemplateImpl.java
@@ -55,7 +55,6 @@ import com.sun.corba.se.spi.ior.iiop.GIOPVersion ;
import com.sun.corba.se.spi.orb.ORB ;
/**
- * @author
* If getMinorVersion==0, this does not contain any tagged components
*/
public class IIOPProfileTemplateImpl extends TaggedProfileTemplateBase
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/JavaCodebaseComponentImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/JavaCodebaseComponentImpl.java
index 9fefb015511..f7b17e0107e 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/JavaCodebaseComponentImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/ior/iiop/JavaCodebaseComponentImpl.java
@@ -33,9 +33,6 @@ import com.sun.corba.se.spi.ior.TaggedComponentBase ;
import com.sun.corba.se.spi.ior.iiop.JavaCodebaseComponent ;
-/**
- * @author
- */
public class JavaCodebaseComponentImpl extends TaggedComponentBase
implements JavaCodebaseComponent
{
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/javax/rmi/CORBA/Util.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/javax/rmi/CORBA/Util.java
index 473cf6487ca..866d22c4370 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/javax/rmi/CORBA/Util.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/javax/rmi/CORBA/Util.java
@@ -593,7 +593,7 @@ public class Util implements javax.rmi.CORBA.UtilDelegate
* @param className the name of the class.
* @param remoteCodebase a space-separated list of URLs at which
* the class might be found. May be null.
- * @param loadingContext a class whose ClassLoader may be used to
+ * @param loader a class whose ClassLoader may be used to
* load the class if all other methods fail.
* @return the Class object representing the loaded class.
* @exception ClassNotFoundException if class cannot be loaded.
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/BindingIteratorImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/BindingIteratorImpl.java
index 956d41bed4f..df8faeb7fa6 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/BindingIteratorImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/BindingIteratorImpl.java
@@ -92,7 +92,7 @@ public abstract class BindingIteratorImpl extends BindingIteratorPOA
* Return the next n bindings. It also returns true or false, indicating
* whether there were more bindings.
* @param how_many The number of requested bindings in the BindingList.
- * @param bl The BindingList as an out parameter.
+ * @param blh The BindingList as an out parameter.
* @return true if there were more bindings.
* @exception org.omg.CORBA.SystemException One of a fixed set of CORBA
* system exceptions.
@@ -114,7 +114,7 @@ public abstract class BindingIteratorImpl extends BindingIteratorPOA
* scope, It will be called from NamingContext.list() operation or
* this.next_n().
* @param how_many The number of requested bindings in the BindingList.
- * @param bl The BindingList as an out parameter.
+ * @param blh The BindingList as an out parameter.
* @return true if there were more bindings.
*/
public boolean list( int how_many, org.omg.CosNaming.BindingListHolder blh)
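A minimal usage sketch of the contract documented above; the naming context handle and the batch size of 10 are illustrative. list() hands the caller an iterator, and next_n() keeps returning true while bindings remain in the out holder:

```java
static void printBindings(org.omg.CosNaming.NamingContext ctx) {
    org.omg.CosNaming.BindingListHolder first = new org.omg.CosNaming.BindingListHolder();
    org.omg.CosNaming.BindingIteratorHolder it = new org.omg.CosNaming.BindingIteratorHolder();
    ctx.list(0, first, it);                       // defer everything to the iterator
    if (it.value == null) return;                 // nothing to iterate
    org.omg.CosNaming.BindingListHolder batch = new org.omg.CosNaming.BindingListHolder();
    while (it.value.next_n(10, batch)) {          // the holder is filled as an out parameter
        for (org.omg.CosNaming.Binding b : batch.value)
            System.out.println(b.binding_name[0].id);
    }
    it.value.destroy();                           // release the server-side iterator
}
```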
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/NamingContextImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/NamingContextImpl.java
index b591aad929c..3b2c19f3dd2 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/NamingContextImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/NamingContextImpl.java
@@ -181,7 +181,7 @@ public abstract class NamingContextImpl
* exists. The NamingContext will participate in recursive resolving.
* @param n a sequence of NameComponents which is the name under which
* the object will be bound.
- * @param obj the NamingContect object reference to be bound.
+ * @param nc the NamingContext object reference to be bound.
* @exception org.omg.CosNaming.NamingContextPackage.NotFound A name with
* multiple components was supplied, but the first component could not be
* resolved.
@@ -278,7 +278,7 @@ public abstract class NamingContextImpl
* resolving.
* @param n a sequence of NameComponents which is the name under which
* the object will be bound.
- * @param obj the object reference to be bound.
+ * @param nc the object reference to be bound.
* @exception org.omg.CosNaming.NamingContextPackage.NotFound A name with
* multiple components was supplied, but the first component could not be
* resolved.
@@ -853,10 +853,9 @@ public abstract class NamingContextImpl
/**
* This operation creates a stringified name from the array of Name
* components.
- * @param n Name of the object
+ * @param n Name of the object
* @exception org.omg.CosNaming.NamingContextExtPackage.InvalidName
- * Indicates the name does not identify a binding.
- *
+ * Indicates the name does not identify a binding.
*/
public String to_string(org.omg.CosNaming.NameComponent[] n)
throws org.omg.CosNaming.NamingContextPackage.InvalidName
@@ -882,10 +881,9 @@ public abstract class NamingContextImpl
/**
* This operation converts a Stringified Name into an equivalent array
* of Name Components.
- * @param sn Stringified Name of the object
+ * @param sn Stringified Name of the object
* @exception org.omg.CosNaming.NamingContextExtPackage.InvalidName
- * Indicates the name does not identify a binding.
- *
+ * Indicates the name does not identify a binding.
*/
public org.omg.CosNaming.NameComponent[] to_name(String sn)
throws org.omg.CosNaming.NamingContextPackage.InvalidName
@@ -920,14 +918,13 @@ public abstract class NamingContextImpl
* This operation creates a URL based "iiopname://" format name
* from the Stringified Name of the object.
* @param addr internet based address of the host machine where
- * Name Service is running
- * @param sn Stringified Name of the object
+ * Name Service is running
+ * @param sn Stringified Name of the object
* @exception org.omg.CosNaming.NamingContextExtPackage.InvalidName
- * Indicates the name does not identify a binding.
+ * Indicates the name does not identify a binding.
* @exception org.omg.CosNaming.NamingContextPackage.InvalidAddress
* Indicates the internet based address of the host machine is
- * incorrect
- *
+ * incorrect
*/
public String to_url(String addr, String sn)
@@ -961,15 +958,15 @@ public abstract class NamingContextImpl
/**
* This operation resolves the Stringified name into the object
* reference.
- * @param sn Stringified Name of the object
+ * @param sn Stringified Name of the object
* @exception org.omg.CosNaming.NamingContextPackage.NotFound
- * Indicates there is no object reference for the given name.
+ * Indicates there is no object reference for the given name.
* @exception org.omg.CosNaming.NamingContextPackage.CannotProceed
- * Indicates that the given compound name is incorrect
+ * Indicates that the given compound name is incorrect
* @exception org.omg.CosNaming.NamingContextExtPackage.InvalidName
- * Indicates the name does not identify a binding.
+ * Indicates the name does not identify a binding.
* @exception org.omg.CosNaming.NamingContextPackage.AlreadyBound
- * Indicates the name is already bound.
+ * Indicates the name is already bound.
*
*/
public org.omg.CORBA.Object resolve_str(String sn)
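A round-trip sketch of the stringified-name operations documented above, through the public NamingContextExt view; the ORB setup and the name "docs/report" are illustrative:

```java
static org.omg.CORBA.Object lookup(String[] args) throws Exception {
    org.omg.CORBA.ORB orb = org.omg.CORBA.ORB.init(args, null);
    org.omg.CosNaming.NamingContextExt root =
        org.omg.CosNaming.NamingContextExtHelper.narrow(
            orb.resolve_initial_references("NameService"));
    org.omg.CosNaming.NameComponent[] n = root.to_name("docs/report"); // string -> components
    String s = root.to_string(n);                                      // components -> string
    return root.resolve_str(s);                                        // resolve in one call
}
```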
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/NamingUtils.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/NamingUtils.java
index 4bcc774a882..d141d4d4967 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/NamingUtils.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/NamingUtils.java
@@ -90,7 +90,7 @@ public class NamingUtils {
/**
* Create a error print stream to the supplied file.
- * @param logFile the file to which error messages will go.
+ * @param errFile the file to which error messages will go.
* @exception IOException thrown if the file cannot be opened for output.
*/
public static void makeErrStream(File errFile)
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/TransientBindingIterator.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/TransientBindingIterator.java
index 736b353c56d..468a5339299 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/TransientBindingIterator.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/TransientBindingIterator.java
@@ -63,7 +63,6 @@ public class TransientBindingIterator extends BindingIteratorImpl
* @param orb a org.omg.CORBA.ORB object.
* @param aTable A hashtable containing InternalBindingValues which is
* the content of the TransientNamingContext.
- * @param java.lang.Exception a Java exception.
* @exception Exception a Java exception thrown of the base class cannot
* initialize.
*/
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/TransientNameService.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/TransientNameService.java
index 729191631b0..7842dd1f3a6 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/TransientNameService.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/cosnaming/TransientNameService.java
@@ -85,7 +85,7 @@ public class TransientNameService
* NamingContext, whose object
* reference can be obtained by the initialNamingContext method.
* @param orb The ORB object
- * @param nameserviceName Stringified key used for INS Service registry
+ * @param serviceName Stringified key used for INS Service registry
* @exception org.omg.CORBA.INITIALIZE Thrown if
* the TransientNameService cannot initialize.
*/
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/CorbanameURL.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/CorbanameURL.java
index 569468a3849..1ebae7192ad 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/CorbanameURL.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/CorbanameURL.java
@@ -33,7 +33,7 @@ import com.sun.corba.se.spi.logging.CORBALogDomains;
* will be stored in this object. This object is capable of storing CorbaLoc
* profiles as defined in the CorbaName grammer.
*
- * @Author Hemanth
+ * @author Hemanth
*/
public class CorbanameURL extends INSURLBase
{
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/IIOPEndpointInfo.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/IIOPEndpointInfo.java
index f985cf29ba5..3cee5e8042c 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/IIOPEndpointInfo.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/IIOPEndpointInfo.java
@@ -32,7 +32,7 @@ import com.sun.corba.se.impl.orbutil.ORBConstants;
* host information used in creating the Service Object reference
* from the -ORBInitDef and -ORBDefaultInitDef definitions.
*
- * @Author Hemanth
+ * @author Hemanth
*/
public class IIOPEndpointInfo
{
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/INSURL.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/INSURL.java
index bdb68b211f7..a8f54f1b8d8 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/INSURL.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/INSURL.java
@@ -29,7 +29,7 @@ package com.sun.corba.se.impl.naming.namingutil;
* INS URL is a generic interface for two different types of URL's specified
* in INS spec.
*
- * @Author Hemanth
+ * @author Hemanth
*/
public interface INSURL {
public boolean getRIRFlag( );
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/INSURLHandler.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/INSURLHandler.java
index 424ed01136f..45b45faf20c 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/INSURLHandler.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/INSURLHandler.java
@@ -30,7 +30,7 @@ import java.util.StringTokenizer;
/**
* This class is the entry point to parse different types of INS URL's.
*
- * @Author Hemanth
+ * @author Hemanth
*/
public class INSURLHandler {
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/Utility.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/Utility.java
index 02f5c7a75ea..8d692604ebf 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/Utility.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/namingutil/Utility.java
@@ -36,7 +36,7 @@ import com.sun.corba.se.spi.logging.CORBALogDomains;
/**
* Utility methods for Naming.
*
- * @Author Hemanth
+ * @author Hemanth
*/
class Utility {
private static NamingSystemException wrapper =
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/NameService.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/NameService.java
index 91106f4aa29..992919b950f 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/NameService.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/NameService.java
@@ -186,7 +186,7 @@ public class NameService
/**
* getObjectReferenceFromKey returns the Object reference from the objectkey using POA.create_reference_with_id method
* @param Object Key as String
- * @returns reference an CORBA.Object.
+ * @return a CORBA.Object reference.
*/
org.omg.CORBA.Object getObjectReferenceFromKey( String key )
{
@@ -205,7 +205,7 @@ public class NameService
/**
* getObjectKey gets the Object Key from the reference using POA.reference_to_id method
* @param reference an CORBA.Object.
- * @returns Object Key as String
+ * @return Object Key as String
*/
String getObjectKey( org.omg.CORBA.Object reference )
{
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/NamingContextImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/NamingContextImpl.java
index 63f191d4bc0..67483e0d35b 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/NamingContextImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/NamingContextImpl.java
@@ -116,8 +116,8 @@ public class NamingContextImpl
* Runs the super constructor.
* @param orb an ORB object.
* @param objKey as String
- * @param TheNameService as NameService
- * @param TheServantManagerImpl as ServantManagerImpl
+ * @param theNameService as NameService
+ * @param theServantManagerImpl as ServantManagerImpl
* @exception java.lang.Exception a Java exception.
*/
@@ -223,7 +223,7 @@ public class NamingContextImpl
* exists. The NamingContext will participate in recursive resolving.
* @param n a sequence of NameComponents which is the name under which
* the object will be bound.
- * @param obj the NamingContect object reference to be bound.
+     * @param nc the NamingContext object reference to be bound.
* @exception org.omg.CosNaming.NamingContextPackage.NotFound A name with multiple
* components was supplied, but the first component could not be
* resolved.
@@ -301,7 +301,7 @@ public class NamingContextImpl
* unbound first. The NamingContext will participate in recursive resolving.
* @param n a sequence of NameComponents which is the name under which
* the object will be bound.
- * @param obj the object reference to be bound.
+ * @param nc the object reference to be bound.
* @exception org.omg.CosNaming.NamingContextPackage.NotFound A name with multiple
* components was supplied, but the first component could not be
* resolved.
@@ -907,7 +907,7 @@ public class NamingContextImpl
* The doResolve( ) method calls Resolve( ) recursively to resolve n level
* Names.
* @param n a sequence of NameComponents which is the name to be resolved.
- * @param bt Type of binding (as object or as context).
+ * @param bth Type of binding (as object or as context).
* @return the object reference bound under the supplied name.
* @exception org.omg.CosNaming.NamingContextPackage.NotFound Neither a NamingContext
* or a Corba Object reference not found under this Name
@@ -1140,10 +1140,9 @@ public class NamingContextImpl
/**
* This operation creates a stringified name from the array of Name
* components.
- * @param n Name of the object
+ * @param n Name of the object
* @exception org.omg.CosNaming.NamingContextExtPackage.InvalidName
- * Indicates the name does not identify a binding.
- *
+ * Indicates the name does not identify a binding.
*/
public String to_string(org.omg.CosNaming.NameComponent[] n)
throws org.omg.CosNaming.NamingContextPackage.InvalidName
@@ -1167,10 +1166,9 @@ public class NamingContextImpl
/**
* This operation converts a Stringified Name into an equivalent array
* of Name Components.
- * @param sn Stringified Name of the object
+ * @param sn Stringified Name of the object
* @exception org.omg.CosNaming.NamingContextExtPackage.InvalidName
- * Indicates the name does not identify a binding.
- *
+ * Indicates the name does not identify a binding.
*/
public org.omg.CosNaming.NameComponent[] to_name(String sn)
throws org.omg.CosNaming.NamingContextPackage.InvalidName
@@ -1204,14 +1202,13 @@ public class NamingContextImpl
* This operation creates a URL based "iiopname://" format name
* from the Stringified Name of the object.
* @param addr internet based address of the host machine where
- * Name Service is running
- * @param sn Stringified Name of the object
+ * Name Service is running
+ * @param sn Stringified Name of the object
* @exception org.omg.CosNaming.NamingContextExtPackage.InvalidName
- * Indicates the name does not identify a binding.
+ * Indicates the name does not identify a binding.
* @exception org.omg.CosNaming.NamingContextPackage.InvalidAddress
* Indicates the internet based address of the host machine is
- * incorrect
- *
+ * incorrect
*/
public String to_url(String addr, String sn)
@@ -1247,14 +1244,13 @@ public class NamingContextImpl
/**
* This operation resolves the Stringified name into the object
* reference.
- * @param sn Stringified Name of the object
+ * @param sn Stringified Name of the object
* @exception org.omg.CosNaming.NamingContextPackage.NotFound
- * Indicates there is no object reference for the given name.
+ * Indicates there is no object reference for the given name.
* @exception org.omg.CosNaming.NamingContextPackage.CannotProceed
- * Indicates that the given compound name is incorrect
+ * Indicates that the given compound name is incorrect
* @exception org.omg.CosNaming.NamingContextExtPackage.InvalidName
- * Indicates the name does not identify a binding.
- *
+ * Indicates the name does not identify a binding.
*/
public org.omg.CORBA.Object resolve_str(String sn)
throws org.omg.CosNaming.NamingContextPackage.NotFound,
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/PersistentBindingIterator.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/PersistentBindingIterator.java
index 621560888d8..7c43e6baea0 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/PersistentBindingIterator.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/naming/pcosnaming/PersistentBindingIterator.java
@@ -64,7 +64,6 @@ public class PersistentBindingIterator extends BindingIteratorImpl
* @param orb a org.omg.CORBA.ORB object.
* @param aTable A hashtable containing InternalBindingValues which is
* the content of the PersistentNamingContext.
- * @param java.lang.Exception a Java exception.
* @exception Exception a Java exception thrown of the base class cannot
* initialize.
*/
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/oa/toa/TOAImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/oa/toa/TOAImpl.java
index 780287b98ee..8290e179f58 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/oa/toa/TOAImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/oa/toa/TOAImpl.java
@@ -61,7 +61,7 @@ import com.sun.corba.se.impl.protocol.JIDLLocalCRDImpl ;
* Its characteristics include:
*
* - There is only one OA instance of the TOA. Its OAId is { "TOA" }
-* - There is not adapter manager. The TOA manager ID is fixed.
+* - There is not adapter manager. The TOA manager ID is fixed.
* - State is the same as ORB state (TBD)
*
* Other requirements:
@@ -118,7 +118,7 @@ public class TOAImpl extends ObjectAdapterBase implements TOA
* This will update thread Current, so that subsequent calls to
* returnServant and removeCurrent from the same thread are for the
* same request.
- * @param request is the request containing the rest of the request
+ * @param info is the request containing the rest of the request
*/
public void getInvocationServant( OAInvocationInfo info )
{
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orb/ORBImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orb/ORBImpl.java
index c619b8a9268..bb67c3cd654 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orb/ORBImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orb/ORBImpl.java
@@ -585,7 +585,7 @@ public class ORBImpl extends com.sun.corba.se.spi.orb.ORB
* Create an NVList
*
* @param count size of list to create
- * @result NVList created
+ * @return NVList created
*
* @see NVList
*/
@@ -599,7 +599,7 @@ public class ORBImpl extends com.sun.corba.se.spi.orb.ORB
* Create an NVList corresponding to an OperationDef
*
* @param oper operation def to use to create list
- * @result NVList created
+ * @return NVList created
*
* @see NVList
*/
@@ -612,7 +612,7 @@ public class ORBImpl extends com.sun.corba.se.spi.orb.ORB
/**
* Create a NamedValue
*
- * @result NamedValue created
+ * @return NamedValue created
*/
public synchronized NamedValue create_named_value(String s, Any any, int flags)
{
@@ -623,7 +623,7 @@ public class ORBImpl extends com.sun.corba.se.spi.orb.ORB
/**
* Create an ExceptionList
*
- * @result ExceptionList created
+ * @return ExceptionList created
*/
public synchronized org.omg.CORBA.ExceptionList create_exception_list()
{
@@ -634,7 +634,7 @@ public class ORBImpl extends com.sun.corba.se.spi.orb.ORB
/**
* Create a ContextList
*
- * @result ContextList created
+ * @return ContextList created
*/
public synchronized org.omg.CORBA.ContextList create_context_list()
{
@@ -645,7 +645,7 @@ public class ORBImpl extends com.sun.corba.se.spi.orb.ORB
/**
* Get the default Context object
*
- * @result the default Context object
+ * @return the default Context object
*/
public synchronized org.omg.CORBA.Context get_default_context()
{
@@ -656,7 +656,7 @@ public class ORBImpl extends com.sun.corba.se.spi.orb.ORB
/**
* Create an Environment
*
- * @result Environment created
+ * @return Environment created
*/
public synchronized org.omg.CORBA.Environment create_environment()
{
@@ -719,7 +719,7 @@ public class ORBImpl extends com.sun.corba.se.spi.orb.ORB
/**
* Get the next request that has gotten a response.
*
- * @result the next request ready with a response.
+ * @return the next request ready with a response.
*/
public org.omg.CORBA.Request get_next_response()
throws org.omg.CORBA.WrongTransaction
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/DenseIntMapImpl.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/DenseIntMapImpl.java
index 510c26c974f..95be1654dc0 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/DenseIntMapImpl.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/DenseIntMapImpl.java
@@ -42,8 +42,9 @@ public class DenseIntMapImpl
throw new IllegalArgumentException( "Key must be >= 0." ) ;
}
- /** If key >= 0, return the value bound to key, or null if none.
- * Throws IllegalArgumentException if key <0.
+ /**
+ * If {@code key >= 0}, return the value bound to key, or null if none.
+ * Throws IllegalArgumentException if {@code key < 0}.
*/
public Object get( int key )
{
@@ -56,8 +57,9 @@ public class DenseIntMapImpl
return result ;
}
- /** If key >= 0, bind value to the key.
- * Throws IllegalArgumentException if key <0.
+ /**
+ * If {@code key >= 0}, bind value to the key.
+ * Throws IllegalArgumentException if {@code key < 0}.
*/
public void set( int key, Object value )
{
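The contract spelled out in the reworked comments, in use; the values are illustrative, and a default constructor is assumed:

```java
static void demo() {
    DenseIntMapImpl map = new DenseIntMapImpl();   // assumes the default constructor
    map.set(3, "three");                           // keys must be >= 0
    Object bound = map.get(3);                     // "three"
    Object unbound = map.get(7);                   // null: never bound
    // map.get(-1) or map.set(-1, ...) throws IllegalArgumentException
}
```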
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/GetPropertyAction.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/GetPropertyAction.java
index 1ad18e099d2..9cb69d875b7 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/GetPropertyAction.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/GetPropertyAction.java
@@ -35,7 +35,7 @@ package com.sun.corba.se.impl.orbutil ;
 * AccessController.doPrivileged.
*
* The following code retrieves the value of the system
- * property named "prop"
as a privileged action:
+ * property named "prop"
as a privileged action:
*
*
* String s = (String) java.security.AccessController.doPrivileged(
@@ -67,7 +67,7 @@ public class GetPropertyAction implements java.security.PrivilegedAction {
* value of that property.
*
* @param theProp the name of the system property.
- * @param defaulVal the default value.
+ * @param defaultVal the default value.
*/
public GetPropertyAction(String theProp, String defaultVal) {
this.theProp = theProp;
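The privileged-read idiom the class comment describes, using the two-argument constructor shown in this hunk; the property name and fallback value are illustrative:

```java
static String readProp() {
    return (String) java.security.AccessController.doPrivileged(
            new GetPropertyAction("com.example.prop", "fallback"));
}
```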
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/CondVar.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/CondVar.java
index 73e949b5493..fb605b51650 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/CondVar.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/CondVar.java
@@ -150,8 +150,7 @@ import com.sun.corba.se.impl.orbutil.ORBUtility ;
*
*
* @see Mutex
- * [ Introduction to this package. ]
-
+ * [ Introduction to this package. ]
**/
public class CondVar {
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/Mutex.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/Mutex.java
index 3195ad8ad9a..78d66113d6d 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/Mutex.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/Mutex.java
@@ -126,7 +126,6 @@ package com.sun.corba.se.impl.orbutil.concurrent;
* // ... other similar traversal and update methods ...
* }
*
- *
* @see Semaphore
 * [ Introduction to this package. ]
**/
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/Sync.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/Sync.java
index 32b22fdbc96..c696b795083 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/Sync.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/orbutil/concurrent/Sync.java
@@ -181,11 +181,11 @@ package com.sun.corba.se.impl.orbutil.concurrent;
* }
* }
* }
- *
+ *
*
* Here is an even fancier version, that uses lock re-ordering
* upon conflict:
- *
+ * {@code
* class Cell {
* long value;
* Sync lock = ...;
@@ -214,8 +214,8 @@ package com.sun.corba.se.impl.orbutil.concurrent;
* }
* catch (InterruptedException ex) { return; }
* }
- *}
- *
+ * }
+ * }
*
* Interruptions are in general handled as early as possible.
* Normally, InterruptionExceptions are thrown
@@ -248,7 +248,7 @@ package com.sun.corba.se.impl.orbutil.concurrent;
* it is normally best to just use acquire(), various forms
* of busy waits can be implemented. For a simple example
* (but one that would probably never be preferable to using acquire()):
- *
+ * {@code
* class X {
* Sync lock = ...
* void spinUntilAcquired() throws InterruptedException {
@@ -269,17 +269,16 @@ package com.sun.corba.se.impl.orbutil.concurrent;
* }
* }
* }
- *
+ * }
*
* In addition pure synchronization control, Syncs
* may be useful in any context requiring before/after methods.
* For example, you can use an ObservableSync
* (perhaps as part of a LayeredSync) in order to obtain callbacks
* before and after each method invocation for a given class.
- *
-
- * [ Introduction to this package. ]
-**/
+ *
+ * [ Introduction to this package. ]
+ **/
public interface Sync {
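A compact version of the attempt()-based pattern shown in the class comment above, assuming the attempt/release signatures used in those examples (attempt returns false on timeout and throws InterruptedException):

```java
class Guarded {
    private final Sync lock;                 // e.g. a Mutex, per the @see above
    Guarded(Sync lock) { this.lock = lock; }

    void update() throws InterruptedException {
        if (!lock.attempt(100))              // wait at most 100 ms
            return;                          // give up instead of blocking
        try {
            // ... the guarded work ...
        } finally {
            lock.release();                  // release exactly once, on all paths
        }
    }
}
```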
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/transport/CorbaConnectionCacheBase.java b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/transport/CorbaConnectionCacheBase.java
index 0cbce53ffc1..4f8aedafc3d 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/impl/transport/CorbaConnectionCacheBase.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/impl/transport/CorbaConnectionCacheBase.java
@@ -130,13 +130,13 @@ public abstract class CorbaConnectionCacheBase
* This method must be synchronized since one WorkerThread could
* be reclaming connections inside the synchronized backingStore
* block and a second WorkerThread (or a SelectorThread) could have
- * already executed the if (numberOfConnections <= .... ). As a
+ * already executed the if (numberOfConnections {@literal <=} .... ). As a
* result the second thread would also attempt to reclaim connections.
*
* If connection reclamation becomes a performance issue, the connection
* reclamation could make its own task and consequently executed in
* a separate thread.
- * Currently, the accept & reclaim are done in the same thread, WorkerThread
+ * Currently, the accept {@literal &} reclaim are done in the same thread, WorkerThread
* by default. It could be changed such that the SelectorThread would do
* it for SocketChannels and WorkerThreads for Sockets by updating the
* ParserTable.
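A hypothetical sketch of the race that comment guards against; nothing below is from CorbaConnectionCacheBase. The point is that the threshold check and the reclaim must happen under one monitor, or two worker threads can both pass the test and both reclaim:

```java
class CacheSketch {                        // hypothetical, for illustration only
    private int connections, highWaterMark = 8;

    synchronized boolean reclaimIfNeeded() {
        if (connections <= highWaterMark)  // check and act under one monitor,
            return false;                  // so two threads can't both reclaim
        connections--;                     // stand-in for real reclamation
        return true;
    }
}
```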
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/pept/transport/Acceptor.java b/corba/src/java.corba/share/classes/com/sun/corba/se/pept/transport/Acceptor.java
index d7f8d8660f9..16e69d09daf 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/pept/transport/Acceptor.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/pept/transport/Acceptor.java
@@ -59,7 +59,7 @@ public interface Acceptor
/**
     * Used to determine if an Acceptor has been initialized.
*
-     * @return true Acceptor has been
+     * @return true if the Acceptor has been
* initialized.
*/
public boolean initialized();
@@ -74,7 +74,7 @@ public interface Acceptor
/**
* Set the
- * {@link com.sun.corba.se.pept.transport.Inbound.ConnectionCache InboundConnectionCache}
+ * {@link com.sun.corba.se.pept.transport.InboundConnectionCache InboundConnectionCache}
     * to be used by this Acceptor.
*
* PEPt uses separate caches for each type of Acceptor
@@ -88,7 +88,7 @@ public interface Acceptor
/**
* Get the
- * {@link com.sun.corba.se.pept.transport.Inbound.ConnectionCache InboundConnectionCache}
+ * {@link com.sun.corba.se.pept.transport.InboundConnectionCache InboundConnectionCache}
* used by this Acceptor
*
* PEPt uses separate caches for each type of Acceptor
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/extension/ServantCachingPolicy.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/extension/ServantCachingPolicy.java
index cf4e6872b82..35f225dd141 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/extension/ServantCachingPolicy.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/extension/ServantCachingPolicy.java
@@ -44,7 +44,7 @@ import com.sun.corba.se.impl.orbutil.ORBConstants ;
*
* - POA current semantics
* - Proper handling of POA destroy.
-*
+*
* POA current semantics requires maintaining a ThreadLocal stack of
* invocation information that is always available for POACurrent operations.
* Maintaining this stack is expensive on the timescale of optimized co-located
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/ior/package.html b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/ior/package.html
index 2bdbb74e804..e3784dcc4a4 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/ior/package.html
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/ior/package.html
@@ -41,8 +41,8 @@ The abstract model of IORs works as follows:
- An IIOPProfile isA TaggedProfile.
- An IIOPProfile is composed of an IIOPProfileTemplate and an object ID.
- An IIOPProfileTemplate has an ObjectKeyTemplate, and contains TaggedComponents.
-- A TaggedComponent has an ID, and can be written to an OuputStream.
-
-- A TaggedComponentFactory reads a TaggedComponent from an InputStream.
-
+- A TaggedComponent has an ID, and can be written to an OuputStream.
+- A TaggedComponentFactory reads a TaggedComponent from an InputStream.
In all cases, containment is represented by having the appropriate interface (IOR and
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/legacy/connection/ORBSocketFactory.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/legacy/connection/ORBSocketFactory.java
index d9e06b1b512..baf4eb7e560 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/legacy/connection/ORBSocketFactory.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/legacy/connection/ORBSocketFactory.java
@@ -48,7 +48,7 @@ import com.sun.corba.se.spi.transport.SocketInfo;
*
* property.
*
- * Example:
+ * Example:
*
* -Dcom.sun.CORBA.connection.ORBSocketFactoryClass=MySocketFactory
@@ -145,7 +145,7 @@ public interface ORBSocketFactory
*
* property.
*
- * Example usage:
+ * Example usage:
*
*
* ... \
@@ -203,7 +203,7 @@ public interface ORBSocketFactory
* Once you have the port you may add information to references
* created by the associated adapter by calling
*
- * IORInfo::add_ior_component
+ * IORInfo::add_ior_component
*
*
* Note: if one is using a POA and the lifespan policy of that
@@ -247,7 +247,7 @@ public interface ORBSocketFactory
*
* ORBConstants.BAD_SERVER_ID_HANDLER_CLASS_PROPERTY
*
- * Example:
+ * Example:
*
*
*
@@ -271,7 +271,7 @@ public interface ORBSocketFactory
     * SOCKET_FACTORY_CLASS_PROPERTY and
* LISTEN_SOCKET_PROPERTY
*
- * as used by other clients and servers in your distributed system.
+ * as used by other clients and servers in your distributed system.
*
*/
public ServerSocket createServerSocket(String type, int port)
@@ -324,7 +324,7 @@ public interface ORBSocketFactory
*
     * The SocketInfo given to getEndPointInfo
* is either null or an object obtained
- * from GetEndPointInfoAgainException
+ * from GetEndPointInfoAgainException
*
*/
public SocketInfo getEndPointInfo(org.omg.CORBA.ORB orb,
@@ -333,7 +333,7 @@ public interface ORBSocketFactory
/**
-     * DEPRECATED. DEPRECATED. DEPRECATED. DEPRECATED.
+     * DEPRECATED. DEPRECATED. DEPRECATED. DEPRECATED.
*
* This method is used by a client side ORB.
*
@@ -346,7 +346,7 @@ public interface ORBSocketFactory
*
* If this method throws GetEndPointInfoAgainException then the
     * ORB calls getEndPointInfo again, passing it the
-     * SocketInfo object contained in the exception.
+     * SocketInfo object contained in the exception.
*
*/
public Socket createSocket(SocketInfo socketInfo)
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/legacy/interceptor/RequestInfoExt.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/legacy/interceptor/RequestInfoExt.java
index b6dd89d1fb9..e57f9529a43 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/legacy/interceptor/RequestInfoExt.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/legacy/interceptor/RequestInfoExt.java
@@ -30,8 +30,7 @@ import com.sun.corba.se.spi.legacy.connection.Connection;
/**
* This interface is implemented by our implementation of
* PortableInterceptor.ClientRequestInfo and
- * PortableInterceptor.ServerRequestInfo.
- *
+ * PortableInterceptor.ServerRequestInfo.
*/
public interface RequestInfoExt
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/LongMonitoredAttributeBase.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/LongMonitoredAttributeBase.java
index 226d84fb919..0b3b6b396ee 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/LongMonitoredAttributeBase.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/LongMonitoredAttributeBase.java
@@ -27,13 +27,9 @@ package com.sun.corba.se.spi.monitoring;
import java.util.*;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* A Cleaner Abstraction to provide a Monitored Attribute of type 'Long'
- *
*/
public abstract class LongMonitoredAttributeBase extends MonitoredAttributeBase {
@@ -42,20 +38,14 @@ public abstract class LongMonitoredAttributeBase extends MonitoredAttributeBase
/**
- *
* Constructs LongMonitoredAttribute, by creating the
* MonitoredAttributeInfo with 'Long' as the class type.
* Users are expected to extend this class and provide the implementation
* for getValue() and if needed clearState() as well.
- *
- *
*
- * @param name of tthe MonitoredAttribute
- *
- *
+ * @param name of the MonitoredAttribute
* @param description of the Attribute, Please provid a well thought out
* description, so that the admin can make sense of the attribute supplied.
- *
*/
public LongMonitoredAttributeBase(String name, String description) {
super( name );
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttribute.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttribute.java
index 844668ab001..094b33f5337 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttribute.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttribute.java
@@ -28,15 +28,11 @@ import com.sun.corba.se.spi.monitoring.MonitoredAttributeInfo;
import java.util.*;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* Monitored Attribute is the interface to represent a Monitorable
* Attribute. Using this interface, one can get the value of the attribute
* and set the value if it is a writeable attribute.
- *
*/
public interface MonitoredAttribute {
@@ -44,34 +40,25 @@ public interface MonitoredAttribute {
// operations
/**
- *
* Gets the Monitored Attribute Info for the attribute.
- *
- *
*
- * @param monitoredAttributeInfo for this Monitored Attribute.
- *
+ * @return monitoredAttributeInfo for this Monitored Attribute.
*/
public MonitoredAttributeInfo getAttributeInfo();
/**
- *
* Sets the value for the Monitored Attribute if isWritable() is false, the
* method will throw ILLEGAL Operation exception.
*
* Also, the type of 'value' should be same as specified in the
* MonitoredAttributeInfo for a particular instance.
- *
- *
*
* @param value should be any one of the Basic Java Type Objects which are
* Long, Double, Float, String, Integer, Short, Character, Byte.
- *
*/
public void setValue(Object value);
/**
- *
* Gets the value of the Monitored Attribute. The value can be obtained
* from different parts of the module. User may choose to delegate the call
* to getValue() to other variables.
@@ -79,34 +66,21 @@ public interface MonitoredAttribute {
* NOTE: It is important to make sure that the type of Object returned in
* getvalue is same as the one specified in MonitoredAttributeInfo for this
* attribute.
- *
- *
*
- *
- *
- *
- * @param value is the current value for this MonitoredAttribute
- *
+ * @return the current value for this MonitoredAttribute
*/
public Object getValue();
/**
- *
* Gets the name of the Monitored Attribute.
- *
- *
*
- * @param name of this Attribute
- *
+ * @return name of this Attribute
*/
public String getName();
/**
- *
* If this attribute needs to be cleared, the user needs to implement this
* method to reset the state to initial state. If the Monitored Attribute
* doesn't change like for example (ConnectionManager High Water Mark),
* then clearState() is a No Op.
- *
- *
*/
public void clearState();
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeBase.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeBase.java
index ff779ac796e..b4456e44de2 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeBase.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeBase.java
@@ -27,16 +27,11 @@ package com.sun.corba.se.spi.monitoring;
import java.util.*;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* A Convenient class provided to help users extend and implement only
* getValue(), if there is no need to clear the state and the attribute is not
* writable.
- *
- *
*/
public abstract class MonitoredAttributeBase implements MonitoredAttribute {
String name;
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeInfo.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeInfo.java
index 457006313c5..45f9c4ae239 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeInfo.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeInfo.java
@@ -27,14 +27,10 @@ package com.sun.corba.se.spi.monitoring;
import java.util.*;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* Monitored AttributeInfo contains the meta information of the Monitored
* Attribute.
- *
*/
public interface MonitoredAttributeInfo {
@@ -42,45 +38,38 @@ public interface MonitoredAttributeInfo {
// operations
/**
- *
* If the Attribute is writable from ASAdmin then isWritable() will return
* true.
- *
- *
*
* @return a boolean with true or false
- *
*/
public boolean isWritable();
/**
- *
* isStatistic() is true if the attribute is presented as a Statistic.
- *
- *
*
* @return a boolean with true or false
- *
*/
public boolean isStatistic();
/**
- *
- * Class Type: We will allow only basic class types: 1)Boolean 2)Integer
- * 3)Byte 4)Long 5)Float 6)Double 7)String 8)Character
- *
- *
+ * Class Type: We will allow only basic class types:
+ *
+ * - Boolean
+ * - Integer
+ * - Byte
+ * - Long
+ * - Float
+ * - Double
+ * - String
+ * - Character
+ *
*
* @return a Class Type
- *
*/
public Class type();
/**
- *
* Get's the description for the Monitored Attribute.
- *
- *
*
* @return a String with description
- *
*/
public String getDescription();
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeInfoFactory.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeInfoFactory.java
index 47ad1596959..bac25fa7713 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeInfoFactory.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredAttributeInfoFactory.java
@@ -25,17 +25,12 @@
package com.sun.corba.se.spi.monitoring;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* MonitoredAttributeInfoFactory used mostly by internal classes. If the
* User needs to define some special MonitoredAttributes like a Character
* type Monitored Attribute, they can use this factory to build the meta
* information.
- *
- *
*/
public interface MonitoredAttributeInfoFactory {
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredObject.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredObject.java
index 12bed76dfc0..6514a6bbfe4 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredObject.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredObject.java
@@ -30,161 +30,101 @@ import java.util.*;
import java.util.Collection;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* Monitored Object provides an Hierarchichal view of the ORB Monitoring
* System. It can contain multiple children and a single parent. Each
* Monitored Object may also contain Multiple Monitored Attributes.
- *
*/
public interface MonitoredObject {
///////////////////////////////////////
// operations
/**
- *
* Gets the name of this MonitoredObject
- *
*
* @return a String with name of this Monitored Object
- *
*/
public String getName();
/**
- *
* Gets the description of MonitoredObject
- *
*
* @return a String with Monitored Object Description.
- *
*/
public String getDescription();
/**
- *
* This method will add a child Monitored Object to this Monitored Object.
- *
- *
- *
*/
public void addChild( MonitoredObject m );
/**
- *
* This method will remove child Monitored Object identified by the given name
- *
- *
+ *
* @param name of the ChildMonitored Object
- *
*/
public void removeChild( String name );
/**
- *
* Gets the child MonitoredObject associated with this MonitoredObject
* instance using name as the key. The name should be fully qualified name
* like orb.connectionmanager
- *
- *
*
* @return a MonitoredObject identified by the given name
- *
- *
* @param name of the ChildMonitored Object
- *
*/
public MonitoredObject getChild(String name);
/**
- *
* Gets all the Children registered under this instance of Monitored
* Object.
- *
- *
*
* @return Collection of immediate Children associated with this MonitoredObject.
- *
*/
public Collection getChildren();
/**
- *
* Sets the parent for this Monitored Object.
- *
- *
- *
*/
public void setParent( MonitoredObject m );
/**
- *
* There will be only one parent for an instance of MontoredObject, this
* call gets parent and returns null if the Monitored Object is the root.
- *
- *
*
* @return a MonitoredObject which is a Parent of this Monitored Object instance
- *
*/
public MonitoredObject getParent();
/**
- *
* Adds the attribute with the given name.
- *
- *
*
- *
- *
* @param value is the MonitoredAttribute which will be set as one of the
* attribute of this MonitoredObject.
- *
*/
public void addAttribute(MonitoredAttribute value);
/**
- *
* Removes the attribute with the given name.
- *
- *
*
- *
- *
* @param name is the MonitoredAttribute name
- *
*/
public void removeAttribute(String name);
/**
- *
* Gets the Monitored Object registered by the given name
- *
*
- *
* @return a MonitoredAttribute identified by the given name
- *
- *
* @param name of the attribute
- *
*/
public MonitoredAttribute getAttribute(String name);
/**
- *
* Gets all the Monitored Attributes for this Monitored Objects. It doesn't
* include the Child Monitored Object, that needs to be traversed using
* getChild() or getChildren() call.
- *
- *
*
* @return Collection of all the Attributes for this MonitoredObject
- *
*/
public Collection getAttributes();
/**
- *
* Clears the state of all the Monitored Attributes associated with the
* Monitored Object. It will also clear the state on all it's child
* Monitored Object. The call to clearState will be initiated from
* CORBAMBean.startMonitoring() call.
- *
- *
*/
public void clearState();
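A sketch of wiring the hierarchy this interface describes. The factory method createMonitoredObject(name, description) and the orb handle are assumptions not shown in this patch; the rest follows the documented API (the root comes from the ORB's MonitoringManager, and StringMonitoredAttributeBase subclasses supply only getValue()):

```java
static void wire(com.sun.corba.se.spi.orb.ORB orb) {
    MonitoredObject root = orb.getMonitoringManager().getRootMonitoredObject();
    MonitoredObjectFactory f = MonitoringFactories.getMonitoredObjectFactory();
    MonitoredObject transport =
        f.createMonitoredObject("transport", "Transport layer stats"); // assumed signature
    root.addChild(transport);
    transport.addAttribute(new StringMonitoredAttributeBase("state", "Current state") {
        public Object getValue() { return "RUNNING"; } // the one method users must supply
    });
}
```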
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredObjectFactory.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredObjectFactory.java
index 86feebb55b4..d7b03e7d701 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredObjectFactory.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoredObjectFactory.java
@@ -25,14 +25,9 @@
package com.sun.corba.se.spi.monitoring;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
*
* MonitoredObject Factory to create Monitored Object.
- *
*/
public interface MonitoredObjectFactory {
/**
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringFactories.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringFactories.java
index ed59d59d7bd..f34ad375b43 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringFactories.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringFactories.java
@@ -29,17 +29,13 @@ import com.sun.corba.se.impl.monitoring.MonitoredAttributeInfoFactoryImpl;
import com.sun.corba.se.impl.monitoring.MonitoringManagerFactoryImpl;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* This is used for getting the default factories for
* MonitoredObject, MonitoredAttributeInfo and MonitoringManager. We do not
* expect users to use the MonitoredAttributeInfo factory most of the time
* because the Info is automatically built by StringMonitoredAttributeBase
* and LongMonitoredAttributeBase.
- *
*/
public class MonitoringFactories {
///////////////////////////////////////
@@ -57,20 +53,15 @@ public class MonitoringFactories {
// operations
/**
- *
* Gets the MonitoredObjectFactory
- *
- *
*
* @return a MonitoredObjectFactory
- *
*/
public static MonitoredObjectFactory getMonitoredObjectFactory( ) {
return monitoredObjectFactory;
}
/**
- *
* Gets the MonitoredAttributeInfoFactory. The user is not expected to use this
* Factory, since the MonitoredAttributeInfo is internally created by
* StringMonitoredAttributeBase, LongMonitoredAttributeBase and
@@ -79,11 +70,8 @@ public class MonitoringFactories {
* build a DoubleMonitoredAttributeBase like LongMonitoredAttributeBase
* and build a MonitoredAttributeInfo required by MonitoredAttributeBase
* internally by using this Factory.
- *
- *
*
* @return a MonitoredAttributeInfoFactory
- *
*/
public static MonitoredAttributeInfoFactory
getMonitoredAttributeInfoFactory( )
@@ -92,18 +80,14 @@ public class MonitoringFactories {
}
/**
- *
* Gets the MonitoredManagerFactory. The user is not expected to use this
* Factory, since the ORB will be automatically initialized with the
* MonitoringManager.
*
* User can get hold of MonitoringManager associated with ORB by calling
* orb.getMonitoringManager( )
- *
- *
*
* @return a MonitoredManagerFactory
- *
*/
public static MonitoringManagerFactory getMonitoringManagerFactory( ) {
return monitoringManagerFactory;
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringManager.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringManager.java
index 1a7e6b61f80..5b5082a042a 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringManager.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringManager.java
@@ -30,15 +30,11 @@ import com.sun.corba.se.spi.monitoring.MonitoredObject;
import java.util.*;
/**
- *
* Monitoring Manager will have a 1 to 1 association with the ORB. This
* gives access to the top level Monitored Object, using which more
* Monitored Objects and Attributes can be added and traversed.
- *
- *
*
* @author Hemanth Puttaswamy
- *
*/
public interface MonitoringManager extends Closeable {
@@ -46,23 +42,14 @@ public interface MonitoringManager extends Closeable {
// operations
/**
- *
* Gets the Root Monitored Object which contains a Hierarchy Of Monitored
* Objects exposing various Monitorable Attributes of Various modules.
- *
- *
- *
- * @param MonitoredObject ...
- *
*/
public MonitoredObject getRootMonitoredObject();
/**
- *
* Initialize is called whenever there is a start monitoring call to CORBA
* MBean. This will result in triaging initialize to all the
* MonitoredObjects and it's Monitored Attributes.
- *
- *
*/
public void clearState();
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringManagerFactory.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringManagerFactory.java
index 88b6238ff53..8d117392068 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringManagerFactory.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/MonitoringManagerFactory.java
@@ -25,14 +25,10 @@
package com.sun.corba.se.spi.monitoring;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* MonitoringObjectFactory is used internally by the ORB, It is not for
* general public use.
- *
*/
public interface MonitoringManagerFactory {
/**
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StatisticMonitoredAttribute.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StatisticMonitoredAttribute.java
index 54dd14c0076..f28ab0efad7 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StatisticMonitoredAttribute.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StatisticMonitoredAttribute.java
@@ -27,15 +27,11 @@ package com.sun.corba.se.spi.monitoring;
import java.util.*;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* StatisticsMonitoredAttribute is provided as a convenience to collect the
* Statistics of any entity. The getValue() call will be delegated to the
* StatisticsAccumulator set by the user.
- *
*/
public class StatisticMonitoredAttribute extends MonitoredAttributeBase {
@@ -55,32 +51,19 @@ public class StatisticMonitoredAttribute extends MonitoredAttributeBase {
/**
- *
* Constructs the StaisticMonitoredAttribute, builds the required
* MonitoredAttributeInfo with Long as the class type and is always
* readonly attribute.
- *
- *
*
* @param name Of this attribute
- *
- *
- * @return a StatisticMonitoredAttribute
- *
- *
* @param desc should provide a good description on the kind of statistics
* collected, a good example is "Connection Response Time Stats will Provide the
* detailed stats based on the samples provided from every request completion
* time"
- *
- *
* @param s is the StatisticsAcumulator that user will use to accumulate the
* samples and this Attribute Object will get the computed statistics values
* from.
- *
- *
* @param mutex using which clearState() and getValue() calls need to be locked.
- *
*/
public StatisticMonitoredAttribute(String name, String desc,
StatisticsAccumulator s, Object mutex)
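The documented pieces combined into one sketch; the attribute name, unit, and sample values are illustrative:

```java
static Object sketch() {
    Object mutex = new Object();
    StatisticsAccumulator acc = new StatisticsAccumulator("MilliSeconds");
    StatisticMonitoredAttribute rt = new StatisticMonitoredAttribute(
        "ResponseTime", "Connection response time statistics", acc, mutex);
    acc.sample(12.0);            // record each request's completion time
    acc.sample(17.5);
    return rt.getValue();        // delegates to the accumulator's computed stats
}
```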
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StatisticsAccumulator.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StatisticsAccumulator.java
index 255196e809b..a172216908c 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StatisticsAccumulator.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StatisticsAccumulator.java
@@ -27,11 +27,8 @@ package com.sun.corba.se.spi.monitoring;
import java.util.*;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* StatisticsAccumulator accumulates the samples provided by the user and
* computes the value of minimum, maximum, sum and sample square sum. When
* the StatisticMonitoredAttribute calls getValue(), it will compute all
@@ -42,7 +39,6 @@ import java.util.*;
* Users can easily extend this class and provide the implementation of
* toString() method to format the stats as desired. By default all the stats
* are printed in a single line.
- *
*/
public class StatisticsAccumulator {
@@ -72,19 +68,12 @@ public class StatisticsAccumulator {
/**
- *
* User will use this method to just register a sample with the
* StatisticsAccumulator. This is the only method that User will use to
* expose the statistics, internally the StatisticMonitoredAttribute will
* collect the information when requested from the ASAdmin.
- *
- *
- *
- *
- *
*
* @param value a double value to make it more precise
- *
*/
public void sample(double value) {
sampleCount++;
@@ -137,19 +126,11 @@ public class StatisticsAccumulator {
}
/**
- *
* Construct the Statistics Accumulator by providing the unit as a String.
- * The examples of units are "Hours", "Minutes",
- * "Seconds", "MilliSeconds", "Micro Seconds"
- * etc.,
- *
- *
+ * The examples of units are "Hours", "Minutes",
+ * "Seconds", "MilliSeconds", "Micro Seconds" etc.
*
- * @return a StatisticsAccumulator with ...
- *
- *
* @param unit a String representing the units for the samples collected
- *
*/
public StatisticsAccumulator( String unit ) {
this.unit = unit;
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StringMonitoredAttributeBase.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StringMonitoredAttributeBase.java
index 9277223af8e..5b11ae61c64 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StringMonitoredAttributeBase.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/monitoring/StringMonitoredAttributeBase.java
@@ -27,14 +27,10 @@ package com.sun.corba.se.spi.monitoring;
import java.util.*;
/**
- *
- *
* @author Hemanth Puttaswamy
- *
- *
+ *
* A Convenient Abstraction to present String type Monitored Attribute. One
* of the examples of StringMonitoredAttribute is the State information.
- *
*/
public abstract class StringMonitoredAttributeBase
extends MonitoredAttributeBase
@@ -45,20 +41,11 @@ public abstract class StringMonitoredAttributeBase
/**
- *
* Constructs StringMonitoredAttribute with the MonitoredAttributeInfo
* built with the class type of String.
- *
- *
*
* @param name of this attribute
- *
- *
* @param description of this attribute
- *
- *
- * @return a StringMonitoredAttributeBase
- *
*/
public StringMonitoredAttributeBase(String name, String description) {
super( name );
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/oa/ObjectAdapter.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/oa/ObjectAdapter.java
index f5919d3bc1b..dbe1ac9648c 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/oa/ObjectAdapter.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/oa/ObjectAdapter.java
@@ -89,7 +89,7 @@ import com.sun.corba.se.spi.ior.IORTemplate ;
* A local invocation, where the dispatch is handled in the client subcontract.
* A cached local invocation, where the servant is cached when the IOR is established
* for the client subcontract, and the dispatch is handled in the client subcontract
-* to the cached subcontract.
+* to the cached subcontract.
*
*
* Each of these 3 cases is handled a bit differently. On each request, assume as known
@@ -111,11 +111,11 @@ import com.sun.corba.se.spi.ior.IORTemplate ;
*   oa.returnServant()
* oa.exit()
* pop info
-*
+*
*
-* REVISIT: Is this the required order for exit/pop? Cna they be nested instead?
+*
* The local pattern:
*
* - oa = oaf.find( oaid )
@@ -128,10 +128,10 @@ import com.sun.corba.se.spi.ior.IORTemplate ;
* - oa.returnServant()
* - oa.exit()
* - pop info
-*
+*
*
-* This is the same as the remote case, except that setExecuteReturnServantInResponseConstructor
-* is not needed (or possible, since there is no server request).
+*
* The fast local pattern: When delegate is constructed,
* first extract ObjectKey from IOR in delegate,
* then get ObjectId, ObjectAdapterId, and ObjectAdapterFactory (oaf). Then:
@@ -143,14 +143,14 @@ import com.sun.corba.se.spi.ior.IORTemplate ;
* pop info
*
* The info instance (which includes the Servant) is cached in the client subcontract.
-* Then, on each invocation:
+* Then, on each invocation:
*
* - newinfo = copy of info (clone)
* - info.setOperation( operation )
* - push newinfo
* - oa.enter()
* - dispatch to servant
-* - oa.returnServant()  // XXX This is probably wrong: remove it.
+* - oa.returnServant()
* - oa.exit()
* - pop info
*
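
The three bulleted patterns above share one shape; a schematic sketch of the remote case, using only the method names from this javadoc (the info-stack and dispatch steps are placeholders):

```java
// Schematic only: mirrors the "remote pattern" steps listed above.
ObjectAdapter oa = oaf.find(oaid);
// push info onto the thread-local stack ...
oa.enter();
try {
    oa.getInvocationServant(info);
    // dispatch to servant ...
} finally {
    oa.returnServant(); // always pairs with getInvocationServant
    oa.exit();          // always pairs with enter
    // pop info ...
}
```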
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orb/DataCollector.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orb/DataCollector.java
index 926da503d8f..6f1e8977e16 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orb/DataCollector.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orb/DataCollector.java
@@ -58,7 +58,7 @@ public interface DataCollector {
* mappings in the resulting properties. Also, -ORBInitialServices
* is handled specially in applet mode: they are converted from
* relative to absolute URLs.
- * @raises IllegalStateException if setPropertyNames has not
+ * @throws IllegalStateException if setPropertyNames has not
* been called.
*/
Properties getProperties() ;
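
The corrected tag documents an ordering contract; in caller terms (a sketch — the exact type of the names argument is assumed, not shown in this hunk):

```java
// getProperties() throws IllegalStateException unless this runs first.
collector.setPropertyNames(propertyNames); // hypothetical names argument
Properties props = collector.getProperties();
```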
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orbutil/fsm/Action.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orbutil/fsm/Action.java
index 86a1b6f45ab..296602e1e15 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orbutil/fsm/Action.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orbutil/fsm/Action.java
@@ -46,8 +46,8 @@ public interface Action
* in the state machine, as the state could be corrupted.
* Any exception thrown by the Action for the transition
* will be propagated to doIt.
- * @param FSM fsm is the state machine causing this action.
- * @param Input in is the input that caused the transition.
+ * @param fsm the state machine causing this action.
+ * @param in the input that caused the transition.
*/
public void doIt( FSM fsm, Input in ) ;
}
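
With the tags fixed, an implementation reads directly off the contract; a minimal sketch (the logging body is illustrative only):

```java
Action logAction = new Action() {
    public void doIt(FSM fsm, Input in) {
        // Per the javadoc above, anything thrown here propagates to
        // the caller of doIt, so keep the body exception-safe.
        System.out.println("FSM transition on input: " + in);
    }
};
```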
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orbutil/fsm/Guard.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orbutil/fsm/Guard.java
index 19639992e85..2fd467c6e9d 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orbutil/fsm/Guard.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/orbutil/fsm/Guard.java
@@ -93,8 +93,8 @@ public interface Guard
* input is retried when the thread runs again.
*
*
- * @param FSM fsm is the state machine causing this action.
- * @param Input in is the input that caused the transition.
+ * @param fsm is the state machine causing this action.
+ * @param in is the input that caused the transition.
*/
public Result evaluate( FSM fsm, Input in ) ;
}
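
Guard differs from Action in returning a Result; a sketch under the same caveats (the flag is illustrative, and the Result constants are assumed from this package's FSM support):

```java
final boolean enabled = true; // illustrative condition
Guard whenEnabled = new Guard() {
    public Result evaluate(FSM fsm, Input in) {
        // A deferring result would park the input for retry, as the
        // javadoc above describes; this sketch only enables/disables.
        return enabled ? Guard.Result.ENABLED : Guard.Result.DISABLED;
    }
};
```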
diff --git a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/servicecontext/ServiceContext.java b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/servicecontext/ServiceContext.java
index 30994d385c3..c2b07a25384 100644
--- a/corba/src/java.corba/share/classes/com/sun/corba/se/spi/servicecontext/ServiceContext.java
+++ b/corba/src/java.corba/share/classes/com/sun/corba/se/spi/servicecontext/ServiceContext.java
@@ -42,9 +42,8 @@ import com.sun.corba.se.impl.orbutil.ORBUtility ;
* the service context and provides any needed methods for manipulating
* the service context. Each subclass must provide the following
* members:
-*
*
-* a public static final int SERVICE_CONTEXT_ID that gives the OMG
+* a public static final int SERVICE_CONTEXT_ID that gives the OMG
* (or other) defined id for the service context. This is needed for the
* registration mechanism defined in ServiceContexts. OMG defined
* service context ids are taken from section 13.6.7 of ptc/98-12-04.
diff --git a/corba/src/java.corba/share/classes/com/sun/org/omg/CORBA/portable/ValueHelper.java b/corba/src/java.corba/share/classes/com/sun/org/omg/CORBA/portable/ValueHelper.java
index 537b8697f83..b34a0588818 100644
--- a/corba/src/java.corba/share/classes/com/sun/org/omg/CORBA/portable/ValueHelper.java
+++ b/corba/src/java.corba/share/classes/com/sun/org/omg/CORBA/portable/ValueHelper.java
@@ -32,7 +32,7 @@ import org.omg.CORBA.portable.BoxedValueHelper;
* An interface that is implemented by valuetype helper classes.
* This interface appeared in CORBA 2.3 drafts but was removed from
* the published CORBA 2.3 specification.
- *
+ *
* @deprecated Deprecated by CORBA 2.3.
*/
@Deprecated
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Arguments.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Arguments.java
index 90185aec51d..35a03442c70 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Arguments.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Arguments.java
@@ -68,9 +68,9 @@ public class Arguments
* must check the arguments passed to it for validity and process the
* arguments appropriately. If it detects an invalid argument, it should
* throw an InvalidArgument exception. Arguments MUST be of the form
 - * `/<arg> [<qualifier>]' or `-<arg> [<qualifier>]' where <qualifier>
 - * is optional (for example, -iC:\includes, `C:\includes' is the qualifier
 - * for the argument `i').
 + * {@code '/<arg> [<qualifier>]'} or {@code '-<arg> [<qualifier>]'} where {@code <qualifier>}
 + * is optional (for example, -iC:\includes, 'C:\includes' is the qualifier
 + * for the argument 'i').
* @param args The arguments which are unknown by the framework.
* @param properties Environment-style properties collected from the
* file idl.config.
@@ -289,7 +289,7 @@ public class Arguments
public Hashtable definedSymbols = new Hashtable ();
/**
- * True if new module entries are created for each
+ * (f46082.46.01) True if new module entries are created for each
* re-opened module.
**/
public boolean cppModule = false;
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/AttributeEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/AttributeEntry.java
index c2dab551f5e..0ce15c334c0 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/AttributeEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/AttributeEntry.java
@@ -80,7 +80,7 @@ public class AttributeEntry extends MethodEntry
} // generate
/** Access the attribute generator.
- @returns an object which implements the AttributeGen interface.
+ @return an object which implements the AttributeGen interface.
@see AttributeGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Compile.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Compile.java
index b2668b21a1f..a3e04732873 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Compile.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Compile.java
@@ -52,7 +52,7 @@ import com.sun.tools.corba.se.idl.constExpr.DefaultExprFactory;
* Compiler usage:
*
*
 - * java com.sun.tools.corba.se.idl.toJava.compile [options] <idl file>
 + * {@code java com.sun.tools.corba.se.idl.toJava.compile [options] <idl file>}
*
*
* where <idl file> is the name of a file containing IDL definitions,
@@ -62,17 +62,17 @@ import com.sun.tools.corba.se.idl.constExpr.DefaultExprFactory;
*
* Options:
*
 - * - -i <include path>
 + * - {@code -i <include path>}
 * - By default, the current directory is scanned for included files.
 * This option adds another directory. See also Note 1 below.
 *
 - * - -d <symbol>
 - * - This is equivalent to the following line in an IDL file: #define <symbol>
 + * - {@code -d <symbol>}
 + * - This is equivalent to the following line in an IDL file: {@code #define <symbol>}
 *
 - * - -emitAll
 + * - {@code -emitAll}
 * - Emit all types, including those found in #included files.
 *
 - * - -v
 + * - {@code -v}
 * - Verbose mode.
 *
 *
@@ -81,9 +81,9 @@ import com.sun.tools.corba.se.idl.constExpr.DefaultExprFactory;
* time. Instead, these can be placed into a config file called idl.config.
* This file must be in the CLASSPATH. The format of the includes line is:
*
- *
+ * {@code
 * includes=<path1>;<path2>;...;<pathN>
- *
+ * }
*
* Note that the path separator character, here shown as a semicolon, is
* machine dependent. For instance, on Windows 95 this character is a
@@ -481,10 +481,10 @@ public class Compile
**/
public Arguments arguments = null;
/**
- * This hashtable contains pairs. It is filled in by
+ * This hashtable contains {@code } pairs. It is filled in by
* extenders in cases where they wish to override an IDL type name with
* some other name. For instance, when mapping to Java, there could be
- * an overrideNames entry of <"TRUE", "true">. NOTE: Do NOT change this
+ * an overrideNames entry of {@code <"TRUE", "true">}. NOTE: Do NOT change this
* variable to a new Hash table. Just add elements to it.
**/
protected Hashtable overrideNames = new Hashtable ();
@@ -495,8 +495,8 @@ public class Compile
**/
protected Hashtable symbolTable = new Hashtable ();
/**
 - * This is a vector of strings of the form "IDLfile" or <IDLfile>. It is
 - * a list of the files included in the given IDL file. It will be empty
 + * This is a vector of strings of the form {@code "IDLfile"} or {@code <IDLfile>}.
+ * It is a list of the files included in the given IDL file. It will be empty
* until the parse method executes. If errors are encountered, the state
* of this vector is undefined.
**/
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ConstEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ConstEntry.java
index bd5ab9200fa..5f395be021b 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ConstEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ConstEntry.java
@@ -90,7 +90,7 @@ public class ConstEntry extends SymtabEntry
} // generate
/** Access the constant generator.
- @returns an object which implements the ConstGen interface.
+ @return an object which implements the ConstGen interface.
@see ConstGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/DefaultSymtabFactory.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/DefaultSymtabFactory.java
index f929d03f29b..f10d86f8e96 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/DefaultSymtabFactory.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/DefaultSymtabFactory.java
@@ -202,7 +202,7 @@ public class DefaultSymtabFactory implements SymtabFactory
'char', 'octet', 'short', 'long', etc. The reason it is not limited
to these is that, as an extender, you may wish to override these names.
For instance, when generating Java code, octet translates to byte, so
- there is an entry in Compile.overrideNames: <"octet", "byte"> and a
+ there is an entry in Compile.overrideNames: {@code <"octet", "byte">} and a
PrimitiveEntry in the symbol table for "byte". */
public PrimitiveEntry primitiveEntry (String name)
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/EnumEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/EnumEntry.java
index 098301c5d88..ef7c048ce9b 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/EnumEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/EnumEntry.java
@@ -84,7 +84,7 @@ public class EnumEntry extends SymtabEntry
} // generate
/** Access the enumerator generator.
- @returns an object which implements the EnumGen interface.
+ @return an object which implements the EnumGen interface.
@see EnumGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ExceptionEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ExceptionEntry.java
index c18902b023f..a669b5d701c 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ExceptionEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ExceptionEntry.java
@@ -80,7 +80,7 @@ public class ExceptionEntry extends StructEntry
} // generate
/** Access the exception generator.
- @returns an object which implements the ExceptionGen interface.
+ @return an object which implements the ExceptionGen interface.
@see ExceptionGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ForwardEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ForwardEntry.java
index 83a634c592f..a8da7685fd5 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ForwardEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ForwardEntry.java
@@ -83,7 +83,7 @@ public class ForwardEntry extends SymtabEntry implements InterfaceType
} // generate
/** Access the interface generator.
- @returns an object which implements the InterfaceGen interface.
+ @return an object which implements the InterfaceGen interface.
@see InterfaceGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ForwardValueEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ForwardValueEntry.java
index ce78430d5b3..45c1a9de711 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ForwardValueEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ForwardValueEntry.java
@@ -79,7 +79,7 @@ public class ForwardValueEntry extends ForwardEntry
} // generate
/** Access the interface generator.
- @returns an object which implements the ForwardValueGen interface.
+ @return an object which implements the ForwardValueGen interface.
@see ValueGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/IncludeEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/IncludeEntry.java
index 42b9de6d846..cfb3529f974 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/IncludeEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/IncludeEntry.java
@@ -81,7 +81,7 @@ public class IncludeEntry extends SymtabEntry
} // generate
/** Access the Include type generator.
- @returns an object which implements the IncludeGen interface.
+ @return an object which implements the IncludeGen interface.
@see IncludeGen */
public Generator generator ()
{
@@ -97,7 +97,7 @@ public class IncludeEntry extends SymtabEntry
//d44810
/** Access the fully-qualified file specification of this include.
- @returns a string containing the path of the include file. */
+ @return a string containing the path of the include file. */
public String absFilename ()
{
return _absFilename;
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/InterfaceEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/InterfaceEntry.java
index 3b325467d7e..a5b303cb743 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/InterfaceEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/InterfaceEntry.java
@@ -111,7 +111,7 @@ public class InterfaceEntry extends SymtabEntry implements InterfaceType
} // generate
/** Access the interface generator.
- @returns an object which implements the InterfaceGen interface.
+ @return an object which implements the InterfaceGen interface.
@see InterfaceGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/MethodEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/MethodEntry.java
index f13d84599e2..f7acfa120c0 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/MethodEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/MethodEntry.java
@@ -89,7 +89,7 @@ public class MethodEntry extends SymtabEntry
} // generate
/** Access the method generator.
- @returns an object which implements the MethodGen interface.
+ @return an object which implements the MethodGen interface.
@see MethodGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ModuleEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ModuleEntry.java
index a5b8164020b..1dee7aa981e 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ModuleEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ModuleEntry.java
@@ -84,7 +84,7 @@ public class ModuleEntry extends SymtabEntry
} // generate
/** Access the module generator.
- @returns an object which implements the ModuleGen interface.
+ @return an object which implements the ModuleGen interface.
@see ModuleGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/NativeEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/NativeEntry.java
index c9075e54c63..7be0fa324b7 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/NativeEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/NativeEntry.java
@@ -73,7 +73,7 @@ public class NativeEntry extends SymtabEntry
} // generate
/** Access the constant generator.
- @returns an object which implements the ConstGen interface.
+ @return an object which implements the ConstGen interface.
@see ConstGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ParameterEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ParameterEntry.java
index d5d18dbd140..0818c11580b 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ParameterEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ParameterEntry.java
@@ -88,7 +88,7 @@ public class ParameterEntry extends SymtabEntry
} // generate
/** Access the parameter generator.
- @returns an object which implements the ParameterGen interface.
+ @return an object which implements the ParameterGen interface.
@see ParameterGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/PragmaEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/PragmaEntry.java
index dc805b87dde..f0f3aa2fca9 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/PragmaEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/PragmaEntry.java
@@ -80,7 +80,7 @@ public class PragmaEntry extends SymtabEntry
} // generate
/** Access the Include type generator.
- @returns an object which implements the IncludeGen interface.
+ @return an object which implements the IncludeGen interface.
@see IncludeGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/PrimitiveEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/PrimitiveEntry.java
index 8fb700b1055..0bbaadc5910 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/PrimitiveEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/PrimitiveEntry.java
@@ -81,7 +81,7 @@ public class PrimitiveEntry extends SymtabEntry
} // generate
/** Access the primitive type generator.
- @returns an object which implements the PrimitiveGen interface.
+ @return an object which implements the PrimitiveGen interface.
@see PrimitiveGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/RepositoryID.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/RepositoryID.java
index 8faebf4d226..7282ab02c1a 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/RepositoryID.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/RepositoryID.java
@@ -68,8 +68,8 @@ public class RepositoryID
/**
* Determine is a supplied string meets the minimal format requirement
* for a Repository ID.
 - * @return true iff supplied string has form '<format>:<string>', where
 - * <format> is any non-empty string not containing ':'.
 + * @return true if supplied string has form {@code '<format>:<string>'}, where
 + * {@code <format>} is any non-empty string not containing ':'.
**/
public static boolean hasValidForm (String string)
{
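
For the format in the corrected tag, concretely (illustrative inputs; the method checks only the minimal shape, not that the ID resolves to anything):

```java
RepositoryID.hasValidForm("IDL:omg.org/CORBA/Object:1.0"); // true: format "IDL"
RepositoryID.hasValidForm("RMI:java.lang.String:0");       // true: any non-empty format
RepositoryID.hasValidForm(":1.0");                         // false: empty format part
```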
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SequenceEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SequenceEntry.java
index 20e2a4cc26d..78b20f3a3dc 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SequenceEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SequenceEntry.java
@@ -101,7 +101,7 @@ public class SequenceEntry extends SymtabEntry
} // generate
/** Access the sequence generator.
- @returns an object which implements the SequenceGen interface.
+ @return an object which implements the SequenceGen interface.
@see SequenceGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/StringEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/StringEntry.java
index a07f8e15fee..1283be83356 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/StringEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/StringEntry.java
@@ -91,7 +91,7 @@ public class StringEntry extends SymtabEntry
} // generate
/** Access the primitive type generator.
- @returns an object which implements the PrimitiveGen interface.
+ @return an object which implements the PrimitiveGen interface.
@see PrimitiveGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/StructEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/StructEntry.java
index de20327bd8d..3e500927bd5 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/StructEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/StructEntry.java
@@ -90,7 +90,7 @@ public class StructEntry extends SymtabEntry
} // generate
/** Access the struct generator.
- @returns an object which implements the StructGen interface.
+ @return an object which implements the StructGen interface.
@see StructGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SymtabEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SymtabEntry.java
index 52b8f40157a..ed1772ce228 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SymtabEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SymtabEntry.java
@@ -127,7 +127,7 @@ public class SymtabEntry
/** Get the name of this entry's module. If there are modules within
modules, each module name is separated by '/'.
- @returns this entry's module name. */
+ @return this entry's module name. */
public String module ()
{
return _module;
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SymtabFactory.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SymtabFactory.java
index 1a2f36d5c4c..e41d9931aef 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SymtabFactory.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/SymtabFactory.java
@@ -97,7 +97,7 @@ public interface SymtabFactory
char, octet, short, long, etc. The reason it is not limited to
these is that, as an extender, you may wish to override these names.
For instance, when generating Java code, octet translates to byte,
- so there is an entry in Compile.overrideNames: <"octet", "byte">
+ so there is an entry in Compile.overrideNames: {@code <"octet", "byte">}
and a PrimitiveEntry in the symbol table for "byte". */
PrimitiveEntry primitiveEntry (String name);
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Token.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Token.java
index 8ec46d0da1d..c758a56ebfc 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Token.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/Token.java
@@ -332,7 +332,7 @@ class Token
* @param boolean[] collidesWithKeyword is an array containing one value: a flag
* representing whether this string is an identifier that collides with a keyword.
* This is set by this method.
- * @returns Token The resulting Token corresponding to string.
+ * @return Token The resulting Token corresponding to string.
*/
public static Token makeKeywordToken(
String string, float corbaLevel, boolean escapedOK, boolean[] collision )
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/TypedefEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/TypedefEntry.java
index 8ed44f5dbd7..9a617b57bdc 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/TypedefEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/TypedefEntry.java
@@ -110,7 +110,7 @@ public class TypedefEntry extends SymtabEntry
}
/** Access the typedef generator.
- @returns an object which implements the TypedefGen interface.
+ @return an object which implements the TypedefGen interface.
@see TypedefGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/UnionBranch.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/UnionBranch.java
index 8b0df9da391..55a7aa434e1 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/UnionBranch.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/UnionBranch.java
@@ -46,29 +46,21 @@ import com.sun.tools.corba.se.idl.TypedefEntry;
* of what it may contain:
*
* -
- *
- * case 1: short x;
- *
- * - , false>
+ * {@code case 1: short x;}
 + * - {@code , false>}
*
-
- *
- * case 0:
- * case 8:
- * case 2: long x;
- *
- * - , false>
+ * {@code case 0:}
+ * {@code case 8:}
+ * {@code case 2: long x;}
+ * - {@code , false>}
*
-
- *
- * default: long x;
- *
- * - , true>
+ * {@code default: long x;}
 + * - {@code , true>}
*
-
- *
- * case 0:
- * case 2:
- * default: char c;
- *
- * - , true>
+ * {@code case 0:}
+ * {@code case 2:}
+ * {@code default: char c;}
+ * - {@code , true>}
*
**/
public class UnionBranch
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/UnionEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/UnionEntry.java
index b41b84003aa..0d438a6f9d4 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/UnionEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/UnionEntry.java
@@ -93,7 +93,7 @@ public class UnionEntry extends SymtabEntry
} // generate
/** Access the union generator.
- @returns an object which implements the UnionGen interface.
+ @return an object which implements the UnionGen interface.
@see UnionGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ValueBoxEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ValueBoxEntry.java
index ebc6704e2eb..9a231e42391 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ValueBoxEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ValueBoxEntry.java
@@ -77,7 +77,7 @@ public class ValueBoxEntry extends ValueEntry
} // generate
/** Access the value generator.
- @returns an object which implements the ValueGen interface.
+ @return an object which implements the ValueGen interface.
@see ValueGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ValueEntry.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ValueEntry.java
index 2d92a9bdbbd..093ee990c43 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ValueEntry.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/ValueEntry.java
@@ -86,7 +86,7 @@ public class ValueEntry extends InterfaceEntry
} // generate
/** Access the value generator.
- @returns an object which implements the ValueGen interface.
+ @return an object which implements the ValueGen interface.
@see ValueGen */
public Generator generator ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/constExpr/Terminal.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/constExpr/Terminal.java
index 71c9f29192d..ebe3fd22e62 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/constExpr/Terminal.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/constExpr/Terminal.java
@@ -43,11 +43,11 @@ import java.math.BigInteger;
/**
* This class contains values. Objects of this class are the terminal
* nodes of an expression tree.
- *
+ *
* Note that there is a constructor for Double values, but not Float.
* CORBA defines that all floating point expressions are evaluated as
* double, and that the result is coerced back to float if necessary.
- *
+ *
* Note also that there is a constructor for long values, but not for
* int or short. CORBA defines that all integral expressions are evaluated
* as unsigned long. A CORBA long is a Java int. There is no unsigned int
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Arguments.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Arguments.java
index 8f36cc5ad6a..6620608e8aa 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Arguments.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Arguments.java
@@ -260,7 +260,7 @@ public class Arguments extends com.sun.tools.corba.se.idl.Arguments
} // packageFromProps
/**
- * d57482 method added so default emitter check could be overriden.
 + * d57482 (klr) method added so default emitter check could be overridden.
**/
protected void setDefaultEmitter () {
// If the flag -fclient was not found, assume it.
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/AttributeGen24.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/AttributeGen24.java
index bf61f70baad..a06904311e9 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/AttributeGen24.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/AttributeGen24.java
@@ -64,7 +64,7 @@ public class AttributeGen24 extends MethodGenClone24
} // ctor
/**
- * Added for 2.4 RTF
+ * (d62023-klr) Added for 2.4 RTF
**/
protected void abstractMethod (Hashtable symbolTable, MethodEntry m, PrintWriter stream)
{
@@ -83,7 +83,7 @@ public class AttributeGen24 extends MethodGenClone24
} // abstractMethod
/**
- * Added for 2.4 RTF
+ * (d62023-klr) Added for 2.4 RTF
**/
protected void interfaceMethod (Hashtable symbolTable, MethodEntry m, PrintWriter stream)
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Compile.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Compile.java
index c9c9854b8e2..08aecd1dac5 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Compile.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Compile.java
@@ -87,37 +87,37 @@ import com.sun.tools.corba.se.idl.InvalidArgument;
*
* Options:
*
 - * - -i <include path>
 + * - {@code -i <include path>}
 * - By default, the current directory is scanned for included files.
 * This option adds another directory. See also the note below.
 *
 - * - -d <symbol>
 + * - {@code -d <symbol>}
 * - This is equivalent to the following line in an IDL file:
 - * #define <symbol>
 + * {@code #define <symbol>}
 *
 - * - -f<side>
 - * - Defines what bindings to emit. <side> is one of client, server, all,
 + * - {@code -f<side>}
 + * - Defines what bindings to emit. {@code <side>} is one of client, server, all,
 * serverTIE, allTIE. serverTIE and allTIE cause delegate model skeletons
 * to be emitted. If this flag is not used, -fclient is assumed.
 * allPOA has the same effect as all, except for generation POA type skeletons.
 *
 - * - -keep
 + * - {@code -keep}
 * - If a file to be generated already exists, do not overwrite it. By
 * default it is overwritten.
 *
 - * - -sep <char>
 + * - {@code -sep <char>}
 * - Only valid with -m. Replace the file separator character with
 - * <char> in the file names listed in the .u file.
 + * {@code <char>} in the file names listed in the .u file.
 *
 - * - -emitAll
 + * - {@code -emitAll}
 * - Emit all types, including those found in #included files.
 *
 - * - -v
 + * - {@code -v}
 * - Verbose mode.
 *
 - * - -pkgPrefix <type> <package>
 - * - Whereever <type> is encountered, make sure it resides within
 - * <package> in all generated files. <type> is a fully
 + * - {@code -pkgPrefix <type> <package>}
 + * - Wherever {@code <type>} is encountered, make sure it resides within
 + * {@code <package>} in all generated files. {@code <type>} is a fully
 * qualified, java-style name.
 *
 *
@@ -127,9 +127,9 @@ import com.sun.tools.corba.se.idl.InvalidArgument;
* called idl.config. This file must be in the CLASSPATH. The format of
* the includes line is:
*
- *
+ * {@code
 * includes=<path1>;<path2>;...;<pathN>
- *
+ * }
*
* Note that the path separator character, here shown as a semicolon,
* is machine dependent. For instance, on Windows 95 this character
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Helper24.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Helper24.java
index 4f20f627613..0079b1413e8 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Helper24.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Helper24.java
@@ -70,7 +70,7 @@ public class Helper24 extends Helper
/**
* Generate the heading, including package, imports, class statements,
* and open curly.
- * - don't implement ValueHelper, make non-boxed helpers abstract
+ * d62023 - don't implement ValueHelper, make non-boxed helpers abstract
**/
protected void writeHeading ()
{
@@ -92,8 +92,10 @@ public class Helper24 extends Helper
/**
* Generate the instance variables.
- * - no helper instance except for boxed valuetypes.
- * - move truncatable_ids to mapped class.
+ *
+ * d62023 - no helper instance except for boxed valuetypes;
+ * - move truncatable_ids to mapped class.
+ *
**/
protected void writeInstVars ()
{
@@ -110,9 +112,9 @@ public class Helper24 extends Helper
} // writeInstVars
/**
- * generate members of BoxedValueHelper interface if boxed
+ * d62023 - generate members of BoxedValueHelper interface if boxed
*
- * Hook in here to write factory methods for non-boxed ValueTypes
+ * d62023 - hook in here to write factory methods for non-boxed ValueTypes
* into Helper.
**/
protected void writeValueHelperInterface ()
@@ -143,8 +145,8 @@ public class Helper24 extends Helper
} // writeHelperFactories
/**
- * Generate constructors only for boxed valuetype helpers
- * All other helpers are abstract.
+ * d62023 - generate constructors only for boxed valuetype helpers.
+ * All other helpers are abstract.
**/
protected void writeCtors ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/InterfaceGen.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/InterfaceGen.java
index 4cd1a31a9bb..2667b9ee63b 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/InterfaceGen.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/InterfaceGen.java
@@ -190,17 +190,19 @@ public class InterfaceGen implements com.sun.tools.corba.se.idl.InterfaceGen, Ja
/**
* Generate the interface. Provides general algorithm for binding generation:
- * 1.) Initialize members unique to this generator. - init ()
- * 2.) Open print stream - openStream ()
- * 3.) Write class heading (package, prologue, class statement, open curly - writeHeading ()
- * 4.) Write class body (member data and methods) - write*Body ()
- * 5.) Write class closing (close curly) - writeClosing ()
- * 6.) Close the print stream - closeStream ()
+ *
+ * - Initialize members unique to this generator - init()
+ * - Open print stream - openStream()
 + * - Write class heading (package, prologue, class statement, open curly) - writeHeading()
+ * - Write class body (member data and methods) - write*Body()
+ * - Write class closing (close curly) - writeClosing()
+ * - Close the print stream - closeStream ()
+ *
*
* For CORBA 2.3, interfaces are mapped to Operations and Signature
* interfaces. The Operations interface contains the method definitions.
* The Signature interface extends the Operations interface and adds
- * CORBA::Object.
+ * CORBA::Object. (klr)
**/
protected void generateInterface ()
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/MethodGen24.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/MethodGen24.java
index 99c05ecc4f5..ba84c9f1a1a 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/MethodGen24.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/MethodGen24.java
@@ -96,7 +96,7 @@ public class MethodGen24 extends MethodGen
}
/**
- * Write the methodEntry for a valuetype factory method into
+ * d62023 - write the methodEntry for a valuetype factory method into
* the Value Helper class. Contents from email from Simon,
* 4/25/99.
**/
@@ -135,7 +135,7 @@ public class MethodGen24 extends MethodGen
} // helperFactoryMethod
/**
- * - write an abstract method definition
+ * d62023 - write an abstract method definition
**/
protected void abstractMethod (Hashtable symbolTable, MethodEntry m, PrintWriter stream)
{
@@ -152,8 +152,8 @@ public class MethodGen24 extends MethodGen
} // abstractMethod
/**
- * - write a default factory method implementation for the
- * DefaultFactory. m is a methodEntry for a factory
+ * d62023 - write a default factory method implementation for the
+ * {@code DefaultFactory}. m is a methodEntry for a factory
* method contained in a non-abstract ValueEntry.
**/
protected void defaultFactoryMethod (Hashtable symbolTable, MethodEntry m, PrintWriter stream)
@@ -176,7 +176,7 @@ public class MethodGen24 extends MethodGen
} // defaultFactoryMethod
/**
- * - remove all valueInitializer junk
+ * d62023 - remove all valueInitializer junk
**/
protected void writeMethodSignature ()
{
@@ -242,7 +242,7 @@ public class MethodGen24 extends MethodGen
} // writeMethodSignature
/**
- * - delete method templates for valuetypes
+ * d62023 - delete method templates for valuetypes
**/
protected void interfaceMethod (Hashtable symbolTable, MethodEntry m, PrintWriter stream)
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/MethodGenClone24.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/MethodGenClone24.java
index 49c6bc8b985..5c2e9596b0a 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/MethodGenClone24.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/MethodGenClone24.java
@@ -74,7 +74,7 @@ public class MethodGenClone24 extends AttributeGen
} // ctor
/**
- * - write an abstract method definition
+ * d62023 - write an abstract method definition
**/
protected void abstractMethod (Hashtable symbolTable, MethodEntry m, PrintWriter stream)
{
@@ -91,7 +91,7 @@ public class MethodGenClone24 extends AttributeGen
} // abstractMethod
/**
- * - delete method templates for valuetypes
+ * d62023 - delete method templates for valuetypes
**/
protected void interfaceMethod (Hashtable symbolTable, MethodEntry m, PrintWriter stream)
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Util.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Util.java
index 7e0d8373dab..7397619f2f2 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Util.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/Util.java
@@ -534,7 +534,7 @@ public class Util extends com.sun.tools.corba.se.idl.Util
} // holderName
/**
- *
+ * d61056
**/
public static String helperName (SymtabEntry entry, boolean qualifiedName)
{
@@ -1412,7 +1412,7 @@ public class Util extends com.sun.tools.corba.se.idl.Util
//
/**
* @return true if the current setting of corbaLevel is within delta of
- * the range min <= corbaLevel <= max
+ * the range {@code min <= corbaLevel <= max}
**/
public static boolean corbaLevel (float min, float max)
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/ValueBoxGen24.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/ValueBoxGen24.java
index 02d87ed09f5..dbb1ced706c 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/ValueBoxGen24.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/ValueBoxGen24.java
@@ -69,7 +69,7 @@ public class ValueBoxGen24 extends ValueBoxGen
} // ctor
/**
- * - Move from helper to mapped class
+ * d62023 - Move from helper to mapped class
**/
protected void writeTruncatable () //
{
@@ -84,7 +84,7 @@ public class ValueBoxGen24 extends ValueBoxGen
/**
- *
+ * d62023
**/
public void helperRead (String entryName, SymtabEntry entry, PrintWriter stream)
{
@@ -121,7 +121,7 @@ public class ValueBoxGen24 extends ValueBoxGen
} // helperRead
/**
- *
+ * d62023
**/
public void helperWrite (SymtabEntry entry, PrintWriter stream)
{
@@ -144,7 +144,7 @@ public class ValueBoxGen24 extends ValueBoxGen
} // helperWrite
/**
- *
+ * d62023
**/
public int write (int index, String indent, String name, SymtabEntry entry, PrintWriter stream)
{
diff --git a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/ValueGen24.java b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/ValueGen24.java
index 27930c2d59b..93701b01049 100644
--- a/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/ValueGen24.java
+++ b/corba/src/java.corba/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/ValueGen24.java
@@ -73,15 +73,17 @@ public class ValueGen24 extends ValueGen
} // ctor
/**
- * - delete constructor; helper is abstract
+ * d62023 - delete constructor; helper is abstract
**/
protected void writeConstructor ()
{
} // writeConstructor
/**
- * - delete write_value from non-boxed helpers
- * - delete _write from non-boxed helpers
+ *
+ * d62023 - delete write_value from non-boxed helpers
+ * - delete _write from non-boxed helpers
+ *
**/
public void helperWrite (SymtabEntry entry, PrintWriter stream)
{
@@ -91,7 +93,7 @@ public class ValueGen24 extends ValueGen
} // helperWrite
/**
- *
+ * d62023
**/
public void helperRead (String entryName, SymtabEntry entry, PrintWriter stream)
{
@@ -101,7 +103,7 @@ public class ValueGen24 extends ValueGen
} // helperRead
/**
- * - suppress initializers from mapped value; now generated in
+ * d62023 - suppress initializers from mapped value; now generated in
* the Helper class and Factory class
**/
protected void writeInitializers ()
@@ -110,7 +112,7 @@ public class ValueGen24 extends ValueGen
} // writeInitializers
/**
- * - Goes in mapped class, not Helper
+ * d62023 - goes in mapped class, not Helper
**/
protected void writeTruncatable () //
{
@@ -155,7 +157,7 @@ public class ValueGen24 extends ValueGen
}
/**
- * CustomMarshal -> CustomValue for custom valuetypes
+ * d62023 - CustomMarshal {@literal ->} CustomValue for custom valuetypes
* mapped class is abstract
**/
protected void writeHeading ()
@@ -217,7 +219,7 @@ public class ValueGen24 extends ValueGen
} // writeHeading
/**
- * - private state maps to protected, not default
+ * d62023 - private state maps to protected, not default
**/
protected void writeMembers ()
{
@@ -245,8 +247,7 @@ public class ValueGen24 extends ValueGen
} // writeMembers
/**
- * Methods need to be abstract
- * writeStreamable
+ * d62023 - methods need to be abstract writeStreamable
**/
protected void writeMethods ()
{
@@ -294,7 +295,7 @@ public class ValueGen24 extends ValueGen
} // writeMethods
/**
- * Call super._read()
+ * d62023 - call super._read()
**/
public int read (int index, String indent, String name, SymtabEntry entry, PrintWriter stream)
{
@@ -335,7 +336,7 @@ public class ValueGen24 extends ValueGen
} // read
/**
- * Call super._write()
+ * d62023 - call super._write()
**/
public int write (int index, String indent, String name, SymtabEntry entry, PrintWriter stream)
{
@@ -374,7 +375,7 @@ public class ValueGen24 extends ValueGen
} // write
/**
- * <62023> - generate factory interface and default factory
+ * d62023 - generate factory interface and default factory
**/
public void generate (Hashtable symbolTable, ValueEntry v, PrintWriter str)
{
diff --git a/corba/src/java.corba/share/classes/org/omg/CORBA/LocalObject.java b/corba/src/java.corba/share/classes/org/omg/CORBA/LocalObject.java
index 6aba94ad75d..4dee33450b1 100644
--- a/corba/src/java.corba/share/classes/org/omg/CORBA/LocalObject.java
+++ b/corba/src/java.corba/share/classes/org/omg/CORBA/LocalObject.java
@@ -28,7 +28,7 @@ import org.omg.CORBA.portable.*;
/**
- * Used as a base class for implementation of a local IDL interface in the
+ * Used as a base class for implementation of a local IDL interface in the
* Java language mapping. It is a class which implements all the operations
* in the org.omg.CORBA.Object interface.
*
 * Local interfaces are implemented by using CORBA::LocalObject
@@ -49,9 +49,10 @@ import org.omg.CORBA.portable.*;
*
 * LocalObject is to be used as the base class of locally
* constrained objects, such as those in the PortableServer module.
* The specification here is based on the CORBA Components
- * Volume I - orbos/99-07-01
+ * Volume I - orbos/99-07-01
+ *
 * @see CORBA package
- * comments for unimplemented features
+ * comments for unimplemented features
*/
public class LocalObject implements org.omg.CORBA.Object
@@ -64,7 +65,7 @@ public class LocalObject implements org.omg.CORBA.Object
public LocalObject() {}
/**
 - * Determines whether the two object references are equivalent,
+ * Determines whether the two object references are equivalent,
* so far as the ORB can easily determine. Two object references are equivalent
* if they are identical. Two distinct object references which in fact refer to
* the same object are also equivalent. However, ORBs are not required
diff --git a/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/CompoundType.java b/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/CompoundType.java
index fcbbaccfd4a..386e3082c36 100644
--- a/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/CompoundType.java
+++ b/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/CompoundType.java
@@ -145,15 +145,15 @@ public abstract class CompoundType extends Type {
/**
* Return true if this type implements
- * isIDLEntity() && isException().
+ * {@code isIDLEntity() && isException()}.
*/
public boolean isIDLEntityException () {
return isIDLEntity() && isException();
}
/**
- * Return true if isIDLEntity() && !isValueBase()
+ * Return true if {@code isIDLEntity() && !isValueBase()
* && !isAbstractBase() && !isCORBAObject()
- * && !isIDLEntityException().
+ * && !isIDLEntityException()}.
*/
public boolean isBoxed () {
return (isIDLEntity() && !isValueBase() &&
@@ -1984,7 +1984,7 @@ public abstract class CompoundType extends Type {
}
/**
- * Return true if this is NOT a constructor && is not
+ * Return true if this is NOT a constructor {@code &&} is not
* an attribute.
*/
public boolean isNormalMethod () {
diff --git a/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/ContextStack.java b/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/ContextStack.java
index d79bfcbefd3..518d9a4f751 100644
--- a/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/ContextStack.java
+++ b/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/ContextStack.java
@@ -95,7 +95,7 @@ public class ContextStack {
}
/**
- * Return true if env.nerrors > 0.
+ * Return true if {@code env.nerrors > 0}.
*/
public boolean anyErrors () {
return env.nerrors > 0;
diff --git a/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/IDLGenerator.java b/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/IDLGenerator.java
index 5b0e56b8387..c29414a52dc 100644
--- a/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/IDLGenerator.java
+++ b/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/IDLGenerator.java
@@ -91,8 +91,8 @@ public class IDLGenerator extends sun.rmi.rmic.iiop.Generator {
/**
* Create and return a top-level type.
- * @param env The compiler environment.
* @param cdef The top-level class definition.
+ * @param stack The context stack.
* @return An RemoteType or null if is non-conforming.
*/
protected sun.rmi.rmic.iiop.CompoundType getTopType(ClassDefinition cdef,
@@ -169,7 +169,7 @@ public class IDLGenerator extends sun.rmi.rmic.iiop.Generator {
* Examine and consume command line arguments.
* @param argv The command line arguments. Ignore null
* and unknown arguments. Set each consumed argument to null.
- * @param error Report any errors using the main.error() methods.
+ * @param main Report any errors using the main.error() methods.
* @return true if no errors, false otherwise.
*/
public boolean parseArgs(String argv[], Main main) {
@@ -195,11 +195,11 @@ public class IDLGenerator extends sun.rmi.rmic.iiop.Generator {
else if ( argv[i].equalsIgnoreCase( "-init" ) ) {
factory = false;
argv[i] = null;
- }
+ }
else if ( argv[i].equalsIgnoreCase( "-factory" ) ) {
factory = true;
argv[i] = null;
- }
+ }
else if ( argv[i].equalsIgnoreCase( "-idlfile" ) ) {
argv[i] = null;
if ( ++i < argv.length && argv[i] != null && !argv[i].startsWith("-") ) {
@@ -210,7 +210,7 @@ public class IDLGenerator extends sun.rmi.rmic.iiop.Generator {
argv[i] = null;
ifHash.put( idlFrom,idlTo );
continue nextArg;
- }
+ }
}
main.error("rmic.option.requires.argument", "-idlfile");
result = false;
@@ -403,7 +403,7 @@ public class IDLGenerator extends sun.rmi.rmic.iiop.Generator {
* Write the output for the given OutputFileName into the output stream.
* (The IDL mapping for java.lang.Class is generated from
* javax.rmi.CORBA.ClassDesc in the tools workspace)
- * @param OutputType ot One of the items returned by getOutputTypesFor(...)
+ * @param ot One of the items returned by getOutputTypesFor(...)
* @param alreadyChecked A set of Types which have already been checked.
* Intended to be passed to Type.collectMatching(filter,alreadyChecked).
* @param p The output stream.
@@ -927,7 +927,7 @@ public class IDLGenerator extends sun.rmi.rmic.iiop.Generator {
* Add reference for given type avoiding duplication.
* Sort into specials, arrays and regular references.
* Filter out types which are not required.
- * @param t Given Type
+ * @param ref Given Type
* @param refHash Hashtable for type references
* @param spcHash Hashtable for special type references
* @param arrHash Hashtable for array references
@@ -986,7 +986,7 @@ public class IDLGenerator extends sun.rmi.rmic.iiop.Generator {
* Collect and filter thrown exceptions for a given pre-filtered method.
* Keep only 'checked' exception classes minus java.rmi.RemoteException
* and its subclasses
- * @param method The current method
+ * @param mth The current method
* @param excHash Hashtable containing non-duplicate thrown exceptions
*/
protected void getExceptions(
@@ -1077,7 +1077,7 @@ public class IDLGenerator extends sun.rmi.rmic.iiop.Generator {
* order.
* Non-static, non-transient fields are mapped.
* If the type is a custom valuetype, only public fields are mapped.
- * @param ct The current CompoundType
+ * @param t The current CompoundType
* @return Vector containing the data fields
*/
protected Vector getData(
@@ -1163,7 +1163,7 @@ public class IDLGenerator extends sun.rmi.rmic.iiop.Generator {
/**
* Write forward reference for boxed valuetype for single dimension of IDL
* sequence.
- * If the dimension is <1 and the element is a CompoundType, write a
+ * If the dimension is {@literal < 1} and the element is a CompoundType, write a
* forward declare for the element
* @param at ArrayType for forward declare
* @param dim The dimension to write
@@ -1276,7 +1276,7 @@ public class IDLGenerator extends sun.rmi.rmic.iiop.Generator {
/**
* Write #includes
- * @param incHash Hashtable loaded with Types to include
+ * @param inhHash Hashtable loaded with Types to include
* @param p The output stream.
*/
protected void writeInheritedIncludes(
diff --git a/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/Type.java b/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/Type.java
index c1723a5aaf9..425e9385d96 100644
--- a/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/Type.java
+++ b/corba/src/jdk.rmic/share/classes/sun/rmi/rmic/iiop/Type.java
@@ -51,7 +51,7 @@ import sun.rmi.rmic.Names;
* conformance checking and name mapping as defined in the "Java to IDL
* Mapping" OMG specification. The family is composed of the following
* fixed set of types:
- *
+ * {@literal
*
* +- RemoteType <-- AbstractType
* |
@@ -67,7 +67,7 @@ import sun.rmi.rmic.Names;
* |
* +- NCClassType
*
- *
+ * }
* PrimitiveType represents a primitive or a void type.
*
* CompoundType is an abstract base representing any non-special class
diff --git a/hotspot/.hgtags b/hotspot/.hgtags
index bf3b531730c..c52975971ac 100644
--- a/hotspot/.hgtags
+++ b/hotspot/.hgtags
@@ -465,3 +465,4 @@ ee878f3d6732856f7725c590312bfbe2ffa52cc7 jdk9-b58
9c916db4bf3bc164a47b5a9cefe5ffd71e111f6a jdk9-b60
715d2da5801c410746e92f08066d53bde1496286 jdk9-b61
1eab877142cce6ca06e556e2ad0af688f993f00b jdk9-b62
+2ac9b6b36689b50d1562627067c92d51781b5684 jdk9-b63
diff --git a/hotspot/agent/src/os/bsd/libproc_impl.c b/hotspot/agent/src/os/bsd/libproc_impl.c
index 78da80617fa..b8ba361b8a9 100644
--- a/hotspot/agent/src/os/bsd/libproc_impl.c
+++ b/hotspot/agent/src/os/bsd/libproc_impl.c
@@ -215,7 +215,12 @@ lib_info* add_lib_info_fd(struct ps_prochandle* ph, const char* libname, int fd,
return NULL;
}
- strncpy(newlib->name, libname, sizeof(newlib->name));
+ if (strlen(libname) >= sizeof(newlib->name)) {
+ print_debug("libname %s too long\n", libname);
+ return NULL;
+ }
+ strcpy(newlib->name, libname);
+
newlib->base = base;
if (fd == -1) {
diff --git a/hotspot/agent/src/os/linux/libproc_impl.c b/hotspot/agent/src/os/linux/libproc_impl.c
index a04bc37d1d7..97d3acf9282 100644
--- a/hotspot/agent/src/os/linux/libproc_impl.c
+++ b/hotspot/agent/src/os/linux/libproc_impl.c
@@ -159,7 +159,12 @@ lib_info* add_lib_info_fd(struct ps_prochandle* ph, const char* libname, int fd,
return NULL;
}
- strncpy(newlib->name, libname, sizeof(newlib->name));
+ if (strlen(libname) >= sizeof(newlib->name)) {
+ print_debug("libname %s too long\n", libname);
+ return NULL;
+ }
+ strcpy(newlib->name, libname);
+
newlib->base = base;
if (fd == -1) {
diff --git a/hotspot/agent/src/share/classes/com/sun/java/swing/action/ActionManager.java b/hotspot/agent/src/share/classes/com/sun/java/swing/action/ActionManager.java
index 751d4a63114..7da5b6534ca 100644
--- a/hotspot/agent/src/share/classes/com/sun/java/swing/action/ActionManager.java
+++ b/hotspot/agent/src/share/classes/com/sun/java/swing/action/ActionManager.java
@@ -46,6 +46,11 @@ public abstract class ActionManager
return manager;
}
+ protected static void setInstance(ActionManager m)
+ {
+ manager = m;
+ }
+
protected abstract void addActions();
protected void addAction(String cmdname, Action action)
@@ -90,6 +95,6 @@ public abstract class ActionManager
private HashMap actions;
private static ActionUtilities utilities = new ActionUtilities();
- protected static ActionManager manager;
+ private static ActionManager manager;
}
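
With the field private, a concrete manager now installs itself through the new setter; a sketch (the subclass name is hypothetical and base-class construction details are assumed):

```java
public class MyActionManager extends ActionManager {
    public static ActionManager getInstance() {
        if (ActionManager.getInstance() == null) {
            // Replaces the direct field write that private access now forbids.
            setInstance(new MyActionManager());
        }
        return ActionManager.getInstance();
    }

    protected void addActions() {
        // register Action instances via addAction(name, action)
    }
}
```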
diff --git a/hotspot/agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java b/hotspot/agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java
index b4eb95df336..d2d85c3086a 100644
--- a/hotspot/agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java
+++ b/hotspot/agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java
@@ -46,7 +46,7 @@ public abstract class CommonToolBar extends JToolBar
{
this.manager = manager;
statusBar = status;
- buttonSize = new Dimension(CommonUI.buttconPrefSize);
+ buttonSize = new Dimension(CommonUI.getButtconPrefSize());
buttonInsets = new Insets(0, 0, 0, 0);
addComponents();
}
diff --git a/hotspot/agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java b/hotspot/agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java
index e6adc50201e..a1c8127f475 100644
--- a/hotspot/agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java
+++ b/hotspot/agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java
@@ -373,20 +373,25 @@ public class CommonUI
comp.setCursor(Cursor.getPredefinedCursor(0));
}
- public static final int BUTTON_WIDTH = 100;
- public static final int BUTTON_HEIGHT = 26;
- public static final int BUTTCON_WIDTH = 28;
- public static final int BUTTCON_HEIGHT = 28;
- public static final int SM_BUTTON_WIDTH = 72;
- public static final int SM_BUTTON_HEIGHT = 26;
- public static final int LABEL_WIDTH = 100;
- public static final int LABEL_HEIGHT = 20;
- public static final int TEXT_WIDTH = 150;
- public static final int TEXT_HEIGHT = 20;
- public static Dimension buttonPrefSize = new Dimension(100, 26);
- public static Dimension buttconPrefSize = new Dimension(28, 28);
- public static Dimension smbuttonPrefSize = new Dimension(72, 26);
- public static Dimension labelPrefSize = new Dimension(100, 20);
- public static Dimension textPrefSize = new Dimension(150, 20);
+ public static Dimension getButtconPrefSize()
+ {
+ return buttconPrefSize;
+ }
+
+ private static final int BUTTON_WIDTH = 100;
+ private static final int BUTTON_HEIGHT = 26;
+ private static final int BUTTCON_WIDTH = 28;
+ private static final int BUTTCON_HEIGHT = 28;
+ private static final int SM_BUTTON_WIDTH = 72;
+ private static final int SM_BUTTON_HEIGHT = 26;
+ private static final int LABEL_WIDTH = 100;
+ private static final int LABEL_HEIGHT = 20;
+ private static final int TEXT_WIDTH = 150;
+ private static final int TEXT_HEIGHT = 20;
+ private static final Dimension buttonPrefSize = new Dimension(100, 26);
+ private static final Dimension buttconPrefSize = new Dimension(28, 28);
+ private static final Dimension smbuttonPrefSize = new Dimension(72, 26);
+ private static final Dimension labelPrefSize = new Dimension(100, 20);
+ private static final Dimension textPrefSize = new Dimension(150, 20);
}
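Beyond hiding the constants, it is worth noting that `java.awt.Dimension` is mutable, so `getButtconPrefSize()` still hands out a shared object a caller could modify; the `CommonToolBar` hunk above defends itself by copying the result into `new Dimension(...)`. A hedged sketch of the defensive-copy alternative (my addition, not what the patch does), with a hypothetical class name:

```java
import java.awt.Dimension;

final class UiSizes {
    private static final Dimension BUTTCON_PREF_SIZE = new Dimension(28, 28);

    // Return a fresh copy so no caller can mutate the shared constant.
    static Dimension getButtconPrefSize() {
        return new Dimension(BUTTCON_PREF_SIZE);
    }
}
```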
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethodData.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethodData.java
index 13ad04a9d30..83056d060a3 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethodData.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethodData.java
@@ -148,7 +148,7 @@ public class ciMethodData extends ciMetadata implements MethodDataInterface<ciKlass,ciMethod> {
public ParametersTypeData<ciKlass,ciMethod> parametersTypeData() {
Address base = getAddress().addOffsetTo(origField.getOffset());
int di = (int)parametersTypeDataDi.getValue(base);
- if (di == -1) {
+ if (di == -1 || di == -2) {
return null;
}
DataLayout dataLayout = new DataLayout(dataField.getValue(getAddress()), di);
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java
index b0b6d8462f0..815a74bc276 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java
@@ -29,9 +29,9 @@ import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.gc_interface.CollectedHeap;
import sun.jvm.hotspot.gc_interface.CollectedHeapName;
import sun.jvm.hotspot.memory.MemRegion;
-import sun.jvm.hotspot.memory.SharedHeap;
import sun.jvm.hotspot.memory.SpaceClosure;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
@@ -41,7 +41,7 @@ import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for G1CollectedHeap.
-public class G1CollectedHeap extends SharedHeap {
+public class G1CollectedHeap extends CollectedHeap {
// HeapRegionManager _hrm;
static private long hrmFieldOffset;
// MemRegion _g1_reserved;
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeap.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeap.java
index daf619536ec..c66a791dfd8 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeap.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeap.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@ import sun.jvm.hotspot.memory.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
-public class CollectedHeap extends VMObject {
+public abstract class CollectedHeap extends VMObject {
private static long reservedFieldOffset;
static {
@@ -73,9 +73,7 @@ public class CollectedHeap extends VMObject {
return reservedRegion().contains(a);
}
- public CollectedHeapName kind() {
- return CollectedHeapName.ABSTRACT;
- }
+ public abstract CollectedHeapName kind();
public void print() { printOn(System.out); }
public void printOn(PrintStream tty) {
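Making `kind()` abstract moves the "unknown heap" failure from run time to compile time: there is no longer a base implementation returning an `ABSTRACT` sentinel, so every concrete heap class must identify itself or it will not compile. A minimal sketch of the pattern with hypothetical names:

```java
abstract class Heap {
    // No sentinel default: each concrete heap must name itself.
    abstract String kind();
}

final class G1Heap extends Heap {
    @Override
    String kind() { return "G1CollectedHeap"; }
}
```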
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java
index 2e57740135a..7e18cfa5075 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,6 @@ public class CollectedHeapName {
private CollectedHeapName(String name) { this.name = name; }
- public static final CollectedHeapName ABSTRACT = new CollectedHeapName("abstract");
- public static final CollectedHeapName SHARED_HEAP = new CollectedHeapName("SharedHeap");
public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");
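`CollectedHeapName` is the classic pre-Java-5 typesafe-enum idiom: a private constructor plus a closed set of `public static final` instances, which makes identity comparison with `==` safe. Dropping `ABSTRACT` and `SHARED_HEAP` shrinks the set to heaps that can actually exist at run time. The idiom in miniature, with hypothetical names:

```java
final class HeapKind {
    private final String name;
    private HeapKind(String name) { this.name = name; }  // no instances from outside

    static final HeapKind GEN = new HeapKind("GenCollectedHeap");
    static final HeapKind G1  = new HeapKind("G1CollectedHeap");

    @Override
    public String toString() { return name; }
}
```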
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/GenCollectedHeap.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/GenCollectedHeap.java
index b39c4602043..074e7fa286e 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/GenCollectedHeap.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/GenCollectedHeap.java
@@ -33,8 +33,7 @@ import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
-public class GenCollectedHeap extends SharedHeap {
- private static CIntegerField nGensField;
+public class GenCollectedHeap extends CollectedHeap {
private static AddressField youngGenField;
private static AddressField oldGenField;
@@ -54,7 +53,6 @@ public class GenCollectedHeap extends SharedHeap {
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("GenCollectedHeap");
- nGensField = type.getCIntegerField("_n_gens");
youngGenField = type.getAddressField("_young_gen");
oldGenField = type.getAddressField("_old_gen");
@@ -70,7 +68,7 @@ public class GenCollectedHeap extends SharedHeap {
}
public int nGens() {
- return (int) nGensField.getValue(addr);
+ return 2; // Young + Old
}
public Generation getGen(int i) {
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java
index a6654340276..ee4fbb06594 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -112,11 +112,7 @@ public class Universe {
return "";
}
public CollectedHeap heap() {
- try {
- return (CollectedHeap) heapConstructor.instantiateWrapperFor(collectedHeapField.getValue());
- } catch (WrongTypeException e) {
- return new CollectedHeap(collectedHeapField.getValue());
- }
+ return (CollectedHeap) heapConstructor.instantiateWrapperFor(collectedHeapField.getValue());
}
public static long getNarrowOopBase() {
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VirtualSpace.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/VirtualSpace.java
similarity index 97%
rename from hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VirtualSpace.java
rename to hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/VirtualSpace.java
index 6cc90f1fe23..a4acab2087e 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VirtualSpace.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/VirtualSpace.java
@@ -22,10 +22,11 @@
*
*/
-package sun.jvm.hotspot.runtime;
+package sun.jvm.hotspot.memory;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class VirtualSpace extends VMObject {
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java
index a055fe1619c..5cbc25e8ef4 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java
@@ -328,7 +328,7 @@ public class ConstantPool extends Metadata implements ClassConstants {
}
public Symbol getUnresolvedStringAt(int which) {
- return getSymbolAt(which);
+ return getSlotAt(which).getSymbol();
}
// returns null, if not resolved.
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java
index 4429754a5fe..920e6af0fb6 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java
@@ -42,10 +42,10 @@ public class PhaseCFG extends Phase {
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("PhaseCFG");
- numBlocksField = new CIntField(type.getCIntegerField("_num_blocks"), 0);
+ numBlocksField = new CIntField(type.getCIntegerField("_number_of_blocks"), 0);
blocksField = type.getAddressField("_blocks");
bbsField = type.getAddressField("_node_to_block_mapping");
- brootField = type.getAddressField("_broot");
+ brootField = type.getAddressField("_root_block");
}
private static CIntField numBlocksField;
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
index ee0b541bdbf..eb15f9f9047 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
@@ -259,8 +259,7 @@ public class VM {
saProps = new Properties();
URL url = null;
try {
- url = VM.class.getClassLoader().getResource("sa.properties");
- saProps.load(new BufferedInputStream(url.openStream()));
+ saProps.load(VM.class.getResourceAsStream("/sa.properties"));
} catch (Exception e) {
System.err.println("Unable to load properties " +
(url == null ? "null" : url.toString()) +
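The replacement collapses the `getResource`/`openStream` pair into one `Class.getResourceAsStream("/sa.properties")` call; the leading slash matters, because `Class`-relative lookups are package-relative unless the path is absolute. (The catch block still interpolates `url`, which is now always null — a leftover the hunk does not clean up.) A sketch of the same loading pattern, with try-with-resources added by me rather than taken from the patch:

```java
import java.io.InputStream;
import java.util.Properties;

final class SaProps {
    static Properties load() throws Exception {
        Properties props = new Properties();
        // Leading '/' makes the lookup absolute instead of package-relative.
        try (InputStream in = SaProps.class.getResourceAsStream("/sa.properties")) {
            if (in == null) throw new IllegalStateException("sa.properties not found");
            props.load(in);
        }
        return props;
    }
}
```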
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java
index 0d6d5328967..558aa4a8dbd 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java
@@ -314,26 +314,17 @@ public class X86Frame extends Frame {
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
private void adjustUnextendedSP() {
- // If we are returning to a compiled MethodHandle call site, the
- // saved_fp will in fact be a saved value of the unextended SP. The
- // simplest way to tell whether we are returning to such a call site
- // is as follows:
+ // On x86, sites calling method handle intrinsics and lambda forms are treated
+ // as any other call site. Therefore, no special action is needed when we are
+ // returning to any of these call sites.
CodeBlob cb = cb();
NMethod senderNm = (cb == null) ? null : cb.asNMethodOrNull();
if (senderNm != null) {
- // If the sender PC is a deoptimization point, get the original
- // PC. For MethodHandle call site the unextended_sp is stored in
- // saved_fp.
- if (senderNm.isDeoptMhEntry(getPC())) {
- // DEBUG_ONLY(verifyDeoptMhOriginalPc(senderNm, getFP()));
- raw_unextendedSP = getFP();
- }
- else if (senderNm.isDeoptEntry(getPC())) {
- // DEBUG_ONLY(verifyDeoptOriginalPc(senderNm, raw_unextendedSp));
- }
- else if (senderNm.isMethodHandleReturn(getPC())) {
- raw_unextendedSP = getFP();
+ // If the sender PC is a deoptimization point, get the original PC.
+ if (senderNm.isDeoptEntry(getPC()) ||
+ senderNm.isDeoptMhEntry(getPC())) {
+ // DEBUG_ONLY(verifyDeoptOriginalPc(senderNm, raw_unextendedSp));
}
}
}
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java
index afe4d2a50b5..eb090565a9b 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java
@@ -81,53 +81,48 @@ public class HeapSummary extends Tool {
System.out.println();
System.out.println("Heap Usage:");
- if (heap instanceof SharedHeap) {
- SharedHeap sharedHeap = (SharedHeap) heap;
- if (sharedHeap instanceof GenCollectedHeap) {
- GenCollectedHeap genHeap = (GenCollectedHeap) sharedHeap;
- for (int n = 0; n < genHeap.nGens(); n++) {
- Generation gen = genHeap.getGen(n);
- if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
- System.out.println("New Generation (Eden + 1 Survivor Space):");
- printGen(gen);
+ if (heap instanceof GenCollectedHeap) {
+ GenCollectedHeap genHeap = (GenCollectedHeap) heap;
+ for (int n = 0; n < genHeap.nGens(); n++) {
+ Generation gen = genHeap.getGen(n);
+ if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
+ System.out.println("New Generation (Eden + 1 Survivor Space):");
+ printGen(gen);
- ContiguousSpace eden = ((DefNewGeneration)gen).eden();
- System.out.println("Eden Space:");
- printSpace(eden);
+ ContiguousSpace eden = ((DefNewGeneration)gen).eden();
+ System.out.println("Eden Space:");
+ printSpace(eden);
- ContiguousSpace from = ((DefNewGeneration)gen).from();
- System.out.println("From Space:");
- printSpace(from);
+ ContiguousSpace from = ((DefNewGeneration)gen).from();
+ System.out.println("From Space:");
+ printSpace(from);
- ContiguousSpace to = ((DefNewGeneration)gen).to();
- System.out.println("To Space:");
- printSpace(to);
- } else {
- System.out.println(gen.name() + ":");
- printGen(gen);
- }
+ ContiguousSpace to = ((DefNewGeneration)gen).to();
+ System.out.println("To Space:");
+ printSpace(to);
+ } else {
+ System.out.println(gen.name() + ":");
+ printGen(gen);
}
- } else if (sharedHeap instanceof G1CollectedHeap) {
- G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
- G1MonitoringSupport g1mm = g1h.g1mm();
- long edenRegionNum = g1mm.edenRegionNum();
- long survivorRegionNum = g1mm.survivorRegionNum();
- HeapRegionSetBase oldSet = g1h.oldSet();
- HeapRegionSetBase humongousSet = g1h.humongousSet();
- long oldRegionNum = oldSet.count().length()
- + humongousSet.count().capacity() / HeapRegion.grainBytes();
- printG1Space("G1 Heap:", g1h.n_regions(),
- g1h.used(), g1h.capacity());
- System.out.println("G1 Young Generation:");
- printG1Space("Eden Space:", edenRegionNum,
- g1mm.edenUsed(), g1mm.edenCommitted());
- printG1Space("Survivor Space:", survivorRegionNum,
- g1mm.survivorUsed(), g1mm.survivorCommitted());
- printG1Space("G1 Old Generation:", oldRegionNum,
- g1mm.oldUsed(), g1mm.oldCommitted());
- } else {
- throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
}
+ } else if (heap instanceof G1CollectedHeap) {
+ G1CollectedHeap g1h = (G1CollectedHeap) heap;
+ G1MonitoringSupport g1mm = g1h.g1mm();
+ long edenRegionNum = g1mm.edenRegionNum();
+ long survivorRegionNum = g1mm.survivorRegionNum();
+ HeapRegionSetBase oldSet = g1h.oldSet();
+ HeapRegionSetBase humongousSet = g1h.humongousSet();
+ long oldRegionNum = oldSet.count().length()
+ + humongousSet.count().capacity() / HeapRegion.grainBytes();
+ printG1Space("G1 Heap:", g1h.n_regions(),
+ g1h.used(), g1h.capacity());
+ System.out.println("G1 Young Generation:");
+ printG1Space("Eden Space:", edenRegionNum,
+ g1mm.edenUsed(), g1mm.edenCommitted());
+ printG1Space("Survivor Space:", survivorRegionNum,
+ g1mm.survivorUsed(), g1mm.survivorCommitted());
+ printG1Space("G1 Old Generation:", oldRegionNum,
+ g1mm.oldUsed(), g1mm.oldCommitted());
} else if (heap instanceof ParallelScavengeHeap) {
ParallelScavengeHeap psh = (ParallelScavengeHeap) heap;
PSYoungGen youngGen = psh.youngGen();
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/action/HSDBActionManager.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/action/HSDBActionManager.java
index f234231e363..1ebcb1b3882 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/action/HSDBActionManager.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/action/HSDBActionManager.java
@@ -32,10 +32,12 @@ import com.sun.java.swing.action.ActionManager;
public class HSDBActionManager extends ActionManager {
public static ActionManager getInstance() {
- if (manager == null) {
- manager = new HSDBActionManager();
+ ActionManager m = ActionManager.getInstance();
+ if (m == null) {
+ m = new HSDBActionManager();
+ ActionManager.setInstance(m);
}
- return manager;
+ return m;
}
protected void addActions() {
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
index 60df0ffe92b..319aecdaa31 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -799,6 +799,18 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
writeObjectID(klass.getJavaMirror());
ClassData cd = (ClassData) classDataCache.get(klass);
+ if (cd == null) {
+ // The class is not present in the system dictionary, probably a generated lambda class.
+ // Add it to cache here
+ if (klass instanceof InstanceKlass) {
+ InstanceKlass ik = (InstanceKlass) klass;
+ List<Field> fields = getInstanceFields(ik);
+ int instSize = getSizeForFields(fields);
+ cd = new ClassData(instSize, fields);
+ classDataCache.put(ik, cd);
+ }
+ }
+
if (Assert.ASSERTS_ENABLED) {
Assert.that(cd != null, "can not get class data for " + klass.getName().asString() + klass.getAddress());
}
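The added block populates `classDataCache` on a miss — covering classes, such as generated lambda classes, that never reached the system dictionary — so the assertion that follows can hold. The same fill-on-miss shape is commonly written with `Map.computeIfAbsent`; a hedged sketch assuming stand-ins for the patch's `getInstanceFields`/`getSizeForFields` helpers:

```java
import java.util.HashMap;
import java.util.Map;

final class ClassDataCache {
    static final class ClassData {
        final int instSize;
        ClassData(int instSize) { this.instSize = instSize; }
    }

    private final Map<String, ClassData> cache = new HashMap<>();

    // Compute and insert on a miss so later lookups never see null.
    ClassData get(String klassName) {
        return cache.computeIfAbsent(klassName,
                k -> new ClassData(instanceSizeOf(k)));
    }

    private int instanceSizeOf(String klassName) {
        return 0;  // placeholder for getSizeForFields(getInstanceFields(...))
    }
}
```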
diff --git a/hotspot/make/Makefile b/hotspot/make/Makefile
index d9c0dcf8125..4c22f27b4bc 100644
--- a/hotspot/make/Makefile
+++ b/hotspot/make/Makefile
@@ -98,7 +98,7 @@ COMMON_VM_DEBUG_TARGETS=debug debug1 docs export_debug
COMMON_VM_OPTIMIZED_TARGETS=optimized optimized1 docs export_optimized
# JDK directory list
-JDK_DIRS=bin include jre lib demo
+JDK_DIRS=bin include lib demo
all: all_product all_fastdebug
@@ -373,33 +373,33 @@ $(EXPORT_SERVER_DIR)/%.map: $(C2_BUILD_DIR)/%.map
$(install-file)
$(EXPORT_LIB_DIR)/%.lib: $(C2_BUILD_DIR)/%.lib
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
+$(EXPORT_BIN_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.dll: $(C2_BUILD_DIR)/%.dll
+$(EXPORT_BIN_DIR)/%.dll: $(C2_BUILD_DIR)/%.dll
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C2_BUILD_DIR)/%.pdb
+$(EXPORT_BIN_DIR)/%.pdb: $(C2_BUILD_DIR)/%.pdb
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.map: $(C2_BUILD_DIR)/%.map
+$(EXPORT_BIN_DIR)/%.map: $(C2_BUILD_DIR)/%.map
$(install-file)
# Unix
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(C2_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
+$(EXPORT_LIB_ARCH_DIR)/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_SERVER_DIR)/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_SERVER_DIR)/64/%.debuginfo: $(C2_BUILD_DIR)/%.debuginfo
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
+$(EXPORT_LIB_ARCH_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/64/%.diz: $(C2_BUILD_DIR)/%.diz
$(install-file)
# MacOS X
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM
+$(EXPORT_LIB_ARCH_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM
$(install-dir)
$(EXPORT_SERVER_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM
$(install-dir)
@@ -423,33 +423,33 @@ $(EXPORT_CLIENT_DIR)/%.map: $(C1_BUILD_DIR)/%.map
$(install-file)
$(EXPORT_LIB_DIR)/%.lib: $(C1_BUILD_DIR)/%.lib
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
+$(EXPORT_BIN_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.dll: $(C1_BUILD_DIR)/%.dll
+$(EXPORT_BIN_DIR)/%.dll: $(C1_BUILD_DIR)/%.dll
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C1_BUILD_DIR)/%.pdb
+$(EXPORT_BIN_DIR)/%.pdb: $(C1_BUILD_DIR)/%.pdb
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.map: $(C1_BUILD_DIR)/%.map
+$(EXPORT_BIN_DIR)/%.map: $(C1_BUILD_DIR)/%.map
$(install-file)
# Unix
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX): $(C1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
+$(EXPORT_LIB_ARCH_DIR)/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_CLIENT_DIR)/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_CLIENT_DIR)/64/%.debuginfo: $(C1_BUILD_DIR)/%.debuginfo
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
+$(EXPORT_LIB_ARCH_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_BUILD_DIR)/%.diz
$(install-file)
# MacOS X
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(C1_BUILD_DIR)/%.dSYM
+$(EXPORT_LIB_ARCH_DIR)/%.dSYM: $(C1_BUILD_DIR)/%.dSYM
$(install-dir)
$(EXPORT_CLIENT_DIR)/%.dSYM: $(C1_BUILD_DIR)/%.dSYM
$(install-dir)
@@ -473,28 +473,28 @@ $(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_BUILD_DIR)/%.map
$(install-file)
$(EXPORT_LIB_DIR)/%.lib: $(MINIMAL1_BUILD_DIR)/%.lib
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
+$(EXPORT_BIN_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.dll: $(MINIMAL1_BUILD_DIR)/%.dll
+$(EXPORT_BIN_DIR)/%.dll: $(MINIMAL1_BUILD_DIR)/%.dll
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MINIMAL1_BUILD_DIR)/%.pdb
+$(EXPORT_BIN_DIR)/%.pdb: $(MINIMAL1_BUILD_DIR)/%.pdb
$(install-file)
-$(EXPORT_JRE_BIN_DIR)/%.map: $(MINIMAL1_BUILD_DIR)/%.map
+$(EXPORT_BIN_DIR)/%.map: $(MINIMAL1_BUILD_DIR)/%.map
$(install-file)
# Unix
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
+$(EXPORT_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_BUILD_DIR)/%.debuginfo
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
+$(EXPORT_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
$(install-file)
@@ -509,11 +509,11 @@ $(EXPORT_LIB_DIR)/%.jar: $(ZERO_BUILD_DIR)/../generated/%.jar
$(EXPORT_INCLUDE_DIR)/%: $(ZERO_BUILD_DIR)/../generated/jvmtifiles/%
$(install-file)
# Unix
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
+$(EXPORT_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
+$(EXPORT_LIB_ARCH_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
@@ -522,7 +522,7 @@ $(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
$(EXPORT_SERVER_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
$(install-file)
# MacOS X
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM
+$(EXPORT_LIB_ARCH_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM
$(install-dir)
$(EXPORT_SERVER_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM
$(install-dir)
@@ -536,11 +536,11 @@ $(EXPORT_LIB_DIR)/%.jar: $(CORE_BUILD_DIR)/../generated/%.jar
$(EXPORT_INCLUDE_DIR)/%: $(CORE_BUILD_DIR)/../generated/jvmtifiles/%
$(install-file)
# Unix
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(CORE_BUILD_DIR)/%.debuginfo
+$(EXPORT_LIB_ARCH_DIR)/%.debuginfo: $(CORE_BUILD_DIR)/%.debuginfo
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(CORE_BUILD_DIR)/%.diz
+$(EXPORT_LIB_ARCH_DIR)/%.diz: $(CORE_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
@@ -558,11 +558,11 @@ $(EXPORT_LIB_DIR)/%.jar: $(SHARK_BUILD_DIR)/../generated/%.jar
$(EXPORT_INCLUDE_DIR)/%: $(SHARK_BUILD_DIR)/../generated/jvmtifiles/%
$(install-file)
# Unix
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+$(EXPORT_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_BUILD_DIR)/%.debuginfo
+$(EXPORT_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_BUILD_DIR)/%.debuginfo
$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
+$(EXPORT_LIB_ARCH_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
@@ -571,7 +571,7 @@ $(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_BUILD_DIR)/%.debuginfo
$(EXPORT_SERVER_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
$(install-file)
# MacOS X
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(SHARK_BUILD_DIR)/%.dSYM
+$(EXPORT_LIB_ARCH_DIR)/%.dSYM: $(SHARK_BUILD_DIR)/%.dSYM
$(install-dir)
$(EXPORT_SERVER_DIR)/%.dSYM: $(SHARK_BUILD_DIR)/%.dSYM
$(install-dir)
diff --git a/hotspot/make/aix/makefiles/adlc.make b/hotspot/make/aix/makefiles/adlc.make
index abdee74f878..63dc981410f 100644
--- a/hotspot/make/aix/makefiles/adlc.make
+++ b/hotspot/make/aix/makefiles/adlc.make
@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -140,13 +140,7 @@ ADLCFLAGS += $(SYSDEFS)
# Note "+="; it is a hook so flags.make can add more flags, like -g or -DFOO.
ADLCFLAGS += -q -T
-# Normally, debugging is done directly on the ad_*.cpp files.
-# But -g will put #line directives in those files pointing back to .ad.
-# Some builds of gcc 3.2 have a bug that gets tickled by the extra #line directives
-# so skip it for 3.2 and ealier.
-ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
ADLCFLAGS += -g
-endif
ifdef LP64
ADLCFLAGS += -D_LP64
diff --git a/hotspot/make/aix/makefiles/defs.make b/hotspot/make/aix/makefiles/defs.make
index 9bf5793e773..f902830eeb2 100644
--- a/hotspot/make/aix/makefiles/defs.make
+++ b/hotspot/make/aix/makefiles/defs.make
@@ -184,17 +184,17 @@ LIBRARY_SUFFIX=so
EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.so
-EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
+EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
# ifeq ($(ZIP_DEBUGINFO_FILES),1)
-# EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
+# EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.diz
# else
-# EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+# EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.debuginfo
# endif
#endif
-EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
-EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
-EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
+EXPORT_SERVER_DIR = $(EXPORT_LIB_ARCH_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_LIB_ARCH_DIR)/client
+EXPORT_MINIMAL_DIR = $(EXPORT_LIB_ARCH_DIR)/minimal
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
diff --git a/hotspot/make/aix/makefiles/mapfile-vers-debug b/hotspot/make/aix/makefiles/mapfile-vers-debug
index 81c7c5b7fd2..841585d27d8 100644
--- a/hotspot/make/aix/makefiles/mapfile-vers-debug
+++ b/hotspot/make/aix/makefiles/mapfile-vers-debug
@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
-#
+#
#
# Define public interface.
@@ -107,6 +107,7 @@ SUNWprivate_1.1 {
JVM_GetClassTypeAnnotations;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
+ JVM_GetSimpleBinaryName;
JVM_GetEnclosingMethodInfo;
JVM_GetFieldIxModifiers;
JVM_GetFieldTypeAnnotations;
diff --git a/hotspot/make/aix/makefiles/mapfile-vers-product b/hotspot/make/aix/makefiles/mapfile-vers-product
index 745901bb4d6..7b0ab9194fe 100644
--- a/hotspot/make/aix/makefiles/mapfile-vers-product
+++ b/hotspot/make/aix/makefiles/mapfile-vers-product
@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
-#
+#
#
# Define public interface.
@@ -107,6 +107,7 @@ SUNWprivate_1.1 {
JVM_GetClassTypeAnnotations;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
+ JVM_GetSimpleBinaryName;
JVM_GetEnclosingMethodInfo;
JVM_GetFieldIxModifiers;
JVM_GetInheritedAccessControlContext;
diff --git a/hotspot/make/aix/makefiles/ppc64.make b/hotspot/make/aix/makefiles/ppc64.make
index 2636a4b9742..2634d8adcc3 100644
--- a/hotspot/make/aix/makefiles/ppc64.make
+++ b/hotspot/make/aix/makefiles/ppc64.make
@@ -1,6 +1,6 @@
#
-# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
-# Copyright 2012, 2013 SAP AG. All rights reserved.
+# Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2015 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -71,9 +71,6 @@ OPT_CFLAGS += -qstrict
OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
OPT_CFLAGS/sharedRuntimeTrans.o = $(OPT_CFLAGS/NOOPT)
-# xlc 10.01 parameters for ipa compile.
-QIPA_COMPILE=$(if $(CXX_IS_V10),-qipa)
-
# Xlc 10.1 parameters for aggressive optimization:
# - qhot=level=1: Most aggressive loop optimizations.
# - qignerrno: Assume errno is not modified by system calls.
@@ -88,7 +85,7 @@ QV10_OPT_CONSERVATIVE=$(if $(CXX_IS_V10),-qhot=level=1 -qignerrno -qinline)
OPT_CFLAGS/synchronizer.o = $(OPT_CFLAGS) -qnoinline
# Set all the xlC V10.1 options here.
-OPT_CFLAGS += $(QIPA_COMPILE) $(QV10_OPT) $(QV10_OPT_AGGRESSIVE)
+OPT_CFLAGS += $(QV10_OPT) $(QV10_OPT_AGGRESSIVE)
export OBJECT_MODE=64
diff --git a/hotspot/make/aix/makefiles/vm.make b/hotspot/make/aix/makefiles/vm.make
index 4d879365f64..cf10de3e214 100644
--- a/hotspot/make/aix/makefiles/vm.make
+++ b/hotspot/make/aix/makefiles/vm.make
@@ -122,7 +122,7 @@ LIBS += -lm -ldl -lpthread
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
-JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
+JDK_LIBDIR = $(JAVA_HOME)/lib/$(LIBARCH)
#----------------------------------------------------------------------
# jvm_db & dtrace
diff --git a/hotspot/make/aix/makefiles/xlc.make b/hotspot/make/aix/makefiles/xlc.make
index 17a71b60124..cf8d085c39a 100644
--- a/hotspot/make/aix/makefiles/xlc.make
+++ b/hotspot/make/aix/makefiles/xlc.make
@@ -1,6 +1,6 @@
#
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
-# Copyright (c) 2012, 2013 SAP. All rights reserved.
+# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2015 SAP. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -34,13 +34,17 @@ HOSTCC = $(CC)
AS = $(CC) -c
-# get xlc version
-CXX_VERSION := $(shell $(CXX) -qversion 2>&1 | sed -n 's/.*Version: \([0-9.]*\)/\1/p')
+# get xlc version which comes as VV.RR.MMMM.LLLL where 'VV' is the version,
+# 'RR' is the release, 'MMMM' is the modification and 'LLLL' is the level.
+# We only use 'VV.RR.LLLL' to avoid integer overflows in bash when comparing
+# the version numbers (some shells only support 32-bit integer compares!).
+CXX_VERSION := $(shell $(CXX) -qversion 2>&1 | \
+ sed -n 's/.*Version: \([0-9]\{2\}\).\([0-9]\{2\}\).[0-9]\{4\}.\([0-9]\{4\}\)/\1\2\3/p')
# xlc 08.00.0000.0023 and higher supports -qtune=balanced
-CXX_SUPPORTS_BALANCED_TUNING=$(shell if [ $(subst .,,$(CXX_VERSION)) -ge 080000000023 ] ; then echo "true" ; fi)
+CXX_SUPPORTS_BALANCED_TUNING := $(shell if [ $(CXX_VERSION) -ge 08000023 ] ; then echo "true" ; fi)
# xlc 10.01 is used with aggressive optimizations to boost performance
-CXX_IS_V10=$(shell if [ $(subst .,,$(CXX_VERSION)) -ge 100100000000 ] ; then echo "true" ; fi)
+CXX_IS_V10 := $(shell if [ $(CXX_VERSION) -ge 10010000 ] ; then echo "true" ; fi)
# check for precompiled headers support
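The rewritten check normalizes `VV.RR.MMMM.LLLL` into the fixed-width digit string `VVRRLLLL`, so a single shell integer comparison suffices and never exceeds eight digits (the old concatenated form, e.g. `080000000023`, overflowed 32-bit shell arithmetic). The same fixed-width packing expressed in Java, as an illustrative sketch rather than a port of the makefile:

```java
final class XlcVersion {
    // Pack VV.RR.MMMM.LLLL into VV*1_000_000 + RR*10_000 + LLLL,
    // dropping the MMMM field exactly as the makefile's sed expression does.
    static int pack(String version) {
        String[] p = version.split("\\.");   // e.g. "10.01.0000.0003"
        return Integer.parseInt(p[0]) * 1_000_000
             + Integer.parseInt(p[1]) * 10_000
             + Integer.parseInt(p[3]);
    }

    static boolean supportsBalancedTuning(String v) {
        return pack(v) >= 8_000_023;   // mirrors the makefile's 08000023 threshold
    }
}
```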
diff --git a/hotspot/make/bsd/makefiles/defs.make b/hotspot/make/bsd/makefiles/defs.make
index 6e427e8bd5a..74f2fedd8b7 100644
--- a/hotspot/make/bsd/makefiles/defs.make
+++ b/hotspot/make/bsd/makefiles/defs.make
@@ -265,23 +265,23 @@ endif
EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.so
-EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
+EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
+ EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.diz
else
ifeq ($(OS_VENDOR), Darwin)
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
+ EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
else
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+ EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.debuginfo
endif
endif
endif
-EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
-EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
-EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
+EXPORT_SERVER_DIR = $(EXPORT_LIB_ARCH_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_LIB_ARCH_DIR)/client
+EXPORT_MINIMAL_DIR = $(EXPORT_LIB_ARCH_DIR)/minimal
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
@@ -324,34 +324,34 @@ endif
# Serviceability Binaries
# No SA Support for PPC, IA64, ARM or zero
-ADD_SA_BINARIES/x86 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
+ADD_SA_BINARIES/x86 = $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
$(EXPORT_LIB_DIR)/sa-jdi.jar
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
- ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
+ ADD_SA_BINARIES/x86 += $(EXPORT_LIB_ARCH_DIR)/libsaproc.diz
else
ifeq ($(OS_VENDOR), Darwin)
- ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
+ ADD_SA_BINARIES/x86 += $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
else
- ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+ ADD_SA_BINARIES/x86 += $(EXPORT_LIB_ARCH_DIR)/libsaproc.debuginfo
endif
endif
endif
-ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
+ADD_SA_BINARIES/sparc = $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
$(EXPORT_LIB_DIR)/sa-jdi.jar
-ADD_SA_BINARIES/universal = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
+ADD_SA_BINARIES/universal = $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
$(EXPORT_LIB_DIR)/sa-jdi.jar
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
- ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
+ ADD_SA_BINARIES/universal += $(EXPORT_LIB_ARCH_DIR)/libsaproc.diz
else
ifeq ($(OS_VENDOR), Darwin)
- ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
+ ADD_SA_BINARIES/universal += $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
else
- ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+ ADD_SA_BINARIES/universal += $(EXPORT_LIB_ARCH_DIR)/libsaproc.debuginfo
endif
endif
endif
@@ -388,25 +388,25 @@ ifeq ($(OS_VENDOR), Darwin)
endif
# Binaries to 'universalize' if built
- UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
- UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
- UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
- UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
+ UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
+ UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
+ UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
+ UNIVERSAL_LIPO_LIST += $(EXPORT_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
# Files to simply copy in place
- UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
- UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
+ UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/Xusage.txt
+ UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/client/Xusage.txt
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
- UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.diz
- UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.diz
- UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.diz
- UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.diz
+ UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/libjvm.diz
+ UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/client/libjvm.diz
+ UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/libjsig.diz
+ UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/libsaproc.diz
else
- UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX).dSYM
- UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX).dSYM
- UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
- UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
+ UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX).dSYM
+ UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX).dSYM
+ UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
+ UNIVERSAL_COPY_LIST += $(EXPORT_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
endif
endif
diff --git a/hotspot/make/bsd/makefiles/gcc.make b/hotspot/make/bsd/makefiles/gcc.make
index 1877245e435..6844d6169ae 100644
--- a/hotspot/make/bsd/makefiles/gcc.make
+++ b/hotspot/make/bsd/makefiles/gcc.make
@@ -313,22 +313,13 @@ endif
# Work around some compiler bugs.
ifeq ($(USE_CLANG), true)
- # Clang 4.2
- ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1)
+ # Clang <= 6.1
+ ifeq ($(shell expr \
+ $(CC_VER_MAJOR) \< 6 \| \
+ \( $(CC_VER_MAJOR) = 6 \& $(CC_VER_MINOR) \<= 1 \) \
+ ), 1)
OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
OPT_CFLAGS/unsafe.o += -O1
- # Clang 5.0
- else ifeq ($(shell expr $(CC_VER_MAJOR) = 5 \& $(CC_VER_MINOR) = 0), 1)
- OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
- OPT_CFLAGS/unsafe.o += -O1
- # Clang 5.1
- else ifeq ($(shell expr $(CC_VER_MAJOR) = 5 \& $(CC_VER_MINOR) = 1), 1)
- OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
- OPT_CFLAGS/unsafe.o += -O1
- # Clang 6.0
- else ifeq ($(shell expr $(CC_VER_MAJOR) = 6 \& $(CC_VER_MINOR) = 0), 1)
- OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
- OPT_CFLAGS/unsafe.o += -O1
else
$(error "Update compiler workarounds for Clang $(CC_VER_MAJOR).$(CC_VER_MINOR)")
endif
@@ -336,7 +327,7 @@ else
# 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 3), 1)
OPT_CFLAGS/mulnode.o += $(OPT_CFLAGS/NOOPT)
- endif
+ endif
endif
# Flags for generating make dependency flags.
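The rewrite keeps the deliberate `$(error ...)` backstop for unrecognized compilers while folding four version-specific branches into one range test. The predicate itself is just a lexicographic compare on the (major, minor) pair; in Java:

```java
final class ClangCheck {
    // True for every Clang up to and including 6.1, matching the makefile's expr.
    static boolean needsLoopTransformWorkaround(int major, int minor) {
        return major < 6 || (major == 6 && minor <= 1);
    }
}
```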
diff --git a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug
index 99880a19f4a..d7df2cb7b1d 100644
--- a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug
@@ -105,6 +105,7 @@
_JVM_GetClassTypeAnnotations
_JVM_GetDeclaredClasses
_JVM_GetDeclaringClass
+ _JVM_GetSimpleBinaryName
_JVM_GetEnclosingMethodInfo
_JVM_GetFieldIxModifiers
_JVM_GetFieldTypeAnnotations
diff --git a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product
index 99880a19f4a..d7df2cb7b1d 100644
--- a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product
@@ -105,6 +105,7 @@
_JVM_GetClassTypeAnnotations
_JVM_GetDeclaredClasses
_JVM_GetDeclaringClass
+ _JVM_GetSimpleBinaryName
_JVM_GetEnclosingMethodInfo
_JVM_GetFieldIxModifiers
_JVM_GetFieldTypeAnnotations
diff --git a/hotspot/make/bsd/makefiles/mapfile-vers-debug b/hotspot/make/bsd/makefiles/mapfile-vers-debug
index 950cf57a64a..af2fe0b90bc 100644
--- a/hotspot/make/bsd/makefiles/mapfile-vers-debug
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-debug
@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
-#
+#
#
# Define public interface.
@@ -107,6 +107,7 @@ SUNWprivate_1.1 {
JVM_GetClassTypeAnnotations;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
+ JVM_GetSimpleBinaryName;
JVM_GetEnclosingMethodInfo;
JVM_GetFieldIxModifiers;
JVM_GetFieldTypeAnnotations;
diff --git a/hotspot/make/bsd/makefiles/mapfile-vers-product b/hotspot/make/bsd/makefiles/mapfile-vers-product
index 950cf57a64a..af2fe0b90bc 100644
--- a/hotspot/make/bsd/makefiles/mapfile-vers-product
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-product
@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
-#
+#
#
# Define public interface.
@@ -107,6 +107,7 @@ SUNWprivate_1.1 {
JVM_GetClassTypeAnnotations;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
+ JVM_GetSimpleBinaryName;
JVM_GetEnclosingMethodInfo;
JVM_GetFieldIxModifiers;
JVM_GetFieldTypeAnnotations;
diff --git a/hotspot/make/bsd/makefiles/universal.gmk b/hotspot/make/bsd/makefiles/universal.gmk
index 40868adf849..9fdf3832e11 100644
--- a/hotspot/make/bsd/makefiles/universal.gmk
+++ b/hotspot/make/bsd/makefiles/universal.gmk
@@ -54,12 +54,12 @@ all_debug_universal:
# Consolidate architecture builds into a single Universal binary
universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
- $(RM) -r $(EXPORT_PATH)/jre/lib/{i386,amd64}
+ $(RM) -r $(EXPORT_PATH)/lib/{i386,amd64}
# Package built libraries in a universal binary
$(UNIVERSAL_LIPO_LIST):
- BUILT_LIPO_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`" || test $$? = "1"; \
+ BUILT_LIPO_FILES="`find $(EXPORT_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_LIB_DIR)/,,$@) 2>/dev/null`" || test $$? = "1"; \
if [ -n "$${BUILT_LIPO_FILES}" ]; then \
$(MKDIR) -p $(shell dirname $@); \
lipo -create -output $@ $${BUILT_LIPO_FILES}; \
@@ -70,7 +70,7 @@ $(UNIVERSAL_LIPO_LIST):
# - copies directories; including empty dirs
# - copies files, symlinks, other non-directory files
$(UNIVERSAL_COPY_LIST):
- BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`" || test $$? = "1"; \
+ BUILT_COPY_FILES="`find $(EXPORT_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_LIB_DIR)/,,$@) -prune 2>/dev/null`" || test $$? = "1"; \
if [ -n "$${BUILT_COPY_FILES}" ]; then \
for i in $${BUILT_COPY_FILES}; do \
$(MKDIR) -p $(shell dirname $@); \
@@ -80,21 +80,21 @@ $(UNIVERSAL_COPY_LIST):
# Replace arch specific binaries with universal binaries
-# Do not touch jre/lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
+# Do not touch lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
# That symbolic link belongs to the 'jdk' build.
export_universal:
- $(RM) -r $(EXPORT_PATH)/jre/lib/{i386,amd64}
- $(RM) -r $(JDK_IMAGE_DIR)/jre/lib/{i386,amd64}
+ $(RM) -r $(EXPORT_PATH)/lib/{i386,amd64}
+ $(RM) -r $(JDK_IMAGE_DIR)/lib/{i386,amd64}
($(CD) $(EXPORT_PATH) && \
$(TAR) -cf - *) | \
($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xpf -)
# Overlay universal binaries
-# Do not touch jre/lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
+# Do not touch lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
# That symbolic link belongs to the 'jdk' build.
copy_universal:
- $(RM) -r $(JDK_IMAGE_DIR)$(COPY_SUBDIR)/jre/lib/{i386,amd64}
+ $(RM) -r $(JDK_IMAGE_DIR)$(COPY_SUBDIR)/lib/{i386,amd64}
($(CD) $(EXPORT_PATH)$(COPY_SUBDIR) && \
$(TAR) -cf - *) | \
($(CD) $(JDK_IMAGE_DIR)$(COPY_SUBDIR) && $(TAR) -xpf -)
diff --git a/hotspot/make/build.sh b/hotspot/make/build.sh
index d05ce4474ac..c6570795011 100644
--- a/hotspot/make/build.sh
+++ b/hotspot/make/build.sh
@@ -40,7 +40,7 @@ if [ $# -lt 1 ]; then
exit 1
fi
-if [ "${JAVA_HOME-}" = "" -o ! -d "${JAVA_HOME-}" -o ! -d ${JAVA_HOME-}/jre/lib/ ]; then
+if [ "${JAVA_HOME-}" = "" -o ! -d "${JAVA_HOME-}" ]; then
echo "JAVA_HOME needs to be set to a valid JDK path"
echo "JAVA_HOME: ${JAVA_HOME-}"
exit 1
diff --git a/hotspot/make/defs.make b/hotspot/make/defs.make
index 6f4dd97cd03..a03ada784e9 100644
--- a/hotspot/make/defs.make
+++ b/hotspot/make/defs.make
@@ -350,15 +350,13 @@ MAKE_ARGS += BOOT_JDK_SOURCETARGET="$(BOOT_JDK_SOURCETARGET)"
EXPORT_INCLUDE_DIR = $(EXPORT_PATH)/include
EXPORT_DOCS_DIR = $(EXPORT_PATH)/docs
EXPORT_LIB_DIR = $(EXPORT_PATH)/lib
-EXPORT_JRE_DIR = $(EXPORT_PATH)/jre
-EXPORT_JRE_BIN_DIR = $(EXPORT_JRE_DIR)/bin
-EXPORT_JRE_LIB_DIR = $(EXPORT_JRE_DIR)/lib
-EXPORT_JRE_LIB_ARCH_DIR = $(EXPORT_JRE_LIB_DIR)/$(LIBARCH)
+EXPORT_BIN_DIR = $(EXPORT_PATH)/bin
+EXPORT_LIB_ARCH_DIR = $(EXPORT_LIB_DIR)/$(LIBARCH)
# non-universal macosx builds need to appear universal
ifeq ($(OS_VENDOR), Darwin)
ifneq ($(MACOSX_UNIVERSAL), true)
- EXPORT_JRE_LIB_ARCH_DIR = $(EXPORT_JRE_LIB_DIR)
+ EXPORT_LIB_ARCH_DIR = $(EXPORT_LIB_DIR)
endif
endif
@@ -370,4 +368,3 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
.PHONY: $(HS_ALT_MAKE)/defs.make
-
diff --git a/hotspot/make/linux/makefiles/defs.make b/hotspot/make/linux/makefiles/defs.make
index c85f1ed5522..472621ffbea 100644
--- a/hotspot/make/linux/makefiles/defs.make
+++ b/hotspot/make/linux/makefiles/defs.make
@@ -244,17 +244,17 @@ LIBRARY_SUFFIX=so
EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.so
-EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
+EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
+ EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.diz
else
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+ EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.debuginfo
endif
endif
-EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
-EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
-EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
+EXPORT_SERVER_DIR = $(EXPORT_LIB_ARCH_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_LIB_ARCH_DIR)/client
+EXPORT_MINIMAL_DIR = $(EXPORT_LIB_ARCH_DIR)/minimal
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
@@ -295,14 +295,14 @@ endif
# Serviceability Binaries
-ADD_SA_BINARIES/DEFAULT = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
+ADD_SA_BINARIES/DEFAULT = $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
$(EXPORT_LIB_DIR)/sa-jdi.jar
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
- ADD_SA_BINARIES/DEFAULT += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
+ ADD_SA_BINARIES/DEFAULT += $(EXPORT_LIB_ARCH_DIR)/libsaproc.diz
else
- ADD_SA_BINARIES/DEFAULT += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+ ADD_SA_BINARIES/DEFAULT += $(EXPORT_LIB_ARCH_DIR)/libsaproc.debuginfo
endif
endif
diff --git a/hotspot/make/linux/makefiles/mapfile-vers-debug b/hotspot/make/linux/makefiles/mapfile-vers-debug
index 950cf57a64a..af2fe0b90bc 100644
--- a/hotspot/make/linux/makefiles/mapfile-vers-debug
+++ b/hotspot/make/linux/makefiles/mapfile-vers-debug
@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
-#
+#
#
# Define public interface.
@@ -107,6 +107,7 @@ SUNWprivate_1.1 {
JVM_GetClassTypeAnnotations;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
+ JVM_GetSimpleBinaryName;
JVM_GetEnclosingMethodInfo;
JVM_GetFieldIxModifiers;
JVM_GetFieldTypeAnnotations;
diff --git a/hotspot/make/linux/makefiles/mapfile-vers-product b/hotspot/make/linux/makefiles/mapfile-vers-product
index 950cf57a64a..af2fe0b90bc 100644
--- a/hotspot/make/linux/makefiles/mapfile-vers-product
+++ b/hotspot/make/linux/makefiles/mapfile-vers-product
@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
-#
+#
#
# Define public interface.
@@ -107,6 +107,7 @@ SUNWprivate_1.1 {
JVM_GetClassTypeAnnotations;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
+ JVM_GetSimpleBinaryName;
JVM_GetEnclosingMethodInfo;
JVM_GetFieldIxModifiers;
JVM_GetFieldTypeAnnotations;
diff --git a/hotspot/make/linux/makefiles/vm.make b/hotspot/make/linux/makefiles/vm.make
index 25679bffff0..d0737d41c1a 100644
--- a/hotspot/make/linux/makefiles/vm.make
+++ b/hotspot/make/linux/makefiles/vm.make
@@ -127,7 +127,7 @@ LIBS += -lm -ldl -lpthread
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
-JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
+JDK_LIBDIR = $(JAVA_HOME)/lib/$(LIBARCH)
#----------------------------------------------------------------------
# jvm_db & dtrace
diff --git a/hotspot/make/solaris/makefiles/defs.make b/hotspot/make/solaris/makefiles/defs.make
index 0a0fe381503..8fa75e904d7 100644
--- a/hotspot/make/solaris/makefiles/defs.make
+++ b/hotspot/make/solaris/makefiles/defs.make
@@ -224,17 +224,17 @@ LIBRARY_SUFFIX=so
EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.$(LIBRARY_SUFFIX)
-EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
+EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
+ EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.diz
else
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+ EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libjsig.debuginfo
endif
endif
-EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
-EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
+EXPORT_SERVER_DIR = $(EXPORT_LIB_ARCH_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_LIB_ARCH_DIR)/client
ifeq ($(JVM_VARIANT_SERVER),true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
@@ -295,12 +295,12 @@ ifeq ($(JVM_VARIANT_CLIENT),true)
endif
endif
-EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX)
+EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
+ EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libsaproc.diz
else
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+ EXPORT_LIST += $(EXPORT_LIB_ARCH_DIR)/libsaproc.debuginfo
endif
endif
EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar
diff --git a/hotspot/make/solaris/makefiles/dtrace.make b/hotspot/make/solaris/makefiles/dtrace.make
index 1da02fa01c4..376732d0e6b 100644
--- a/hotspot/make/solaris/makefiles/dtrace.make
+++ b/hotspot/make/solaris/makefiles/dtrace.make
@@ -130,8 +130,9 @@ endif
$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
@echo $(LOG_INFO) Making $@
$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
- $(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
- $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
+ $(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. $(EXTRA_CFLAGS) \
+ $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c \
+ $(EXTRA_LDFLAGS) -lc -lthread -ldoor
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
# Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
@@ -216,8 +217,9 @@ endif
$(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
@echo $(LOG_INFO) Making $@
- $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. \
- $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
+ $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. $(EXTRA_CFLAGS) \
+ $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c \
+ $(EXTRA_LDFLAGS) -lc -lthread -ldoor
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
diff --git a/hotspot/make/solaris/makefiles/jsig.make b/hotspot/make/solaris/makefiles/jsig.make
index d8acd4ee93a..392f78c1651 100644
--- a/hotspot/make/solaris/makefiles/jsig.make
+++ b/hotspot/make/solaris/makefiles/jsig.make
@@ -50,7 +50,9 @@ endif
$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
@echo $(LOG_INFO) Making signal interposition lib...
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
- $(LFLAGS_JSIG) -o $@ $(JSIGSRCDIR)/jsig.c -ldl
+ $(EXTRA_CFLAGS) \
+ $(LFLAGS_JSIG) $(EXTRA_LDFLAGS) \
+ -o $@ $(JSIGSRCDIR)/jsig.c -ldl
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
diff --git a/hotspot/make/solaris/makefiles/mapfile-vers b/hotspot/make/solaris/makefiles/mapfile-vers
index 1d7838465f2..47e1f535d22 100644
--- a/hotspot/make/solaris/makefiles/mapfile-vers
+++ b/hotspot/make/solaris/makefiles/mapfile-vers
@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
-#
+#
#
# Define public interface.
@@ -30,7 +30,7 @@ SUNWprivate_1.1 {
JNI_CreateJavaVM;
JNI_GetCreatedJavaVMs;
JNI_GetDefaultJavaVMInitArgs;
-
+
# JVM
JVM_ActiveProcessorCount;
JVM_ArrayCopy;
@@ -107,6 +107,7 @@ SUNWprivate_1.1 {
JVM_GetClassTypeAnnotations;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
+ JVM_GetSimpleBinaryName;
JVM_GetEnclosingMethodInfo;
JVM_GetFieldIxModifiers;
JVM_GetFieldTypeAnnotations;
diff --git a/hotspot/make/solaris/makefiles/product.make b/hotspot/make/solaris/makefiles/product.make
index 1f9224b17f0..7f378b17d95 100644
--- a/hotspot/make/solaris/makefiles/product.make
+++ b/hotspot/make/solaris/makefiles/product.make
@@ -37,6 +37,11 @@ ifndef USE_GCC
OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_
endif
+# Need extra inlining to get oop_ps_push_contents functions to perform well enough.
+ifndef USE_GCC
+OPT_CFLAGS/psPromotionManager.o = $(OPT_CFLAGS) -W2,-Ainline:inc=1000
+endif
+
# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
ifeq ("${Platform_compiler}", "sparcWorks")
diff --git a/hotspot/make/solaris/makefiles/saproc.make b/hotspot/make/solaris/makefiles/saproc.make
index c5558527121..3daecc4f20c 100644
--- a/hotspot/make/solaris/makefiles/saproc.make
+++ b/hotspot/make/solaris/makefiles/saproc.make
@@ -89,6 +89,17 @@ $(shell uname -r -v \
# when actually building on Nevada-B158 or earlier:
#SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER
+$(SADISOBJ): $(SADISSRCFILES)
+ $(QUIETLY) $(CC) \
+ $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
+ -I$(SASRCDIR) \
+ -I$(GENERATED) \
+ -I$(BOOT_JAVA_HOME)/include \
+ -I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
+ $(SOLARIS_11_B159_OR_LATER) \
+ $(EXTRA_CFLAGS) \
+ $(SADISSRCFILES) \
+ -c -o $(SADISOBJ)
$(LIBSAPROC): $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE)
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
@@ -103,23 +114,13 @@ $(LIBSAPROC): $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE)
-I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
$(SOLARIS_11_B159_OR_LATER) \
- $(SASRCFILES) \
+ $(EXTRA_CXXFLAGS) $(EXTRA_LDFLAGS) \
$(SADISOBJ) \
+ $(SASRCFILES) \
$(SA_LFLAGS) \
-o $@ \
-ldl -ldemangle -lthread -lc
-$(SADISOBJ): $(SADISSRCFILES)
- $(QUIETLY) $(CC) \
- $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
- -I$(SASRCDIR) \
- -I$(GENERATED) \
- -I$(BOOT_JAVA_HOME)/include \
- -I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
- $(SOLARIS_11_B159_OR_LATER) \
- $(SADISSRCFILES) \
- -c -o $(SADISOBJ)
-
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
diff --git a/hotspot/make/solaris/makefiles/vm.make b/hotspot/make/solaris/makefiles/vm.make
index ea4ec4c1bf5..04a2b608b35 100644
--- a/hotspot/make/solaris/makefiles/vm.make
+++ b/hotspot/make/solaris/makefiles/vm.make
@@ -148,7 +148,7 @@ LIBS += -lkstat
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
-JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
+JDK_LIBDIR = $(JAVA_HOME)/lib/$(LIBARCH)
#----------------------------------------------------------------------
# jvm_db & dtrace
@@ -288,6 +288,8 @@ else
endif
endif
+LFLAGS_VM += $(EXTRA_LDFLAGS)
+
ifdef USE_GCC
LINK_VM = $(LINK_LIB.CC)
else
diff --git a/hotspot/make/windows/makefiles/defs.make b/hotspot/make/windows/makefiles/defs.make
index d45dae17d82..ca212624d53 100644
--- a/hotspot/make/windows/makefiles/defs.make
+++ b/hotspot/make/windows/makefiles/defs.make
@@ -249,8 +249,8 @@ ifeq ($(BUILD_WIN_SA), 1)
endif
endif
-EXPORT_SERVER_DIR = $(EXPORT_JRE_BIN_DIR)/server
-EXPORT_CLIENT_DIR = $(EXPORT_JRE_BIN_DIR)/client
+EXPORT_SERVER_DIR = $(EXPORT_BIN_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_BIN_DIR)/client
ifeq ($(JVM_VARIANT_SERVER),true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
@@ -280,13 +280,13 @@ endif
EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
ifeq ($(BUILD_WIN_SA), 1)
- EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.$(LIBRARY_SUFFIX)
+ EXPORT_LIST += $(EXPORT_BIN_DIR)/sawindbg.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
- EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.diz
+ EXPORT_LIST += $(EXPORT_BIN_DIR)/sawindbg.diz
else
- EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.pdb
- EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.map
+ EXPORT_LIST += $(EXPORT_BIN_DIR)/sawindbg.pdb
+ EXPORT_LIST += $(EXPORT_BIN_DIR)/sawindbg.map
endif
endif
EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar
diff --git a/hotspot/make/windows/makefiles/sa.make b/hotspot/make/windows/makefiles/sa.make
index 0f948668646..2e41a022534 100644
--- a/hotspot/make/windows/makefiles/sa.make
+++ b/hotspot/make/windows/makefiles/sa.make
@@ -91,6 +91,9 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
SAWINDBG=sawindbg.dll
+# Resource file containing VERSIONINFO
+SA_Res_Files=.\version.sares
+
checkAndBuildSA:: $(SAWINDBG)
# These do not need to be optimized (don't run a lot of code) and it
@@ -126,10 +129,13 @@ SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG)
# Note that we do not keep sawindbg.obj around as it would then
# get included in the dumpbin command in build_vm_def.sh
+# Force resources to be rebuilt every time
+$(SA_Res_Files): FORCE
+
# In VS2005 or VS2008 the link command creates a .manifest file that we want
# to insert into the linked artifact so we do not need to track it separately.
# Use ";#2" for .dll and ";#1" for .exe in the MT command below:
-$(SAWINDBG): $(SASRCFILES)
+$(SAWINDBG): $(SASRCFILES) $(SA_Res_Files)
set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
$(CXX) @<<
-I"$(BootStrapDir)/include" -I"$(BootStrapDir)/include/win32"
@@ -138,7 +144,7 @@ $(SAWINDBG): $(SASRCFILES)
-out:$*.obj
<<
set LIB=$(SA_LIB)$(LIB)
- $(LD) -out:$@ -DLL sawindbg.obj sadis.obj dbgeng.lib $(SA_LFLAGS)
+ $(LD) -out:$@ -DLL sawindbg.obj sadis.obj dbgeng.lib $(SA_LFLAGS) $(SA_Res_Files)
!if "$(MT)" != ""
$(MT) -manifest $(@F).manifest -outputresource:$(@F);#2
!endif
@@ -150,6 +156,9 @@ $(SAWINDBG): $(SASRCFILES)
!endif
-@rm -f $*.obj
+{$(COMMONSRC)\os\windows\vm}.rc.sares:
+ @$(RC) $(RC_FLAGS) /D "HS_FNAME=$(SAWINDBG)" /fo"$@" $<
+
cleanall :
rm -rf $(GENERATED)/saclasses
rm -rf $(GENERATED)/sa-jdi.jar
diff --git a/hotspot/src/cpu/aarch64/vm/aarch64.ad b/hotspot/src/cpu/aarch64/vm/aarch64.ad
index 956a4ac3fd0..074e498936f 100644
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad
@@ -2614,6 +2614,8 @@ typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address
case INDINDEXSCALEDI2L:
case INDINDEXSCALEDOFFSETI2LN:
case INDINDEXSCALEDI2LN:
+ case INDINDEXOFFSETI2L:
+ case INDINDEXOFFSETI2LN:
scale = Address::sxtw(size);
break;
default:
@@ -5060,6 +5062,20 @@ operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 of
%}
%}
+operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
+%{
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(AddP (AddP reg (ConvI2L ireg)) off);
+ op_cost(INSN_COST);
+ format %{ "$reg, $ireg, $off I2L" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index($ireg);
+ scale(0x0);
+ disp($off);
+ %}
+%}
+
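For orientation: this new operand (and its narrow-oop twin added below) covers the unscaled int-index addressing shape, a 64-bit base plus a sign-extended 32-bit index plus a small unsigned displacement, which previously had to be built with separate add instructions. A minimal C++ sketch of the access pattern the matcher rule targets; the function and names are illustrative only, not part of the patch:

```cpp
#include <cstdint>

// Shape matched by AddP (AddP base (ConvI2L i)) off: 64-bit base,
// sign-extended int index, constant displacement, no scaling
// (the operand encodes scale(0x0)).
int8_t load_with_int_index(const int8_t* base, int32_t i) {
  // The matcher can now fold base + sxtw(i) + 16 into a single memory
  // operand instead of materializing the address with explicit adds.
  return *(base + static_cast<int64_t>(i) + 16);
}
```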
operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
@@ -5120,7 +5136,7 @@ operand indOffI(iRegP reg, immIOffset off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
- op_cost(INSN_COST);
+ op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
@@ -5190,6 +5206,21 @@ operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 o
%}
%}
+operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
+%{
+ predicate(Universe::narrow_oop_shift() == 0);
+ constraint(ALLOC_IN_RC(ptr_reg));
+ match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
+ op_cost(INSN_COST);
+ format %{ "$reg, $ireg, $off I2L\t# narrow" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index($ireg);
+ scale(0x0);
+ disp($off);
+ %}
+%}
+
operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
%{
predicate(Universe::narrow_oop_shift() == 0);
@@ -5452,8 +5483,8 @@ operand iRegL2I(iRegL reg) %{
// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address
-opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
- indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
+opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
+ indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
@@ -8346,7 +8377,7 @@ instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
match(Set dst (AddP src1 (ConvI2L src2)));
- ins_cost(INSN_COST);
+ ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}
ins_encode %{
diff --git a/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp
index f06d1108714..56c1df1facf 100644
--- a/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1469,7 +1469,7 @@ public:
f(op, 31, 29);
f(0b11010000, 28, 21);
f(0b000000, 15, 10);
- rf(Rm, 16), rf(Rn, 5), rf(Rd, 0);
+ zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0);
}
#define INSN(NAME, op) \
diff --git a/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp
index d0baef39889..c073735a7e0 100644
--- a/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp
@@ -68,6 +68,8 @@ define_pd_global(bool, RewriteFrequentPairs, false);
define_pd_global(bool, UseMembar, true);
+define_pd_global(bool, PreserveFramePointer, false);
+
// GC Ergo Flags
define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
diff --git a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
index 871cc33d279..7d30ee0d16e 100644
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
using MacroAssembler::call_VM_leaf_base;
// Interpreter specific version of call_VM_base
+ using MacroAssembler::call_VM_leaf_base;
+
virtual void call_VM_leaf_base(address entry_point,
int number_of_arguments);
diff --git a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
index 5653e44affd..bf307e55932 100644
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2237,6 +2237,341 @@ void MacroAssembler::pop_CPU_state() {
pop(0x3fffffff, sp); // integer registers except lr & sp
}
+/**
+ * Helpers for multiply_to_len().
+ */
+void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
+ Register src1, Register src2) {
+ adds(dest_lo, dest_lo, src1);
+ adc(dest_hi, dest_hi, zr);
+ adds(dest_lo, dest_lo, src2);
+ adc(final_dest_hi, dest_hi, zr);
+}
+
+// Generate an address from (r + r1 extend offset). "size" is the
+// size of the operand. The result may be in rscratch2.
+Address MacroAssembler::offsetted_address(Register r, Register r1,
+ Address::extend ext, int offset, int size) {
+ if (offset || (ext.shift() % size != 0)) {
+ lea(rscratch2, Address(r, r1, ext));
+ return Address(rscratch2, offset);
+ } else {
+ return Address(r, r1, ext);
+ }
+}
+
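A host-side rendering of add2_with_carry may help when reading the loops that follow: it adds two 64-bit values into a 128-bit accumulator held as a high/low register pair, exactly the adds/adc chain above. A sketch using the GCC/Clang unsigned __int128 extension; parameter names mirror the assembler arguments:

```cpp
#include <cstdint>

// Reference semantics: final_dest_hi:dest_lo = dest_hi:dest_lo + src1 + src2,
// with carries propagated into the high half.
static void add2_with_carry_ref(uint64_t& final_dest_hi, uint64_t dest_hi,
                                uint64_t& dest_lo, uint64_t src1, uint64_t src2) {
  unsigned __int128 acc = ((unsigned __int128)dest_hi << 64) | dest_lo;
  acc += src1;
  acc += src2;
  dest_lo = (uint64_t)acc;
  final_dest_hi = (uint64_t)(acc >> 64);
}
```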
+/**
+ * Multiply 64 bit by 64 bit first loop.
+ */
+void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
+ Register y, Register y_idx, Register z,
+ Register carry, Register product,
+ Register idx, Register kdx) {
+ //
+ // jlong carry, x[], y[], z[];
+  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
+ // huge_128 product = y[idx] * x[xstart] + carry;
+ // z[kdx] = (jlong)product;
+ // carry = (jlong)(product >>> 64);
+ // }
+ // z[xstart] = carry;
+ //
+
+ Label L_first_loop, L_first_loop_exit;
+ Label L_one_x, L_one_y, L_multiply;
+
+ subsw(xstart, xstart, 1);
+ br(Assembler::MI, L_one_x);
+
+ lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
+ ldr(x_xstart, Address(rscratch1));
+ ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
+
+ bind(L_first_loop);
+ subsw(idx, idx, 1);
+ br(Assembler::MI, L_first_loop_exit);
+ subsw(idx, idx, 1);
+ br(Assembler::MI, L_one_y);
+ lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
+ ldr(y_idx, Address(rscratch1));
+ ror(y_idx, y_idx, 32); // convert big-endian to little-endian
+ bind(L_multiply);
+
+ // AArch64 has a multiply-accumulate instruction that we can't use
+ // here because it has no way to process carries, so we have to use
+ // separate add and adc instructions. Bah.
+ umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
+ mul(product, x_xstart, y_idx);
+ adds(product, product, carry);
+ adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product
+
+ subw(kdx, kdx, 2);
+ ror(product, product, 32); // back to big-endian
+ str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
+
+ b(L_first_loop);
+
+ bind(L_one_y);
+ ldrw(y_idx, Address(y, 0));
+ b(L_multiply);
+
+ bind(L_one_x);
+ ldrw(x_xstart, Address(x, 0));
+ b(L_first_loop);
+
+ bind(L_first_loop_exit);
+}
+
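The ldr/ror pairs above ("convert big-endian to little-endian") rely on the fact that the Java int arrays hold their most significant word first, so a 64-bit little-endian load of two adjacent words comes in half-swapped, and a rotate by 32 is all it takes to fix that. A sketch of the equivalence, assuming a little-endian host as on AArch64:

```cpp
#include <cstdint>
#include <cstring>

// ror(x, 32) on a 64-bit value swaps its 32-bit halves.
static uint64_t ror32(uint64_t x) { return (x >> 32) | (x << 32); }

// p points at a big-endian word pair {hi, lo}. A little-endian 64-bit
// load yields (lo << 32) | hi; the rotate restores (hi << 32) | lo.
static uint64_t load_limb(const uint32_t* p) {
  uint64_t v;
  std::memcpy(&v, p, sizeof v);
  return ror32(v);
}
```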
+/**
+ * Multiply 128 bit by 128 bit. Unrolled inner loop.
+ *
+ */
+void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
+ Register carry, Register carry2,
+ Register idx, Register jdx,
+ Register yz_idx1, Register yz_idx2,
+ Register tmp, Register tmp3, Register tmp4,
+ Register tmp6, Register product_hi) {
+
+ // jlong carry, x[], y[], z[];
+ // int kdx = ystart+1;
+ // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
+ // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry;
+ // jlong carry2 = (jlong)(tmp3 >>> 64);
+ // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2;
+ // carry = (jlong)(tmp4 >>> 64);
+ // z[kdx+idx+1] = (jlong)tmp3;
+ // z[kdx+idx] = (jlong)tmp4;
+ // }
+ // idx += 2;
+ // if (idx > 0) {
+ // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry;
+ // z[kdx+idx] = (jlong)yz_idx1;
+ // carry = (jlong)(yz_idx1 >>> 64);
+ // }
+ //
+
+ Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
+
+ lsrw(jdx, idx, 2);
+
+ bind(L_third_loop);
+
+ subsw(jdx, jdx, 1);
+ br(Assembler::MI, L_third_loop_exit);
+ subw(idx, idx, 4);
+
+ lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
+
+ ldp(yz_idx2, yz_idx1, Address(rscratch1, 0));
+
+ lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt)));
+
+ ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
+ ror(yz_idx2, yz_idx2, 32);
+
+ ldp(rscratch2, rscratch1, Address(tmp6, 0));
+
+ mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3
+ umulh(tmp4, product_hi, yz_idx1);
+
+ ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian
+ ror(rscratch2, rscratch2, 32);
+
+ mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp
+ umulh(carry2, product_hi, yz_idx2);
+
+ // propagate sum of both multiplications into carry:tmp4:tmp3
+ adds(tmp3, tmp3, carry);
+ adc(tmp4, tmp4, zr);
+ adds(tmp3, tmp3, rscratch1);
+ adcs(tmp4, tmp4, tmp);
+ adc(carry, carry2, zr);
+ adds(tmp4, tmp4, rscratch2);
+ adc(carry, carry, zr);
+
+ ror(tmp3, tmp3, 32); // convert little-endian to big-endian
+ ror(tmp4, tmp4, 32);
+ stp(tmp4, tmp3, Address(tmp6, 0));
+
+ b(L_third_loop);
+ bind (L_third_loop_exit);
+
+ andw (idx, idx, 0x3);
+ cbz(idx, L_post_third_loop_done);
+
+ Label L_check_1;
+ subsw(idx, idx, 2);
+ br(Assembler::MI, L_check_1);
+
+ lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
+ ldr(yz_idx1, Address(rscratch1, 0));
+ ror(yz_idx1, yz_idx1, 32);
+ mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3
+ umulh(tmp4, product_hi, yz_idx1);
+ lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt)));
+ ldr(yz_idx2, Address(rscratch1, 0));
+ ror(yz_idx2, yz_idx2, 32);
+
+ add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2);
+
+ ror(tmp3, tmp3, 32);
+ str(tmp3, Address(rscratch1, 0));
+
+ bind (L_check_1);
+
+ andw (idx, idx, 0x1);
+ subsw(idx, idx, 1);
+ br(Assembler::MI, L_post_third_loop_done);
+ ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt)));
+ mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3
+ umulh(carry2, tmp4, product_hi);
+ ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt)));
+
+ add2_with_carry(carry2, tmp3, tmp4, carry);
+
+ strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt)));
+ extr(carry, carry2, tmp3, 32);
+
+ bind(L_post_third_loop_done);
+}
+
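The adds/adcs chain in the unrolled loop implements, per step, the 128-bit pseudocode in the comment block above. One step can be checked on the host with unsigned __int128 (illustrative names; y1/y0 are the two y limbs, z1/z0 the matching z limbs):

```cpp
#include <cstdint>

// One unrolled step of the third loop: multiply two 64-bit limbs of y by
// the multiplier m, add the matching z limbs and the incoming carry,
// store two result limbs and produce the outgoing carry.
static void third_loop_step(uint64_t m, uint64_t y1, uint64_t y0,
                            uint64_t& z1, uint64_t& z0, uint64_t& carry) {
  unsigned __int128 t1 = (unsigned __int128)m * y1 + z1 + carry;
  unsigned __int128 t0 = (unsigned __int128)m * y0 + z0 + (uint64_t)(t1 >> 64);
  z1 = (uint64_t)t1;
  z0 = (uint64_t)t0;
  carry = (uint64_t)(t0 >> 64);
}
```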
+/**
+ * Code for BigInteger::multiplyToLen() intrinsic.
+ *
+ * r0: x
+ * r1: xlen
+ * r2: y
+ * r3: ylen
+ * r4: z
+ * r5: zlen
+ * r10: tmp1
+ * r11: tmp2
+ * r12: tmp3
+ * r13: tmp4
+ * r14: tmp5
+ * r15: tmp6
+ * r16: tmp7
+ *
+ */
+void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
+ Register z, Register zlen,
+ Register tmp1, Register tmp2, Register tmp3, Register tmp4,
+ Register tmp5, Register tmp6, Register product_hi) {
+
+ assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
+
+ const Register idx = tmp1;
+ const Register kdx = tmp2;
+ const Register xstart = tmp3;
+
+ const Register y_idx = tmp4;
+ const Register carry = tmp5;
+ const Register product = xlen;
+ const Register x_xstart = zlen; // reuse register
+
+ // First Loop.
+ //
+ // final static long LONG_MASK = 0xffffffffL;
+ // int xstart = xlen - 1;
+ // int ystart = ylen - 1;
+ // long carry = 0;
+  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
+ // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
+ // z[kdx] = (int)product;
+ // carry = product >>> 32;
+ // }
+ // z[xstart] = (int)carry;
+ //
+
+ movw(idx, ylen); // idx = ylen;
+ movw(kdx, zlen); // kdx = xlen+ylen;
+ mov(carry, zr); // carry = 0;
+
+ Label L_done;
+
+ movw(xstart, xlen);
+ subsw(xstart, xstart, 1);
+ br(Assembler::MI, L_done);
+
+ multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
+
+ Label L_second_loop;
+ cbzw(kdx, L_second_loop);
+
+ Label L_carry;
+ subw(kdx, kdx, 1);
+ cbzw(kdx, L_carry);
+
+ strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
+ lsr(carry, carry, 32);
+ subw(kdx, kdx, 1);
+
+ bind(L_carry);
+ strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
+
+ // Second and third (nested) loops.
+ //
+ // for (int i = xstart-1; i >= 0; i--) { // Second loop
+ // carry = 0;
+ // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
+ // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
+ // (z[k] & LONG_MASK) + carry;
+ // z[k] = (int)product;
+ // carry = product >>> 32;
+ // }
+ // z[i] = (int)carry;
+ // }
+ //
+ // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi
+
+ const Register jdx = tmp1;
+
+ bind(L_second_loop);
+ mov(carry, zr); // carry = 0;
+ movw(jdx, ylen); // j = ystart+1
+
+ subsw(xstart, xstart, 1); // i = xstart-1;
+ br(Assembler::MI, L_done);
+
+ str(z, Address(pre(sp, -4 * wordSize)));
+
+ Label L_last_x;
+ lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j
+ subsw(xstart, xstart, 1); // i = xstart-1;
+ br(Assembler::MI, L_last_x);
+
+ lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt)));
+ ldr(product_hi, Address(rscratch1));
+ ror(product_hi, product_hi, 32); // convert big-endian to little-endian
+
+ Label L_third_loop_prologue;
+ bind(L_third_loop_prologue);
+
+ str(ylen, Address(sp, wordSize));
+ stp(x, xstart, Address(sp, 2 * wordSize));
+ multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product,
+ tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi);
+ ldp(z, ylen, Address(post(sp, 2 * wordSize)));
+ ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen
+
+ addw(tmp3, xlen, 1);
+ strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
+ subsw(tmp3, tmp3, 1);
+ br(Assembler::MI, L_done);
+
+ lsr(carry, carry, 32);
+ strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt)));
+ b(L_second_loop);
+
+ // Next infrequent code is moved outside loops.
+ bind(L_last_x);
+ ldrw(product_hi, Address(x, 0));
+ b(L_third_loop_prologue);
+
+ bind(L_done);
+}
+
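For readers following the register-level code, here is a plain C++ rendering of the algorithm the intrinsic replaces, the schoolbook multiply over big-endian 32-bit limbs sketched in the comments above. This is a reference sketch, not HotSpot code; it zeroes z first and accumulates, which is equivalent to BigInteger.multiplyToLen's split into a first loop plus nested second/third loops:

```cpp
#include <cstdint>

// Multiply big-endian limb arrays x[0..xlen) and y[0..ylen) into
// z[0..xlen+ylen), most significant limb first.
static void multiply_to_len_ref(const uint32_t* x, int xlen,
                                const uint32_t* y, int ylen, uint32_t* z) {
  for (int i = 0; i < xlen + ylen; i++) z[i] = 0;
  for (int i = xlen - 1; i >= 0; i--) {                       // second loop
    uint64_t carry = 0;
    for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {  // third loop
      uint64_t product = (uint64_t)y[j] * x[i] + z[k] + carry;
      z[k] = (uint32_t)product;
      carry = product >> 32;
    }
    z[i] = (uint32_t)carry;  // top limb of this row
  }
}
```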
/**
* Emits code to update CRC-32 with a byte value according to constants in table
*
diff --git a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
index b922057f897..21f89fa17c4 100644
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -167,9 +167,8 @@ class MacroAssembler: public Assembler {
// aliases defined in AARCH64 spec
-
  template<class T>
- inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
+ inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
inline void cmp(Register Rd, unsigned imm) { subs(zr, Rd, imm); }
inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
@@ -1121,9 +1120,34 @@ public:
Register tmp1, Register tmp2,
Register tmp3, Register tmp4,
int int_cnt1, Register result);
-
+private:
+ void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
+ Register src1, Register src2);
+ void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
+ add2_with_carry(dest_hi, dest_hi, dest_lo, src1, src2);
+ }
+ void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
+ Register y, Register y_idx, Register z,
+ Register carry, Register product,
+ Register idx, Register kdx);
+ void multiply_128_x_128_loop(Register y, Register z,
+ Register carry, Register carry2,
+ Register idx, Register jdx,
+ Register yz_idx1, Register yz_idx2,
+ Register tmp, Register tmp3, Register tmp4,
+ Register tmp7, Register product_hi);
+public:
+ void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
+ Register zlen, Register tmp1, Register tmp2, Register tmp3,
+ Register tmp4, Register tmp5, Register tmp6, Register tmp7);
// ISB may be needed because of a safepoint
void maybe_isb() { isb(); }
+
+private:
+ // Return the effective address r + (r1 << ext) + offset.
+ // Uses rscratch2.
+ Address offsetted_address(Register r, Register r1, Address::extend ext,
+ int offset, int size);
};
// Used by aarch64.ad to control code generation
diff --git a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
index 7e70f8d7754..5cf5bd77700 100644
--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2356,8 +2356,45 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
-#undef __
-#define __ masm->
+ /**
+ * Arguments:
+ *
+ * Input:
+ * c_rarg0 - x address
+ * c_rarg1 - x length
+ * c_rarg2 - y address
+   *    c_rarg3   - y length
+ * c_rarg4 - z address
+ * c_rarg5 - z length
+ */
+ address generate_multiplyToLen() {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
+
+ address start = __ pc();
+ const Register x = r0;
+ const Register xlen = r1;
+ const Register y = r2;
+ const Register ylen = r3;
+ const Register z = r4;
+ const Register zlen = r5;
+
+ const Register tmp1 = r10;
+ const Register tmp2 = r11;
+ const Register tmp3 = r12;
+ const Register tmp4 = r13;
+ const Register tmp5 = r14;
+ const Register tmp6 = r15;
+ const Register tmp7 = r16;
+
+ BLOCK_COMMENT("Entry:");
+ __ enter(); // required for proper stackwalking of RuntimeStub frame
+ __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+ __ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ ret(lr);
+
+ return start;
+ }
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
@@ -2375,6 +2412,9 @@ class StubGenerator: public StubCodeGenerator {
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
+#undef __
+#define __ masm->
+
address generate_throw_exception(const char* name,
address runtime_entry,
Register arg1 = noreg,
@@ -2518,6 +2558,10 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
+ if (UseMultiplyToLenIntrinsic) {
+ StubRoutines::_multiplyToLen = generate_multiplyToLen();
+ }
+
#ifndef BUILTIN_SIM
if (UseAESIntrinsics) {
StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
diff --git a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp
index 19b0b3759aa..0d026e07c6b 100644
--- a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp
@@ -502,10 +502,17 @@ void TemplateTable::locals_index(Register reg, int offset)
__ neg(reg, reg);
}
-void TemplateTable::iload()
-{
+void TemplateTable::iload() {
+ iload_internal();
+}
+
+void TemplateTable::nofast_iload() {
+ iload_internal(may_not_rewrite);
+}
+
+void TemplateTable::iload_internal(RewriteControl rc) {
transition(vtos, itos);
- if (RewriteFrequentPairs) {
+ if (RewriteFrequentPairs && rc == may_rewrite) {
// TODO : check x86 code for what to do here
__ call_Unimplemented();
} else {
@@ -759,8 +766,15 @@ void TemplateTable::aload(int n)
__ ldr(r0, iaddress(n));
}
-void TemplateTable::aload_0()
-{
+void TemplateTable::aload_0() {
+ aload_0_internal();
+}
+
+void TemplateTable::nofast_aload_0() {
+ aload_0_internal(may_not_rewrite);
+}
+
+void TemplateTable::aload_0_internal(RewriteControl rc) {
// According to bytecode histograms, the pairs:
//
// _aload_0, _fast_igetfield
@@ -782,7 +796,7 @@ void TemplateTable::aload_0()
// aload_0, iload_1
// These bytecodes with a small amount of code are most profitable
// to rewrite
- if (RewriteFrequentPairs) {
+ if (RewriteFrequentPairs && rc == may_rewrite) {
__ call_Unimplemented();
} else {
aload(0);
@@ -2132,37 +2146,21 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
assert_different_registers(Rcache, index, temp);
Label resolved;
+
+ Bytecodes::Code code = bytecode();
+ switch (code) {
+ case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
+ case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+ }
+
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
__ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
- __ cmp(temp, (int) bytecode()); // have we resolved this bytecode?
+ __ cmp(temp, (int) code); // have we resolved this bytecode?
__ br(Assembler::EQ, resolved);
// resolve first time through
- address entry;
- switch (bytecode()) {
- case Bytecodes::_getstatic:
- case Bytecodes::_putstatic:
- case Bytecodes::_getfield:
- case Bytecodes::_putfield:
- entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
- break;
- case Bytecodes::_invokevirtual:
- case Bytecodes::_invokespecial:
- case Bytecodes::_invokestatic:
- case Bytecodes::_invokeinterface:
- entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
- break;
- case Bytecodes::_invokehandle:
- entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
- break;
- case Bytecodes::_invokedynamic:
- entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
- break;
- default:
- fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
- break;
- }
- __ mov(temp, (int) bytecode());
+ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
+ __ mov(temp, (int) code);
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
@@ -2280,7 +2278,7 @@ void TemplateTable::pop_and_check_object(Register r)
__ verify_oop(r);
}
-void TemplateTable::getfield_or_static(int byte_no, bool is_static)
+void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc)
{
const Register cache = r2;
const Register index = r3;
@@ -2310,11 +2308,14 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static)
assert(btos == 0, "change code, btos != 0");
__ cbnz(flags, notByte);
+ // Don't rewrite getstatic, only getfield
+ if (is_static) rc = may_not_rewrite;
+
// btos
__ load_signed_byte(r0, field);
__ push(btos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
}
__ b(Done);
@@ -2325,7 +2326,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static)
// atos
__ load_heap_oop(r0, field);
__ push(atos);
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
}
__ b(Done);
@@ -2337,7 +2338,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static)
__ ldrw(r0, field);
__ push(itos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
}
__ b(Done);
@@ -2349,7 +2350,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static)
__ load_unsigned_short(r0, field);
__ push(ctos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cgetfield, bc, r1);
}
__ b(Done);
@@ -2361,7 +2362,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static)
__ load_signed_short(r0, field);
__ push(stos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sgetfield, bc, r1);
}
__ b(Done);
@@ -2373,7 +2374,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static)
__ ldr(r0, field);
__ push(ltos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lgetfield, bc, r1);
}
__ b(Done);
@@ -2385,7 +2386,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static)
__ ldrs(v0, field);
__ push(ftos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fgetfield, bc, r1);
}
__ b(Done);
@@ -2399,7 +2400,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static)
__ ldrd(v0, field);
__ push(dtos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dgetfield, bc, r1);
}
#ifdef ASSERT
@@ -2421,6 +2422,10 @@ void TemplateTable::getfield(int byte_no)
getfield_or_static(byte_no, false);
}
+void TemplateTable::nofast_getfield(int byte_no) {
+ getfield_or_static(byte_no, false, may_not_rewrite);
+}
+
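A compact sketch of the RewriteControl pattern these hunks introduce: the plain entry points keep bytecode rewriting enabled, the nofast_ variants suppress it (for bytecodes that must not be patched, e.g. when rewriting has to stay off for shared classes), and static accessors are forced to may_not_rewrite inside the shared body. The default argument is assumed here; it lives in the header, which this hunk does not show:

```cpp
// Hypothetical condensation of the TemplateTable changes, not real HotSpot code.
enum RewriteControl { may_rewrite, may_not_rewrite };

struct TemplateTableSketch {
  void getfield(int byte_no)        { getfield_or_static(byte_no, false); }
  void nofast_getfield(int byte_no) { getfield_or_static(byte_no, false, may_not_rewrite); }
  void getstatic(int byte_no)       { getfield_or_static(byte_no, true); }

  void getfield_or_static(int byte_no, bool is_static,
                          RewriteControl rc = may_rewrite) {
    if (is_static) rc = may_not_rewrite;  // never rewrite getstatic
    // ... load the field ...
    if (rc == may_rewrite) {
      // patch_bytecode(Bytecodes::_fast_xgetfield, ...);
    }
  }
};
```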
void TemplateTable::getstatic(int byte_no)
{
getfield_or_static(byte_no, true);
@@ -2484,7 +2489,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
}
}
-void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
+void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
const Register cache = r2;
@@ -2521,12 +2526,15 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
assert(btos == 0, "change code, btos != 0");
__ cbnz(flags, notByte);
+ // Don't rewrite putstatic, only putfield
+ if (is_static) rc = may_not_rewrite;
+
// btos
{
__ pop(btos);
if (!is_static) pop_and_check_object(obj);
__ strb(r0, field);
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
}
__ b(Done);
@@ -2542,7 +2550,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
if (!is_static) pop_and_check_object(obj);
// Store into the field
do_oop_store(_masm, field, r0, _bs->kind(), false);
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
}
__ b(Done);
@@ -2557,7 +2565,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(itos);
if (!is_static) pop_and_check_object(obj);
__ strw(r0, field);
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
}
__ b(Done);
@@ -2572,7 +2580,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ctos);
if (!is_static) pop_and_check_object(obj);
__ strh(r0, field);
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
}
__ b(Done);
@@ -2587,7 +2595,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(stos);
if (!is_static) pop_and_check_object(obj);
__ strh(r0, field);
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
}
__ b(Done);
@@ -2602,7 +2610,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ltos);
if (!is_static) pop_and_check_object(obj);
__ str(r0, field);
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
}
__ b(Done);
@@ -2617,7 +2625,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ftos);
if (!is_static) pop_and_check_object(obj);
__ strs(v0, field);
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
}
__ b(Done);
@@ -2634,7 +2642,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(dtos);
if (!is_static) pop_and_check_object(obj);
__ strd(v0, field);
- if (!is_static) {
+ if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
}
}
@@ -2661,6 +2669,10 @@ void TemplateTable::putfield(int byte_no)
putfield_or_static(byte_no, false);
}
+void TemplateTable::nofast_putfield(int byte_no) {
+ putfield_or_static(byte_no, false, may_not_rewrite);
+}
+
void TemplateTable::putstatic(int byte_no) {
putfield_or_static(byte_no, true);
}
diff --git a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp
index 68c75eb3195..eaee6ccd4f1 100644
--- a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -193,6 +193,15 @@ void VM_Version::get_processor_features() {
}
}
+ // This machine allows unaligned memory accesses
+ if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
+ FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
+ }
+
+ if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
+ UseMultiplyToLenIntrinsic = true;
+ }
+
#ifdef COMPILER2
if (FLAG_IS_DEFAULT(OptoScheduling)) {
OptoScheduling = true;
diff --git a/hotspot/src/cpu/ppc/vm/assembler_ppc.cpp b/hotspot/src/cpu/ppc/vm/assembler_ppc.cpp
index 6ab0f8a61bd..7b19ddb0018 100644
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -85,8 +85,7 @@ int Assembler::branch_destination(int inst, int pos) {
}
// Low-level andi-one-instruction-macro.
-void Assembler::andi(Register a, Register s, const int ui16) {
- assert(is_uimm(ui16, 16), "must be 16-bit unsigned immediate");
+void Assembler::andi(Register a, Register s, const long ui16) {
if (is_power_of_2_long(((jlong) ui16)+1)) {
// pow2minus1
clrldi(a, s, 64-log2_long((((jlong) ui16)+1)));
@@ -97,6 +96,7 @@ void Assembler::andi(Register a, Register s, const int ui16) {
// negpow2
clrrdi(a, s, log2_long((jlong)-ui16));
} else {
+ assert(is_uimm(ui16, 16), "must be 16-bit unsigned immediate");
andi_(a, s, ui16);
}
}
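The widened andi now takes a long mask and, as before, picks the cheapest encoding instead of asserting up front; only masks that reach the final branch must fit the 16-bit andi_ immediate. A host-side classifier for the branches visible here (branches elided between the two hunks are skipped as well); names are illustrative:

```cpp
#include <cstdint>
#include <cstdio>

static bool is_pow2(unsigned long v) { return v != 0 && (v & (v - 1)) == 0; }

// Mirror of the dispatch in Assembler::andi: masks of the form 2^n-1 can
// use clrldi (clear left), negative masks of the form -2^n can use clrrdi
// (clear right); anything else must fit the 16-bit andi_ immediate.
static const char* andi_strategy(long mask) {
  if (is_pow2((unsigned long)mask + 1))               return "clrldi";  // pow2minus1
  if (mask < 0 && is_pow2(0UL - (unsigned long)mask)) return "clrrdi";  // negpow2
  if ((mask & ~0xFFFFL) == 0)                         return "andi_";   // 16-bit imm
  return "would assert";  // not encodable in one instruction
}

int main() {
  std::printf("%s %s %s\n",
              andi_strategy(0xFF),     // clrldi
              andi_strategy(-16),      // clrrdi
              andi_strategy(0x1234));  // andi_
}
```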
@@ -356,7 +356,6 @@ void Assembler::load_const(Register d, long x, Register tmp) {
// 16 bit immediate offset.
int Assembler::load_const_optimized(Register d, long x, Register tmp, bool return_simm16_rest) {
// Avoid accidentally trying to use R0 for indexed addressing.
- assert(d != R0, "R0 not allowed");
assert_different_registers(d, tmp);
short xa, xb, xc, xd; // Four 16-bit chunks of const.
@@ -370,6 +369,58 @@ int Assembler::load_const_optimized(Register d, long x, Register tmp, bool retur
return 0;
}
+ int retval = 0;
+ if (return_simm16_rest) {
+ retval = xd;
+ x = rem << 16;
+ xd = 0;
+ }
+
+ if (d == R0) { // Can't use addi.
+ if (is_simm(x, 32)) { // opt 2: simm32
+ lis(d, x >> 16);
+ if (xd) ori(d, d, (unsigned short)xd);
+ } else {
+ // 64-bit value: x = xa xb xc xd
+ xa = (x >> 48) & 0xffff;
+ xb = (x >> 32) & 0xffff;
+ xc = (x >> 16) & 0xffff;
+ bool xa_loaded = (xb & 0x8000) ? (xa != -1) : (xa != 0);
+ if (tmp == noreg || (xc == 0 && xd == 0)) {
+ if (xa_loaded) {
+ lis(d, xa);
+ if (xb) { ori(d, d, (unsigned short)xb); }
+ } else {
+ li(d, xb);
+ }
+ sldi(d, d, 32);
+ if (xc) { oris(d, d, (unsigned short)xc); }
+ if (xd) { ori( d, d, (unsigned short)xd); }
+ } else {
+ // Exploit instruction level parallelism if we have a tmp register.
+ bool xc_loaded = (xd & 0x8000) ? (xc != -1) : (xc != 0);
+ if (xa_loaded) {
+ lis(tmp, xa);
+ }
+ if (xc_loaded) {
+ lis(d, xc);
+ }
+ if (xa_loaded) {
+ if (xb) { ori(tmp, tmp, (unsigned short)xb); }
+ } else {
+ li(tmp, xb);
+ }
+ if (xc_loaded) {
+ if (xd) { ori(d, d, (unsigned short)xd); }
+ } else {
+ li(d, xd);
+ }
+ insrdi(d, tmp, 32, 0);
+ }
+ }
+ return retval;
+ }
+
xc = rem & 0xFFFF; // Next 16-bit chunk.
rem = (rem >> 16) + ((unsigned short)xc >> 15); // Compensation for sign extend.
@@ -377,28 +428,27 @@ int Assembler::load_const_optimized(Register d, long x, Register tmp, bool retur
lis(d, xc);
} else { // High 32 bits needed.
- if (tmp != noreg) { // opt 3: We have a temp reg.
+ if (tmp != noreg && (int)x != 0) { // opt 3: We have a temp reg.
// No carry propagation between xc and higher chunks here (use logical instructions).
xa = (x >> 48) & 0xffff;
xb = (x >> 32) & 0xffff; // No sign compensation, we use lis+ori or li to allow usage of R0.
- bool load_xa = (xa != 0) || (xb < 0);
+ bool xa_loaded = (xb & 0x8000) ? (xa != -1) : (xa != 0);
bool return_xd = false;
- if (load_xa) { lis(tmp, xa); }
+ if (xa_loaded) { lis(tmp, xa); }
if (xc) { lis(d, xc); }
- if (load_xa) {
+ if (xa_loaded) {
if (xb) { ori(tmp, tmp, (unsigned short)xb); } // No addi, we support tmp == R0.
} else {
- li(tmp, xb); // non-negative
+ li(tmp, xb);
}
if (xc) {
- if (return_simm16_rest && xd >= 0) { return_xd = true; } // >= 0 to avoid carry propagation after insrdi/rldimi.
- else if (xd) { addi(d, d, xd); }
+ if (xd) { addi(d, d, xd); }
} else {
li(d, xd);
}
insrdi(d, tmp, 32, 0);
- return return_xd ? xd : 0; // non-negative
+ return retval;
}
xb = rem & 0xFFFF; // Next 16-bit chunk.
@@ -417,11 +467,51 @@ int Assembler::load_const_optimized(Register d, long x, Register tmp, bool retur
if (xc) { addis(d, d, xc); }
}
- // opt 5: Return offset to be inserted into following instruction.
- if (return_simm16_rest) return xd;
-
if (xd) { addi(d, d, xd); }
- return 0;
+ return retval;
+}
+
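Both load_const_optimized and the new add_const_optimized lean on the same trick: peel a signed 16-bit chunk off x and bump the remainder by the chunk's sign bit, so that adding the sign-extended chunk back later reconstructs x exactly (addi/addis take signed immediates). A self-checking sketch of that invariant:

```cpp
#include <cassert>
#include <cstdint>

// Split x so that x == (rem << 16) + (int64_t)chunk. Adding the chunk's
// sign bit to rem compensates for the chunk being sign-extended later.
static int64_t peel_chunk(int64_t x, int16_t& chunk) {
  chunk = (int16_t)(x & 0xFFFF);
  return (x >> 16) + ((uint16_t)chunk >> 15);
}

int main() {
  for (int64_t x : {0L, 1L, -1L, 0x7FFFL, 0x8000L, -0x8000L, 0x12345678L}) {
    int16_t xd;
    int64_t rem = peel_chunk(x, xd);
    assert((rem << 16) + (int64_t)xd == x);  // invariant used by the cases above
  }
}
```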
+// We emit only one addition to s to optimize latency.
+int Assembler::add_const_optimized(Register d, Register s, long x, Register tmp, bool return_simm16_rest) {
+ assert(s != R0 && s != tmp, "unsupported");
+ long rem = x;
+
+ // Case 1: Can use mr or addi.
+ short xd = rem & 0xFFFF; // Lowest 16-bit chunk.
+ rem = (rem >> 16) + ((unsigned short)xd >> 15);
+ if (rem == 0) {
+ if (xd == 0) {
+ if (d != s) { mr(d, s); }
+ return 0;
+ }
+ if (return_simm16_rest) {
+ return xd;
+ }
+ addi(d, s, xd);
+ return 0;
+ }
+
+ // Case 2: Can use addis.
+ if (xd == 0) {
+ short xc = rem & 0xFFFF; // 2nd 16-bit chunk.
+    rem = (rem >> 16) + ((unsigned short)xc >> 15);
+ if (rem == 0) {
+ addis(d, s, xc);
+ return 0;
+ }
+ }
+
+ // Other cases: load & add.
+ Register tmp1 = tmp,
+ tmp2 = noreg;
+ if ((d != tmp) && (d != s)) {
+ // Can use d.
+ tmp1 = d;
+ tmp2 = tmp;
+ }
+ int simm16_rest = load_const_optimized(tmp1, x, tmp2, return_simm16_rest);
+ add(d, tmp1, s);
+ return simm16_rest;
}
#ifndef PRODUCT
diff --git a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp
index f1087dbc02c..6c7103aefa4 100644
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp
@@ -224,10 +224,13 @@ class Assembler : public AbstractAssembler {
ADDIS_OPCODE = (15u << OPCODE_SHIFT),
ADDIC__OPCODE = (13u << OPCODE_SHIFT),
ADDE_OPCODE = (31u << OPCODE_SHIFT | 138u << 1),
+ ADDME_OPCODE = (31u << OPCODE_SHIFT | 234u << 1),
+ ADDZE_OPCODE = (31u << OPCODE_SHIFT | 202u << 1),
SUBF_OPCODE = (31u << OPCODE_SHIFT | 40u << 1),
SUBFC_OPCODE = (31u << OPCODE_SHIFT | 8u << 1),
SUBFE_OPCODE = (31u << OPCODE_SHIFT | 136u << 1),
SUBFIC_OPCODE = (8u << OPCODE_SHIFT),
+ SUBFME_OPCODE = (31u << OPCODE_SHIFT | 232u << 1),
SUBFZE_OPCODE = (31u << OPCODE_SHIFT | 200u << 1),
DIVW_OPCODE = (31u << OPCODE_SHIFT | 491u << 1),
MULLW_OPCODE = (31u << OPCODE_SHIFT | 235u << 1),
@@ -657,6 +660,9 @@ class Assembler : public AbstractAssembler {
SYNC_OPCODE = (31u << OPCODE_SHIFT | 598u << 1),
EIEIO_OPCODE = (31u << OPCODE_SHIFT | 854u << 1),
+ // Wait instructions for polling.
+ WAIT_OPCODE = (31u << OPCODE_SHIFT | 62u << 1),
+
// Trap instructions
TDI_OPCODE = (2u << OPCODE_SHIFT),
TWI_OPCODE = (3u << OPCODE_SHIFT),
@@ -666,8 +672,10 @@ class Assembler : public AbstractAssembler {
// Atomics.
LWARX_OPCODE = (31u << OPCODE_SHIFT | 20u << 1),
LDARX_OPCODE = (31u << OPCODE_SHIFT | 84u << 1),
+ LQARX_OPCODE = (31u << OPCODE_SHIFT | 276u << 1),
STWCX_OPCODE = (31u << OPCODE_SHIFT | 150u << 1),
- STDCX_OPCODE = (31u << OPCODE_SHIFT | 214u << 1)
+ STDCX_OPCODE = (31u << OPCODE_SHIFT | 214u << 1),
+ STQCX_OPCODE = (31u << OPCODE_SHIFT | 182u << 1)
};
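The new entries follow the Power XO-form layout used throughout this enum: primary opcode 31 in the top six bits (OPCODE_SHIFT is 26 elsewhere in this header), extended opcode in IBM bits 22-30 (hence the `<< 1`), OE at bit 21 and Rc at bit 31. A sketch assembling addme. this way; the field helpers are simplified stand-ins for the real rt()/ra()/oe()/rc():

```cpp
#include <cstdint>
#include <cstdio>

// Simplified XO-form encoder. IBM numbering counts bits from the MSB,
// so IBM bit n sits at shift (31 - n) in an ordinary uint32_t.
static uint32_t xo_form(uint32_t xo, uint32_t rt, uint32_t ra,
                        uint32_t rb, uint32_t oe, uint32_t rc) {
  return (31u << 26)   // primary opcode, IBM bits 0-5
       | (rt << 21)    // RT, bits 6-10
       | (ra << 16)    // RA, bits 11-15
       | (rb << 11)    // RB, bits 16-20 (zero for addme/addze forms)
       | (oe << 10)    // OE, bit 21
       | (xo << 1)     // extended opcode, bits 22-30
       | rc;           // Rc, bit 31
}

int main() {
  // addme. r3, r4  ==  ADDME_OPCODE | rt(3) | ra(4) | oe(0) | rc(1)
  std::printf("0x%08x\n", xo_form(234, 3, 4, 0, 0, 1));
}
```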
@@ -1171,6 +1179,14 @@ class Assembler : public AbstractAssembler {
inline void adde_( Register d, Register a, Register b);
inline void subfe( Register d, Register a, Register b);
inline void subfe_( Register d, Register a, Register b);
+ inline void addme( Register d, Register a);
+ inline void addme_( Register d, Register a);
+ inline void subfme( Register d, Register a);
+ inline void subfme_(Register d, Register a);
+ inline void addze( Register d, Register a);
+ inline void addze_( Register d, Register a);
+ inline void subfze( Register d, Register a);
+ inline void subfze_(Register d, Register a);
inline void neg( Register d, Register a);
inline void neg_( Register d, Register a);
inline void mulli( Register d, Register a, int si16);
@@ -1189,6 +1205,38 @@ class Assembler : public AbstractAssembler {
inline void divw( Register d, Register a, Register b);
inline void divw_( Register d, Register a, Register b);
+ // Fixed-Point Arithmetic Instructions with Overflow detection
+ inline void addo( Register d, Register a, Register b);
+ inline void addo_( Register d, Register a, Register b);
+ inline void subfo( Register d, Register a, Register b);
+ inline void subfo_( Register d, Register a, Register b);
+ inline void addco( Register d, Register a, Register b);
+ inline void addco_( Register d, Register a, Register b);
+ inline void subfco( Register d, Register a, Register b);
+ inline void subfco_( Register d, Register a, Register b);
+ inline void addeo( Register d, Register a, Register b);
+ inline void addeo_( Register d, Register a, Register b);
+ inline void subfeo( Register d, Register a, Register b);
+ inline void subfeo_( Register d, Register a, Register b);
+ inline void addmeo( Register d, Register a);
+ inline void addmeo_( Register d, Register a);
+ inline void subfmeo( Register d, Register a);
+ inline void subfmeo_(Register d, Register a);
+ inline void addzeo( Register d, Register a);
+ inline void addzeo_( Register d, Register a);
+ inline void subfzeo( Register d, Register a);
+ inline void subfzeo_(Register d, Register a);
+ inline void nego( Register d, Register a);
+ inline void nego_( Register d, Register a);
+ inline void mulldo( Register d, Register a, Register b);
+ inline void mulldo_( Register d, Register a, Register b);
+ inline void mullwo( Register d, Register a, Register b);
+ inline void mullwo_( Register d, Register a, Register b);
+ inline void divdo( Register d, Register a, Register b);
+ inline void divdo_( Register d, Register a, Register b);
+ inline void divwo( Register d, Register a, Register b);
+ inline void divwo_( Register d, Register a, Register b);
+
// extended mnemonics
inline void li( Register d, int si16);
inline void lis( Register d, int si16);
@@ -1303,7 +1351,7 @@ class Assembler : public AbstractAssembler {
inline void isel_0( Register d, ConditionRegister cr, Condition cc, Register b = noreg);
// PPC 1, section 3.3.11, Fixed-Point Logical Instructions
- void andi( Register a, Register s, int ui16); // optimized version
+ void andi( Register a, Register s, long ui16); // optimized version
inline void andi_( Register a, Register s, int ui16);
inline void andis_( Register a, Register s, int ui16);
inline void ori( Register a, Register s, int ui16);
@@ -1688,14 +1736,21 @@ class Assembler : public AbstractAssembler {
inline void isync();
inline void elemental_membar(int e); // Elemental Memory Barriers (>=Power 8)
+ // Wait instructions for polling. Attention: May result in SIGILL.
+ inline void wait();
+ inline void waitrsv(); // >=Power7
+
// atomics
inline void lwarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
inline void ldarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
+ inline void lqarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
inline bool lxarx_hint_exclusive_access();
inline void lwarx( Register d, Register a, Register b, bool hint_exclusive_access = false);
inline void ldarx( Register d, Register a, Register b, bool hint_exclusive_access = false);
+ inline void lqarx( Register d, Register a, Register b, bool hint_exclusive_access = false);
inline void stwcx_( Register s, Register a, Register b);
inline void stdcx_( Register s, Register a, Register b);
+ inline void stqcx_( Register s, Register a, Register b);
// Instructions for adjusting thread priority for simultaneous
// multithreading (SMT) on Power5.
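lqarx/stqcx_ extend the load-reserve/store-conditional pair to 16-byte quadwords (Power8 and later, gated like the other lxarx hints above). The retry idiom such pairs implement corresponds on the C++ side to a weak compare-exchange loop, which typically compiles down to an lwarx/stwcx_ loop on PPC64; a sketch of the equivalence, not the JIT's actual use of these instructions:

```cpp
#include <atomic>
#include <cstdint>

// Load-reserve / store-conditional idiom: load with reservation, compute,
// store conditionally, and retry if the reservation was lost in between.
static uint64_t fetch_add_llsc(std::atomic<uint64_t>& v, uint64_t inc) {
  uint64_t old = v.load(std::memory_order_relaxed);
  while (!v.compare_exchange_weak(old, old + inc,
                                  std::memory_order_seq_cst,
                                  std::memory_order_relaxed)) {
    // compare_exchange_weak refreshed 'old'; just retry.
  }
  return old;
}
```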
@@ -2054,10 +2109,13 @@ class Assembler : public AbstractAssembler {
// Atomics: use ra0mem to disallow R0 as base.
inline void lwarx_unchecked(Register d, Register b, int eh1);
inline void ldarx_unchecked(Register d, Register b, int eh1);
+ inline void lqarx_unchecked(Register d, Register b, int eh1);
inline void lwarx( Register d, Register b, bool hint_exclusive_access);
inline void ldarx( Register d, Register b, bool hint_exclusive_access);
+ inline void lqarx( Register d, Register b, bool hint_exclusive_access);
inline void stwcx_(Register s, Register b);
inline void stdcx_(Register s, Register b);
+ inline void stqcx_(Register s, Register b);
inline void lfs( FloatRegister d, int si16);
inline void lfsx( FloatRegister d, Register b);
inline void lfd( FloatRegister d, int si16);
@@ -2120,6 +2178,20 @@ class Assembler : public AbstractAssembler {
return load_const_optimized(d, (long)(unsigned long)a, tmp, return_simm16_rest);
}
+ // If return_simm16_rest, the return value needs to get added afterwards.
+ int add_const_optimized(Register d, Register s, long x, Register tmp = R0, bool return_simm16_rest = false);
+ inline int add_const_optimized(Register d, Register s, void* a, Register tmp = R0, bool return_simm16_rest = false) {
+ return add_const_optimized(d, s, (long)(unsigned long)a, tmp, return_simm16_rest);
+ }
+
+ // If return_simm16_rest, the return value needs to get added afterwards.
+ inline int sub_const_optimized(Register d, Register s, long x, Register tmp = R0, bool return_simm16_rest = false) {
+ return add_const_optimized(d, s, -x, tmp, return_simm16_rest);
+ }
+ inline int sub_const_optimized(Register d, Register s, void* a, Register tmp = R0, bool return_simm16_rest = false) {
+ return sub_const_optimized(d, s, (long)(unsigned long)a, tmp, return_simm16_rest);
+ }
+
// Creation
Assembler(CodeBuffer* code) : AbstractAssembler(code) {
#ifdef CHECK_DELAY
diff --git a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp
index 5493f124371..e860dac7d43 100644
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp
@@ -100,6 +100,14 @@ inline void Assembler::adde( Register d, Register a, Register b) { emit_int32(
inline void Assembler::adde_( Register d, Register a, Register b) { emit_int32(ADDE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::subfe( Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::subfe_( Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::addme( Register d, Register a) { emit_int32(ADDME_OPCODE | rt(d) | ra(a) | oe(0) | rc(0)); }
+inline void Assembler::addme_( Register d, Register a) { emit_int32(ADDME_OPCODE | rt(d) | ra(a) | oe(0) | rc(1)); }
+inline void Assembler::subfme( Register d, Register a) { emit_int32(SUBFME_OPCODE | rt(d) | ra(a) | oe(0) | rc(0)); }
+inline void Assembler::subfme_(Register d, Register a) { emit_int32(SUBFME_OPCODE | rt(d) | ra(a) | oe(0) | rc(1)); }
+inline void Assembler::addze( Register d, Register a) { emit_int32(ADDZE_OPCODE | rt(d) | ra(a) | oe(0) | rc(0)); }
+inline void Assembler::addze_( Register d, Register a) { emit_int32(ADDZE_OPCODE | rt(d) | ra(a) | oe(0) | rc(1)); }
+inline void Assembler::subfze( Register d, Register a) { emit_int32(SUBFZE_OPCODE | rt(d) | ra(a) | oe(0) | rc(0)); }
+inline void Assembler::subfze_(Register d, Register a) { emit_int32(SUBFZE_OPCODE | rt(d) | ra(a) | oe(0) | rc(1)); }
inline void Assembler::neg( Register d, Register a) { emit_int32(NEG_OPCODE | rt(d) | ra(a) | oe(0) | rc(0)); }
inline void Assembler::neg_( Register d, Register a) { emit_int32(NEG_OPCODE | rt(d) | ra(a) | oe(0) | rc(1)); }
inline void Assembler::mulli( Register d, Register a, int si16) { emit_int32(MULLI_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
@@ -118,6 +126,38 @@ inline void Assembler::divd_( Register d, Register a, Register b) { emit_int32(
inline void Assembler::divw( Register d, Register a, Register b) { emit_int32(DIVW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::divw_( Register d, Register a, Register b) { emit_int32(DIVW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+// Fixed-Point Arithmetic Instructions with Overflow detection
+inline void Assembler::addo( Register d, Register a, Register b) { emit_int32(ADD_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(0)); }
+inline void Assembler::addo_( Register d, Register a, Register b) { emit_int32(ADD_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(1)); }
+inline void Assembler::subfo( Register d, Register a, Register b) { emit_int32(SUBF_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(0)); }
+inline void Assembler::subfo_( Register d, Register a, Register b) { emit_int32(SUBF_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(1)); }
+inline void Assembler::addco( Register d, Register a, Register b) { emit_int32(ADDC_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(0)); }
+inline void Assembler::addco_( Register d, Register a, Register b) { emit_int32(ADDC_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(1)); }
+inline void Assembler::subfco( Register d, Register a, Register b) { emit_int32(SUBFC_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(0)); }
+inline void Assembler::subfco_( Register d, Register a, Register b) { emit_int32(SUBFC_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(1)); }
+inline void Assembler::addeo( Register d, Register a, Register b) { emit_int32(ADDE_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(0)); }
+inline void Assembler::addeo_( Register d, Register a, Register b) { emit_int32(ADDE_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(1)); }
+inline void Assembler::subfeo( Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(0)); }
+inline void Assembler::subfeo_( Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(1)); }
+inline void Assembler::addmeo( Register d, Register a) { emit_int32(ADDME_OPCODE | rt(d) | ra(a) | oe(1) | rc(0)); }
+inline void Assembler::addmeo_( Register d, Register a) { emit_int32(ADDME_OPCODE | rt(d) | ra(a) | oe(1) | rc(1)); }
+inline void Assembler::subfmeo( Register d, Register a) { emit_int32(SUBFME_OPCODE | rt(d) | ra(a) | oe(1) | rc(0)); }
+inline void Assembler::subfmeo_(Register d, Register a) { emit_int32(SUBFME_OPCODE | rt(d) | ra(a) | oe(1) | rc(1)); }
+inline void Assembler::addzeo( Register d, Register a) { emit_int32(ADDZE_OPCODE | rt(d) | ra(a) | oe(1) | rc(0)); }
+inline void Assembler::addzeo_( Register d, Register a) { emit_int32(ADDZE_OPCODE | rt(d) | ra(a) | oe(1) | rc(1)); }
+inline void Assembler::subfzeo( Register d, Register a) { emit_int32(SUBFZE_OPCODE | rt(d) | ra(a) | oe(1) | rc(0)); }
+inline void Assembler::subfzeo_(Register d, Register a) { emit_int32(SUBFZE_OPCODE | rt(d) | ra(a) | oe(1) | rc(1)); }
+inline void Assembler::nego( Register d, Register a) { emit_int32(NEG_OPCODE | rt(d) | ra(a) | oe(1) | rc(0)); }
+inline void Assembler::nego_( Register d, Register a) { emit_int32(NEG_OPCODE | rt(d) | ra(a) | oe(1) | rc(1)); }
+inline void Assembler::mulldo( Register d, Register a, Register b) { emit_int32(MULLD_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(0)); }
+inline void Assembler::mulldo_( Register d, Register a, Register b) { emit_int32(MULLD_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(1)); }
+inline void Assembler::mullwo( Register d, Register a, Register b) { emit_int32(MULLW_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(0)); }
+inline void Assembler::mullwo_( Register d, Register a, Register b) { emit_int32(MULLW_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(1)); }
+inline void Assembler::divdo( Register d, Register a, Register b) { emit_int32(DIVD_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(0)); }
+inline void Assembler::divdo_( Register d, Register a, Register b) { emit_int32(DIVD_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(1)); }
+inline void Assembler::divwo( Register d, Register a, Register b) { emit_int32(DIVW_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(0)); }
+inline void Assembler::divwo_( Register d, Register a, Register b) { emit_int32(DIVW_OPCODE | rt(d) | ra(a) | rb(b) | oe(1) | rc(1)); }
+
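Note: the oe(1) encodings above set XER[OV], and the dotted (rc(1)) variants also copy the summary-overflow bit into CR0, which is exactly what overflow-checked arithmetic intrinsics need. A usage sketch, not taken from this patch (register choices are illustrative, and the "bso" branch-on-CR0.SO extended mnemonic is assumed to be available in this assembler):

    Label overflow;
    __ mulldo_(R3, R4, R5);  // R3 = R4 * R5; OE=1 sets XER[OV]/[SO], Rc=1 mirrors SO into CR0
    __ bso(CCR0, overflow);  // taken if the signed 64-bit product overflowed
    // ... fast path continues here ...
    __ bind(overflow);       // deoptimize or branch to a slow path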
// extended mnemonics
inline void Assembler::li( Register d, int si16) { Assembler::addi_r0ok( d, R0, si16); }
inline void Assembler::lis( Register d, int si16) { Assembler::addis_r0ok(d, R0, si16); }
@@ -540,15 +580,22 @@ inline void Assembler::eieio() { emit_int32( EIEIO_OPCODE); }
inline void Assembler::isync() { emit_int32( ISYNC_OPCODE); }
inline void Assembler::elemental_membar(int e) { assert(0 < e && e < 16, "invalid encoding"); emit_int32( SYNC_OPCODE | e1215(e)); }
+// Wait instructions for polling.
+inline void Assembler::wait() { emit_int32( WAIT_OPCODE); }
+inline void Assembler::waitrsv() { emit_int32( WAIT_OPCODE | 1<<(31-10)); } // WC=0b01 >=Power7
+
// atomics
// Use ra0mem to disallow R0 as base.
inline void Assembler::lwarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LWARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
inline void Assembler::ldarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LDARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
+inline void Assembler::lqarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LQARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
inline bool Assembler::lxarx_hint_exclusive_access() { return VM_Version::has_lxarxeh(); }
inline void Assembler::lwarx( Register d, Register a, Register b, bool hint_exclusive_access) { lwarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::ldarx( Register d, Register a, Register b, bool hint_exclusive_access) { ldarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::lqarx( Register d, Register a, Register b, bool hint_exclusive_access) { lqarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::stwcx_(Register s, Register a, Register b) { emit_int32( STWCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
inline void Assembler::stdcx_(Register s, Register a, Register b) { emit_int32( STDCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
+inline void Assembler::stqcx_(Register s, Register a, Register b) { emit_int32( STQCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
// Instructions for adjusting thread priority
// for simultaneous multithreading (SMT) on POWER5.
@@ -873,10 +920,13 @@ inline void Assembler::dcbtstct(Register s2, int ct) { emit_int32( DCBTST_OPCOD
// ra0 version
inline void Assembler::lwarx_unchecked(Register d, Register b, int eh1) { emit_int32( LWARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
inline void Assembler::ldarx_unchecked(Register d, Register b, int eh1) { emit_int32( LDARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
+inline void Assembler::lqarx_unchecked(Register d, Register b, int eh1) { emit_int32( LQARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
inline void Assembler::lwarx( Register d, Register b, bool hint_exclusive_access){ lwarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::ldarx( Register d, Register b, bool hint_exclusive_access){ ldarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::lqarx( Register d, Register b, bool hint_exclusive_access){ lqarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::stwcx_(Register s, Register b) { emit_int32( STWCX_OPCODE | rs(s) | rb(b) | rc(1)); }
inline void Assembler::stdcx_(Register s, Register b) { emit_int32( STDCX_OPCODE | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::stqcx_(Register s, Register b) { emit_int32( STQCX_OPCODE | rs(s) | rb(b) | rc(1)); }
// ra0 version
inline void Assembler::lfs( FloatRegister d, int si16) { emit_int32( LFS_OPCODE | frt(d) | simm(si16,16)); }
diff --git a/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp b/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp
index 3b4b9e3660b..d797ef6eb8c 100644
--- a/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@ define_pd_global(intx, ConditionalMoveLimit, 3);
define_pd_global(intx, FLOATPRESSURE, 28);
define_pd_global(intx, FreqInlineSize, 175);
define_pd_global(intx, MinJumpTableSize, 10);
-define_pd_global(intx, INTPRESSURE, 25);
+define_pd_global(intx, INTPRESSURE, 26);
define_pd_global(intx, InteriorEntryAlignment, 16);
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, RegisterCostAreaRatio, 16000);
diff --git a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp
index f2391d251eb..6c3cf73cb56 100644
--- a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,10 +55,12 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
+define_pd_global(bool, PreserveFramePointer, false);
+
// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread.
-define_pd_global(uintx, TypeProfileLevel, 0);
+define_pd_global(uintx, TypeProfileLevel, 111);
// Platform dependent flag handling: flags only defined on this platform.
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
@@ -71,14 +73,26 @@ define_pd_global(uintx, TypeProfileLevel, 0);
\
product(uintx, PowerArchitecturePPC64, 0, \
"CPU Version: x for PowerX. Currently recognizes Power5 to " \
- "Power7. Default is 0. CPUs newer than Power7 will be " \
- "recognized as Power7.") \
+ "Power8. Default is 0. Newer CPUs will be recognized as Power8.") \
\
/* Reoptimize code-sequences of calls at runtime, e.g. replace an */ \
/* indirect call by a direct call. */ \
product(bool, ReoptimizeCallSequences, true, \
"Reoptimize code-sequences of calls at runtime.") \
\
+ /* Power 8: Configure Data Stream Control Register. */ \
+ product(uint64_t,DSCR_PPC64, (uintx)-1, \
+ "Power8 or later: Specify encoded value for Data Stream Control " \
+ "Register") \
+ product(uint64_t,DSCR_DPFD_PPC64, 8, \
+ "Power8 or later: DPFD (default prefetch depth) value of the " \
+ "Data Stream Control Register." \
+ " 0: hardware default, 1: none, 2-7: min-max, 8: don't touch") \
+ product(uint64_t,DSCR_URG_PPC64, 8, \
+ "Power8 or later: URG (depth attainment urgency) value of the " \
+ "Data Stream Control Register." \
+ " 0: hardware default, 1: none, 2-7: min-max, 8: don't touch") \
+ \
product(bool, UseLoadInstructionsForStackBangingPPC64, false, \
"Use load instructions for stack banging.") \
\
@@ -121,6 +135,41 @@ define_pd_global(uintx, TypeProfileLevel, 0);
\
product(bool, ZapMemory, false, "Write 0x0101... to empty memory." \
" Use this to ease debugging.") \
-
+ \
+ /* Use Restricted Transactional Memory for lock eliding */ \
+ product(bool, UseRTMLocking, false, \
+ "Enable RTM lock eliding for inflated locks in compiled code") \
+ \
+ experimental(bool, UseRTMForStackLocks, false, \
+ "Enable RTM lock eliding for stack locks in compiled code") \
+ \
+ product(bool, UseRTMDeopt, false, \
+ "Perform deopt and recompilation based on RTM abort ratio") \
+ \
+ product(uintx, RTMRetryCount, 5, \
+ "Number of RTM retries on lock abort or busy") \
+ \
+ experimental(intx, RTMSpinLoopCount, 100, \
+ "Spin count for lock to become free before RTM retry") \
+ \
+ experimental(intx, RTMAbortThreshold, 1000, \
+ "Calculate abort ratio after this number of aborts") \
+ \
+ experimental(intx, RTMLockingThreshold, 10000, \
+ "Lock count at which to do RTM lock eliding without " \
+ "abort ratio calculation") \
+ \
+ experimental(intx, RTMAbortRatio, 50, \
+ "Lock abort ratio at which to stop use RTM lock eliding") \
+ \
+ experimental(intx, RTMTotalCountIncrRate, 64, \
+ "Increment total RTM attempted lock count once every n times") \
+ \
+ experimental(intx, RTMLockingCalculationDelay, 0, \
+ "Number of milliseconds to wait before start calculating aborts " \
+ "for RTM locking") \
+ \
+ experimental(bool, UseRTMXendForLockBusy, true, \
+ "Use RTM Xend instead of Xabort when lock busy") \
#endif // CPU_PPC_VM_GLOBALS_PPC_HPP
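Note: for reference, the flags above are consumed from the java launcher; product flags can be set directly, while the experimental ones must first be unlocked. A hedged invocation sketch (MyApp is a placeholder, and RTM additionally requires Power8 hardware with transactional memory enabled by the OS):

    java -XX:+UseRTMLocking -XX:RTMRetryCount=10 \
         -XX:+UnlockExperimentalVMOptions -XX:RTMAbortRatio=40 MyApp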
diff --git a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp
index b79b1582bb7..00330ef3461 100644
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp
@@ -446,7 +446,7 @@ void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
}
// Load object from cpool->resolved_references(index).
-void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
+void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index, Label *is_null) {
assert_different_registers(result, index);
get_constant_pool(result);
@@ -469,7 +469,7 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
#endif
// Add in the index.
add(result, tmp, result);
- load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
+ load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result, is_null);
}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
@@ -876,7 +876,6 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// If condition is true we are done and hence we can store 0 in the displaced
// header indicating it is a recursive lock.
bne(CCR0, slow_case);
- release();
std(R0/*==0!*/, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
b(done);
@@ -1861,7 +1860,7 @@ void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register
const Register mdp = tmp1;
add(mdp, tmp1, R28_mdx);
- // Pffset of the current profile entry to update.
+ // Offset of the current profile entry to update.
const Register entry_offset = tmp2;
// entry_offset = array len in number of cells
ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);
diff --git a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp
index 555cc5ee9d8..571f87e45cd 100644
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -85,7 +85,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register tmp1, Register tmp2, Register tmp3, Label &ok_is_subtype);
// Load object from cpool->resolved_references(index).
- void load_resolved_reference_at_index(Register result, Register index);
+ void load_resolved_reference_at_index(Register result, Register index, Label *is_null = NULL);
void generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1);
void load_receiver(Register Rparam_count, Register Rrecv_dst);
@@ -137,7 +137,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
void field_offset_at(int n, Register tmp, Register dest, Register base);
int field_offset_at(Register object, address bcp, int offset);
void fast_iaaccess(int n, address bcp);
- void fast_iagetfield(address bcp);
void fast_iaputfield(address bcp, bool do_store_check);
void index_check(Register array, Register index, int index_shift, Register tmp, Register res);
diff --git a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
index be85db68c82..7f301e255b0 100644
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -427,7 +427,6 @@ address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type
return entry;
}
-
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
@@ -473,7 +472,8 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// This is not a leaf but we have a JavaFrameAnchor now and we will
// check (create) exceptions afterward so this is ok.
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError),
+ R16_thread);
// Pop the C frame and restore LR.
__ pop_frame();
diff --git a/hotspot/src/cpu/ppc/vm/interpreter_ppc.hpp b/hotspot/src/cpu/ppc/vm/interpreter_ppc.hpp
index 48864ae7213..e42e66c6914 100644
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,4 +47,4 @@
}
#endif
-#endif // CPU_PPC_VM_INTERPRETER_PPC_PP
+#endif // CPU_PPC_VM_INTERPRETER_PPC_HPP
diff --git a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp
index 89973056451..4d0e6df26b5 100644
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1455,7 +1455,7 @@ void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_valu
// Several special cases exist to avoid that unnecessary information is generated.
//
void MacroAssembler::cmpxchgd(ConditionRegister flag,
- Register dest_current_value, Register compare_value, Register exchange_value,
+ Register dest_current_value, RegisterOrConstant compare_value, Register exchange_value,
Register addr_base, int semantics, bool cmpxchgx_hint,
Register int_flag_success, Label* failed_ext, bool contention_hint) {
Label retry;
@@ -1465,7 +1465,7 @@ void MacroAssembler::cmpxchgd(ConditionRegister flag,
// Save one branch if result is returned via register and result register is different from the other ones.
bool use_result_reg = (int_flag_success!=noreg);
- bool preset_result_reg = (int_flag_success!=dest_current_value && int_flag_success!=compare_value &&
+ bool preset_result_reg = (int_flag_success!=dest_current_value && int_flag_success!=compare_value.register_or_noreg() &&
int_flag_success!=exchange_value && int_flag_success!=addr_base);
assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");
@@ -1481,7 +1481,7 @@ void MacroAssembler::cmpxchgd(ConditionRegister flag,
// Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
if (contention_hint) { // Don't try to reserve if cmp fails.
ld(dest_current_value, 0, addr_base);
- cmpd(flag, dest_current_value, compare_value);
+ cmpd(flag, compare_value, dest_current_value);
bne(flag, failed);
}
@@ -1489,7 +1489,7 @@ void MacroAssembler::cmpxchgd(ConditionRegister flag,
bind(retry);
ldarx(dest_current_value, addr_base, cmpxchgx_hint);
- cmpd(flag, dest_current_value, compare_value);
+ cmpd(flag, compare_value, dest_current_value);
if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
bne_predict_not_taken(flag, failed);
} else {
@@ -1873,7 +1873,6 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
// CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
- fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
/*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
/*where=*/obj_reg,
@@ -1909,7 +1908,6 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
// CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
- fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
/*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
/*where=*/obj_reg,
@@ -1946,7 +1944,6 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
// CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
- fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
/*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
/*where=*/obj_reg,
@@ -1987,9 +1984,371 @@ void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mar
beq(cr_reg, done);
}
+// TM on PPC64.
+void MacroAssembler::atomic_inc_ptr(Register addr, Register result, int simm16) {
+ Label retry;
+ bind(retry);
+ ldarx(result, addr, /*hint*/ false);
+ addi(result, result, simm16);
+ stdcx_(result, addr);
+ if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
+ bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
+ } else {
+ bne( CCR0, retry); // stXcx_ sets CCR0
+ }
+}
+
+void MacroAssembler::atomic_ori_int(Register addr, Register result, int uimm16) {
+ Label retry;
+ bind(retry);
+ lwarx(result, addr, /*hint*/ false);
+ ori(result, result, uimm16);
+ stwcx_(result, addr);
+ if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
+ bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
+ } else {
+ bne( CCR0, retry); // stXcx_ sets CCR0
+ }
+}
+
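Note: both helpers are the classic PPC load-reserve/store-conditional loop; stXcx_ sets CCR0.EQ on success, so the bne retries until the reservation holds. No memory barrier is emitted, so the portable equivalent is relaxed. A sketch in plain C++, not part of the patch:

    #include <atomic>
    #include <cstdint>

    // atomic_inc_ptr: 'result' ends up holding the incremented value.
    intptr_t atomic_inc_ptr(std::atomic<intptr_t>& addr, int simm16 = 1) {
      return addr.fetch_add(simm16, std::memory_order_relaxed) + simm16;
    }

    // atomic_ori_int: 'result' ends up holding the OR-ed value.
    uint32_t atomic_ori_int(std::atomic<uint32_t>& addr, uint16_t uimm16) {
      return addr.fetch_or(uimm16, std::memory_order_relaxed) | uimm16;
    }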
+#if INCLUDE_RTM_OPT
+
+// Update rtm_counters based on abort status
+// input: abort_status
+// rtm_counters (RTMLockingCounters*)
+void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters_Reg) {
+ // Mapping to keep PreciseRTMLockingStatistics similar to x86.
+ // x86 ppc (! means inverted, ? means not the same)
+ // 0 31 Set if abort caused by XABORT instruction.
+ // 1 ! 7 If set, the transaction may succeed on a retry. This bit is always clear if bit 0 is set.
+ // 2 13 Set if another logical processor conflicted with a memory address that was part of the transaction that aborted.
+ // 3 10 Set if an internal buffer overflowed.
+ // 4 ?12 Set if a debug breakpoint was hit.
+ // 5 ?32 Set if an abort occurred during execution of a nested transaction.
+  const int tm_failure_bit[] = {Assembler::tm_tabort, // Note: Seems like the signal handler sets this, too.
+ Assembler::tm_failure_persistent, // inverted: transient
+ Assembler::tm_trans_cf,
+ Assembler::tm_footprint_of,
+ Assembler::tm_non_trans_cf,
+ Assembler::tm_suspended};
+ const bool tm_failure_inv[] = {false, true, false, false, false, false};
+ assert(sizeof(tm_failure_bit)/sizeof(int) == RTMLockingCounters::ABORT_STATUS_LIMIT, "adapt mapping!");
+
+ const Register addr_Reg = R0;
+ // Keep track of offset to where rtm_counters_Reg had pointed to.
+ int counters_offs = RTMLockingCounters::abort_count_offset();
+ addi(addr_Reg, rtm_counters_Reg, counters_offs);
+ const Register temp_Reg = rtm_counters_Reg;
+
+ //atomic_inc_ptr(addr_Reg, temp_Reg); We don't increment atomically
+ ldx(temp_Reg, addr_Reg);
+ addi(temp_Reg, temp_Reg, 1);
+ stdx(temp_Reg, addr_Reg);
+
+ if (PrintPreciseRTMLockingStatistics) {
+ int counters_offs_delta = RTMLockingCounters::abortX_count_offset() - counters_offs;
+
+ //mftexasr(abort_status); done by caller
+ for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
+ counters_offs += counters_offs_delta;
+ li(temp_Reg, counters_offs_delta); // can't use addi with R0
+ add(addr_Reg, addr_Reg, temp_Reg); // point to next counter
+ counters_offs_delta = sizeof(uintx);
+
+ Label check_abort;
+ rldicr_(temp_Reg, abort_status, tm_failure_bit[i], 0);
+ if (tm_failure_inv[i]) {
+ bne(CCR0, check_abort);
+ } else {
+ beq(CCR0, check_abort);
+ }
+ //atomic_inc_ptr(addr_Reg, temp_Reg); We don't increment atomically
+ ldx(temp_Reg, addr_Reg);
+ addi(temp_Reg, temp_Reg, 1);
+ stdx(temp_Reg, addr_Reg);
+ bind(check_abort);
+ }
+ }
+ li(temp_Reg, -counters_offs); // can't use addi with R0
+ add(rtm_counters_Reg, addr_Reg, temp_Reg); // restore
+}
+
+// Branch if (random & (count-1) != 0), count is 2^n
+// tmp and CR0 are killed
+void MacroAssembler::branch_on_random_using_tb(Register tmp, int count, Label& brLabel) {
+ mftb(tmp);
+ andi_(tmp, tmp, count-1);
+ bne(CCR0, brLabel);
+}
+
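Note: mftb reads the time-base register, used here as a cheap pseudo-random source; with count a power of two, the mask keeps only the low bits, so the branch is taken in (count-1) out of count calls and the guarded update runs roughly once every count calls, matching the RTMTotalCountIncrRate sampling. Sketch:

    // Semantics of branch_on_random_using_tb; timebase() stands in for mftb.
    bool skip_update(uint64_t timebase, int count) {  // count must be 2^n
      return (timebase & (count - 1)) != 0;           // true ~ (count-1)/count of the time
    }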
+// Perform abort ratio calculation, set no_rtm bit if high ratio.
+// input: rtm_counters_Reg (RTMLockingCounters* address) - KILLED
+void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
+ RTMLockingCounters* rtm_counters,
+ Metadata* method_data) {
+ Label L_done, L_check_always_rtm1, L_check_always_rtm2;
+
+ if (RTMLockingCalculationDelay > 0) {
+ // Delay calculation.
+ ld(rtm_counters_Reg, (RegisterOrConstant)(intptr_t)RTMLockingCounters::rtm_calculation_flag_addr());
+ cmpdi(CCR0, rtm_counters_Reg, 0);
+ beq(CCR0, L_done);
+ load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload
+ }
+ // Abort ratio calculation only if abort_count > RTMAbortThreshold.
+ // Aborted transactions = abort_count * 100
+ // All transactions = total_count * RTMTotalCountIncrRate
+ // Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
+ ld(R0, RTMLockingCounters::abort_count_offset(), rtm_counters_Reg);
+ cmpdi(CCR0, R0, RTMAbortThreshold);
+ blt(CCR0, L_check_always_rtm2);
+ mulli(R0, R0, 100);
+
+ const Register tmpReg = rtm_counters_Reg;
+ ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
+ mulli(tmpReg, tmpReg, RTMTotalCountIncrRate);
+ mulli(tmpReg, tmpReg, RTMAbortRatio);
+ cmpd(CCR0, R0, tmpReg);
+ blt(CCR0, L_check_always_rtm1); // jump to reload
+ if (method_data != NULL) {
+ // Set rtm_state to "no rtm" in MDO.
+ // Not using a metadata relocation. Method and Class Loader are kept alive anyway.
+ // (See nmethod::metadata_do and CodeBuffer::finalize_oop_references.)
+ load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg);
+ atomic_ori_int(R0, tmpReg, NoRTM);
+ }
+ b(L_done);
+
+ bind(L_check_always_rtm1);
+ load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload
+ bind(L_check_always_rtm2);
+ ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
+ cmpdi(CCR0, tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
+ blt(CCR0, L_done);
+ if (method_data != NULL) {
+ // Set rtm_state to "always rtm" in MDO.
+ // Not using a metadata relocation. See above.
+ load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg);
+ atomic_ori_int(R0, tmpReg, UseRTM);
+ }
+ bind(L_done);
+}
+
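Note: stripped of the register shuffling, the policy above reduces to the following (a sketch; the accessor and setter names are illustrative, not the exact RTMLockingCounters/MethodData API):

    void abort_ratio_calculation(RTMLockingCounters* c, MethodData* mdo) {
      uintx aborted = c->abort_count() * 100;                    // scaled aborts
      uintx total   = c->total_count() * RTMTotalCountIncrRate;  // sampled count -> estimate
      if (c->abort_count() >= (uintx)RTMAbortThreshold &&
          aborted >= total * RTMAbortRatio) {
        mdo->set_rtm_state(NoRTM);   // ratio too high: stop eliding at this site
      } else if (c->total_count() >= (uintx)(RTMLockingThreshold / RTMTotalCountIncrRate)) {
        mdo->set_rtm_state(UseRTM);  // enough history with few aborts: always elide
      }
    }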
+// Update counters and perform abort ratio calculation.
+// input: abort_status_Reg
+void MacroAssembler::rtm_profiling(Register abort_status_Reg, Register temp_Reg,
+ RTMLockingCounters* rtm_counters,
+ Metadata* method_data,
+ bool profile_rtm) {
+
+ assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
+ // Update rtm counters based on state at abort.
+ // Reads abort_status_Reg, updates flags.
+ assert_different_registers(abort_status_Reg, temp_Reg);
+ load_const_optimized(temp_Reg, (address)rtm_counters, R0);
+ rtm_counters_update(abort_status_Reg, temp_Reg);
+ if (profile_rtm) {
+ assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
+ rtm_abort_ratio_calculation(temp_Reg, rtm_counters, method_data);
+ }
+}
+
+// Retry on abort if abort's status indicates non-persistent failure.
+// inputs: retry_count_Reg
+// : abort_status_Reg
+// output: retry_count_Reg decremented by 1
+void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg,
+ Label& retryLabel, Label* checkRetry) {
+ Label doneRetry;
+ rldicr_(R0, abort_status_Reg, tm_failure_persistent, 0);
+ bne(CCR0, doneRetry);
+ if (checkRetry) { bind(*checkRetry); }
+ addic_(retry_count_Reg, retry_count_Reg, -1);
+ blt(CCR0, doneRetry);
+ smt_yield(); // Can't use wait(). No permission (SIGILL).
+ b(retryLabel);
+ bind(doneRetry);
+}
+
+// Spin and retry if lock is busy.
+// inputs: owner_addr_Reg (monitor address)
+// : retry_count_Reg
+// output: retry_count_Reg decremented by 1
+// CTR is killed
+void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register owner_addr_Reg, Label& retryLabel) {
+ Label SpinLoop, doneRetry;
+ addic_(retry_count_Reg, retry_count_Reg, -1);
+ blt(CCR0, doneRetry);
+ li(R0, RTMSpinLoopCount);
+ mtctr(R0);
+
+ bind(SpinLoop);
+ smt_yield(); // Can't use waitrsv(). No permission (SIGILL).
+ bdz(retryLabel);
+ ld(R0, 0, owner_addr_Reg);
+ cmpdi(CCR0, R0, 0);
+ bne(CCR0, SpinLoop);
+ b(retryLabel);
+
+ bind(doneRetry);
+}
+
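Note: the control flow of the two retry helpers combines to roughly the following (sketch; lock_owner() and retry() are placeholders for the owner load and the branch back to retryLabel):

    if (--retry_count < 0) return;      // budget exhausted: fall through to doneRetry
    for (int spins = RTMSpinLoopCount; spins > 0; spins--) {
      smt_yield();                      // drop SMT priority while the lock is busy
      if (lock_owner() == 0) break;     // owner released the lock, stop spinning
    }
    retry();                            // branch back to retryLabel either way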
+// Use RTM for normal stack locks.
+// Input: obj (object to lock)
+void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
+ Register obj, Register mark_word, Register tmp,
+ Register retry_on_abort_count_Reg,
+ RTMLockingCounters* stack_rtm_counters,
+ Metadata* method_data, bool profile_rtm,
+ Label& DONE_LABEL, Label& IsInflated) {
+ assert(UseRTMForStackLocks, "why call this otherwise?");
+ assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
+ Label L_rtm_retry, L_decrement_retry, L_on_abort;
+
+ if (RTMRetryCount > 0) {
+ load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
+ bind(L_rtm_retry);
+ }
+ andi_(R0, mark_word, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
+ bne(CCR0, IsInflated);
+
+ if (PrintPreciseRTMLockingStatistics || profile_rtm) {
+ Label L_noincrement;
+ if (RTMTotalCountIncrRate > 1) {
+ branch_on_random_using_tb(tmp, (int)RTMTotalCountIncrRate, L_noincrement);
+ }
+ assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
+ load_const_optimized(tmp, (address)stack_rtm_counters->total_count_addr(), R0);
+ //atomic_inc_ptr(tmp, /*temp, will be reloaded*/mark_word); We don't increment atomically
+ ldx(mark_word, tmp);
+ addi(mark_word, mark_word, 1);
+ stdx(mark_word, tmp);
+ bind(L_noincrement);
+ }
+ tbegin_();
+ beq(CCR0, L_on_abort);
+ ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // Reload in transaction, conflicts need to be tracked.
+ andi(R0, mark_word, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpwi(flag, R0, markOopDesc::unlocked_value); // bits = 001 unlocked
+ beq(flag, DONE_LABEL); // all done if unlocked
+
+ if (UseRTMXendForLockBusy) {
+ tend_();
+ b(L_decrement_retry);
+ } else {
+ tabort_();
+ }
+ bind(L_on_abort);
+ const Register abort_status_Reg = tmp;
+ mftexasr(abort_status_Reg);
+ if (PrintPreciseRTMLockingStatistics || profile_rtm) {
+ rtm_profiling(abort_status_Reg, /*temp*/mark_word, stack_rtm_counters, method_data, profile_rtm);
+ }
+ ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // reload
+ if (RTMRetryCount > 0) {
+ // Retry on lock abort if abort status is not permanent.
+ rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry, &L_decrement_retry);
+ } else {
+ bind(L_decrement_retry);
+ }
+}
+
+// Use RTM for inflated locks.
+// inputs: obj (object to lock)
+// mark_word (current header - KILLED)
+// boxReg (on-stack box address (displaced header location) - KILLED)
+void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
+ Register obj, Register mark_word, Register boxReg,
+ Register retry_on_busy_count_Reg, Register retry_on_abort_count_Reg,
+ RTMLockingCounters* rtm_counters,
+ Metadata* method_data, bool profile_rtm,
+ Label& DONE_LABEL) {
+ assert(UseRTMLocking, "why call this otherwise?");
+ Label L_rtm_retry, L_decrement_retry, L_on_abort;
+ // Clean monitor_value bit to get valid pointer.
+ int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+
+ // Store non-null, using boxReg instead of (intptr_t)markOopDesc::unused_mark().
+ std(boxReg, BasicLock::displaced_header_offset_in_bytes(), boxReg);
+ const Register tmpReg = boxReg;
+ const Register owner_addr_Reg = mark_word;
+ addi(owner_addr_Reg, mark_word, owner_offset);
+
+ if (RTMRetryCount > 0) {
+ load_const_optimized(retry_on_busy_count_Reg, RTMRetryCount); // Retry on lock busy.
+ load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort.
+ bind(L_rtm_retry);
+ }
+ if (PrintPreciseRTMLockingStatistics || profile_rtm) {
+ Label L_noincrement;
+ if (RTMTotalCountIncrRate > 1) {
+ branch_on_random_using_tb(R0, (int)RTMTotalCountIncrRate, L_noincrement);
+ }
+ assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
+ load_const(R0, (address)rtm_counters->total_count_addr(), tmpReg);
+ //atomic_inc_ptr(R0, tmpReg); We don't increment atomically
+ ldx(tmpReg, R0);
+ addi(tmpReg, tmpReg, 1);
+ stdx(tmpReg, R0);
+ bind(L_noincrement);
+ }
+ tbegin_();
+ beq(CCR0, L_on_abort);
+ // We don't reload mark word. Will only be reset at safepoint.
+ ld(R0, 0, owner_addr_Reg); // Load in transaction, conflicts need to be tracked.
+ cmpdi(flag, R0, 0);
+ beq(flag, DONE_LABEL);
+
+ if (UseRTMXendForLockBusy) {
+ tend_();
+ b(L_decrement_retry);
+ } else {
+ tabort_();
+ }
+ bind(L_on_abort);
+ const Register abort_status_Reg = tmpReg;
+ mftexasr(abort_status_Reg);
+ if (PrintPreciseRTMLockingStatistics || profile_rtm) {
+ rtm_profiling(abort_status_Reg, /*temp*/ owner_addr_Reg, rtm_counters, method_data, profile_rtm);
+ // Restore owner_addr_Reg
+ ld(mark_word, oopDesc::mark_offset_in_bytes(), obj);
+#ifdef ASSERT
+ andi_(R0, mark_word, markOopDesc::monitor_value);
+ asm_assert_ne("must be inflated", 0xa754); // Deflating only allowed at safepoint.
+#endif
+ addi(owner_addr_Reg, mark_word, owner_offset);
+ }
+ if (RTMRetryCount > 0) {
+ // Retry on lock abort if abort status is not permanent.
+ rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
+ }
+
+ // Appears unlocked - try to swing _owner from null to non-null.
+ cmpxchgd(flag, /*current val*/ R0, (intptr_t)0, /*new val*/ R16_thread, owner_addr_Reg,
+ MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
+ MacroAssembler::cmpxchgx_hint_acquire_lock(), noreg, &L_decrement_retry, true);
+
+ if (RTMRetryCount > 0) {
+    // Success: done. Otherwise retry.
+ b(DONE_LABEL);
+ bind(L_decrement_retry);
+ // Spin and retry if lock is busy.
+ rtm_retry_lock_on_busy(retry_on_busy_count_Reg, owner_addr_Reg, L_rtm_retry);
+ } else {
+ bind(L_decrement_retry);
+ }
+}
+
+#endif // INCLUDE_RTM_OPT
+
// "The box" is the space on the stack where we copy the object mark.
void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
- Register temp, Register displaced_header, Register current_header) {
+ Register temp, Register displaced_header, Register current_header,
+ bool try_bias,
+ RTMLockingCounters* rtm_counters,
+ RTMLockingCounters* stack_rtm_counters,
+ Metadata* method_data,
+ bool use_rtm, bool profile_rtm) {
assert_different_registers(oop, box, temp, displaced_header, current_header);
assert(flag != CCR0, "bad condition register");
Label cont;
@@ -2006,10 +2365,18 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
return;
}
- if (UseBiasedLocking) {
+ if (try_bias) {
biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
}
+#if INCLUDE_RTM_OPT
+ if (UseRTMForStackLocks && use_rtm) {
+ rtm_stack_locking(flag, oop, displaced_header, temp, /*temp*/ current_header,
+ stack_rtm_counters, method_data, profile_rtm,
+ cont, object_has_monitor);
+ }
+#endif // INCLUDE_RTM_OPT
+
// Handle existing monitor.
if ((EmitSync & 0x02) == 0) {
// The object has an existing monitor iff (mark & monitor_value) != 0.
@@ -2066,14 +2433,22 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
bind(object_has_monitor);
// The object's monitor m is unlocked iff m->owner == NULL,
// otherwise m->owner may contain a thread or a stack address.
- //
+
+#if INCLUDE_RTM_OPT
+ // Use the same RTM locking code in 32- and 64-bit VM.
+ if (use_rtm) {
+ rtm_inflated_locking(flag, oop, displaced_header, box, temp, /*temp*/ current_header,
+ rtm_counters, method_data, profile_rtm, cont);
+ } else {
+#endif // INCLUDE_RTM_OPT
+
// Try to CAS m->owner from NULL to current thread.
addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
li(displaced_header, 0);
// CmpxchgX sets flag to cmpX(current, displaced).
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
- /*compare_value=*/displaced_header,
+ /*compare_value=*/(intptr_t)0,
/*exchange_value=*/R16_thread,
/*where=*/temp,
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
@@ -2095,6 +2470,10 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
//asm_assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), temp,
// "monitor->OwnerIsThread shouldn't be 0", -1);
# endif
+
+#if INCLUDE_RTM_OPT
+ } // use_rtm()
+#endif
}
bind(cont);
@@ -2103,7 +2482,8 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
}
void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
- Register temp, Register displaced_header, Register current_header) {
+ Register temp, Register displaced_header, Register current_header,
+ bool try_bias, bool use_rtm) {
assert_different_registers(oop, box, temp, displaced_header, current_header);
assert(flag != CCR0, "bad condition register");
Label cont;
@@ -2115,10 +2495,24 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
return;
}
- if (UseBiasedLocking) {
+ if (try_bias) {
biased_locking_exit(flag, oop, current_header, cont);
}
+#if INCLUDE_RTM_OPT
+ if (UseRTMForStackLocks && use_rtm) {
+ assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
+ Label L_regular_unlock;
+ ld(current_header, oopDesc::mark_offset_in_bytes(), oop); // fetch markword
+ andi(R0, current_header, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
+ cmpwi(flag, R0, markOopDesc::unlocked_value); // bits = 001 unlocked
+ bne(flag, L_regular_unlock); // else RegularLock
+ tend_(); // otherwise end...
+ b(cont); // ... and we're done
+ bind(L_regular_unlock);
+ }
+#endif
+
// Find the lock address and load the displaced header from the stack.
ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
@@ -2129,13 +2523,12 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
// Handle existing monitor.
if ((EmitSync & 0x02) == 0) {
// The object has an existing monitor iff (mark & monitor_value) != 0.
+ RTM_OPT_ONLY( if (!(UseRTMForStackLocks && use_rtm)) ) // skip load if already done
ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
- andi(temp, current_header, markOopDesc::monitor_value);
- cmpdi(flag, temp, 0);
- bne(flag, object_has_monitor);
+ andi_(R0, current_header, markOopDesc::monitor_value);
+ bne(CCR0, object_has_monitor);
}
-
  // Check if it is still a lightweight lock; this is true if we see
// the stack address of the basicLock in the markOop of the object.
// Cmpxchg sets flag to cmpd(current_header, box).
@@ -2158,6 +2551,20 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
bind(object_has_monitor);
addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
+
+ // It's inflated.
+#if INCLUDE_RTM_OPT
+ if (use_rtm) {
+ Label L_regular_inflated_unlock;
+ // Clean monitor_value bit to get valid pointer
+ cmpdi(flag, temp, 0);
+ bne(flag, L_regular_inflated_unlock);
+ tend_();
+ b(cont);
+ bind(L_regular_inflated_unlock);
+ }
+#endif
+
ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
xorr(temp, R16_thread, temp); // Will be 0 if we are the owner.
orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions.
@@ -2441,6 +2848,8 @@ void MacroAssembler::get_vm_result(Register oop_result) {
// oop_result
// R16_thread->in_bytes(JavaThread::vm_result_offset())
+ verify_thread();
+
ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread);
li(R0, 0);
std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
@@ -2462,26 +2871,24 @@ void MacroAssembler::get_vm_result_2(Register metadata_result) {
std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
}
-
-void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
+Register MacroAssembler::encode_klass_not_null(Register dst, Register src) {
Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
if (Universe::narrow_klass_base() != 0) {
// Use dst as temp if it is free.
- load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg);
- sub(dst, current, R0);
+ sub_const_optimized(dst, current, Universe::narrow_klass_base(), R0);
current = dst;
}
if (Universe::narrow_klass_shift() != 0) {
srdi(dst, current, Universe::narrow_klass_shift());
current = dst;
}
- mr_if_needed(dst, current); // Move may be required.
+ return current;
}
void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
if (UseCompressedClassPointers) {
- encode_klass_not_null(ck, klass);
- stw(ck, oopDesc::klass_offset_in_bytes(), dst_oop);
+ Register compressedKlass = encode_klass_not_null(ck, klass);
+ stw(compressedKlass, oopDesc::klass_offset_in_bytes(), dst_oop);
} else {
std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
}
@@ -2514,8 +2921,7 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
sldi(shifted_src, src, Universe::narrow_klass_shift());
}
if (Universe::narrow_klass_base() != 0) {
- load_const(R0, Universe::narrow_klass_base());
- add(dst, shifted_src, R0);
+ add_const_optimized(dst, shifted_src, Universe::narrow_klass_base(), R0);
}
}
diff --git a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp
index 46216782c97..2ed004aba44 100644
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
#include "asm/assembler.hpp"
+#include "runtime/rtmLocking.hpp"
#include "utilities/macros.hpp"
// MacroAssembler extends Assembler by a few frequently used macros.
@@ -432,8 +433,8 @@ class MacroAssembler: public Assembler {
int semantics, bool cmpxchgx_hint = false,
Register int_flag_success = noreg, bool contention_hint = false);
void cmpxchgd(ConditionRegister flag,
- Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
- int semantics, bool cmpxchgx_hint = false,
+ Register dest_current_value, RegisterOrConstant compare_value, Register exchange_value,
+ Register addr_base, int semantics, bool cmpxchgx_hint = false,
Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
// interface method calling
@@ -506,8 +507,42 @@ class MacroAssembler: public Assembler {
// biased locking exit case failed.
void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);
- void compiler_fast_lock_object( ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
- void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
+ void atomic_inc_ptr(Register addr, Register result, int simm16 = 1);
+ void atomic_ori_int(Register addr, Register result, int uimm16);
+
+#if INCLUDE_RTM_OPT
+ void rtm_counters_update(Register abort_status, Register rtm_counters);
+ void branch_on_random_using_tb(Register tmp, int count, Label& brLabel);
+ void rtm_abort_ratio_calculation(Register rtm_counters_reg, RTMLockingCounters* rtm_counters,
+ Metadata* method_data);
+ void rtm_profiling(Register abort_status_Reg, Register temp_Reg,
+ RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
+ void rtm_retry_lock_on_abort(Register retry_count, Register abort_status,
+ Label& retryLabel, Label* checkRetry = NULL);
+ void rtm_retry_lock_on_busy(Register retry_count, Register owner_addr, Label& retryLabel);
+ void rtm_stack_locking(ConditionRegister flag, Register obj, Register mark_word, Register tmp,
+ Register retry_on_abort_count,
+ RTMLockingCounters* stack_rtm_counters,
+ Metadata* method_data, bool profile_rtm,
+ Label& DONE_LABEL, Label& IsInflated);
+ void rtm_inflated_locking(ConditionRegister flag, Register obj, Register mark_word, Register box,
+ Register retry_on_busy_count, Register retry_on_abort_count,
+ RTMLockingCounters* rtm_counters,
+ Metadata* method_data, bool profile_rtm,
+ Label& DONE_LABEL);
+#endif
+
+ void compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
+ Register tmp1, Register tmp2, Register tmp3,
+ bool try_bias = UseBiasedLocking,
+ RTMLockingCounters* rtm_counters = NULL,
+ RTMLockingCounters* stack_rtm_counters = NULL,
+ Metadata* method_data = NULL,
+ bool use_rtm = false, bool profile_rtm = false);
+
+ void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
+ Register tmp1, Register tmp2, Register tmp3,
+ bool try_bias = UseBiasedLocking, bool use_rtm = false);
// Support for serializing memory accesses between threads
void serialize_memory(Register thread, Register tmp1, Register tmp2);
@@ -576,7 +611,7 @@ class MacroAssembler: public Assembler {
Register tmp = noreg);
// Null allowed.
- inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);
+ inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg, Label *is_null = NULL);
// Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
// src == d allowed.
@@ -593,7 +628,7 @@ class MacroAssembler: public Assembler {
void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
static int instr_size_for_decode_klass_not_null();
void decode_klass_not_null(Register dst, Register src = noreg);
- void encode_klass_not_null(Register dst, Register src = noreg);
+ Register encode_klass_not_null(Register dst, Register src = noreg);
// Load common heap base into register.
void reinit_heapbase(Register d, Register tmp = noreg);
diff --git a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp
index f5d19dff066..a52931d860d 100644
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -333,19 +333,29 @@ inline void MacroAssembler::store_heap_oop_not_null(Register d, RegisterOrConsta
}
}
-inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1) {
+inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1, Label *is_null) {
if (UseCompressedOops) {
lwz(d, offs, s1);
- decode_heap_oop(d);
+ if (is_null != NULL) {
+ cmpwi(CCR0, d, 0);
+ beq(CCR0, *is_null);
+ decode_heap_oop_not_null(d);
+ } else {
+ decode_heap_oop(d);
+ }
} else {
ld(d, offs, s1);
+ if (is_null != NULL) {
+ cmpdi(CCR0, d, 0);
+ beq(CCR0, *is_null);
+ }
}
}
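Note: the new is_null out-label lets callers skip the decode entirely for null references. In effect (sketch; base and shift stand for Universe::narrow_oop_base()/narrow_oop_shift()):

    // Compressed-oops case of load_heap_oop(d, offs, s1, &is_null).
    void* load_heap_oop(const uint32_t* addr, uintptr_t base, int shift, bool* is_null) {
      uint32_t n = *addr;                // lwz
      *is_null = (n == 0);               // cmpwi; the caller branches like the beq
      return *is_null ? nullptr
                      : (void*)(base + ((uintptr_t)n << shift));  // decode_heap_oop_not_null
    }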
inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register src) {
Register current = (src != noreg) ? src : d; // Oop to be compressed is in d if no src provided.
if (Universe::narrow_oop_base_overlaps()) {
- sub(d, current, R30);
+ sub_const_optimized(d, current, Universe::narrow_oop_base(), R0);
current = d;
}
if (Universe::narrow_oop_shift() != 0) {
@@ -358,7 +368,7 @@ inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register sr
inline Register MacroAssembler::decode_heap_oop_not_null(Register d, Register src) {
if (Universe::narrow_oop_base_disjoint() && src != noreg && src != d &&
Universe::narrow_oop_shift() != 0) {
- mr(d, R30);
+ load_const_optimized(d, Universe::narrow_oop_base(), R0);
rldimi(d, src, Universe::narrow_oop_shift(), 32-Universe::narrow_oop_shift());
return d;
}
@@ -369,7 +379,7 @@ inline Register MacroAssembler::decode_heap_oop_not_null(Register d, Register sr
current = d;
}
if (Universe::narrow_oop_base() != NULL) {
- add(d, current, R30);
+ add_const_optimized(d, current, Universe::narrow_oop_base(), R0);
current = d;
}
return current; // Decoded oop is in this register.
@@ -377,11 +387,19 @@ inline Register MacroAssembler::decode_heap_oop_not_null(Register d, Register sr
inline void MacroAssembler::decode_heap_oop(Register d) {
Label isNull;
+ bool use_isel = false;
if (Universe::narrow_oop_base() != NULL) {
cmpwi(CCR0, d, 0);
- beq(CCR0, isNull);
+ if (VM_Version::has_isel()) {
+ use_isel = true;
+ } else {
+ beq(CCR0, isNull);
+ }
}
decode_heap_oop_not_null(d);
+ if (use_isel) {
+ isel_0(d, CCR0, Assembler::equal);
+ }
bind(isNull);
}
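Note: on isel-capable chips the null check above becomes branch-free for the narrow_oop_base() != NULL case: the value is decoded unconditionally and then zeroed again if the earlier compare saw null. Roughly (sketch):

    uintptr_t decode_heap_oop(uint32_t n, uintptr_t base, int shift) {
      uintptr_t decoded = base + ((uintptr_t)n << shift);  // decode_heap_oop_not_null
      return (n == 0) ? 0 : decoded;                       // isel_0 on CR0 'equal'
    }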
diff --git a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp
index 32b5c5c5a5b..1d2dea0cc02 100644
--- a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp
@@ -466,7 +466,7 @@ void trace_method_handle_stub(const char* adaptername,
strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
const char* mh_reg_name = has_mh ? "R23_method_handle" : "G23";
tty->print_cr("MH %s %s="INTPTR_FORMAT " sp=" INTPTR_FORMAT,
- adaptername, mh_reg_name, (intptr_t) mh, entry_sp);
+ adaptername, mh_reg_name, p2i(mh), p2i(entry_sp));
if (Verbose) {
tty->print_cr("Registers:");
diff --git a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.hpp b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.hpp
index 640813e7d3e..63fd6070272 100644
--- a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,9 +27,6 @@
// These definitions are inlined into class MethodHandles.
// Adapters
-//static unsigned int adapter_code_size() {
-// return 32*K DEBUG_ONLY(+ 16*K) + (TraceMethodHandles ? 16*K : 0) + (VerifyMethodHandles ? 32*K : 0);
-//}
enum /* platform_dependent_constants */ {
adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000))
};
@@ -45,7 +42,9 @@ public:
static void verify_method_handle(MacroAssembler* _masm, Register mh_reg,
Register temp_reg, Register temp2_reg) {
- Unimplemented();
+ verify_klass(_masm, mh_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MethodHandle),
+ temp_reg, temp2_reg,
+ "reference is a MH");
}
static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;
diff --git a/hotspot/src/cpu/ppc/vm/ppc.ad b/hotspot/src/cpu/ppc/vm/ppc.ad
index 4c64d96a3e4..388b102ee11 100644
--- a/hotspot/src/cpu/ppc/vm/ppc.ad
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad
@@ -447,8 +447,8 @@ reg_class bits32_reg_rw(
R26,
R27,
R28,
-/*R29*/ // global TOC
-/*R30*/ // Narrow Oop Base
+/*R29,*/ // global TOC
+ R30,
R31
);
@@ -484,58 +484,11 @@ reg_class bits32_reg_ro(
R26,
R27,
R28,
-/*R29*/
-/*R30*/ // Narrow Oop Base
+/*R29,*/
+ R30,
R31
);
-// Complement-required-in-pipeline operands for narrow oops.
-reg_class bits32_reg_ro_not_complement (
-/*R0*/ // R0
- R1, // SP
- R2, // TOC
- R3,
- R4,
- R5,
- R6,
- R7,
- R8,
- R9,
- R10,
- R11,
- R12,
-/*R13,*/ // system thread id
- R14,
- R15,
- R16, // R16_thread
- R17,
- R18,
- R19,
- R20,
- R21,
- R22,
-/*R23,
- R24,
- R25,
- R26,
- R27,
- R28,*/
-/*R29,*/ // TODO: let allocator handle TOC!!
-/*R30,*/
- R31
-);
-
-// Complement-required-in-pipeline operands for narrow oops.
-// See 64-bit declaration.
-reg_class bits32_reg_ro_complement (
- R23,
- R24,
- R25,
- R26,
- R27,
- R28
-);
-
reg_class rscratch1_bits32_reg(R11);
reg_class rscratch2_bits32_reg(R12);
reg_class rarg1_bits32_reg(R3);
@@ -591,8 +544,8 @@ reg_class bits64_reg_rw(
R26_H, R26,
R27_H, R27,
R28_H, R28,
-/*R29_H, R29*/
-/*R30_H, R30*/
+/*R29_H, R29,*/
+ R30_H, R30,
R31_H, R31
);
@@ -629,8 +582,8 @@ reg_class bits64_reg_leaf_call(
R26_H, R26,
R27_H, R27,
R28_H, R28,
-/*R29_H, R29*/
-/*R30_H, R30*/
+/*R29_H, R29,*/
+ R30_H, R30,
R31_H, R31
);
@@ -667,8 +620,8 @@ reg_class bits64_constant_table_base(
R26_H, R26,
R27_H, R27,
R28_H, R28,
-/*R29_H, R29*/
-/*R30_H, R30*/
+/*R29_H, R29,*/
+ R30_H, R30,
R31_H, R31
);
@@ -704,64 +657,11 @@ reg_class bits64_reg_ro(
R26_H, R26,
R27_H, R27,
R28_H, R28,
-/*R29_H, R29*/ // TODO: let allocator handle TOC!!
-/*R30_H, R30,*/
+/*R29_H, R29,*/ // TODO: let allocator handle TOC!!
+ R30_H, R30,
R31_H, R31
);
-// Complement-required-in-pipeline operands.
-reg_class bits64_reg_ro_not_complement (
-/*R0_H, R0*/ // R0
- R1_H, R1, // SP
- R2_H, R2, // TOC
- R3_H, R3,
- R4_H, R4,
- R5_H, R5,
- R6_H, R6,
- R7_H, R7,
- R8_H, R8,
- R9_H, R9,
- R10_H, R10,
- R11_H, R11,
- R12_H, R12,
-/*R13_H, R13*/ // system thread id
- R14_H, R14,
- R15_H, R15,
- R16_H, R16, // R16_thread
- R17_H, R17,
- R18_H, R18,
- R19_H, R19,
- R20_H, R20,
- R21_H, R21,
- R22_H, R22,
-/*R23_H, R23,
- R24_H, R24,
- R25_H, R25,
- R26_H, R26,
- R27_H, R27,
- R28_H, R28,*/
-/*R29_H, R29*/ // TODO: let allocator handle TOC!!
-/*R30_H, R30,*/
- R31_H, R31
-);
-
-// Complement-required-in-pipeline operands.
-// This register mask is used for the trap instructions that implement
-// the null checks on AIX. The trap instruction first computes the
-// complement of the value it shall trap on. Because of this, the
-// instruction can not be scheduled in the same cycle as an other
-// instruction reading the normal value of the same register. So we
-// force the value to check into 'bits64_reg_ro_not_complement'
-// and then copy it to 'bits64_reg_ro_complement' for the trap.
-reg_class bits64_reg_ro_complement (
- R23_H, R23,
- R24_H, R24,
- R25_H, R25,
- R26_H, R26,
- R27_H, R27,
- R28_H, R28
-);
-
// ----------------------------
// Special Class for Condition Code Flags Register
@@ -777,6 +677,17 @@ reg_class int_flags(
CCR7
);
+reg_class int_flags_ro(
+ CCR0,
+ CCR1,
+ CCR2,
+ CCR3,
+ CCR4,
+ CCR5,
+ CCR6,
+ CCR7
+);
+
reg_class int_flags_CR0(CCR0);
reg_class int_flags_CR1(CCR1);
reg_class int_flags_CR6(CCR6);
@@ -2876,7 +2787,7 @@ encode %{
// Use release_store for card-marking to ensure that previous
// oop-stores are visible before the card-mark change.
- enc_class enc_cms_card_mark(memory mem, iRegLdst releaseFieldAddr) %{
+ enc_class enc_cms_card_mark(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{
// TODO: PPC port $archOpcode(ppc64Opcode_compound);
// FIXME: Implement this as a cmove and use a fixed condition code
// register which is written on every transition to compiled code,
@@ -2897,8 +2808,8 @@ encode %{
// Check CMSCollectorCardTableModRefBSExt::_requires_release and do the
// StoreStore barrier conditionally.
__ lwz(R0, 0, $releaseFieldAddr$$Register);
- __ cmpwi(CCR0, R0, 0);
- __ beq_predict_taken(CCR0, skip_storestore);
+ __ cmpwi($crx$$CondRegister, R0, 0);
+ __ beq_predict_taken($crx$$CondRegister, skip_storestore);
#endif
__ li(R0, 0);
__ membar(Assembler::StoreStore);
@@ -3108,7 +3019,7 @@ encode %{
nodes->push(n2);
%}
- enc_class enc_cmove_reg(iRegIdst dst, flagsReg crx, iRegIsrc src, cmpOp cmp) %{
+ enc_class enc_cmove_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src, cmpOp cmp) %{
// TODO: PPC port $archOpcode(ppc64Opcode_cmove);
MacroAssembler _masm(&cbuf);
@@ -3123,7 +3034,7 @@ encode %{
__ bind(done);
%}
- enc_class enc_cmove_imm(iRegIdst dst, flagsReg crx, immI16 src, cmpOp cmp) %{
+ enc_class enc_cmove_imm(iRegIdst dst, flagsRegSrc crx, immI16 src, cmpOp cmp) %{
// TODO: PPC port $archOpcode(ppc64Opcode_cmove);
MacroAssembler _masm(&cbuf);
@@ -3269,7 +3180,7 @@ encode %{
__ bind(done);
%}
- enc_class enc_cmove_bso_stackSlotL(iRegLdst dst, flagsReg crx, stackSlotL mem ) %{
+ enc_class enc_cmove_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL mem ) %{
// TODO: PPC port $archOpcode(ppc64Opcode_cmove);
MacroAssembler _masm(&cbuf);
@@ -3281,7 +3192,7 @@ encode %{
__ bind(done);
%}
- enc_class enc_bc(flagsReg crx, cmpOp cmp, Label lbl) %{
+ enc_class enc_bc(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
// TODO: PPC port $archOpcode(ppc64Opcode_bc);
MacroAssembler _masm(&cbuf);
@@ -3309,7 +3220,7 @@ encode %{
l);
%}
- enc_class enc_bc_far(flagsReg crx, cmpOp cmp, Label lbl) %{
+ enc_class enc_bc_far(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
// The scheduler doesn't know about branch shortening, so we set the opcode
// to ppc64Opcode_bc in order to hide this detail from the scheduler.
// TODO: PPC port $archOpcode(ppc64Opcode_bc);
@@ -3341,7 +3252,7 @@ encode %{
%}
// Branch used with Power6 scheduling (can be shortened without changing the node).
- enc_class enc_bc_short_far(flagsReg crx, cmpOp cmp, Label lbl) %{
+ enc_class enc_bc_short_far(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
// The scheduler doesn't know about branch shortening, so we set the opcode
// to ppc64Opcode_bc in order to hide this detail from the scheduler.
// TODO: PPC port $archOpcode(ppc64Opcode_bc);
@@ -4700,6 +4611,15 @@ operand flagsReg() %{
interface(REG_INTER);
%}
+operand flagsRegSrc() %{
+ constraint(ALLOC_IN_RC(int_flags_ro));
+ match(RegFlags);
+ match(flagsReg);
+ match(flagsRegCR0);
+ format %{ %}
+ interface(REG_INTER);
+%}
+
// Condition Code Flag Register CR0
operand flagsRegCR0() %{
constraint(ALLOC_IN_RC(int_flags_CR0));
@@ -4783,6 +4703,13 @@ operand iRegN2P(iRegNsrc reg) %{
predicate(false /* TODO: PPC port MatchDecodeNodes*/);
constraint(ALLOC_IN_RC(bits32_reg_ro));
match(DecodeN reg);
+ format %{ "$reg" %}
+ interface(REG_INTER)
+%}
+
+operand iRegN2P_klass(iRegNsrc reg) %{
+ predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(bits32_reg_ro));
match(DecodeNKlass reg);
format %{ "$reg" %}
interface(REG_INTER)
@@ -4839,6 +4766,19 @@ operand indirectNarrow(iRegNsrc reg) %{
predicate(false /* TODO: PPC port MatchDecodeNodes*/);
constraint(ALLOC_IN_RC(bits64_reg_ro));
match(DecodeN reg);
+ op_cost(100);
+ format %{ "[$reg]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index(0x0);
+ scale(0x0);
+ disp(0x0);
+ %}
+%}
+
+operand indirectNarrow_klass(iRegNsrc reg) %{
+ predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(bits64_reg_ro));
match(DecodeNKlass reg);
op_cost(100);
format %{ "[$reg]" %}
@@ -4855,6 +4795,19 @@ operand indOffset16Narrow(iRegNsrc reg, immL16 offset) %{
predicate(false /* TODO: PPC port MatchDecodeNodes*/);
constraint(ALLOC_IN_RC(bits64_reg_ro));
match(AddP (DecodeN reg) offset);
+ op_cost(100);
+ format %{ "[$reg + $offset]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index(0x0);
+ scale(0x0);
+ disp($offset);
+ %}
+%}
+
+operand indOffset16Narrow_klass(iRegNsrc reg, immL16 offset) %{
+ predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(bits64_reg_ro));
match(AddP (DecodeNKlass reg) offset);
op_cost(100);
format %{ "[$reg + $offset]" %}
@@ -4871,6 +4824,19 @@ operand indOffset16NarrowAlg4(iRegNsrc reg, immL16Alg4 offset) %{
predicate(false /* TODO: PPC port MatchDecodeNodes*/);
constraint(ALLOC_IN_RC(bits64_reg_ro));
match(AddP (DecodeN reg) offset);
+ op_cost(100);
+ format %{ "[$reg + $offset]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index(0x0);
+ scale(0x0);
+ disp($offset);
+ %}
+%}
+
+operand indOffset16NarrowAlg4_klass(iRegNsrc reg, immL16Alg4 offset) %{
+ predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
+ constraint(ALLOC_IN_RC(bits64_reg_ro));
match(AddP (DecodeNKlass reg) offset);
op_cost(100);
format %{ "[$reg + $offset]" %}
@@ -4998,9 +4964,9 @@ operand cmpOp() %{
// encoding and format. The classic case of this is memory operands.
// Indirect is not included since its use is limited to Compare & Swap.
-opclass memory(indirect, indOffset16 /*, indIndex, tlsReference*/, indirectNarrow, indOffset16Narrow);
+opclass memory(indirect, indOffset16 /*, indIndex, tlsReference*/, indirectNarrow, indirectNarrow_klass, indOffset16Narrow, indOffset16Narrow_klass);
// Memory operand where offsets are 4-aligned. Required for ld, std.
-opclass memoryAlg4(indirect, indOffset16Alg4, indirectNarrow, indOffset16NarrowAlg4);
+opclass memoryAlg4(indirect, indOffset16Alg4, indirectNarrow, indOffset16NarrowAlg4, indOffset16NarrowAlg4_klass);
opclass indirectMemory(indirect, indirectNarrow);
// Special opclass for I and ConvL2I.
@@ -5009,7 +4975,7 @@ opclass iRegIsrc_iRegL2Isrc(iRegIsrc, iRegL2Isrc);
// Operand classes to match encode and decode. iRegN_P2N is only used
// for storeN. I have never seen an encode node elsewhere.
opclass iRegN_P2N(iRegNsrc, iRegP2N);
-opclass iRegP_N2P(iRegPsrc, iRegN2P);
+opclass iRegP_N2P(iRegPsrc, iRegN2P, iRegN2P_klass);
//----------PIPELINE-----------------------------------------------------------
@@ -5593,6 +5559,19 @@ instruct loadN2P_unscaled(iRegPdst dst, memory mem) %{
ins_pipe(pipe_class_memory);
%}
+instruct loadN2P_klass_unscaled(iRegPdst dst, memory mem) %{
+ match(Set dst (DecodeNKlass (LoadNKlass mem)));
+ // SAPJVM GL 2014-05-21 Differs.
+ predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0 &&
+ _kids[0]->_leaf->as_Load()->is_unordered());
+ ins_cost(MEMORY_REF_COST);
+
+ format %{ "LWZ $dst, $mem \t// DecodeN (unscaled)" %}
+ size(4);
+ ins_encode( enc_lwz(dst, mem) );
+ ins_pipe(pipe_class_memory);
+%}
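
The new `_klass` operands and loadN2P_klass_unscaled only apply when Universe::narrow_klass_base() == NULL and the shift is 0, i.e. when the 32-bit compressed value already equals the pointer. A small C++ sketch of the general decode and the degenerate unscaled case (base and shift passed as plain parameters for illustration):

    #include <cstdint>

    // General compressed-pointer decode: ptr = base + (narrow << shift).
    inline void* decode(uint32_t narrow, uintptr_t base, int shift) {
      return reinterpret_cast<void*>(base + (uintptr_t(narrow) << shift));
    }

    // With base == 0 and shift == 0 this collapses to a zero-extending
    // 32-bit load, which is why a single LWZ suffices in the rule above.
    inline void* decode_unscaled(uint32_t narrow) {
      return reinterpret_cast<void*>(uintptr_t(narrow));
    }
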
+
// Load Pointer
instruct loadP(iRegPdst dst, memoryAlg4 mem) %{
match(Set dst (LoadP mem));
@@ -5669,8 +5648,9 @@ instruct loadF(regF dst, memory mem) %{
%}
// Load Float acquire.
-instruct loadF_ac(regF dst, memory mem) %{
+instruct loadF_ac(regF dst, memory mem, flagsRegCR0 cr0) %{
match(Set dst (LoadF mem));
+ effect(TEMP cr0);
ins_cost(3*MEMORY_REF_COST);
format %{ "LFS $dst, $mem \t// acquire\n\t"
@@ -5705,8 +5685,9 @@ instruct loadD(regD dst, memory mem) %{
%}
// Load Double - aligned acquire.
-instruct loadD_ac(regD dst, memory mem) %{
+instruct loadD_ac(regD dst, memory mem, flagsRegCR0 cr0) %{
match(Set dst (LoadD mem));
+ effect(TEMP cr0);
ins_cost(3*MEMORY_REF_COST);
format %{ "LFD $dst, $mem \t// acquire\n\t"
@@ -6034,11 +6015,10 @@ instruct clearMs32b(iRegNdst dst, iRegNsrc src) %{
instruct loadBase(iRegLdst dst) %{
effect(DEF dst);
- format %{ "MR $dst, r30_heapbase" %}
- size(4);
+ format %{ "LoadConst $dst, heapbase" %}
ins_encode %{
- // TODO: PPC port $archOpcode(ppc64Opcode_or);
- __ mr($dst$$Register, R30);
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+ __ load_const_optimized($dst$$Register, Universe::narrow_oop_base(), R0);
%}
ins_pipe(pipe_class_default);
%}
@@ -6114,7 +6094,7 @@ instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
effect(TEMP src2);
ins_cost(DEFAULT_COST);
- format %{ "ORI $dst, $src1, $src2 \t// narrow klass lo" %}
+ format %{ "ORI $dst, $src1, $src2 \t// narrow klass lo" %}
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_ori);
@@ -6563,8 +6543,9 @@ instruct storeD(memory mem, regD src) %{
// do a releasing store. For this, it gets the address of
// CMSCollectorCardTableModRefBSExt::_requires_release as input.
// (Using releaseFieldAddr in the match rule is a hack.)
-instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr) %{
+instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{
match(Set mem (StoreCM mem releaseFieldAddr));
+ effect(TEMP crx);
predicate(false);
ins_cost(MEMORY_REF_COST);
@@ -6572,7 +6553,7 @@ instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr) %{
ins_cannot_rematerialize(true);
format %{ "STB #0, $mem \t// CMS card-mark byte (must be 0!), checking requires_release in [$releaseFieldAddr]" %}
- ins_encode( enc_cms_card_mark(mem, releaseFieldAddr) );
+ ins_encode( enc_cms_card_mark(mem, releaseFieldAddr, crx) );
ins_pipe(pipe_class_memory);
%}
@@ -6589,8 +6570,9 @@ instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{
expand %{
immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableModRefBSExt::requires_release_address() */ %}
iRegLdst releaseFieldAddress;
+ flagsReg crx;
loadConL_Ex(releaseFieldAddress, baseImm);
- storeCM_CMS(mem, releaseFieldAddress);
+ storeCM_CMS(mem, releaseFieldAddress, crx);
%}
%}
@@ -6639,39 +6621,34 @@ instruct encodeP_sub(iRegPdst dst, iRegPdst src) %{
predicate(false);
format %{ "SUB $dst, $src, oop_base \t// encode" %}
- size(4);
ins_encode %{
- // TODO: PPC port $archOpcode(ppc64Opcode_subf);
- __ subf($dst$$Register, R30, $src$$Register);
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+ __ sub_const_optimized($dst$$Register, $src$$Register, Universe::narrow_oop_base(), R0);
%}
ins_pipe(pipe_class_default);
%}
// Conditional sub base.
-instruct cond_sub_base(iRegNdst dst, flagsReg crx, iRegPsrc src1) %{
+instruct cond_sub_base(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
// The match rule is needed to make it a 'MachTypeNode'!
match(Set dst (EncodeP (Binary crx src1)));
predicate(false);
- ins_variable_size_depending_on_alignment(true);
-
format %{ "BEQ $crx, done\n\t"
- "SUB $dst, $src1, R30 \t// encode: subtract base if != NULL\n"
+ "SUB $dst, $src1, heapbase \t// encode: subtract base if != NULL\n"
"done:" %}
- size(false /* TODO: PPC PORT (InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling())*/ ? 12 : 8);
ins_encode %{
- // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
Label done;
__ beq($crx$$CondRegister, done);
- __ subf($dst$$Register, R30, $src1$$Register);
- // TODO PPC port __ endgroup_if_needed(_size == 12);
+ __ sub_const_optimized($dst$$Register, $src1$$Register, Universe::narrow_oop_base(), R0);
__ bind(done);
%}
ins_pipe(pipe_class_default);
%}
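
cond_sub_base performs the encode only when the preceding compare found the oop non-NULL, so a NULL pointer keeps its all-zero encoding. In C++ terms (a sketch; base and shift stand in for Universe::narrow_oop_base()/_shift, and the shift itself is applied by a separate node):

    #include <cstdint>

    inline uint32_t encode_oop(uintptr_t oop, uintptr_t base, int shift) {
      if (oop == 0) return 0;                  // BEQ $crx, done
      return uint32_t((oop - base) >> shift);  // SUB base; shift done by a separate node
    }
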
// Power 7 can use isel instruction
-instruct cond_set_0_oop(iRegNdst dst, flagsReg crx, iRegPsrc src1) %{
+instruct cond_set_0_oop(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
// The match rule is needed to make it a 'MachTypeNode'!
match(Set dst (EncodeP (Binary crx src1)));
predicate(false);
@@ -6777,42 +6754,37 @@ instruct decodeN_add(iRegPdst dst, iRegPdst src) %{
match(Set dst (DecodeN src));
predicate(false);
- format %{ "ADD $dst, $src, R30 \t// DecodeN, add oop base" %}
- size(4);
+ format %{ "ADD $dst, $src, heapbase \t// DecodeN, add oop base" %}
ins_encode %{
- // TODO: PPC port $archOpcode(ppc64Opcode_add);
- __ add($dst$$Register, $src$$Register, R30);
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+ __ add_const_optimized($dst$$Register, $src$$Register, Universe::narrow_oop_base(), R0);
%}
ins_pipe(pipe_class_default);
%}
// Conditional add base for expand
-instruct cond_add_base(iRegPdst dst, flagsReg crx, iRegPsrc src1) %{
+instruct cond_add_base(iRegPdst dst, flagsRegSrc crx, iRegPsrc src) %{
// The match rule is needed to make it a 'MachTypeNode'!
// NOTICE that the rule is nonsense - we just have to make sure that:
// - _matrule->_rChild->_opType == "DecodeN" (see InstructForm::captures_bottom_type() in formssel.cpp)
// - we have to match 'crx' to avoid an "illegal USE of non-input: flagsReg crx" error in ADLC.
- match(Set dst (DecodeN (Binary crx src1)));
+ match(Set dst (DecodeN (Binary crx src)));
predicate(false);
- ins_variable_size_depending_on_alignment(true);
-
format %{ "BEQ $crx, done\n\t"
- "ADD $dst, $src1, R30 \t// DecodeN: add oop base if $src1 != NULL\n"
+ "ADD $dst, $src, heapbase \t// DecodeN: add oop base if $src != NULL\n"
"done:" %}
- size(false /* TODO: PPC PORT (InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling()) */? 12 : 8);
ins_encode %{
- // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
Label done;
__ beq($crx$$CondRegister, done);
- __ add($dst$$Register, $src1$$Register, R30);
- // TODO PPC port __ endgroup_if_needed(_size == 12);
+ __ add_const_optimized($dst$$Register, $src$$Register, Universe::narrow_oop_base(), R0);
__ bind(done);
%}
ins_pipe(pipe_class_default);
%}
-instruct cond_set_0_ptr(iRegPdst dst, flagsReg crx, iRegPsrc src1) %{
+instruct cond_set_0_ptr(iRegPdst dst, flagsRegSrc crx, iRegPsrc src1) %{
// The match rule is needed to make it a 'MachTypeNode'!
// NOTICE that the rule is nonsense - we just have to make sure that:
// - _matrule->_rChild->_opType == "DecodeN" (see InstructForm::captures_bottom_type() in formssel.cpp)
@@ -6888,7 +6860,7 @@ instruct decodeN_Disjoint_notNull_Ex(iRegPdst dst, iRegNsrc src) %{
Universe::narrow_oop_base_disjoint());
ins_cost(DEFAULT_COST);
- format %{ "MOV $dst, R30 \t\n"
+ format %{ "MOV $dst, heapbase \t\n"
"RLDIMI $dst, $src, shift, 32-shift \t// decode with disjoint base" %}
postalloc_expand %{
loadBaseNode *n1 = new loadBaseNode();
@@ -6946,7 +6918,7 @@ instruct decodeN_Disjoint_isel_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
ra_->set_oop(n_cond_set, true);
-
+
ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
@@ -7303,7 +7275,7 @@ instruct membar_CPUOrder() %{
//----------Conditional Move---------------------------------------------------
// Cmove using isel.
-instruct cmovI_reg_isel(cmpOp cmp, flagsReg crx, iRegIdst dst, iRegIsrc src) %{
+instruct cmovI_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegIdst dst, iRegIsrc src) %{
match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
predicate(VM_Version::has_isel());
ins_cost(DEFAULT_COST);
@@ -7321,7 +7293,7 @@ instruct cmovI_reg_isel(cmpOp cmp, flagsReg crx, iRegIdst dst, iRegIsrc src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovI_reg(cmpOp cmp, flagsReg crx, iRegIdst dst, iRegIsrc src) %{
+instruct cmovI_reg(cmpOp cmp, flagsRegSrc crx, iRegIdst dst, iRegIsrc src) %{
match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
predicate(!VM_Version::has_isel());
ins_cost(DEFAULT_COST+BRANCH_COST);
@@ -7335,7 +7307,7 @@ instruct cmovI_reg(cmpOp cmp, flagsReg crx, iRegIdst dst, iRegIsrc src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovI_imm(cmpOp cmp, flagsReg crx, iRegIdst dst, immI16 src) %{
+instruct cmovI_imm(cmpOp cmp, flagsRegSrc crx, iRegIdst dst, immI16 src) %{
match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
ins_cost(DEFAULT_COST+BRANCH_COST);
@@ -7349,7 +7321,7 @@ instruct cmovI_imm(cmpOp cmp, flagsReg crx, iRegIdst dst, immI16 src) %{
%}
// Cmove using isel.
-instruct cmovL_reg_isel(cmpOp cmp, flagsReg crx, iRegLdst dst, iRegLsrc src) %{
+instruct cmovL_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegLdst dst, iRegLsrc src) %{
match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
predicate(VM_Version::has_isel());
ins_cost(DEFAULT_COST);
@@ -7367,7 +7339,7 @@ instruct cmovL_reg_isel(cmpOp cmp, flagsReg crx, iRegLdst dst, iRegLsrc src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovL_reg(cmpOp cmp, flagsReg crx, iRegLdst dst, iRegLsrc src) %{
+instruct cmovL_reg(cmpOp cmp, flagsRegSrc crx, iRegLdst dst, iRegLsrc src) %{
match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
predicate(!VM_Version::has_isel());
ins_cost(DEFAULT_COST+BRANCH_COST);
@@ -7381,7 +7353,7 @@ instruct cmovL_reg(cmpOp cmp, flagsReg crx, iRegLdst dst, iRegLsrc src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovL_imm(cmpOp cmp, flagsReg crx, iRegLdst dst, immL16 src) %{
+instruct cmovL_imm(cmpOp cmp, flagsRegSrc crx, iRegLdst dst, immL16 src) %{
match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
ins_cost(DEFAULT_COST+BRANCH_COST);
@@ -7395,7 +7367,7 @@ instruct cmovL_imm(cmpOp cmp, flagsReg crx, iRegLdst dst, immL16 src) %{
%}
// Cmove using isel.
-instruct cmovN_reg_isel(cmpOp cmp, flagsReg crx, iRegNdst dst, iRegNsrc src) %{
+instruct cmovN_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegNdst dst, iRegNsrc src) %{
match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
predicate(VM_Version::has_isel());
ins_cost(DEFAULT_COST);
@@ -7414,7 +7386,7 @@ instruct cmovN_reg_isel(cmpOp cmp, flagsReg crx, iRegNdst dst, iRegNsrc src) %{
%}
// Conditional move for RegN. Only cmov(reg, reg).
-instruct cmovN_reg(cmpOp cmp, flagsReg crx, iRegNdst dst, iRegNsrc src) %{
+instruct cmovN_reg(cmpOp cmp, flagsRegSrc crx, iRegNdst dst, iRegNsrc src) %{
match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
predicate(!VM_Version::has_isel());
ins_cost(DEFAULT_COST+BRANCH_COST);
@@ -7428,7 +7400,7 @@ instruct cmovN_reg(cmpOp cmp, flagsReg crx, iRegNdst dst, iRegNsrc src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovN_imm(cmpOp cmp, flagsReg crx, iRegNdst dst, immN_0 src) %{
+instruct cmovN_imm(cmpOp cmp, flagsRegSrc crx, iRegNdst dst, immN_0 src) %{
match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
ins_cost(DEFAULT_COST+BRANCH_COST);
@@ -7442,7 +7414,7 @@ instruct cmovN_imm(cmpOp cmp, flagsReg crx, iRegNdst dst, immN_0 src) %{
%}
// Cmove using isel.
-instruct cmovP_reg_isel(cmpOp cmp, flagsReg crx, iRegPdst dst, iRegPsrc src) %{
+instruct cmovP_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegPdst dst, iRegPsrc src) %{
match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
predicate(VM_Version::has_isel());
ins_cost(DEFAULT_COST);
@@ -7460,7 +7432,7 @@ instruct cmovP_reg_isel(cmpOp cmp, flagsReg crx, iRegPdst dst, iRegPsrc src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovP_reg(cmpOp cmp, flagsReg crx, iRegPdst dst, iRegP_N2P src) %{
+instruct cmovP_reg(cmpOp cmp, flagsRegSrc crx, iRegPdst dst, iRegP_N2P src) %{
match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
predicate(!VM_Version::has_isel());
ins_cost(DEFAULT_COST+BRANCH_COST);
@@ -7474,7 +7446,7 @@ instruct cmovP_reg(cmpOp cmp, flagsReg crx, iRegPdst dst, iRegP_N2P src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovP_imm(cmpOp cmp, flagsReg crx, iRegPdst dst, immP_0 src) %{
+instruct cmovP_imm(cmpOp cmp, flagsRegSrc crx, iRegPdst dst, immP_0 src) %{
match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
ins_cost(DEFAULT_COST+BRANCH_COST);
@@ -7487,7 +7459,7 @@ instruct cmovP_imm(cmpOp cmp, flagsReg crx, iRegPdst dst, immP_0 src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovF_reg(cmpOp cmp, flagsReg crx, regF dst, regF src) %{
+instruct cmovF_reg(cmpOp cmp, flagsRegSrc crx, regF dst, regF src) %{
match(Set dst (CMoveF (Binary cmp crx) (Binary dst src)));
ins_cost(DEFAULT_COST+BRANCH_COST);
@@ -7509,7 +7481,7 @@ instruct cmovF_reg(cmpOp cmp, flagsReg crx, regF dst, regF src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovD_reg(cmpOp cmp, flagsReg crx, regD dst, regD src) %{
+instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
match(Set dst (CMoveD (Binary cmp crx) (Binary dst src)));
ins_cost(DEFAULT_COST+BRANCH_COST);
@@ -7542,8 +7514,9 @@ instruct cmovD_reg(cmpOp cmp, flagsReg crx, regD dst, regD src) %{
// Mem_ptr must be a memory operand, else this node does not get
// Flag_needs_anti_dependence_check set by adlc. If this is not set this node
// can be rematerialized which leads to errors.
-instruct storeLConditional_regP_regL_regL(flagsReg crx, indirect mem_ptr, iRegLsrc oldVal, iRegLsrc newVal) %{
+instruct storeLConditional_regP_regL_regL(flagsReg crx, indirect mem_ptr, iRegLsrc oldVal, iRegLsrc newVal, flagsRegCR0 cr0) %{
match(Set crx (StoreLConditional mem_ptr (Binary oldVal newVal)));
+ effect(TEMP cr0);
format %{ "CMPXCHGD if ($crx = ($oldVal == *$mem_ptr)) *mem_ptr = $newVal; as bool" %}
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_compound);
@@ -7560,16 +7533,16 @@ instruct storeLConditional_regP_regL_regL(flagsReg crx, indirect mem_ptr, iRegLs
// Mem_ptr must be a memory operand, else this node does not get
// Flag_needs_anti_dependence_check set by adlc. If this is not set this node
// can be rematerialized which leads to errors.
-instruct storePConditional_regP_regP_regP(flagsReg crx, indirect mem_ptr, iRegPsrc oldVal, iRegPsrc newVal) %{
- match(Set crx (StorePConditional mem_ptr (Binary oldVal newVal)));
- format %{ "CMPXCHGD if ($crx = ($oldVal == *$mem_ptr)) *mem_ptr = $newVal; as bool" %}
+instruct storePConditional_regP_regP_regP(flagsRegCR0 cr0, indirect mem_ptr, iRegPsrc oldVal, iRegPsrc newVal) %{
+ match(Set cr0 (StorePConditional mem_ptr (Binary oldVal newVal)));
+ ins_cost(2*MEMORY_REF_COST);
+
+ format %{ "STDCX_ if ($cr0 = ($oldVal == *$mem_ptr)) *mem_ptr = $newVal; as bool" %}
ins_encode %{
- // TODO: PPC port $archOpcode(ppc64Opcode_compound);
- __ cmpxchgd($crx$$CondRegister, R0, $oldVal$$Register, $newVal$$Register, $mem_ptr$$Register,
- MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
- noreg, NULL, true);
+ // TODO: PPC port $archOpcode(ppc64Opcode_stdcx_);
+ __ stdcx_($newVal$$Register, $mem_ptr$$Register);
%}
- ins_pipe(pipe_class_default);
+ ins_pipe(pipe_class_memory);
%}
// Implement LoadPLocked. Must be ordered against changes of the memory location
@@ -7577,13 +7550,14 @@ instruct storePConditional_regP_regP_regP(flagsReg crx, indirect mem_ptr, iRegPs
// Don't know whether this is ever used.
instruct loadPLocked(iRegPdst dst, memory mem) %{
match(Set dst (LoadPLocked mem));
- ins_cost(MEMORY_REF_COST);
+ ins_cost(2*MEMORY_REF_COST);
- format %{ "LD $dst, $mem \t// loadPLocked\n\t"
- "TWI $dst\n\t"
- "ISYNC" %}
- size(12);
- ins_encode( enc_ld_ac(dst, mem) );
+ format %{ "LDARX $dst, $mem \t// loadPLocked\n\t" %}
+ size(4);
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_ldarx);
+ __ ldarx($dst$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
+ %}
ins_pipe(pipe_class_memory);
%}
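
loadPLocked and storePConditional now emit a raw LDARX/STDCX_ pair, i.e. load-linked/store-conditional: the load sets a reservation on the address, and the store succeeds only if no other store hit the reservation granule in between. The equivalent effect, expressed portably (on PPC64, GCC and Clang compile this to the same ldarx/stdcx. loop; illustration only):

    #include <cstdint>

    // Compare-and-swap built from an LL/SC pair.
    bool cas_ptr(uintptr_t* addr, uintptr_t expected, uintptr_t desired) {
      return __atomic_compare_exchange_n(addr, &expected, desired,
                                         /*weak=*/false,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }
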
@@ -7593,8 +7567,9 @@ instruct loadPLocked(iRegPdst dst, memory mem) %{
// (CompareAndSwap ...)" or "If (CmpI (CompareAndSwap ..))" cannot be
// matched.
-instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2) %{
+instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
match(Set res (CompareAndSwapI mem_ptr (Binary src1 src2)));
+ effect(TEMP cr0);
format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
// Variable size: instruction count smaller if regs are disjoint.
ins_encode %{
@@ -7607,8 +7582,9 @@ instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc
ins_pipe(pipe_class_default);
%}
-instruct compareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2) %{
+instruct compareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
match(Set res (CompareAndSwapN mem_ptr (Binary src1 src2)));
+ effect(TEMP cr0);
format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
// Variable size: instruction count smaller if regs are disjoint.
ins_encode %{
@@ -7621,8 +7597,9 @@ instruct compareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc
ins_pipe(pipe_class_default);
%}
-instruct compareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2) %{
+instruct compareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
match(Set res (CompareAndSwapL mem_ptr (Binary src1 src2)));
+ effect(TEMP cr0);
format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
// Variable size: instruction count smaller if regs are disjoint.
ins_encode %{
@@ -7635,8 +7612,9 @@ instruct compareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc
ins_pipe(pipe_class_default);
%}
-instruct compareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2) %{
+instruct compareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
match(Set res (CompareAndSwapP mem_ptr (Binary src1 src2)));
+ effect(TEMP cr0);
format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
// Variable size: instruction count smaller if regs are disjoint.
ins_encode %{
@@ -7649,48 +7627,54 @@ instruct compareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc
ins_pipe(pipe_class_default);
%}
-instruct getAndAddI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src) %{
+instruct getAndAddI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
match(Set res (GetAndAddI mem_ptr src));
+ effect(TEMP cr0);
format %{ "GetAndAddI $res, $mem_ptr, $src" %}
// Variable size: instruction count smaller if regs are disjoint.
ins_encode( enc_GetAndAddI(res, mem_ptr, src) );
ins_pipe(pipe_class_default);
%}
-instruct getAndAddL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src) %{
+instruct getAndAddL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src, flagsRegCR0 cr0) %{
match(Set res (GetAndAddL mem_ptr src));
+ effect(TEMP cr0);
format %{ "GetAndAddL $res, $mem_ptr, $src" %}
// Variable size: instruction count smaller if regs are disjoint.
ins_encode( enc_GetAndAddL(res, mem_ptr, src) );
ins_pipe(pipe_class_default);
%}
-instruct getAndSetI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src) %{
+instruct getAndSetI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
match(Set res (GetAndSetI mem_ptr src));
+ effect(TEMP cr0);
format %{ "GetAndSetI $res, $mem_ptr, $src" %}
// Variable size: instruction count smaller if regs are disjoint.
ins_encode( enc_GetAndSetI(res, mem_ptr, src) );
ins_pipe(pipe_class_default);
%}
-instruct getAndSetL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src) %{
+instruct getAndSetL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src, flagsRegCR0 cr0) %{
match(Set res (GetAndSetL mem_ptr src));
+ effect(TEMP cr0);
format %{ "GetAndSetL $res, $mem_ptr, $src" %}
// Variable size: instruction count smaller if regs are disjoint.
ins_encode( enc_GetAndSetL(res, mem_ptr, src) );
ins_pipe(pipe_class_default);
%}
-instruct getAndSetP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src) %{
+instruct getAndSetP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src, flagsRegCR0 cr0) %{
match(Set res (GetAndSetP mem_ptr src));
+ effect(TEMP cr0);
format %{ "GetAndSetP $res, $mem_ptr, $src" %}
// Variable size: instruction count smaller if regs are disjoint.
ins_encode( enc_GetAndSetL(res, mem_ptr, src) );
ins_pipe(pipe_class_default);
%}
-instruct getAndSetN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src) %{
+instruct getAndSetN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src, flagsRegCR0 cr0) %{
match(Set res (GetAndSetN mem_ptr src));
+ effect(TEMP cr0);
format %{ "GetAndSetN $res, $mem_ptr, $src" %}
// Variable size: instruction count smaller if regs are disjoint.
ins_encode( enc_GetAndSetI(res, mem_ptr, src) );
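
The new flagsRegCR0 TEMP effects on the getAndAdd/getAndSet rules tell the register allocator that the underlying larx/stcx. retry loops clobber CR0. Semantically these nodes are the familiar atomic read-modify-write primitives; in portable C++ terms:

    #include <atomic>

    int get_and_add(std::atomic<int>& v, int delta)  { return v.fetch_add(delta); }  // GetAndAddI
    int get_and_set(std::atomic<int>& v, int newval) { return v.exchange(newval); }  // GetAndSetI
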
@@ -7898,18 +7882,8 @@ instruct subI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
%}
// Immediate Subtraction
-// The compiler converts "x-c0" into "x+ -c0" (see SubINode::Ideal),
-// so this rule seems to be unused.
-instruct subI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
- match(Set dst (SubI src1 src2));
- format %{ "SUBI $dst, $src1, $src2" %}
- size(4);
- ins_encode %{
- // TODO: PPC port $archOpcode(ppc64Opcode_addi);
- __ addi($dst$$Register, $src1$$Register, ($src2$$constant) * (-1));
- %}
- ins_pipe(pipe_class_default);
-%}
+// Immediate Subtraction: The compiler converts "x-c0" into "x+ -c0" (see SubINode::Ideal),
+// so this rule is not needed. Don't try to use addi with -$src2$$constant, since that
+// negation overflows when $src2$$constant == minI16.
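
Concretely, the removed rule would be wrong for the most negative 16-bit immediate:

    #include <cstdint>

    int16_t imm = INT16_MIN;          // -32768, a legal immI16 / immL16
    int32_t negated = -int32_t(imm);  //  32768: no longer representable in the
                                      //  signed 16-bit field of an addi, so
                                      //  "addi dst, src, -imm" cannot encode it
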
// SubI from constant (using subfic).
instruct subI_imm16_reg(iRegIdst dst, immI16 src1, iRegIsrc src2) %{
@@ -7989,22 +7963,6 @@ instruct subI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
ins_pipe(pipe_class_default);
%}
-// Immediate Subtraction
-// The compiler converts "x-c0" into "x+ -c0" (see SubLNode::Ideal),
-// so this rule seems to be unused.
-// No constant pool entries required.
-instruct subL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
- match(Set dst (SubL src1 src2));
-
- format %{ "SUBI $dst, $src1, $src2 \t// long" %}
- size(4);
- ins_encode %{
- // TODO: PPC port $archOpcode(ppc64Opcode_addi);
- __ addi($dst$$Register, $src1$$Register, ($src2$$constant) * (-1));
- %}
- ins_pipe(pipe_class_default);
-%}
-
// Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
// positive longs and 0xF...F for negative ones.
instruct signmask64I_regL(iRegIdst dst, iRegLsrc src) %{
@@ -8165,7 +8123,7 @@ instruct divI_reg_regnotMinus1(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovI_bne_negI_reg(iRegIdst dst, flagsReg crx, iRegIsrc src1) %{
+instruct cmovI_bne_negI_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src1) %{
effect(USE_DEF dst, USE src1, USE crx);
predicate(false);
@@ -8228,7 +8186,7 @@ instruct divL_reg_regnotMinus1(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovL_bne_negL_reg(iRegLdst dst, flagsReg crx, iRegLsrc src1) %{
+instruct cmovL_bne_negL_reg(iRegLdst dst, flagsRegSrc crx, iRegLsrc src1) %{
effect(USE_DEF dst, USE src1, USE crx);
predicate(false);
@@ -8281,7 +8239,7 @@ instruct modI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
%}
// Long Remainder with registers
-instruct modL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
+instruct modL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
match(Set dst (ModL src1 src2));
ins_cost(10*DEFAULT_COST);
@@ -9011,7 +8969,6 @@ instruct andL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
instruct andL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 src2, flagsRegCR0 cr0) %{
match(Set dst (AndL src1 src2));
effect(KILL cr0);
- ins_cost(DEFAULT_COST);
format %{ "ANDI $dst, $src1, $src2 \t// long" %}
size(4);
@@ -9803,7 +9760,7 @@ instruct convD2IRaw_regD(regD dst, regD src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsReg crx, stackSlotL src) %{
+instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE src);
predicate(false);
@@ -9817,7 +9774,7 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsReg crx, stackSlotL src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovI_bso_stackSlotL_conLvalue0_Ex(iRegIdst dst, flagsReg crx, stackSlotL mem) %{
+instruct cmovI_bso_stackSlotL_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, stackSlotL mem) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE mem);
predicate(false);
@@ -9972,7 +9929,7 @@ instruct convF2LRaw_regF(regF dst, regF src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsReg crx, stackSlotL src) %{
+instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE src);
predicate(false);
@@ -9986,7 +9943,7 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsReg crx, stackSlotL src) %{
ins_pipe(pipe_class_default);
%}
-instruct cmovL_bso_stackSlotL_conLvalue0_Ex(iRegLdst dst, flagsReg crx, stackSlotL mem) %{
+instruct cmovL_bso_stackSlotL_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, stackSlotL mem) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE mem);
predicate(false);
@@ -10255,7 +10212,6 @@ instruct testI_reg_imm(flagsRegCR0 cr0, iRegIsrc src1, uimmI16 src2, immI_0 zero
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_andi_);
- // FIXME: avoid andi_ ?
__ andi_(R0, $src1$$Register, $src2$$constant);
%}
ins_pipe(pipe_class_compare);
@@ -10302,13 +10258,12 @@ instruct testL_reg_imm(flagsRegCR0 cr0, iRegLsrc src1, uimmL16 src2, immL_0 zero
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_andi_);
- // FIXME: avoid andi_ ?
__ andi_(R0, $src1$$Register, $src2$$constant);
%}
ins_pipe(pipe_class_compare);
%}
-instruct cmovI_conIvalueMinus1_conIvalue1(iRegIdst dst, flagsReg crx) %{
+instruct cmovI_conIvalueMinus1_conIvalue1(iRegIdst dst, flagsRegSrc crx) %{
// no match-rule, false predicate
effect(DEF dst, USE crx);
predicate(false);
@@ -10332,7 +10287,7 @@ instruct cmovI_conIvalueMinus1_conIvalue1(iRegIdst dst, flagsReg crx) %{
ins_pipe(pipe_class_compare);
%}
-instruct cmovI_conIvalueMinus1_conIvalue0_conIvalue1_Ex(iRegIdst dst, flagsReg crx) %{
+instruct cmovI_conIvalueMinus1_conIvalue0_conIvalue1_Ex(iRegIdst dst, flagsRegSrc crx) %{
// no match-rule, false predicate
effect(DEF dst, USE crx);
predicate(false);
@@ -10622,8 +10577,9 @@ instruct cmpP_reg_imm16(flagsReg crx, iRegPsrc src1, immL16 src2) %{
//----------Float Compares----------------------------------------------------
instruct cmpFUnordered_reg_reg(flagsReg crx, regF src1, regF src2) %{
+  // Needs a match rule, see cmpDUnordered.
+  match(Set crx (CmpF src1 src2));
   // False predicate, shall not be matched.
- effect(DEF crx, USE src1, USE src2);
predicate(false);
format %{ "cmpFUrd $crx, $src1, $src2" %}
@@ -10731,8 +10687,14 @@ instruct cmpF3_reg_reg_ExEx(iRegIdst dst, regF src1, regF src2) %{
%}
instruct cmpDUnordered_reg_reg(flagsReg crx, regD src1, regD src2) %{
- // no match-rule, false predicate
- effect(DEF crx, USE src1, USE src2);
+  // Needs a match rule so that the ideal opcode is Cmp. This causes GCM to place the
+  // node right before the conditional move that uses it.
+  // In the jck test api/java_awt/geom/QuadCurve2DFloat/index.html#SetCurveTesttestCase7,
+  // compilation of java.awt.geom.RectangularShape::getBounds()Ljava/awt/Rectangle
+  // crashed in register allocation because the flags register between cmpDUnordered and a
+  // conditional move was supposed to be spilled.
+ match(Set crx (CmpD src1 src2));
+ // False predicate, shall not be matched.
predicate(false);
format %{ "cmpFUrd $crx, $src1, $src2" %}
@@ -10830,7 +10792,7 @@ instruct branch(label labl) %{
%}
// Conditional Near Branch
-instruct branchCon(cmpOp cmp, flagsReg crx, label lbl) %{
+instruct branchCon(cmpOp cmp, flagsRegSrc crx, label lbl) %{
// Same match rule as `branchConFar'.
match(If cmp crx);
effect(USE lbl);
@@ -10853,7 +10815,7 @@ instruct branchCon(cmpOp cmp, flagsReg crx, label lbl) %{
// expensive.
//
// Conditional Far Branch
-instruct branchConFar(cmpOp cmp, flagsReg crx, label lbl) %{
+instruct branchConFar(cmpOp cmp, flagsRegSrc crx, label lbl) %{
// Same match rule as `branchCon'.
match(If cmp crx);
effect(USE crx, USE lbl);
@@ -10871,7 +10833,7 @@ instruct branchConFar(cmpOp cmp, flagsReg crx, label lbl) %{
%}
// Conditional Branch used with Power6 scheduler (can be far or short).
-instruct branchConSched(cmpOp cmp, flagsReg crx, label lbl) %{
+instruct branchConSched(cmpOp cmp, flagsRegSrc crx, label lbl) %{
// Same match rule as `branchCon'.
match(If cmp crx);
effect(USE crx, USE lbl);
@@ -10890,7 +10852,7 @@ instruct branchConSched(cmpOp cmp, flagsReg crx, label lbl) %{
ins_pipe(pipe_class_default);
%}
-instruct branchLoopEnd(cmpOp cmp, flagsReg crx, label labl) %{
+instruct branchLoopEnd(cmpOp cmp, flagsRegSrc crx, label labl) %{
match(CountedLoopEnd cmp crx);
effect(USE labl);
ins_cost(BRANCH_COST);
@@ -10904,7 +10866,7 @@ instruct branchLoopEnd(cmpOp cmp, flagsReg crx, label labl) %{
ins_pipe(pipe_class_default);
%}
-instruct branchLoopEndFar(cmpOp cmp, flagsReg crx, label labl) %{
+instruct branchLoopEndFar(cmpOp cmp, flagsRegSrc crx, label labl) %{
match(CountedLoopEnd cmp crx);
effect(USE labl);
predicate(!false /* TODO: PPC port HB_Schedule */);
@@ -10920,7 +10882,7 @@ instruct branchLoopEndFar(cmpOp cmp, flagsReg crx, label labl) %{
%}
// Conditional Branch used with Power6 scheduler (can be far or short).
-instruct branchLoopEndSched(cmpOp cmp, flagsReg crx, label labl) %{
+instruct branchLoopEndSched(cmpOp cmp, flagsRegSrc crx, label labl) %{
match(CountedLoopEnd cmp crx);
effect(USE labl);
predicate(false /* TODO: PPC port HB_Schedule */);
@@ -10969,13 +10931,36 @@ instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P supe
instruct cmpFastLock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
match(Set crx (FastLock oop box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
- // TODO PPC port predicate(!UseNewFastLockPPC64 || UseBiasedLocking);
+ predicate(/*(!UseNewFastLockPPC64 || UseBiasedLocking) &&*/ !Compile::current()->use_rtm());
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2, $tmp3" %}
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_compound);
__ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
- $tmp3$$Register, $tmp1$$Register, $tmp2$$Register);
+ $tmp3$$Register, $tmp1$$Register, $tmp2$$Register,
+ UseBiasedLocking && !UseOptoBiasInlining); // SAPJVM MD 2014-11-06 UseOptoBiasInlining
+  // If locking was successful, crx should indicate 'EQ'.
+ // The compiler generates a branch to the runtime call to
+ // _complete_monitor_locking_Java for the case where crx is 'NE'.
+ %}
+ ins_pipe(pipe_class_compare);
+%}
+
+// Separate version for TM. Use bound register for box to enable USE_KILL.
+instruct cmpFastLock_tm(flagsReg crx, iRegPdst oop, rarg2RegP box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
+ match(Set crx (FastLock oop box));
+ effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, USE_KILL box);
+ predicate(Compile::current()->use_rtm());
+
+ format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2, $tmp3 (TM)" %}
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+ __ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
+ $tmp3$$Register, $tmp1$$Register, $tmp2$$Register,
+ /*Biased Locking*/ false,
+ _rtm_counters, _stack_rtm_counters,
+ ((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
+ /*TM*/ true, ra_->C->profile_rtm());
// If locking was successful, crx should indicate 'EQ'.
// The compiler generates a branch to the runtime call to
// _complete_monitor_locking_Java for the case where crx is 'NE'.
@@ -10986,12 +10971,33 @@ instruct cmpFastLock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iR
instruct cmpFastUnlock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
match(Set crx (FastUnlock oop box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
+ predicate(!Compile::current()->use_rtm());
format %{ "FASTUNLOCK $oop, $box, $tmp1, $tmp2" %}
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_compound);
__ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
- $tmp3$$Register, $tmp1$$Register, $tmp2$$Register);
+ $tmp3$$Register, $tmp1$$Register, $tmp2$$Register,
+ UseBiasedLocking && !UseOptoBiasInlining,
+ false);
+  // If unlocking was successful, crx should indicate 'EQ'.
+ // The compiler generates a branch to the runtime call to
+ // _complete_monitor_unlocking_Java for the case where crx is 'NE'.
+ %}
+ ins_pipe(pipe_class_compare);
+%}
+
+instruct cmpFastUnlock_tm(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
+ match(Set crx (FastUnlock oop box));
+ effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
+ predicate(Compile::current()->use_rtm());
+
+ format %{ "FASTUNLOCK $oop, $box, $tmp1, $tmp2 (TM)" %}
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+ __ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
+ $tmp3$$Register, $tmp1$$Register, $tmp2$$Register,
+ /*Biased Locking*/ false, /*TM*/ true);
// If unlocking was successful, crx should indicate 'EQ'.
// The compiler generates a branch to the runtime call to
// _complete_monitor_unlocking_Java for the case where crx is 'NE'.
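
The _tm variants select hardware transactional memory (ISA 2.07 HTM, what use_rtm() gates here) instead of the conventional lock: the critical section is attempted inside a transaction and falls back to the real lock on abort. A rough sketch of the idea using GCC's PowerPC HTM builtins (requires -mhtm; not HotSpot's actual code path):

    #include <htmintrin.h>   // GCC PowerPC HTM builtins

    bool try_elided_lock(volatile long* lock_word) {
      if (__builtin_tbegin(0)) {   // transaction started
        if (*lock_word != 0) {     // lock already held: give up
          __builtin_tabort(0);
        }
        return true;               // critical section runs transactionally
      }
      return false;                // aborted: caller takes the real lock
    }
    // The matching unlock commits the transaction with __builtin_tend(0).
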
@@ -11658,6 +11664,66 @@ instruct repl2F_immF0(iRegLdst dst, immF_0 zero) %{
ins_pipe(pipe_class_default);
%}
+
+//----------Overflow Math Instructions-----------------------------------------
+
+// Note that we have to make sure that XER.SO is reset before using overflow instructions.
+// Simple overflow operations can be matched by very few instructions (e.g. addExact: xor, and_, bc).
+// It seems that only the Long intrinsics have an advantage. (The only expensive one is OverflowMulL.)
+
+instruct overflowAddL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
+ match(Set cr0 (OverflowAddL op1 op2));
+
+ format %{ "add_ $op1, $op2\t# overflow check long" %}
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+ __ li(R0, 0);
+ __ mtxer(R0); // clear XER.SO
+ __ addo_(R0, $op1$$Register, $op2$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct overflowSubL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
+ match(Set cr0 (OverflowSubL op1 op2));
+
+ format %{ "subfo_ R0, $op2, $op1\t# overflow check long" %}
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+ __ li(R0, 0);
+ __ mtxer(R0); // clear XER.SO
+ __ subfo_(R0, $op2$$Register, $op1$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct overflowNegL_reg(flagsRegCR0 cr0, immL_0 zero, iRegLsrc op2) %{
+ match(Set cr0 (OverflowSubL zero op2));
+
+ format %{ "nego_ R0, $op2\t# overflow check long" %}
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+ __ li(R0, 0);
+ __ mtxer(R0); // clear XER.SO
+ __ nego_(R0, $op2$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct overflowMulL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
+ match(Set cr0 (OverflowMulL op1 op2));
+
+ format %{ "mulldo_ R0, $op1, $op2\t# overflow check long" %}
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+ __ li(R0, 0);
+ __ mtxer(R0); // clear XER.SO
+ __ mulldo_(R0, $op1$$Register, $op2$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
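+
+// These nodes back the java.lang.Math *Exact intrinsics: XER.SO is cleared, the
+// overflow-recording instruction form runs, and the branch tests the recorded
+// overflow via CR0. The Java-visible semantics match the checked-arithmetic
+// builtins, roughly (illustrative C++ sketch):
+//
+//     #include <cstdint>
+//
+//     bool add_overflows(int64_t a, int64_t b, int64_t* sum) {
+//       return __builtin_add_overflow(a, b, sum);    // OverflowAddL
+//     }
+//     bool mul_overflows(int64_t a, int64_t b, int64_t* prod) {
+//       return __builtin_mul_overflow(a, b, prod);   // OverflowMulL
+//     }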
+
+
// ============================================================================
// Safepoint Instruction
diff --git a/hotspot/src/cpu/ppc/vm/register_definitions_ppc.cpp b/hotspot/src/cpu/ppc/vm/register_definitions_ppc.cpp
index a009ea06f77..6b002d2efd2 100644
--- a/hotspot/src/cpu/ppc/vm/register_definitions_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/register_definitions_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,19 +23,10 @@
*
*/
-// make sure the defines don't screw up the declarations later on in this file
+// Make sure the defines don't screw up the declarations later on in this file.
#define DONT_USE_REGISTER_DEFINES
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
#include "asm/register.hpp"
-#include "register_ppc.hpp"
-#ifdef TARGET_ARCH_MODEL_ppc_32
-# include "interp_masm_ppc_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_ppc_64
-# include "interp_masm_ppc_64.hpp"
-#endif
REGISTER_DEFINITION(Register, noreg);
diff --git a/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp b/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp
index 4f365a46a05..d163ebac6b9 100644
--- a/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,14 +25,12 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
-#include "assembler_ppc.inline.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
- bool copy_back_to_oop_pool = true; // TODO: PPC port
// The following comment is from the declaration of DataRelocation:
//
// "The "o" (displacement) argument is relevant only to split relocations
diff --git a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
index 68dda7d3e2b..5519405dbcd 100644
--- a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
+#include "frame_ppc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "oops/compiledICHolder.hpp"
@@ -194,8 +195,8 @@ static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
RegisterSaver_LiveIntReg( R27 ),
RegisterSaver_LiveIntReg( R28 ),
RegisterSaver_LiveIntReg( R29 ),
- RegisterSaver_LiveIntReg( R31 ),
- RegisterSaver_LiveIntReg( R30 ), // r30 must be the last register
+ RegisterSaver_LiveIntReg( R30 ),
+ RegisterSaver_LiveIntReg( R31 ), // must be the last register (see save/restore functions below)
};
OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
@@ -229,29 +230,30 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
- // Save r30 in the last slot of the not yet pushed frame so that we
+ // Save r31 in the last slot of the not yet pushed frame so that we
// can use it as scratch reg.
- __ std(R30, -reg_size, R1_SP);
+ __ std(R31, -reg_size, R1_SP);
assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
"consistency check");
// save the flags
// Do the save_LR_CR by hand and adjust the return pc if requested.
- __ mfcr(R30);
- __ std(R30, _abi(cr), R1_SP);
+ __ mfcr(R31);
+ __ std(R31, _abi(cr), R1_SP);
switch (return_pc_location) {
- case return_pc_is_lr: __ mflr(R30); break;
- case return_pc_is_r4: __ mr(R30, R4); break;
+ case return_pc_is_lr: __ mflr(R31); break;
+ case return_pc_is_r4: __ mr(R31, R4); break;
case return_pc_is_thread_saved_exception_pc:
- __ ld(R30, thread_(saved_exception_pc)); break;
+ __ ld(R31, thread_(saved_exception_pc)); break;
default: ShouldNotReachHere();
}
- if (return_pc_adjustment != 0)
- __ addi(R30, R30, return_pc_adjustment);
- __ std(R30, _abi(lr), R1_SP);
+ if (return_pc_adjustment != 0) {
+ __ addi(R31, R31, return_pc_adjustment);
+ }
+ __ std(R31, _abi(lr), R1_SP);
// push a new frame
- __ push_frame(frame_size_in_bytes, R30);
+ __ push_frame(frame_size_in_bytes, R31);
// save all registers (ints and floats)
offset = register_save_offset;
@@ -261,7 +263,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
switch (reg_type) {
case RegisterSaver::int_reg: {
- if (reg_num != 30) { // We spilled R30 right at the beginning.
+ if (reg_num != 31) { // We spilled R31 right at the beginning.
__ std(as_Register(reg_num), offset, R1_SP);
}
break;
@@ -272,8 +274,8 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
}
case RegisterSaver::special_reg: {
if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
- __ mfctr(R30);
- __ std(R30, offset, R1_SP);
+ __ mfctr(R31);
+ __ std(R31, offset, R1_SP);
} else {
Unimplemented();
}
@@ -321,7 +323,7 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
switch (reg_type) {
case RegisterSaver::int_reg: {
- if (reg_num != 30) // R30 restored at the end, it's the tmp reg!
+ if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
__ ld(as_Register(reg_num), offset, R1_SP);
break;
}
@@ -332,8 +334,8 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
case RegisterSaver::special_reg: {
if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
- __ ld(R30, offset, R1_SP);
- __ mtctr(R30);
+ __ ld(R31, offset, R1_SP);
+ __ mtctr(R31);
}
} else {
Unimplemented();
@@ -350,10 +352,10 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
__ pop_frame();
// restore the flags
- __ restore_LR_CR(R30);
+ __ restore_LR_CR(R31);
// restore scratch register's value
- __ ld(R30, -reg_size, R1_SP);
+ __ ld(R31, -reg_size, R1_SP);
BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}
@@ -2021,6 +2023,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame.
frame_done_pc = (intptr_t)__ pc();
+ __ verify_thread();
+
   // Native nmethod wrappers never take possession of the oop arguments.
// So the caller will gc the arguments.
// The only thing we need an oopMap for is if the call is static.
@@ -2594,7 +2598,7 @@ int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals)
}
uint SharedRuntime::out_preserve_stack_slots() {
-#ifdef COMPILER2
+#if defined(COMPILER1) || defined(COMPILER2)
return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
#else
return 0;
@@ -2868,11 +2872,6 @@ void SharedRuntime::generate_deopt_blob() {
__ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
__ BIND(skip_restore_excp);
- // reload narrro_oop_base
- if (UseCompressedOops && Universe::narrow_oop_base() != 0) {
- __ load_const_optimized(R30, Universe::narrow_oop_base());
- }
-
__ pop_frame();
// stack: (deoptee, optional i2c, caller of deoptee, ...).
diff --git a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp
index c7a9d06627d..4ddf83ba943 100644
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp
@@ -261,9 +261,6 @@ class StubGenerator: public StubCodeGenerator {
// global toc register
__ load_const(R29, MacroAssembler::global_toc(), R11_scratch1);
- // Load narrow oop base.
- __ reinit_heapbase(R30, R11_scratch1);
-
// Remember the senderSP so we interpreter can pop c2i arguments off of the stack
// when called via a c2i.
@@ -418,6 +415,23 @@ class StubGenerator: public StubCodeGenerator {
// or native call stub. The pending exception in Thread is
// converted into a Java-level exception.
//
+ // Read:
+ //
+ // LR: The pc the runtime library callee wants to return to.
+ // Since the exception occurred in the callee, the return pc
+ // from the point of view of Java is the exception pc.
+ // thread: Needed for method handles.
+ //
+ // Invalidate:
+ //
+ // volatile registers (except below).
+ //
+ // Update:
+ //
+ // R4_ARG2: exception
+ //
+ // (LR is unchanged and is live out).
+ //
address generate_forward_exception() {
StubCodeMark mark(this, "StubRoutines", "forward_exception");
address start = __ pc();
@@ -1256,9 +1270,9 @@ class StubGenerator: public StubCodeGenerator {
Register tmp3 = R8_ARG6;
#if defined(ABI_ELFv2)
- address nooverlap_target = aligned ?
- StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
- StubRoutines::jbyte_disjoint_arraycopy();
+ address nooverlap_target = aligned ?
+ StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
+ StubRoutines::jbyte_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
diff --git a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp
index 432a96d8268..2789be2aa55 100644
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2013, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -264,11 +264,11 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label*
__ cmpdi(CCR0, Rmdo, 0);
__ beq(CCR0, no_mdo);
- // Increment invocation counter in the MDO.
- const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
- __ lwz(Rscratch2, mdo_ic_offs, Rmdo);
+ // Increment backedge counter in the MDO.
+ const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
+ __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
__ addi(Rscratch2, Rscratch2, increment);
- __ stw(Rscratch2, mdo_ic_offs, Rmdo);
+ __ stw(Rscratch2, mdo_bc_offs, Rmdo);
__ load_const_optimized(Rscratch1, mask, R0);
__ and_(Rscratch1, Rscratch2, Rscratch1);
__ bne(CCR0, done);
@@ -276,12 +276,12 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label*
}
// Increment counter in MethodCounters*.
- const int mo_ic_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
+ const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
__ bind(no_mdo);
__ get_method_counters(R19_method, R3_counters, done);
- __ lwz(Rscratch2, mo_ic_offs, R3_counters);
+ __ lwz(Rscratch2, mo_bc_offs, R3_counters);
__ addi(Rscratch2, Rscratch2, increment);
- __ stw(Rscratch2, mo_ic_offs, R3_counters);
+ __ stw(Rscratch2, mo_bc_offs, R3_counters);
__ load_const_optimized(Rscratch1, mask, R0);
__ and_(Rscratch1, Rscratch2, Rscratch1);
__ beq(CCR0, *overflow);
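
Both counter updates above follow the same shape: load the counter, add the increment, store it back, then AND with a mask; a zero result means the masked bits wrapped and the overflow path (the compilation trigger) is taken. As a sketch, with increment and mask standing in for the InvocationCounter encoding constants:

    #include <cstdint>

    bool bump_counter(uint32_t* counter, uint32_t increment, uint32_t mask) {
      *counter += increment;           // lwz / addi / stw
      return (*counter & mask) == 0;   // and_; beq(*overflow) when zero
    }
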
@@ -611,12 +611,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
// For others we can use a normal (native) entry.
inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
- // Provide math entry with debugging on demand.
- // Note: Debugging changes which code will get executed:
- // Debugging or disabled InlineIntrinsics: java method will get interpreted and performs a native call.
- // Not debugging and enabled InlineIntrinics: processor instruction will get used.
- // Result might differ slightly due to rounding etc.
- if (!InlineIntrinsics && (!FLAG_IS_ERGO(InlineIntrinsics))) return false; // Generate a vanilla entry.
+ if (!InlineIntrinsics) return false;
return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
(kind==Interpreter::java_lang_math_abs));
@@ -628,15 +623,8 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
return Interpreter::entry_for_kind(Interpreter::zerolocals);
}
- Label Lslow_path;
- const Register Rjvmti_mode = R11_scratch1;
address entry = __ pc();
- // Provide math entry with debugging on demand.
- __ lwz(Rjvmti_mode, thread_(interp_only_mode));
- __ cmpwi(CCR0, Rjvmti_mode, 0);
- __ bne(CCR0, Lslow_path); // jvmti_mode!=0
-
__ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);
// Pop c2i arguments (if any) off when we return.
@@ -659,9 +647,6 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
// And we're done.
__ blr();
- // Provide slow path for JVMTI case.
- __ bind(Lslow_path);
- __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R12_scratch2);
__ flush();
return entry;
diff --git a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.hpp b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.hpp
index eb817b0a256..4450dd71897 100644
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2013, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
- const static int InterpreterCodeSize = 210*K;
+ const static int InterpreterCodeSize = 230*K;
#endif // CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
diff --git a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp
index a6505333fc6..7602d871c70 100644
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2013, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -375,23 +375,22 @@ void TemplateTable::fast_aldc(bool wide) {
int index_size = wide ? sizeof(u2) : sizeof(u1);
const Register Rscratch = R11_scratch1;
- Label resolved;
+ Label is_null;
// We are resolved if the resolved reference cache entry contains a
// non-null object (CallSite, etc.)
__ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
- __ load_resolved_reference_at_index(R17_tos, Rscratch);
- __ cmpdi(CCR0, R17_tos, 0);
- __ bne(CCR0, resolved);
+ __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);
+ __ verify_oop(R17_tos);
+ __ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));
+
+ __ bind(is_null);
__ load_const_optimized(R3_ARG1, (int)bytecode());
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
// First time invocation - must resolve first.
__ call_VM(R17_tos, entry, R3_ARG1);
-
- __ align(32, 12);
- __ bind(resolved);
__ verify_oop(R17_tos);
}
@@ -437,6 +436,14 @@ void TemplateTable::locals_index(Register Rdst, int offset) {
}
void TemplateTable::iload() {
+ iload_internal();
+}
+
+void TemplateTable::nofast_iload() {
+ iload_internal(may_not_rewrite);
+}
+
+void TemplateTable::iload_internal(RewriteControl rc) {
transition(vtos, itos);
// Get the local value into tos
@@ -445,7 +452,7 @@ void TemplateTable::iload() {
// Rewrite iload,iload pair into fast_iload2
// iload,caload pair into fast_icaload
- if (RewriteFrequentPairs) {
+ if (RewriteFrequentPairs && rc == may_rewrite) {
Label Lrewrite, Ldone;
Register Rnext_byte = R3_ARG1,
Rrewrite_to = R6_ARG4,
@@ -709,6 +716,14 @@ void TemplateTable::aload(int n) {
}
void TemplateTable::aload_0() {
+ aload_0_internal();
+}
+
+void TemplateTable::nofast_aload_0() {
+ aload_0_internal(may_not_rewrite);
+}
+
+void TemplateTable::aload_0_internal(RewriteControl rc) {
transition(vtos, atos);
// According to bytecode histograms, the pairs:
//
@@ -732,7 +747,7 @@ void TemplateTable::aload_0() {
// These bytecodes with a small amount of code are most profitable
// to rewrite.
- if (RewriteFrequentPairs) {
+ if (RewriteFrequentPairs && rc == may_rewrite) {
Label Lrewrite, Ldont_rewrite;
Register Rnext_byte = R3_ARG1,
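Both iload and aload_0 now route through an `_internal` helper taking a RewriteControl argument, so callers that must not patch bytecodes (shared, read-only method data) get identical semantics minus the rewriting. A minimal compilable model of the pattern, with illustrative names for everything not shown in this diff:

```cpp
#include <cstdio>

enum RewriteControl { may_rewrite, may_not_rewrite };

static bool RewriteFrequentPairs = true;  // stand-in for the VM flag

static void iload_internal(RewriteControl rc) {
  // ... push the local onto the operand stack ...
  if (RewriteFrequentPairs && rc == may_rewrite) {
    std::puts("patch iload,iload -> fast_iload2 (or iload,caload -> fast_icaload)");
  } else {
    std::puts("leave the bytecode stream untouched");
  }
}

static void iload()        { iload_internal(may_rewrite); }
static void nofast_iload() { iload_internal(may_not_rewrite); }

int main() {
  iload();         // normal interpretation: rewriting allowed
  nofast_iload();  // e.g. archived methods: bytecodes must stay as-is
  return 0;
}
```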
@@ -2144,6 +2159,12 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ get_cache_and_index_at_bcp(Rcache, 1, index_size);
Label Lresolved, Ldone;
+ Bytecodes::Code code = bytecode();
+ switch (code) {
+ case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
+  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+  default: break;
+ }
+
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
// We are resolved if the indices offset contains the current bytecode.
#if defined(VM_LITTLE_ENDIAN)
@@ -2152,24 +2173,11 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
__ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
// Acquire by cmp-br-isync (see below).
- __ cmpdi(CCR0, Rscratch, (int)bytecode());
+ __ cmpdi(CCR0, Rscratch, (int)code);
__ beq(CCR0, Lresolved);
- address entry = NULL;
- switch (bytecode()) {
- case Bytecodes::_getstatic : // fall through
- case Bytecodes::_putstatic : // fall through
- case Bytecodes::_getfield : // fall through
- case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
- case Bytecodes::_invokevirtual : // fall through
- case Bytecodes::_invokespecial : // fall through
- case Bytecodes::_invokestatic : // fall through
- case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
- case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
- case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
- default : ShouldNotReachHere(); break;
- }
- __ li(R4_ARG2, (int)bytecode());
+ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
+ __ li(R4_ARG2, code);
__ call_VM(noreg, entry, R4_ARG2, true);
// Update registers with resolved info.
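The hunk above makes two changes: `_nofast_*` bytecodes are canonicalized to their plain forms before the already-resolved check, and the per-bytecode switch over runtime entries collapses into the single resolve_from_cache entry, which receives the bytecode as an argument (the SPARC hunk later in this patch does the same). A self-contained sketch of that control flow, with stand-in types in place of the real InterpreterRuntime:

```cpp
#include <cstdio>

enum Code { _getfield, _putfield, _nofast_getfield, _nofast_putfield };

static Code canonicalize(Code code) {
  switch (code) {
    case _nofast_getfield: return _getfield;  // resolves exactly like getfield
    case _nofast_putfield: return _putfield;  // resolves exactly like putfield
    default:               return code;
  }
}

// One runtime entry instead of resolve_get_put / resolve_invoke / ...;
// the runtime dispatches on the bytecode it is handed.
static void resolve_from_cache(Code code) {
  std::printf("resolving as bytecode %d\n", (int)code);
}

int main() {
  resolve_from_cache(canonicalize(_nofast_getfield));  // prints the _getfield id
  return 0;
}
```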
@@ -2350,7 +2358,7 @@ void TemplateTable::pop_and_check_object(Register Roop) {
}
// PPC64: implement volatile loads as fence-store-acquire.
-void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
+void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
Label Lacquire, Lisync;
@@ -2366,7 +2374,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
static address field_branch_table[number_of_states],
static_branch_table[number_of_states];
- address* branch_table = is_static ? static_branch_table : field_branch_table;
+ address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;
// Get field offset.
resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
@@ -2417,7 +2425,14 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
#ifdef ASSERT
__ bind(LFlagInvalid);
__ stop("got invalid flag", 0x654);
+#endif
+ if (!is_static && rc == may_not_rewrite) {
+ // We reuse the code from is_static. It's jumped to via the table above.
+ return;
+ }
+
+#ifdef ASSERT
// __ bind(Lvtos);
address pc_before_fence = __ pc();
__ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
@@ -2434,7 +2449,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[dtos] = __ pc(); // non-volatile_entry point
__ lfdx(F15_ftos, Rclass_or_obj, Roffset);
__ push(dtos);
- if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
+ }
{
Label acquire_double;
__ beq(CCR6, acquire_double); // Volatile?
@@ -2453,7 +2470,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[ftos] = __ pc(); // non-volatile_entry point
__ lfsx(F15_ftos, Rclass_or_obj, Roffset);
__ push(ftos);
- if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch);
+ }
{
Label acquire_float;
__ beq(CCR6, acquire_float); // Volatile?
@@ -2472,7 +2491,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[itos] = __ pc(); // non-volatile_entry point
__ lwax(R17_tos, Rclass_or_obj, Roffset);
__ push(itos);
- if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
+ }
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2483,7 +2504,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[ltos] = __ pc(); // non-volatile_entry point
__ ldx(R17_tos, Rclass_or_obj, Roffset);
__ push(ltos);
- if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
+ }
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2495,7 +2518,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ lbzx(R17_tos, Rclass_or_obj, Roffset);
__ extsb(R17_tos, R17_tos);
__ push(btos);
- if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
+ }
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2506,7 +2531,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[ctos] = __ pc(); // non-volatile_entry point
__ lhzx(R17_tos, Rclass_or_obj, Roffset);
__ push(ctos);
- if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
+ }
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2517,7 +2544,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
branch_table[stos] = __ pc(); // non-volatile_entry point
__ lhax(R17_tos, Rclass_or_obj, Roffset);
__ push(stos);
- if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
+ }
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2530,7 +2559,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ verify_oop(R17_tos);
__ push(atos);
//__ dcbt(R17_tos); // prefetch
- if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
+ }
__ beq(CCR6, Lacquire); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
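getfield_or_static dispatches through a branch table holding one generated entry per tos state, and the may_not_rewrite variant simply shares the static table, whose entries never contain patch_bytecode calls. The table-dispatch idea in stand-alone form, with plain function pointers in place of generated code addresses:

```cpp
#include <cstdio>

enum TosState { btos, ctos, stos, itos, ltos, ftos, dtos, atos, number_of_states };

static void load_int()  { std::puts("lwax; push(itos)"); }  // models one generated entry
static void load_long() { std::puts("ldx; push(ltos)"); }

// One table per variant, filled once when the template is generated.
static void (*field_branch_table[number_of_states])();
static void (*static_branch_table[number_of_states])();

int main() {
  field_branch_table[itos]  = load_int;
  static_branch_table[itos] = load_int;   // nofast_getfield reuses this table
  field_branch_table[ltos]  = load_long;
  static_branch_table[ltos] = load_long;

  // The generated code indexes the table by the field's type and jumps there;
  // here we just call through the pointer.
  static_branch_table[itos]();
  return 0;
}
```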
@@ -2553,6 +2584,10 @@ void TemplateTable::getfield(int byte_no) {
getfield_or_static(byte_no, false);
}
+void TemplateTable::nofast_getfield(int byte_no) {
+ getfield_or_static(byte_no, false, may_not_rewrite);
+}
+
void TemplateTable::getstatic(int byte_no) {
getfield_or_static(byte_no, true);
}
@@ -2643,7 +2678,7 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, boo
}
// PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
-void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
+void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
Label Lvolatile;
const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
@@ -2657,10 +2692,12 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
Rbc = Rscratch3;
const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
- static address field_branch_table[number_of_states],
+ static address field_rw_branch_table[number_of_states],
+ field_norw_branch_table[number_of_states],
static_branch_table[number_of_states];
- address* branch_table = is_static ? static_branch_table : field_branch_table;
+ address* branch_table = is_static ? static_branch_table :
+ (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table);
// Stack (grows up):
// value
@@ -2688,7 +2725,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
// Load from branch table and dispatch (volatile case: one instruction ahead).
__ sldi(Rflags, Rflags, LogBytesPerWord);
- if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile?
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile?
+ }
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0.
__ ldx(Rbtable, Rbtable, Rflags);
@@ -2715,9 +2754,13 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
assert(branch_table[dtos] == 0, "can't compute twice");
branch_table[dtos] = __ pc(); // non-volatile_entry point
__ pop(dtos);
- if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
+ if (!is_static) {
+ pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
+ }
__ stfdx(F15_ftos, Rclass_or_obj, Roffset);
- if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no);
+ }
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2731,7 +2774,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ftos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
__ stfsx(F15_ftos, Rclass_or_obj, Roffset);
- if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no);
+ }
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2745,7 +2790,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(itos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
__ stwx(R17_tos, Rclass_or_obj, Roffset);
- if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no);
+ }
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2759,7 +2806,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ltos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
__ stdx(R17_tos, Rclass_or_obj, Roffset);
- if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no);
+ }
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2773,7 +2822,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(btos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
__ stbx(R17_tos, Rclass_or_obj, Roffset);
- if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no);
+ }
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2787,7 +2838,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ctos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1..
__ sthx(R17_tos, Rclass_or_obj, Roffset);
- if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no);
+ }
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2801,7 +2854,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(stos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
__ sthx(R17_tos, Rclass_or_obj, Roffset);
- if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no);
+ }
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
}
@@ -2815,7 +2870,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(atos);
if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
- if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
+ if (!is_static && rc == may_rewrite) {
+ patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no);
+ }
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ beq(CR_is_vol, Lvolatile); // Volatile?
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
@@ -2839,6 +2896,10 @@ void TemplateTable::putfield(int byte_no) {
putfield_or_static(byte_no, false);
}
+void TemplateTable::nofast_putfield(int byte_no) {
+ putfield_or_static(byte_no, false, may_not_rewrite);
+}
+
void TemplateTable::putstatic(int byte_no) {
putfield_or_static(byte_no, true);
}
@@ -3259,7 +3320,9 @@ void TemplateTable::invokevirtual(int byte_no) {
__ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
__ bfalse(CCR0, LnotFinal);
- patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
+ if (RewriteBytecodes && !UseSharedSpaces) {
+ patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
+ }
invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
__ align(32, 12);
@@ -3795,9 +3858,9 @@ void TemplateTable::instanceof() {
transition(atos, itos);
Label Ldone, Lis_null, Lquicked, Lresolved;
- Register Roffset = R5_ARG3,
+ Register Roffset = R6_ARG4,
RobjKlass = R4_ARG2,
- RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register.
+ RspecifiedKlass = R5_ARG3,
Rcpool = R11_scratch1,
Rtags = R12_scratch2;
diff --git a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
index 60f26c5f1c1..357b3d359bd 100644
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,13 @@
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/defaultStream.hpp"
+#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"
# include <sys/sysinfo.h>
int VM_Version::_features = VM_Version::unknown_m;
-int VM_Version::_measured_cache_line_size = 128; // default value
+int VM_Version::_measured_cache_line_size = 32; // pessimistic init value
const char* VM_Version::_features_str = "";
bool VM_Version::_is_determine_features_test_running = false;
@@ -55,7 +56,9 @@ void VM_Version::initialize() {
// If PowerArchitecturePPC64 hasn't been specified explicitly determine from features.
if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
- if (VM_Version::has_popcntw()) {
+ if (VM_Version::has_lqarx()) {
+ FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
+ } else if (VM_Version::has_popcntw()) {
FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
} else if (VM_Version::has_cmpb()) {
FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
@@ -66,8 +69,14 @@ void VM_Version::initialize() {
}
}
guarantee(PowerArchitecturePPC64 == 0 || PowerArchitecturePPC64 == 5 ||
- PowerArchitecturePPC64 == 6 || PowerArchitecturePPC64 == 7,
- "PowerArchitecturePPC64 should be 0, 5, 6 or 7");
+ PowerArchitecturePPC64 == 6 || PowerArchitecturePPC64 == 7 ||
+ PowerArchitecturePPC64 == 8,
+ "PowerArchitecturePPC64 should be 0, 5, 6, 7, or 8");
+
+ // Power 8: Configure Data Stream Control Register.
+ if (PowerArchitecturePPC64 >= 8) {
+ config_dscr();
+ }
if (!UseSIGTRAP) {
MSG(TrapBasedICMissChecks);
@@ -97,7 +106,7 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
- "ppc64%s%s%s%s%s%s%s%s",
+ "ppc64%s%s%s%s%s%s%s%s%s%s%s%s",
(has_fsqrt() ? " fsqrt" : ""),
(has_isel() ? " isel" : ""),
(has_lxarxeh() ? " lxarxeh" : ""),
@@ -106,11 +115,17 @@ void VM_Version::initialize() {
(has_popcntb() ? " popcntb" : ""),
(has_popcntw() ? " popcntw" : ""),
(has_fcfids() ? " fcfids" : ""),
- (has_vand() ? " vand" : "")
+ (has_vand() ? " vand" : ""),
+ (has_lqarx() ? " lqarx" : ""),
+ (has_vcipher() ? " vcipher" : ""),
+ (has_vpmsumb() ? " vpmsumb" : ""),
+ (has_tcheck() ? " tcheck" : "")
// Make sure number of %s matches num_features!
);
_features_str = os::strdup(buf);
- NOT_PRODUCT(if (Verbose) print_features(););
+ if (Verbose) {
+ print_features();
+ }
// PPC64 supports 8-byte compare-exchange operations (see
// Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
@@ -171,7 +186,86 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
+ // Adjust RTM (Restricted Transactional Memory) flags.
+ if (!has_tcheck() && UseRTMLocking) {
+ // Can't continue because UseRTMLocking affects UseBiasedLocking flag
+ // setting during arguments processing. See use_biased_locking().
+ // VM_Version_init() is executed after UseBiasedLocking is used
+ // in Thread::allocate().
+ vm_exit_during_initialization("RTM instructions are not available on this CPU");
+ }
+ if (UseRTMLocking) {
+#if INCLUDE_RTM_OPT
+ if (!UnlockExperimentalVMOptions) {
+ vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. "
+ "It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
+ } else {
+ warning("UseRTMLocking is only available as experimental option on this platform.");
+ }
+ if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
+ // RTM locking should be used only for applications with
+ // high lock contention. For now we do not use it by default.
+ vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
+ }
+ if (!is_power_of_2(RTMTotalCountIncrRate)) {
+ warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
+ FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
+ }
+ if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
+ warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
+ FLAG_SET_DEFAULT(RTMAbortRatio, 50);
+ }
+ FLAG_SET_ERGO(bool, UseNewFastLockPPC64, false); // Does not implement TM.
+ guarantee(RTMSpinLoopCount > 0, "unsupported");
+#else
+ // Only C2 does RTM locking optimization.
+ // Can't continue because UseRTMLocking affects UseBiasedLocking flag
+ // setting during arguments processing. See use_biased_locking().
+ vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
+#endif
+ } else { // !UseRTMLocking
+ if (UseRTMForStackLocks) {
+ if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
+ warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
+ }
+ FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
+ }
+ if (UseRTMDeopt) {
+ FLAG_SET_DEFAULT(UseRTMDeopt, false);
+ }
+ if (PrintPreciseRTMLockingStatistics) {
+ FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
+ }
+ }
+
+ // This machine does not allow unaligned memory accesses
+ if (UseUnalignedAccesses) {
+ if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
+ warning("Unaligned memory access is not available on this CPU");
+ FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
+ }
+}
+
+bool VM_Version::use_biased_locking() {
+#if INCLUDE_RTM_OPT
+ // RTM locking is most useful when there is high lock contention and
+ // low data contention. With high lock contention the lock is usually
+ // inflated and biased locking is not suitable for that case.
+ // RTM locking code requires that biased locking is off.
+ // Note: we can't switch off UseBiasedLocking in get_processor_features()
+ // because it is used by Thread::allocate() which is called before
+ // VM_Version::initialize().
+ if (UseRTMLocking && UseBiasedLocking) {
+ if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
+ FLAG_SET_DEFAULT(UseBiasedLocking, false);
+ } else {
+ warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
+ UseBiasedLocking = false;
+ }
+ }
+#endif
+ return UseBiasedLocking;
}
void VM_Version::print_features() {
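The use_biased_locking() override above encodes a precedence rule for conflicting flags: RTM requires biased locking off, a defaulted flag is flipped silently, and an explicit user setting is overridden with a warning. The same decision in a stand-alone model, with plain bools in place of the flag machinery:

```cpp
#include <cstdio>

int main() {
  bool UseRTMLocking     = true;
  bool UseBiasedLocking  = true;
  bool biased_is_default = false;  // stand-in for FLAG_IS_DEFAULT(UseBiasedLocking)

  if (UseRTMLocking && UseBiasedLocking) {
    if (biased_is_default) {
      UseBiasedLocking = false;    // silently pick the compatible default
    } else {
      std::puts("warning: biased locking not supported with RTM; disabling");
      UseBiasedLocking = false;    // user's request loses, but gets a warning
    }
  }
  std::printf("UseBiasedLocking = %d\n", UseBiasedLocking);
  return 0;
}
```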
@@ -437,16 +531,19 @@ void VM_Version::determine_features() {
// Don't use R0 in ldarx.
// Keep R3_ARG1 unmodified, it contains &field (see below).
// Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
- a->fsqrt(F3, F4); // code[0] -> fsqrt_m
- a->fsqrts(F3, F4); // code[1] -> fsqrts_m
- a->isel(R7, R5, R6, 0); // code[2] -> isel_m
- a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3] -> lxarx_m
- a->cmpb(R7, R5, R6); // code[4] -> bcmp
- //a->mftgpr(R7, F3); // code[5] -> mftgpr
- a->popcntb(R7, R5); // code[6] -> popcntb
- a->popcntw(R7, R5); // code[7] -> popcntw
- a->fcfids(F3, F4); // code[8] -> fcfids
- a->vand(VR0, VR0, VR0); // code[9] -> vand
+ a->fsqrt(F3, F4); // code[0] -> fsqrt_m
+ a->fsqrts(F3, F4); // code[1] -> fsqrts_m
+ a->isel(R7, R5, R6, 0); // code[2] -> isel_m
+ a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3] -> lxarx_m
+ a->cmpb(R7, R5, R6); // code[4] -> cmpb
+ a->popcntb(R7, R5); // code[5] -> popcntb
+ a->popcntw(R7, R5); // code[6] -> popcntw
+ a->fcfids(F3, F4); // code[7] -> fcfids
+ a->vand(VR0, VR0, VR0); // code[8] -> vand
+ a->lqarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[9] -> lqarx_m
+ a->vcipher(VR0, VR1, VR2); // code[10] -> vcipher
+ a->vpmsumb(VR0, VR1, VR2); // code[11] -> vpmsumb
+ a->tcheck(0); // code[12] -> tcheck
a->blr();
// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@@ -485,11 +582,14 @@ void VM_Version::determine_features() {
if (code[feature_cntr++]) features |= isel_m;
if (code[feature_cntr++]) features |= lxarxeh_m;
if (code[feature_cntr++]) features |= cmpb_m;
- //if(code[feature_cntr++])features |= mftgpr_m;
if (code[feature_cntr++]) features |= popcntb_m;
if (code[feature_cntr++]) features |= popcntw_m;
if (code[feature_cntr++]) features |= fcfids_m;
if (code[feature_cntr++]) features |= vand_m;
+ if (code[feature_cntr++]) features |= lqarx_m;
+ if (code[feature_cntr++]) features |= vcipher_m;
+ if (code[feature_cntr++]) features |= vpmsumb_m;
+ if (code[feature_cntr++]) features |= tcheck_m;
// Print the detection code.
if (PrintAssembly) {
@@ -501,6 +601,69 @@ void VM_Version::determine_features() {
_features = features;
}
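determine_features() emits one probe instruction per feature into a stub, runs it, and afterwards converts the surviving result slots into a bit mask (the real code runs under a SIGILL guard, so a slot stays zero when the CPU traps on the probe). A simplified, compilable model of the mask-building step with faked probe results:

```cpp
#include <cstdint>
#include <cstdio>

enum Feature { fsqrt, isel, lqarx, vcipher, vpmsumb, tcheck, num_features };

int main() {
  // Pretend only the first two probes executed without trapping.
  uint32_t code[num_features] = {1, 1, 0, 0, 0, 0};

  int features = 0;
  for (int f = 0; f < num_features; f++) {
    if (code[f]) features |= (1 << f);   // one bit per successful probe
  }
  std::printf("feature mask 0x%x, has_lqarx: %d\n",
              features, (features & (1 << lqarx)) != 0);
  return 0;
}
```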
+// Power 8: Configure Data Stream Control Register.
+void VM_Version::config_dscr() {
+ assert(has_tcheck(), "Only execute on Power 8 or later!");
+
+ // 7 InstWords for each call (function descriptor + blr instruction).
+ const int code_size = (2+2*7)*BytesPerInstWord;
+
+ // Allocate space for the code.
+ ResourceMark rm;
+ CodeBuffer cb("config_dscr", code_size, 0);
+ MacroAssembler* a = new MacroAssembler(&cb);
+
+ // Emit code.
+ uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->emit_fd();
+ uint32_t *code = (uint32_t *)a->pc();
+ a->mfdscr(R3);
+ a->blr();
+
+ void (*set_dscr)(long) = (void(*)(long))(void *)a->emit_fd();
+ a->mtdscr(R3);
+ a->blr();
+
+ uint32_t *code_end = (uint32_t *)a->pc();
+ a->flush();
+
+ // Print the detection code.
+ if (PrintAssembly) {
+ ttyLocker ttyl;
+ tty->print_cr("Decoding dscr configuration stub at " INTPTR_FORMAT " before execution:", code);
+ Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
+ }
+
+ // Apply the configuration if needed.
+ uint64_t dscr_val = (*get_dscr)();
+ if (Verbose) {
+ tty->print_cr("dscr value was 0x%lx" , dscr_val);
+ }
+ bool change_requested = false;
+ if (DSCR_PPC64 != (uintx)-1) {
+ dscr_val = DSCR_PPC64;
+ change_requested = true;
+ }
+ if (DSCR_DPFD_PPC64 <= 7) {
+ uint64_t mask = 0x7;
+ if ((dscr_val & mask) != DSCR_DPFD_PPC64) {
+ dscr_val = (dscr_val & ~mask) | (DSCR_DPFD_PPC64);
+ change_requested = true;
+ }
+ }
+ if (DSCR_URG_PPC64 <= 7) {
+ uint64_t mask = 0x7 << 6;
+    if ((dscr_val & mask) != (DSCR_URG_PPC64 << 6)) {
+ dscr_val = (dscr_val & ~mask) | (DSCR_URG_PPC64 << 6);
+ change_requested = true;
+ }
+ }
+ if (change_requested) {
+ (*set_dscr)(dscr_val);
+ if (Verbose) {
+ tty->print_cr("dscr was set to 0x%lx" , (*get_dscr)());
+ }
+ }
+}
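config_dscr() read-modify-writes two subfields of the Data Stream Control Register: DPFD (default prefetch depth) in bits 0..2 and URG (urgency) in bits 6..8, using the bit positions assumed by this patch. The same masking arithmetic as a stand-alone program, with example values substituted for the -XX flags; note that each comparison masks and tests the same field it then sets:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t dscr_val = 0;          // pretend mfdscr returned 0
  const uint64_t DSCR_DPFD = 4;   // example flag values, each <= 7
  const uint64_t DSCR_URG  = 6;

  const uint64_t dpfd_mask = 0x7;               // bits 0..2
  if ((dscr_val & dpfd_mask) != DSCR_DPFD)
    dscr_val = (dscr_val & ~dpfd_mask) | DSCR_DPFD;

  const uint64_t urg_mask = (uint64_t)0x7 << 6; // bits 6..8
  if ((dscr_val & urg_mask) != (DSCR_URG << 6))
    dscr_val = (dscr_val & ~urg_mask) | (DSCR_URG << 6);

  std::printf("new dscr: 0x%llx\n", (unsigned long long)dscr_val);
  return 0;
}
```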
static int saved_features = 0;
diff --git a/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp b/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp
index 2bbfdddb21f..6fc76e4cd41 100644
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,10 @@ protected:
popcntw,
fcfids,
vand,
- dcba,
+ lqarx,
+ vcipher,
+ vpmsumb,
+ tcheck,
num_features // last entry to count features
};
enum Feature_Flag_Set {
@@ -55,7 +58,10 @@ protected:
popcntw_m = (1 << popcntw),
fcfids_m = (1 << fcfids ),
vand_m = (1 << vand ),
- dcba_m = (1 << dcba ),
+ lqarx_m = (1 << lqarx ),
+ vcipher_m = (1 << vcipher),
+ vpmsumb_m = (1 << vpmsumb),
+ tcheck_m = (1 << tcheck ),
all_features_m = -1
};
static int _features;
@@ -65,12 +71,16 @@ protected:
static void print_features();
static void determine_features(); // also measures cache line size
+ static void config_dscr(); // Power 8: Configure Data Stream Control Register.
static void determine_section_size();
static void power6_micro_bench();
public:
// Initialization
static void initialize();
+ // Override Abstract_VM_Version implementation
+ static bool use_biased_locking();
+
static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
// CPU instruction support
static bool has_fsqrt() { return (_features & fsqrt_m) != 0; }
@@ -82,7 +92,10 @@ public:
static bool has_popcntw() { return (_features & popcntw_m) != 0; }
static bool has_fcfids() { return (_features & fcfids_m) != 0; }
static bool has_vand() { return (_features & vand_m) != 0; }
- static bool has_dcba() { return (_features & dcba_m) != 0; }
+ static bool has_lqarx() { return (_features & lqarx_m) != 0; }
+ static bool has_vcipher() { return (_features & vcipher_m) != 0; }
+ static bool has_vpmsumb() { return (_features & vpmsumb_m) != 0; }
+ static bool has_tcheck() { return (_features & tcheck_m) != 0; }
static const char* cpu_features() { return _features_str; }
diff --git a/hotspot/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp b/hotspot/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp
index 5931afd5c32..0165fb22e34 100644
--- a/hotspot/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp
+++ b/hotspot/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_ppc_64.hpp"
diff --git a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp
index 2873f441f9d..800e6ef5a74 100644
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp
@@ -74,6 +74,8 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
+define_pd_global(bool, PreserveFramePointer, false);
+
// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
diff --git a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp
index 862611c4252..042cccd2ec2 100644
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -203,7 +203,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
void field_offset_at(int n, Register tmp, Register dest, Register base);
int field_offset_at(Register object, address bcp, int offset);
void fast_iaaccess(int n, address bcp);
- void fast_iagetfield(address bcp);
void fast_iaputfield(address bcp, bool do_store_check );
void index_check(Register array, Register index, int index_shift, Register tmp, Register res);
diff --git a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp
index 3a932ccee55..fa90497fb76 100644
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp
@@ -3019,44 +3019,107 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
// past the store that releases the lock. But TSO is a strong memory model
// and that particular flavor of barrier is a noop, so we can safely elide it.
// Note that we use 1-0 locking by default for the inflated case. We
- // close the resultant (and rare) race by having contented threads in
+ // close the resultant (and rare) race by having contended threads in
// monitorenter periodically poll _owner.
- ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
- ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), Rbox);
- xor3(Rscratch, G2_thread, Rscratch);
- orcc(Rbox, Rscratch, Rbox);
- brx(Assembler::notZero, false, Assembler::pn, done);
- delayed()->
- ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList), Rscratch);
- ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq), Rbox);
- orcc(Rbox, Rscratch, G0);
- if (EmitSync & 65536) {
- Label LSucc ;
- brx(Assembler::notZero, false, Assembler::pn, LSucc);
- delayed()->nop();
- ba(done);
- delayed()->st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));
- bind(LSucc);
- st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));
- if (os::is_MP()) { membar (StoreLoad); }
- ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ), Rscratch);
- andcc(Rscratch, Rscratch, G0);
- brx(Assembler::notZero, false, Assembler::pt, done);
- delayed()->andcc(G0, G0, G0);
- add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
- mov(G2_thread, Rscratch);
- cas_ptr(Rmark, G0, Rscratch);
- // invert icc.zf and goto done
- br_notnull(Rscratch, false, Assembler::pt, done);
- delayed()->cmp(G0, G0);
- ba(done);
- delayed()->cmp(G0, 1);
+ if (EmitSync & 1024) {
+ // Emit code to check that _owner == Self
+ // We could fold the _owner test into subsequent code more efficiently
+ // than using a stand-alone check, but since _owner checking is off by
+ // default we don't bother. We also might consider predicating the
+ // _owner==Self check on Xcheck:jni or running on a debug build.
+ ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch);
+ orcc(Rscratch, G0, G0);
+ brx(Assembler::notZero, false, Assembler::pn, done);
+ delayed()->nop();
+ }
+
+ if (EmitSync & 512) {
+ // classic lock release code absent 1-0 locking
+ // m->Owner = null;
+ // membar #storeload
+ // if (m->cxq|m->EntryList) == null goto Success
+ // if (m->succ != null) goto Success
+ // if CAS (&m->Owner,0,Self) != 0 goto Success
+ // goto SlowPath
+ ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
+ orcc(Rbox, G0, G0);
+ brx(Assembler::notZero, false, Assembler::pn, done);
+ delayed()->nop();
+ st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ if (os::is_MP()) { membar(StoreLoad); }
+ ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
+ ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
+ orcc(Rbox, Rscratch, G0);
+ brx(Assembler::zero, false, Assembler::pt, done);
+ delayed()->
+ ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
+ andcc(Rscratch, Rscratch, G0);
+ brx(Assembler::notZero, false, Assembler::pt, done);
+ delayed()->andcc(G0, G0, G0);
+ add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
+ mov(G2_thread, Rscratch);
+ cas_ptr(Rmark, G0, Rscratch);
+ cmp(Rscratch, G0);
+ // invert icc.zf and goto done
+ brx(Assembler::notZero, false, Assembler::pt, done);
+ delayed()->cmp(G0, G0);
+ br(Assembler::always, false, Assembler::pt, done);
+ delayed()->cmp(G0, 1);
} else {
- brx(Assembler::notZero, false, Assembler::pn, done);
- delayed()->nop();
- ba(done);
- delayed()->st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));
+ // 1-0 form : avoids CAS and MEMBAR in the common case
+ // Do not bother to ratify that m->Owner == Self.
+ ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
+ orcc(Rbox, G0, G0);
+ brx(Assembler::notZero, false, Assembler::pn, done);
+ delayed()->
+ ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
+ ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
+ orcc(Rbox, Rscratch, G0);
+ if (EmitSync & 16384) {
+ // As an optional optimization, if (EntryList|cxq) != null and _succ is null then
+ // we should transfer control directly to the slow-path.
+ // This test makes the reacquire operation below very infrequent.
+ // The logic is equivalent to :
+ // if (cxq|EntryList) == null : Owner=null; goto Success
+ // if succ == null : goto SlowPath
+ // Owner=null; membar #storeload
+ // if succ != null : goto Success
+ // if CAS(&Owner,null,Self) != null goto Success
+ // goto SlowPath
+ brx(Assembler::zero, true, Assembler::pt, done);
+ delayed()->
+ st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
+ andcc(Rscratch, Rscratch, G0) ;
+ brx(Assembler::zero, false, Assembler::pt, done);
+ delayed()->orcc(G0, 1, G0);
+ st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ } else {
+ brx(Assembler::zero, false, Assembler::pt, done);
+ delayed()->
+ st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ }
+ if (os::is_MP()) { membar(StoreLoad); }
+ // Check that _succ is (or remains) non-zero
+ ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
+ andcc(Rscratch, Rscratch, G0);
+ brx(Assembler::notZero, false, Assembler::pt, done);
+ delayed()->andcc(G0, G0, G0);
+ add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
+ mov(G2_thread, Rscratch);
+ cas_ptr(Rmark, G0, Rscratch);
+ cmp(Rscratch, G0);
+ // invert icc.zf and goto done
+ // A slightly better v8+/v9 idiom would be the following:
+ // movrnz Rscratch,1,Rscratch
+ // ba done
+ // xorcc Rscratch,1,G0
+ // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register
+ brx(Assembler::notZero, false, Assembler::pt, done);
+ delayed()->cmp(G0, G0);
+ br(Assembler::always, false, Assembler::pt, done);
+ delayed()->cmp(G0, 1);
}
bind (LStacked);
@@ -3632,23 +3695,11 @@ static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
if (satb_log_enqueue_with_frame == 0) {
generate_satb_log_enqueue(with_frame);
assert(satb_log_enqueue_with_frame != 0, "postcondition.");
- if (G1SATBPrintStubs) {
- tty->print_cr("Generated with-frame satb enqueue:");
- Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
- satb_log_enqueue_with_frame_end,
- tty);
- }
}
} else {
if (satb_log_enqueue_frameless == 0) {
generate_satb_log_enqueue(with_frame);
assert(satb_log_enqueue_frameless != 0, "postcondition.");
- if (G1SATBPrintStubs) {
- tty->print_cr("Generated frameless satb enqueue:");
- Disassembler::decode((u_char*)satb_log_enqueue_frameless,
- satb_log_enqueue_frameless_end,
- tty);
- }
}
}
}
@@ -3841,12 +3892,6 @@ generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
if (dirty_card_log_enqueue == 0) {
generate_dirty_card_log_enqueue(byte_map_base);
assert(dirty_card_log_enqueue != 0, "postcondition.");
- if (G1SATBPrintStubs) {
- tty->print_cr("Generated dirty_card enqueue:");
- Disassembler::decode((u_char*)dirty_card_log_enqueue,
- dirty_card_log_enqueue_end,
- tty);
- }
}
}
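The comment blocks in the unlock path above spell out the release protocol in pseudocode. A compilable C++ model of that protocol, assuming a simplified monitor layout (this is not the real ObjectMonitor, and std::atomic fences stand in for the hand-placed membars):

```cpp
#include <atomic>

struct Monitor {                       // assumed, simplified layout
  std::atomic<void*> owner{nullptr};
  void*              EntryList = nullptr;
  void*              cxq       = nullptr;
  std::atomic<void*> succ{nullptr};
  int                recursions = 0;
};

// Returns true on fast-path success, false when the slow path must run.
static bool fast_unlock(Monitor* m, void* self) {
  if (m->recursions != 0) return false;                  // recursive: slow path
  m->owner.store(nullptr, std::memory_order_release);    // m->Owner = null
  std::atomic_thread_fence(std::memory_order_seq_cst);   // membar #storeload
  if (m->EntryList == nullptr && m->cxq == nullptr) return true;
  if (m->succ.load() != nullptr) return true;            // a successor will retry
  void* expected = nullptr;
  if (m->owner.compare_exchange_strong(expected, self))  // CAS(&Owner, 0, Self)
    return false;                                        // reacquired: must hand off
  return true;                                           // another thread owns it now
}

int main() {
  Monitor m;
  void* self = &m;                        // any distinct pointer models the thread
  m.owner.store(self);
  return fast_unlock(&m, self) ? 0 : 1;   // uncontended: expect fast-path success
}
```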
diff --git a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
index b3dcb8b98c9..2c4e892f467 100644
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2664,6 +2664,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// disallows any pending_exception.
__ mov(L3_box, O1);
+ // Pass in current thread pointer
+ __ mov(G2_thread, O2);
+
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
__ delayed()->mov(L4, O0); // Need oop in O0
diff --git a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
index 342f69b49ca..18e492c65e2 100644
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
@@ -385,7 +385,6 @@ void TemplateTable::fast_aldc(bool wide) {
__ verify_oop(Otos_i);
}
-
void TemplateTable::ldc2_w() {
transition(vtos, vtos);
Label Long, exit;
@@ -430,22 +429,28 @@ void TemplateTable::ldc2_w() {
__ bind(exit);
}
-
void TemplateTable::locals_index(Register reg, int offset) {
__ ldub( at_bcp(offset), reg );
}
-
void TemplateTable::locals_index_wide(Register reg) {
// offset is 2, not 1, because Lbcp points to wide prefix code
__ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}
void TemplateTable::iload() {
+ iload_internal();
+}
+
+void TemplateTable::nofast_iload() {
+ iload_internal(may_not_rewrite);
+}
+
+void TemplateTable::iload_internal(RewriteControl rc) {
transition(vtos, itos);
// Rewrite iload,iload pair into fast_iload2
// iload,caload pair into fast_icaload
- if (RewriteFrequentPairs) {
+ if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done;
// get next byte
@@ -672,8 +677,15 @@ void TemplateTable::aload(int n) {
__ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}
-
void TemplateTable::aload_0() {
+ aload_0_internal();
+}
+
+void TemplateTable::nofast_aload_0() {
+ aload_0_internal(may_not_rewrite);
+}
+
+void TemplateTable::aload_0_internal(RewriteControl rc) {
transition(vtos, atos);
// According to bytecode histograms, the pairs:
@@ -687,7 +699,7 @@ void TemplateTable::aload_0() {
// bytecode into a pair bytecode; otherwise it rewrites the current
// bytecode into _fast_aload_0 that doesn't do the pair check anymore.
//
- if (RewriteFrequentPairs) {
+ if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done;
// get next byte
@@ -731,7 +743,6 @@ void TemplateTable::aload_0() {
}
}
-
void TemplateTable::istore() {
transition(itos, vtos);
locals_index(G3_scratch);
@@ -2045,30 +2056,21 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
Register index,
size_t index_size) {
// Depends on cpCacheOop layout!
+
Label resolved;
-
- assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
- __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
- __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode?
- __ br(Assembler::equal, false, Assembler::pt, resolved);
- __ delayed()->set((int)bytecode(), O1);
-
- address entry;
- switch (bytecode()) {
- case Bytecodes::_getstatic : // fall through
- case Bytecodes::_putstatic : // fall through
- case Bytecodes::_getfield : // fall through
- case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
- case Bytecodes::_invokevirtual : // fall through
- case Bytecodes::_invokespecial : // fall through
- case Bytecodes::_invokestatic : // fall through
- case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
- case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
- case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
- default:
- fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
- break;
+ Bytecodes::Code code = bytecode();
+ switch (code) {
+ case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
+  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+  default: break;
}
+
+ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+ __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
+ __ cmp(Lbyte_code, code); // have we resolved this bytecode?
+ __ br(Assembler::equal, false, Assembler::pt, resolved);
+ __ delayed()->set(code, O1);
+
+ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
// first time invocation - must resolve first
__ call_VM(noreg, entry, O1);
// Update registers with resolved info
@@ -2183,7 +2185,7 @@ void TemplateTable::jvmti_post_field_access(Register Rcache,
}
}
-void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
+void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
Register Rcache = G3_scratch;
@@ -2231,7 +2233,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ load_heap_oop(Rclass, Roffset, Otos_i);
__ verify_oop(Otos_i);
__ push(atos);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@@ -2246,7 +2248,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// itos
__ ld(Rclass, Roffset, Otos_i);
__ push(itos);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@@ -2262,7 +2264,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// load must be atomic
__ ld_long(Rclass, Roffset, Otos_l);
__ push(ltos);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@@ -2277,7 +2279,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// btos
__ ldsb(Rclass, Roffset, Otos_i);
__ push(itos);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@@ -2292,7 +2294,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// ctos
__ lduh(Rclass, Roffset, Otos_i);
__ push(itos);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@@ -2307,7 +2309,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// stos
__ ldsh(Rclass, Roffset, Otos_i);
__ push(itos);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@@ -2323,7 +2325,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// ftos
__ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
__ push(ftos);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
}
__ ba(checkVolatile);
@@ -2335,7 +2337,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// dtos
__ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
__ push(dtos);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
}
@@ -2350,16 +2352,18 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ bind(exit);
}
-
void TemplateTable::getfield(int byte_no) {
getfield_or_static(byte_no, false);
}
+void TemplateTable::nofast_getfield(int byte_no) {
+ getfield_or_static(byte_no, false, may_not_rewrite);
+}
+
void TemplateTable::getstatic(int byte_no) {
getfield_or_static(byte_no, true);
}
-
void TemplateTable::fast_accessfield(TosState state) {
transition(atos, state);
Register Rcache = G3_scratch;
@@ -2544,7 +2548,7 @@ void TemplateTable::pop_and_check_object(Register r) {
__ verify_oop(r);
}
-void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
+void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
Register Rcache = G3_scratch;
Register index = G4_scratch;
@@ -2620,7 +2624,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_i();
pop_and_check_object(Rclass);
__ st(Otos_i, Rclass, Roffset);
- patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
+ if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
__ ba(checkVolatile);
__ delayed()->tst(Lscratch);
}
@@ -2636,7 +2640,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
pop_and_check_object(Rclass);
__ verify_oop(Otos_i);
do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
- patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
+ if (rc == may_rewrite) patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
__ ba(checkVolatile);
__ delayed()->tst(Lscratch);
}
@@ -2653,7 +2657,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_i();
if (!is_static) pop_and_check_object(Rclass);
__ stb(Otos_i, Rclass, Roffset);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
}
__ ba(checkVolatile);
@@ -2670,7 +2674,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_l();
if (!is_static) pop_and_check_object(Rclass);
__ st_long(Otos_l, Rclass, Roffset);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
}
__ ba(checkVolatile);
@@ -2687,7 +2691,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_i();
if (!is_static) pop_and_check_object(Rclass);
__ sth(Otos_i, Rclass, Roffset);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
}
__ ba(checkVolatile);
@@ -2704,7 +2708,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_i();
if (!is_static) pop_and_check_object(Rclass);
__ sth(Otos_i, Rclass, Roffset);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
}
__ ba(checkVolatile);
@@ -2721,7 +2725,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_f();
if (!is_static) pop_and_check_object(Rclass);
__ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
}
__ ba(checkVolatile);
@@ -2735,7 +2739,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_d();
if (!is_static) pop_and_check_object(Rclass);
__ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
}
}
@@ -2809,16 +2813,18 @@ void TemplateTable::fast_storefield(TosState state) {
}
}
-
void TemplateTable::putfield(int byte_no) {
putfield_or_static(byte_no, false);
}
+void TemplateTable::nofast_putfield(int byte_no) {
+ putfield_or_static(byte_no, false, may_not_rewrite);
+}
+
void TemplateTable::putstatic(int byte_no) {
putfield_or_static(byte_no, true);
}
-
void TemplateTable::fast_xaccess(TosState state) {
transition(vtos, state);
Register Rcache = G3_scratch;
@@ -2971,7 +2977,9 @@ void TemplateTable::invokevirtual(int byte_no) {
__ br(Assembler::zero, false, Assembler::pt, notFinal);
__ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
- patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
+ if (RewriteBytecodes && !UseSharedSpaces) {
+ patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
+ }
invokevfinal_helper(Rscratch, Rret);
diff --git a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
index ee2be7dd76d..bad83d550f4 100644
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
int VM_Version::_features = VM_Version::unknown_m;
const char* VM_Version::_features_str = "";
-unsigned int VM_Version::_L2_cache_line_size = 0;
+unsigned int VM_Version::_L2_data_cache_line_size = 0;
void VM_Version::initialize() {
_features = determine_features();
@@ -356,10 +356,17 @@ void VM_Version::initialize() {
(cache_line_size > ContendedPaddingWidth))
ContendedPaddingWidth = cache_line_size;
+ // This machine does not allow unaligned memory accesses
+ if (UseUnalignedAccesses) {
+ if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
+ warning("Unaligned memory access is not available on this CPU");
+ FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
+ }
+
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
- tty->print_cr("L2 cache line size: %u", L2_cache_line_size());
+ tty->print_cr("L2 data cache line size: %u", L2_data_cache_line_size());
tty->print("Allocation");
if (AllocatePrefetchStyle <= 0) {
tty->print_cr(": no prefetching");
diff --git a/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp b/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp
index fa823863bdb..59969ed1089 100644
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp
@@ -96,8 +96,8 @@ protected:
static int _features;
static const char* _features_str;
- static unsigned int _L2_cache_line_size;
- static unsigned int L2_cache_line_size() { return _L2_cache_line_size; }
+ static unsigned int _L2_data_cache_line_size;
+ static unsigned int L2_data_cache_line_size() { return _L2_data_cache_line_size; }
static void print_features();
static int determine_features();
@@ -171,7 +171,7 @@ public:
static const char* cpu_features() { return _features_str; }
// default prefetch block size on sparc
- static intx prefetch_data_size() { return L2_cache_line_size(); }
+ static intx prefetch_data_size() { return L2_data_cache_line_size(); }
// Prefetch
static intx prefetch_copy_interval_in_bytes() {
diff --git a/hotspot/src/cpu/x86/vm/assembler_x86.cpp b/hotspot/src/cpu/x86/vm/assembler_x86.cpp
index f61e0a0917c..2d06abb0086 100644
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp
@@ -3359,6 +3359,20 @@ void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vecto
// Integer vector arithmetic
+void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
+ assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
+ int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
+ emit_int8(0x01);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
+ assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
+ int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
+ emit_int8(0x02);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
void Assembler::paddb(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_simd_arith(0xFC, dst, src, VEX_SIMD_66);
@@ -3379,6 +3393,20 @@ void Assembler::paddq(XMMRegister dst, XMMRegister src) {
emit_simd_arith(0xD4, dst, src, VEX_SIMD_66);
}
+void Assembler::phaddw(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse3(), ""));
+ int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+ emit_int8(0x01);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::phaddd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse3(), ""));
+ int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+ emit_int8(0x02);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
@@ -3804,6 +3832,17 @@ void Assembler::vinsertf128h(XMMRegister dst, Address src) {
emit_int8(0x01);
}
+void Assembler::vextractf128h(XMMRegister dst, XMMRegister src) {
+ assert(VM_Version::supports_avx(), "");
+ bool vector256 = true;
+ int encode = vex_prefix_and_encode(src, xnoreg, dst, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
+ emit_int8(0x19);
+ emit_int8((unsigned char)(0xC0 | encode));
+  // 0x00 - extract from lower 128 bits
+  // 0x01 - extract from upper 128 bits
+ emit_int8(0x01);
+}
+
void Assembler::vextractf128h(Address dst, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
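
For reference, the imm8 operand of VEXTRACTF128 selects which 128-bit half of the
256-bit source is copied out, matching the hard-coded 0x01 above. A minimal scalar
model follows (illustrative only; the Ymm/Xmm structs are invented here, not HotSpot
types):

    #include <cstdint>
    #include <cstring>

    struct Ymm { uint64_t q[4]; };   // models a 256-bit YMM register
    struct Xmm { uint64_t q[2]; };   // models a 128-bit XMM register

    // Scalar model of: vextractf128 dst, src, imm8
    Xmm extractf128(const Ymm& src, int imm8) {
      Xmm dst;
      // imm8 & 1 == 0: copy quadwords 0..1 (lower half)
      // imm8 & 1 == 1: copy quadwords 2..3 (upper half)
      std::memcpy(dst.q, &src.q[(imm8 & 1) * 2], sizeof dst.q);
      return dst;
    }
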
diff --git a/hotspot/src/cpu/x86/vm/assembler_x86.hpp b/hotspot/src/cpu/x86/vm/assembler_x86.hpp
index 3f3fff83d66..742d6c5eed9 100644
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp
@@ -142,8 +142,10 @@ REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
#endif // _LP64
-// JSR 292 fixed register usages:
-REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp);
+// JSR 292
+// On x86, the SP does not have to be saved when invoking method handle intrinsics
+// or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
+REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);
// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
@@ -1777,6 +1779,12 @@ private:
void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
+ // Add horizontal packed integers
+ void vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
+ void vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
+ void phaddw(XMMRegister dst, XMMRegister src);
+ void phaddd(XMMRegister dst, XMMRegister src);
+
// Add packed integers
void paddb(XMMRegister dst, XMMRegister src);
void paddw(XMMRegister dst, XMMRegister src);
@@ -1869,6 +1877,7 @@ private:
// Copy low 128bit into high 128bit of YMM registers.
void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
+ void vextractf128h(XMMRegister dst, XMMRegister src);
// Load/store high 128bit of YMM registers which does not destroy other half.
void vinsertf128h(XMMRegister dst, Address src);
diff --git a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp
index f75eca72868..bec7eed65e9 100644
--- a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp
@@ -343,14 +343,13 @@ LIR_Opr FrameMap::stack_pointer() {
return FrameMap::rsp_opr;
}
-
// JSR 292
+// On x86, there is no need to save the SP, because neither
+// method handle intrinsics nor compiled lambda forms modify it.
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
- assert(rbp == rbp_mh_SP_save, "must be same register");
- return rbp_opr;
+ return LIR_OprFact::illegalOpr;
}
-
bool FrameMap::validate_frame() {
return true;
}
diff --git a/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp b/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
index 0c8d0767ed2..cf1a59a30b3 100644
--- a/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
@@ -360,6 +360,9 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_by
generate_stack_overflow_check(bang_size_in_bytes);
push(rbp);
+ if (PreserveFramePointer) {
+ mov(rbp, rsp);
+ }
#ifdef TIERED
// c2 leaves fpu stack dirty. Clean it on entry
if (UseSSE < 2 ) {
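
The PreserveFramePointer sequence above makes every compiled frame link rbp to its
caller's saved rbp, so native tools can unwind without VM metadata. A minimal sketch
of such a walker, assuming the usual x86-64 layout where the return address sits one
word above the saved rbp (illustrative, not a HotSpot API):

    #include <cstdint>
    #include <cstdio>

    // Walk a frame-pointer chain rooted at some thread's current rbp value.
    void walk_frames(const uint64_t* rbp, int max_depth) {
      for (int i = 0; i < max_depth && rbp != nullptr; i++) {
        uint64_t return_pc = rbp[1];     // [rbp + 8] holds the return address
        std::printf("frame %d: pc=0x%llx\n", i, (unsigned long long)return_pc);
        rbp = (const uint64_t*)rbp[0];   // [rbp] holds the caller's saved rbp
      }
    }
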
diff --git a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp
index fa7e8c3dd0f..152ad2e0189 100644
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp
@@ -754,14 +754,9 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
// WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
// since we do a leave anyway.
- // Pop the return address since we are possibly changing SP (restoring from BP).
+ // Pop the return address.
__ leave();
__ pop(rcx);
-
- // Restore SP from BP if the exception PC is a method handle call site.
- NOT_LP64(__ get_thread(thread);)
- __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
__ jmp(rcx); // jump to exception handler
break;
default: ShouldNotReachHere();
@@ -832,11 +827,6 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// the pop is also necessary to simulate the effect of a ret(0)
__ pop(exception_pc);
- // Restore SP from BP if the exception PC is a method handle call site.
- NOT_LP64(__ get_thread(thread);)
- __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
-
// continue at exception handler (return address removed)
// note: do *not* remove arguments when unwinding the
// activation since the caller assumes having
diff --git a/hotspot/src/cpu/x86/vm/frame_x86.cpp b/hotspot/src/cpu/x86/vm/frame_x86.cpp
index 654f0689ec2..525b13e6844 100644
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp
@@ -224,7 +224,8 @@ bool frame::safe_for_sender(JavaThread *thread) {
if (sender_blob->is_nmethod()) {
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != NULL) {
- if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) {
+ if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
+ nm->method()->is_method_handle_intrinsic()) {
return false;
}
}
@@ -391,10 +392,9 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
-// given unextended SP. The unextended SP might also be the saved SP
-// for MethodHandle call sites.
+// given unextended SP.
#ifdef ASSERT
-void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
+void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
@@ -404,33 +404,23 @@ void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool
address original_pc = nm->get_original_pc(&fr);
assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
- assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
}
#endif
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
void frame::adjust_unextended_sp() {
- // If we are returning to a compiled MethodHandle call site, the
- // saved_fp will in fact be a saved value of the unextended SP. The
- // simplest way to tell whether we are returning to such a call site
- // is as follows:
+  // On x86, sites calling method handle intrinsics and lambda forms are treated
+  // like any other call site. Therefore, no special action is needed when we are
+ // returning to any of these call sites.
nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
if (sender_nm != NULL) {
- // If the sender PC is a deoptimization point, get the original
- // PC. For MethodHandle call site the unextended_sp is stored in
- // saved_fp.
- if (sender_nm->is_deopt_mh_entry(_pc)) {
- DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp));
- _unextended_sp = _fp;
- }
- else if (sender_nm->is_deopt_entry(_pc)) {
+ // If the sender PC is a deoptimization point, get the original PC.
+ if (sender_nm->is_deopt_entry(_pc) ||
+ sender_nm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
}
- else if (sender_nm->is_method_handle_return(_pc)) {
- _unextended_sp = _fp;
- }
}
}
diff --git a/hotspot/src/cpu/x86/vm/frame_x86.hpp b/hotspot/src/cpu/x86/vm/frame_x86.hpp
index 602b8ff186f..3b7cee89f0e 100644
--- a/hotspot/src/cpu/x86/vm/frame_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/frame_x86.hpp
@@ -76,11 +76,11 @@
// [locals and parameters ]
// <- sender sp
-// [1] When the c++ interpreter calls a new method it returns to the frame
+// [1] When the C++ interpreter calls a new method it returns to the frame
// manager which allocates a new frame on the stack. In that case there
// is no real callee of this newly allocated frame. The frame manager is
-// aware of the additional frame(s) and will pop them as nested calls
-// complete. Howevers tTo make it look good in the debugger the frame
+// aware of the additional frame(s) and will pop them as nested calls
+// complete. However, to make it look good in the debugger the frame
// manager actually installs a dummy pc pointing to RecursiveInterpreterActivation
// with a fake interpreter_state* parameter to make it easy to debug
// nested calls.
@@ -88,7 +88,7 @@
// Note that contrary to the layout for the assembly interpreter the
// expression stack allocated for the C++ interpreter is full sized.
// However this is not as bad as it seems as the interpreter frame_manager
-// will truncate the unused space on succesive method calls.
+// will truncate the unused space on successive method calls.
//
// ------------------------------ C++ interpreter ----------------------------------------
@@ -167,10 +167,7 @@
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
- static void verify_deopt_original_pc( nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
- static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
- verify_deopt_original_pc(nm, unextended_sp, true);
- }
+ static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
#endif
public:
diff --git a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp
index 0f1839692d5..7f6a99ad2d6 100644
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp
@@ -94,7 +94,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// find_blob call. This is also why we can have no asserts on the validity
// of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
// -> pd_last_frame should use a specialized version of pd_last_frame which could
- // call a specilaized frame constructor instead of this one.
+ // call a specialized frame constructor instead of this one.
// Then we could use the assert below. However this assert is of somewhat dubious
// value.
// assert(_pc != NULL, "no pc?");
diff --git a/hotspot/src/cpu/x86/vm/globalDefinitions_x86.hpp b/hotspot/src/cpu/x86/vm/globalDefinitions_x86.hpp
index c949433210f..758593ab77d 100644
--- a/hotspot/src/cpu/x86/vm/globalDefinitions_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/globalDefinitions_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,4 +58,9 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#endif
#endif
+#if defined(COMPILER2) && !defined(JAVASE_EMBEDDED)
+// Include Restricted Transactional Memory lock eliding optimization
+#define INCLUDE_RTM_OPT 1
+#endif
+
#endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
diff --git a/hotspot/src/cpu/x86/vm/globals_x86.hpp b/hotspot/src/cpu/x86/vm/globals_x86.hpp
index a6d0fbbb336..372eed721ca 100644
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp
@@ -82,14 +82,13 @@ define_pd_global(size_t, CMSYoungGenPerWorker, 64*M); // default max size of CM
define_pd_global(uintx, TypeProfileLevel, 111);
+define_pd_global(bool, PreserveFramePointer, false);
+
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
\
develop(bool, IEEEPrecision, true, \
"Enables IEEE precision (for INTEL only)") \
\
- product(intx, FenceInstruction, 0, \
- "(Unsafe,Unstable) Experimental") \
- \
product(bool, UseStoreImmI16, true, \
"Use store immediate 16-bits value instruction on x86") \
\
diff --git a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp
index 32af915065d..ec36f2beaaf 100644
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp
@@ -1958,6 +1958,11 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
// Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
// should not be unlocked by "normal" java-level locking and vice-versa. The specification
// doesn't specify what will occur if a program engages in such mixed-mode locking, however.
+// Arguably, given that the spec legislates the JNI case as undefined, our
+// implementation could reasonably *avoid* checking owner in Fast_Unlock().
+// In the interest of performance we elide the m->Owner==Self check in unlock.
+// A perfectly viable alternative is to elide the owner check except when
+// Xcheck:jni is enabled.
void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
assert(boxReg == rax, "");
@@ -1966,24 +1971,6 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
if (EmitSync & 4) {
// Disable - inhibit all inlining. Force control through the slow-path
cmpptr (rsp, 0);
- } else
- if (EmitSync & 8) {
- Label DONE_LABEL;
- if (UseBiasedLocking) {
- biased_locking_exit(objReg, tmpReg, DONE_LABEL);
- }
- // Classic stack-locking code ...
- // Check whether the displaced header is 0
- //(=> recursive unlock)
- movptr(tmpReg, Address(boxReg, 0));
- testptr(tmpReg, tmpReg);
- jccb(Assembler::zero, DONE_LABEL);
- // If not recursive lock, reset the header to displaced header
- if (os::is_MP()) {
- lock();
- }
- cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
- bind(DONE_LABEL);
} else {
Label DONE_LABEL, Stacked, CheckSucc;
@@ -2060,9 +2047,9 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
// the number of loads below (currently 4) to just 2 or 3.
// Refer to the comments in synchronizer.cpp.
// In practice the chain of fetches doesn't seem to impact performance, however.
+ xorptr(boxReg, boxReg);
if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
// Attempt to reduce branch density - AMD's branch predictor.
- xorptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
@@ -2070,7 +2057,6 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
jmpb (DONE_LABEL);
} else {
- xorptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
jccb (Assembler::notZero, DONE_LABEL);
movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
@@ -2093,10 +2079,8 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
bind (CheckSucc);
// Optional pre-test ... it's safe to elide this
- if ((EmitSync & 16) == 0) {
- cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
- jccb (Assembler::zero, LGoSlowPath);
- }
+ cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
+ jccb(Assembler::zero, LGoSlowPath);
// We have a classic Dekker-style idiom:
// ST m->_owner = 0 ; MEMBAR; LD m->_succ
@@ -2109,7 +2093,8 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
// In older IA32 processors MFENCE is slower than lock:add or xchg
// particularly if the write-buffer is full as might be the case if
// if stores closely precede the fence or fence-equivalent instruction.
- // In more modern implementations MFENCE appears faster, however.
+ // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
+ // as the situation has changed with Nehalem and Shanghai.
// (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
// The $lines underlying the top-of-stack should be in M-state.
// The locked add instruction is serializing, of course.
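
In C++ memory-model terms, the ST;MEMBAR;LD sequence discussed above corresponds to
the following sketch (the names are illustrative stand-ins for the ObjectMonitor
fields; the seq_cst fence is what the generated code emits as lock addl [rsp], 0):

    #include <atomic>

    struct Monitor {
      std::atomic<void*> owner;
      std::atomic<void*> succ;
    };

    void* release_and_ratify_successor(Monitor* m) {
      m->owner.store(nullptr, std::memory_order_release);  // ST m->_owner = 0
      std::atomic_thread_fence(std::memory_order_seq_cst); // MEMBAR
      return m->succ.load(std::memory_order_relaxed);      // LD m->_succ
    }
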
@@ -2126,11 +2111,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
if (os::is_MP()) {
- if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
- mfence();
- } else {
- lock (); addptr(Address(rsp, 0), 0);
- }
+ lock(); addptr(Address(rsp, 0), 0);
}
// Ratify _succ remains non-null
cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), 0);
@@ -2179,8 +2160,17 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
}
#else // _LP64
// It's inflated
- movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
- xorptr(boxReg, r15_thread);
+ if (EmitSync & 1024) {
+ // Emit code to check that _owner == Self
+ // We could fold the _owner test into subsequent code more efficiently
+ // than using a stand-alone check, but since _owner checking is off by
+ // default we don't bother. We also might consider predicating the
+ // _owner==Self check on Xcheck:jni or running on a debug build.
+ movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ xorptr(boxReg, r15_thread);
+ } else {
+ xorptr(boxReg, boxReg);
+ }
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
jccb (Assembler::notZero, DONE_LABEL);
movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
@@ -2190,23 +2180,51 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
jmpb (DONE_LABEL);
if ((EmitSync & 65536) == 0) {
+ // Try to avoid passing control into the slow_path ...
Label LSuccess, LGoSlowPath ;
bind (CheckSucc);
+
+  // The following optional optimization can be elided if necessary.
+  // Effectively: if (succ == null) goto SlowPath
+  // It does, however, reduce the window for a race
+  // and thus benefits performance.
cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
jccb (Assembler::zero, LGoSlowPath);
- // I'd much rather use lock:andl m->_owner, 0 as it's faster than the
- // the explicit ST;MEMBAR combination, but masm doesn't currently support
- // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
- // are all faster when the write buffer is populated.
- movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
- if (os::is_MP()) {
- lock (); addl (Address(rsp, 0), 0);
+ if ((EmitSync & 16) && os::is_MP()) {
+ orptr(boxReg, boxReg);
+ xchgptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ } else {
+ movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
+ if (os::is_MP()) {
+ // Memory barrier/fence
+ // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
+ // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
+ // This is faster on Nehalem and AMD Shanghai/Barcelona.
+ // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
+ // We might also restructure (ST Owner=0;barrier;LD _Succ) to
+        // (mov box,0; xchgq box, &m->Owner; LD _succ).
+ lock(); addl(Address(rsp, 0), 0);
+ }
}
cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
jccb (Assembler::notZero, LSuccess);
- movptr (boxReg, (int32_t)NULL_WORD); // box is really EAX
+ // Rare inopportune interleaving - race.
+ // The successor vanished in the small window above.
+ // The lock is contended -- (cxq|EntryList) != null -- and there's no apparent successor.
+ // We need to ensure progress and succession.
+ // Try to reacquire the lock.
+ // If that fails then the new owner is responsible for succession and this
+ // thread needs to take no further action and can exit via the fast path (success).
+ // If the re-acquire succeeds then pass control into the slow path.
+  // As implemented, this latter mode is horrible because we generate more
+  // coherence traffic on the lock *and* artificially extend the critical
+  // section length by virtue of passing control into the slow path.
+
+ // box is really RAX -- the following CMPXCHG depends on that binding
+ // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
+ movptr(boxReg, (int32_t)NULL_WORD);
if (os::is_MP()) { lock(); }
cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
jccb (Assembler::notEqual, LSuccess);
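
The succession recovery emitted here reduces to a CAS attempt, roughly as in the
sketch below (a standalone model, not the monitor's actual interface; cmpxchg
r15_thread against _owner with rax == NULL succeeds only while the lock is free):

    #include <atomic>

    // Returns true if the unlocking thread may exit via the fast path,
    // false if it re-acquired the lock and must enter the slow path.
    bool recover_succession(std::atomic<void*>& owner, void* self) {
      void* expected = nullptr;            // mirrors boxReg = NULL_WORD
      if (!owner.compare_exchange_strong(expected, self)) {
        return true;   // another thread became owner; it inherits succession
      }
      return false;    // we re-acquired; the slow path must pick a successor
    }
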
@@ -2231,10 +2249,6 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
}
#endif
bind(DONE_LABEL);
- // Avoid branch to branch on AMD processors
- if (EmitSync & 32768) {
- nop();
- }
}
}
#endif // COMPILER2
@@ -6090,6 +6104,10 @@ void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_
// We always push rbp, so that on return to interpreter rbp, will be
// restored correctly and we can correct the stack.
push(rbp);
+ // Save caller's stack pointer into RBP if the frame pointer is preserved.
+ if (PreserveFramePointer) {
+ mov(rbp, rsp);
+ }
// Remove word for ebp
framesize -= wordSize;
@@ -6104,6 +6122,11 @@ void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_
// Save RBP register now.
framesize -= wordSize;
movptr(Address(rsp, framesize), rbp);
+ // Save caller's stack pointer into RBP if the frame pointer is preserved.
+ if (PreserveFramePointer) {
+ movptr(rbp, rsp);
+ addptr(rbp, framesize + wordSize);
+ }
}
if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
@@ -6657,7 +6680,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
subl(cnt2, stride2);
jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
// clean upper bits of YMM registers
- vzeroupper();
+ vpxor(vec1, vec1);
// compare wide vectors tail
bind(COMPARE_WIDE_TAIL);
@@ -6672,7 +6695,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
// Identifies the mismatching (higher or lower)16-bytes in the 32-byte vectors.
bind(VECTOR_NOT_EQUAL);
// clean upper bits of YMM registers
- vzeroupper();
+ vpxor(vec1, vec1);
lea(str1, Address(str1, result, scale));
lea(str2, Address(str2, result, scale));
jmp(COMPARE_16_CHARS);
@@ -6931,7 +6954,8 @@ void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Regist
bind(DONE);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
- vzeroupper();
+ vpxor(vec1, vec1);
+ vpxor(vec2, vec2);
}
}
@@ -7065,7 +7089,8 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
BIND(L_check_fill_8_bytes);
// clean upper bits of YMM registers
- vzeroupper();
+ movdl(xtmp, value);
+ pshufd(xtmp, xtmp, 0);
} else {
// Fill 32-byte chunks
pshufd(xtmp, xtmp, 0);
@@ -7228,7 +7253,11 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
bind(L_copy_16_chars_exit);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
- vzeroupper();
+ vpxor(tmp2Reg, tmp2Reg);
+ vpxor(tmp3Reg, tmp3Reg);
+ vpxor(tmp4Reg, tmp4Reg);
+ movdl(tmp1Reg, tmp5);
+ pshufd(tmp1Reg, tmp1Reg, 0);
}
subptr(len, 8);
jccb(Assembler::greater, L_copy_8_chars_exit);
diff --git a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
index b3776ed8c3f..65a2f3bf30b 100644
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
@@ -374,7 +374,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// member_reg - MemberName that was the trailing argument
// temp1_recv_klass - klass of stacked receiver, if needed
// rsi/r13 - interpreter linkage (if interpreted)
- // rcx, rdx, rsi, rdi, r8, r8 - compiler arguments (if compiled)
+ // rcx, rdx, rsi, rdi, r8 - compiler arguments (if compiled)
Label L_incompatible_class_change_error;
switch (iid) {
diff --git a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp
index 1cc10d76622..36457cb8178 100644
--- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp
@@ -126,10 +126,6 @@ void OptoRuntime::generate_exception_blob() {
// rax: exception handler for given
- // Restore SP from BP if the exception PC is a MethodHandle call site.
- __ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
-
// We have a handler in rax, (could be deopt blob)
// rdx - throwing pc, deopt blob will need it.
diff --git a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
index a4b8fbc8cfd..a0c39dd4dca 100644
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2343,12 +2343,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// should be a peal
// +wordSize because of the push above
+ // args are (oop obj, BasicLock* lock, JavaThread* thread)
+ __ push(thread);
__ lea(rax, Address(rbp, lock_slot_rbp_offset));
__ push(rax);
__ push(obj_reg);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
- __ addptr(rsp, 2*wordSize);
+ __ addptr(rsp, 3*wordSize);
#ifdef ASSERT
{
Label L;
diff --git a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
index afc137b69c2..0d27447c49e 100644
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2581,6 +2581,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
__ mov(c_rarg0, obj_reg);
+ __ mov(c_rarg2, r15_thread);
__ mov(r12, rsp); // remember sp
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // align stack as required by ABI
@@ -2590,6 +2591,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
__ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
+ // args are (oop obj, BasicLock* lock, JavaThread* thread)
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
@@ -3393,8 +3395,8 @@ void OptoRuntime::generate_exception_blob() {
// Save callee-saved registers. See x86_64.ad.
- // rbp is an implicitly saved callee saved register (i.e. the calling
- // convention will save restore it in prolog/epilog) Other than that
+ // rbp is an implicitly saved callee saved register (i.e., the calling
+ // convention will save/restore it in the prolog/epilog). Other than that
// there are no callee save registers now that adapter frames are gone.
__ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
@@ -3436,9 +3438,9 @@ void OptoRuntime::generate_exception_blob() {
// Restore callee-saved registers
- // rbp is an implicitly saved callee saved register (i.e. the calling
+ // rbp is an implicitly saved callee-saved register (i.e., the calling
// convention will save restore it in prolog/epilog) Other than that
- // there are no callee save registers no that adapter frames are gone.
+ // there are no callee save registers now that adapter frames are gone.
__ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
@@ -3447,10 +3449,6 @@ void OptoRuntime::generate_exception_blob() {
// rax: exception handler
- // Restore SP from BP if the exception PC is a MethodHandle call site.
- __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
- __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
-
// We have a handler in rax (could be deopt blob).
__ mov(r8, rax);
diff --git a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
index 3d8370f2c5e..9cbffb37176 100644
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
@@ -835,7 +835,8 @@ class StubGenerator: public StubCodeGenerator {
if (UseUnalignedLoadStores && (UseAVX >= 2)) {
// clean upper bits of YMM registers
- __ vzeroupper();
+ __ vpxor(xmm0, xmm0);
+ __ vpxor(xmm1, xmm1);
}
__ addl(qword_count, 8);
__ jccb(Assembler::zero, L_exit);
diff --git a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index 122f94b5d20..5ac6727dce0 100644
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -1352,7 +1352,8 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_end);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
- __ vzeroupper();
+ __ vpxor(xmm0, xmm0);
+ __ vpxor(xmm1, xmm1);
}
} else {
// Copy 32-bytes per iteration
@@ -1429,7 +1430,8 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_end);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
- __ vzeroupper();
+ __ vpxor(xmm0, xmm0);
+ __ vpxor(xmm1, xmm1);
}
} else {
// Copy 32-bytes per iteration
diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86.cpp
index f43903c7ae4..f9756cc89e7 100644
--- a/hotspot/src/cpu/x86/vm/templateTable_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86.cpp
@@ -543,8 +543,16 @@ void TemplateTable::locals_index(Register reg, int offset) {
}
void TemplateTable::iload() {
+ iload_internal();
+}
+
+void TemplateTable::nofast_iload() {
+ iload_internal(may_not_rewrite);
+}
+
+void TemplateTable::iload_internal(RewriteControl rc) {
transition(vtos, itos);
- if (RewriteFrequentPairs) {
+ if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done;
const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
LP64_ONLY(assert(rbx != bc, "register damaged"));
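
The iload/nofast_iload split follows one pattern throughout this file: a single
template body, with bytecode rewriting gated by a RewriteControl argument. Reduced
to its shape (a sketch; the real methods emit assembly rather than calling helpers):

    enum RewriteControl { may_rewrite, may_not_rewrite };

    void iload_internal(RewriteControl rc) {
      if (rc == may_rewrite /* && RewriteFrequentPairs */) {
        // patch the bytecode pair to its fused fast form
      }
      // ...emit the ordinary load of the local
    }

    void iload()        { iload_internal(may_rewrite);     }
    void nofast_iload() { iload_internal(may_not_rewrite); }
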
@@ -815,6 +823,14 @@ void TemplateTable::aload(int n) {
}
void TemplateTable::aload_0() {
+ aload_0_internal();
+}
+
+void TemplateTable::nofast_aload_0() {
+ aload_0_internal(may_not_rewrite);
+}
+
+void TemplateTable::aload_0_internal(RewriteControl rc) {
transition(vtos, atos);
// According to bytecode histograms, the pairs:
//
@@ -837,7 +853,7 @@ void TemplateTable::aload_0() {
// aload_0, iload_1
// These bytecodes with a small amount of code are most profitable
// to rewrite
- if (RewriteFrequentPairs) {
+ if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done;
const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
@@ -2491,29 +2507,21 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
assert_different_registers(Rcache, index, temp);
Label resolved;
- assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
- __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
- __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
- __ jcc(Assembler::equal, resolved);
+
+ Bytecodes::Code code = bytecode();
+ switch (code) {
+ case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
+ case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+ }
+
+ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+ __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
+ __ cmpl(temp, code); // have we resolved this bytecode?
+ __ jcc(Assembler::equal, resolved);
// resolve first time through
- address entry;
- switch (bytecode()) {
- case Bytecodes::_getstatic : // fall through
- case Bytecodes::_putstatic : // fall through
- case Bytecodes::_getfield : // fall through
- case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
- case Bytecodes::_invokevirtual : // fall through
- case Bytecodes::_invokespecial : // fall through
- case Bytecodes::_invokestatic : // fall through
- case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
- case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
- case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
- default:
- fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
- break;
- }
- __ movl(temp, (int)bytecode());
+ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
+ __ movl(temp, code);
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
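
Read in isolation, the canonicalization at the top of resolve_cache_and_index says
that _nofast_ variants resolve exactly like their originals; only the later
patch_bytecode step differs. A reduced model (HotSpot performs this switch inline
on its own Bytecodes::Code enum):

    enum Code { _getfield, _putfield, _nofast_getfield, _nofast_putfield };

    Code canonicalize_for_resolve(Code code) {
      switch (code) {
        case _nofast_getfield: return _getfield;
        case _nofast_putfield: return _putfield;
        default:               return code;
      }
    }
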
@@ -2628,7 +2636,7 @@ void TemplateTable::pop_and_check_object(Register r) {
__ verify_oop(r);
}
-void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
+void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
const Register cache = rcx;
@@ -2660,7 +2668,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ load_signed_byte(rax, field);
__ push(btos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
}
__ jmp(Done);
@@ -2671,7 +2679,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// atos
__ load_heap_oop(rax, field);
__ push(atos);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
}
__ jmp(Done);
@@ -2683,7 +2691,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ movl(rax, field);
__ push(itos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
}
__ jmp(Done);
@@ -2695,7 +2703,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ load_unsigned_short(rax, field);
__ push(ctos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
}
__ jmp(Done);
@@ -2707,7 +2715,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ load_signed_short(rax, field);
__ push(stos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
}
__ jmp(Done);
@@ -2731,7 +2739,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ push(ltos);
// Rewrite bytecode to be faster
- LP64_ONLY(if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
+ LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
__ jmp(Done);
__ bind(notLong);
@@ -2743,7 +2751,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
NOT_LP64(__ fld_s(field));
__ push(ftos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
}
__ jmp(Done);
@@ -2758,7 +2766,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
NOT_LP64(__ fld_d(field));
__ push(dtos);
// Rewrite bytecode to be faster
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
}
#ifdef ASSERT
@@ -2779,6 +2787,10 @@ void TemplateTable::getfield(int byte_no) {
getfield_or_static(byte_no, false);
}
+void TemplateTable::nofast_getfield(int byte_no) {
+ getfield_or_static(byte_no, false, may_not_rewrite);
+}
+
void TemplateTable::getstatic(int byte_no) {
getfield_or_static(byte_no, true);
}
@@ -2870,7 +2882,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
}
}
-void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
+void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);
const Register cache = rcx;
@@ -2911,7 +2923,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(btos);
if (!is_static) pop_and_check_object(obj);
__ movb(field, rax);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@@ -2927,7 +2939,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
if (!is_static) pop_and_check_object(obj);
// Store into the field
do_oop_store(_masm, field, rax, _bs->kind(), false);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@@ -2942,7 +2954,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(itos);
if (!is_static) pop_and_check_object(obj);
__ movl(field, rax);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@@ -2957,7 +2969,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ctos);
if (!is_static) pop_and_check_object(obj);
__ movw(field, rax);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@@ -2972,7 +2984,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(stos);
if (!is_static) pop_and_check_object(obj);
__ movw(field, rax);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@@ -2988,7 +3000,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ltos);
if (!is_static) pop_and_check_object(obj);
__ movq(field, rax);
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@@ -3035,7 +3047,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
if (!is_static) pop_and_check_object(obj);
NOT_LP64( __ fstp_s(field);)
LP64_ONLY( __ movflt(field, xmm0);)
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
}
__ jmp(Done);
@@ -3053,7 +3065,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
if (!is_static) pop_and_check_object(obj);
NOT_LP64( __ fstp_d(field);)
LP64_ONLY( __ movdbl(field, xmm0);)
- if (!is_static) {
+ if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
}
}
@@ -3079,6 +3091,10 @@ void TemplateTable::putfield(int byte_no) {
putfield_or_static(byte_no, false);
}
+void TemplateTable::nofast_putfield(int byte_no) {
+ putfield_or_static(byte_no, false, may_not_rewrite);
+}
+
void TemplateTable::putstatic(int byte_no) {
putfield_or_static(byte_no, true);
}
diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp b/hotspot/src/cpu/x86/vm/templateTable_x86.hpp
similarity index 89%
rename from hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp
rename to hotspot/src/cpu/x86/vm/templateTable_x86.hpp
index dea3d7f5aaf..f8af9b2ba11 100644
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,8 +22,8 @@
*
*/
-#ifndef CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
-#define CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
+#ifndef CPU_X86_VM_TEMPLATETABLE_X86_HPP
+#define CPU_X86_VM_TEMPLATETABLE_X86_HPP
static void prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
@@ -39,4 +39,4 @@
static void index_check(Register array, Register index);
static void index_check_without_pop(Register array, Register index);
-#endif // CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
+#endif // CPU_X86_VM_TEMPLATETABLE_X86_HPP
diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp b/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp
deleted file mode 100644
index fa506a094f8..00000000000
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_TEMPLATETABLE_X86_64_HPP
-#define CPU_X86_VM_TEMPLATETABLE_X86_64_HPP
-
- static void prepare_invoke(int byte_no,
- Register method, // linked method (or i-klass)
- Register index = noreg, // itable index, MethodType, etc.
- Register recv = noreg, // if caller wants to see it
- Register flags = noreg // if caller wants to test it
- );
- static void invokevirtual_helper(Register index, Register recv,
- Register flags);
- static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);
-
- // Helpers
- static void index_check(Register array, Register index);
- static void index_check_without_pop(Register array, Register index);
-
-#endif // CPU_X86_VM_TEMPLATETABLE_X86_64_HPP
diff --git a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
index e702cf8dd02..61267632209 100644
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
@@ -379,15 +379,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
};
};
-
-void VM_Version::get_cpu_info_wrapper() {
- get_cpu_info_stub(&_cpuid_info);
-}
-
-#ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
- #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
-#endif
-
void VM_Version::get_processor_features() {
_cpu = 4; // 486 by default
@@ -401,9 +392,7 @@ void VM_Version::get_processor_features() {
if (!Use486InstrsOnly) {
// Get raw processor info
- // Some platforms (like Win*) need a wrapper around here
- // in order to properly handle SEGV for YMM registers test.
- CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper);
+ get_cpu_info_stub(&_cpuid_info);
assert_is_initialized();
_cpu = extended_cpu_family();
@@ -980,6 +969,11 @@ void VM_Version::get_processor_features() {
(cache_line_size > ContendedPaddingWidth))
ContendedPaddingWidth = cache_line_size;
+ // This machine allows unaligned memory accesses
+ if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
+ FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
+ }
+
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Logical CPUs per core: %u",
diff --git a/hotspot/src/cpu/x86/vm/x86.ad b/hotspot/src/cpu/x86/vm/x86.ad
index 42cc708a610..5c99066206e 100644
--- a/hotspot/src/cpu/x86/vm/x86.ad
+++ b/hotspot/src/cpu/x86/vm/x86.ad
@@ -490,7 +490,7 @@ source_hpp %{
class NativeJump;
class CallStubImpl {
-
+
//--------------------------------------------------------------
//---< Used for optimization in Compile::shorten_branches >---
//--------------------------------------------------------------
@@ -500,9 +500,9 @@ class CallStubImpl {
static uint size_call_trampoline() {
return 0; // no call trampolines on this platform
}
-
+
// number of relocations needed by a call trampoline stub
- static uint reloc_call_trampoline() {
+ static uint reloc_call_trampoline() {
return 0; // no call trampolines on this platform
}
};
@@ -623,6 +623,22 @@ const bool Matcher::match_rule_supported(int opcode) {
if ((UseSSE < 4) && (UseAVX < 1)) // only with SSE4_1 or AVX
return false;
break;
+ case Op_AddReductionVL:
+ if (UseAVX < 3) // only EVEX : vector connectivity becomes an issue here
+ return false;
+ case Op_AddReductionVI:
+ if (UseSSE < 3) // requires at least SSE3
+ return false;
+ case Op_MulReductionVI:
+ if (UseSSE < 4) // requires at least SSE4
+ return false;
+ case Op_AddReductionVF:
+ case Op_AddReductionVD:
+ case Op_MulReductionVF:
+ case Op_MulReductionVD:
+ if (UseSSE < 1) // requires at least SSE
+ return false;
+ break;
case Op_CompareAndSwapL:
#ifdef _LP64
case Op_CompareAndSwapP:
@@ -914,21 +930,6 @@ static inline jdouble replicate8_imm(int con, int width) {
encode %{
- enc_class preserve_SP %{
- debug_only(int off0 = cbuf.insts_size());
- MacroAssembler _masm(&cbuf);
- // RBP is preserved across all calls, even compiled calls.
- // Use it to preserve RSP in places where the callee might change the SP.
- __ movptr(rbp_mh_SP_save, rsp);
- debug_only(int off1 = cbuf.insts_size());
- assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
- %}
-
- enc_class restore_SP %{
- MacroAssembler _masm(&cbuf);
- __ movptr(rsp, rbp_mh_SP_save);
- %}
-
enc_class call_epilog %{
if (VerifyStackAtCalls) {
// Check that stack depth is unchanged: find majik cookie on stack
@@ -2532,6 +2533,574 @@ instruct Repl4D_zero(vecY dst, immD0 zero) %{
ins_pipe( fpu_reg_reg );
%}
+// ====================REDUCTION ARITHMETIC=======================================
+
+instruct rsadd2I_reduction_reg(rRegI dst, rRegI src1, vecD src2, regF tmp, regF tmp2) %{
+ predicate(UseSSE > 2 && UseAVX == 0);
+ match(Set dst (AddReductionVI src1 src2));
+ effect(TEMP tmp2, TEMP tmp);
+ format %{ "movdqu $tmp2,$src2\n\t"
+ "phaddd $tmp2,$tmp2\n\t"
+ "movd $tmp,$src1\n\t"
+ "paddd $tmp,$tmp2\n\t"
+ "movd $dst,$tmp\t! add reduction2I" %}
+ ins_encode %{
+ __ movdqu($tmp2$$XMMRegister, $src2$$XMMRegister);
+ __ phaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister);
+ __ movdl($tmp$$XMMRegister, $src1$$Register);
+ __ paddd($tmp$$XMMRegister, $tmp2$$XMMRegister);
+ __ movdl($dst$$Register, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
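
Each reduction pattern computes the same scalar result; the 2-lane integer form
above is equivalent to this sketch (illustrative C++, not generated code):

    // AddReductionVI, two lanes: scalar src1 plus the sum of src2's lanes.
    int add_reduction2I(int src1, const int src2[2]) {
      return src1 + src2[0] + src2[1];
    }

phaddd folds adjacent lanes pairwise, which is why the 4-lane variant below needs
two phaddd steps and the 8-lane AVX variant also extracts the upper 128 bits
before its final vpaddd.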
+
+instruct rvadd2I_reduction_reg(rRegI dst, rRegI src1, vecD src2, regF tmp, regF tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (AddReductionVI src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "vphaddd $tmp,$src2,$src2\n\t"
+ "movd $tmp2,$src1\n\t"
+ "vpaddd $tmp2,$tmp2,$tmp\n\t"
+ "movd $dst,$tmp2\t! add reduction2I" %}
+ ins_encode %{
+ __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, false);
+ __ movdl($tmp2$$XMMRegister, $src1$$Register);
+ __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, false);
+ __ movdl($dst$$Register, $tmp2$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rsadd4I_reduction_reg(rRegI dst, rRegI src1, vecX src2, regF tmp, regF tmp2) %{
+ predicate(UseSSE > 2 && UseAVX == 0);
+ match(Set dst (AddReductionVI src1 src2));
+ effect(TEMP tmp2, TEMP tmp);
+ format %{ "movdqu $tmp2,$src2\n\t"
+ "phaddd $tmp2,$tmp2\n\t"
+ "phaddd $tmp2,$tmp2\n\t"
+ "movd $tmp,$src1\n\t"
+ "paddd $tmp,$tmp2\n\t"
+ "movd $dst,$tmp\t! add reduction4I" %}
+ ins_encode %{
+ __ movdqu($tmp2$$XMMRegister, $src2$$XMMRegister);
+ __ phaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister);
+ __ phaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister);
+ __ movdl($tmp$$XMMRegister, $src1$$Register);
+ __ paddd($tmp$$XMMRegister, $tmp2$$XMMRegister);
+ __ movdl($dst$$Register, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvadd4I_reduction_reg(rRegI dst, rRegI src1, vecX src2, regF tmp, regF tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (AddReductionVI src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "vphaddd $tmp,$src2,$src2\n\t"
+ "vphaddd $tmp,$tmp,$tmp2\n\t"
+ "movd $tmp2,$src1\n\t"
+ "vpaddd $tmp2,$tmp2,$tmp\n\t"
+ "movd $dst,$tmp2\t! add reduction4I" %}
+ ins_encode %{
+ __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, false);
+ __ vphaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, false);
+ __ movdl($tmp2$$XMMRegister, $src1$$Register);
+ __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, false);
+ __ movdl($dst$$Register, $tmp2$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvadd8I_reduction_reg(rRegI dst, rRegI src1, vecY src2, regF tmp, regF tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (AddReductionVI src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "vphaddd $tmp,$src2,$src2\n\t"
+ "vphaddd $tmp,$tmp,$tmp2\n\t"
+ "vextractf128 $tmp2,$tmp\n\t"
+ "vpaddd $tmp,$tmp,$tmp2\n\t"
+ "movd $tmp2,$src1\n\t"
+ "vpaddd $tmp2,$tmp2,$tmp\n\t"
+ "movd $dst,$tmp2\t! add reduction8I" %}
+ ins_encode %{
+ __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, true);
+ __ vphaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, true);
+ __ vextractf128h($tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, false);
+ __ movdl($tmp2$$XMMRegister, $src1$$Register);
+ __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, false);
+ __ movdl($dst$$Register, $tmp2$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rsadd2F_reduction_reg(regF dst, regF src1, vecD src2, regF tmp, regF tmp2) %{
+ predicate(UseSSE >= 1 && UseAVX == 0);
+ match(Set dst (AddReductionVF src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "movdqu $tmp,$src1\n\t"
+ "addss $tmp,$src2\n\t"
+ "pshufd $tmp2,$src2,0x01\n\t"
+ "addss $tmp,$tmp2\n\t"
+ "movdqu $dst,$tmp\t! add reduction2F" %}
+ ins_encode %{
+ __ movdqu($tmp$$XMMRegister, $src1$$XMMRegister);
+ __ addss($tmp$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x01);
+ __ addss($tmp$$XMMRegister, $tmp2$$XMMRegister);
+ __ movdqu($dst$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvadd2F_reduction_reg(regF dst, regF src1, vecD src2, regF tmp, regF tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (AddReductionVF src1 src2));
+ effect(TEMP tmp2, TEMP tmp);
+ format %{ "vaddss $tmp2,$src1,$src2\n\t"
+ "pshufd $tmp,$src2,0x01\n\t"
+ "vaddss $dst,$tmp2,$tmp\t! add reduction2F" %}
+ ins_encode %{
+ __ vaddss($tmp2$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
+ __ vaddss($dst$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rsadd4F_reduction_reg(regF dst, regF src1, vecX src2, regF tmp, regF tmp2) %{
+ predicate(UseSSE >= 1 && UseAVX == 0);
+ match(Set dst (AddReductionVF src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "movdqu $tmp,$src1\n\t"
+ "addss $tmp,$src2\n\t"
+ "pshufd $tmp2,$src2,0x01\n\t"
+ "addss $tmp,$tmp2\n\t"
+ "pshufd $tmp2,$src2,0x02\n\t"
+ "addss $tmp,$tmp2\n\t"
+ "pshufd $tmp2,$src2,0x03\n\t"
+ "addss $tmp,$tmp2\n\t"
+ "movdqu $dst,$tmp\t! add reduction4F" %}
+ ins_encode %{
+ __ movdqu($tmp$$XMMRegister, $src1$$XMMRegister);
+ __ addss($tmp$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x01);
+ __ addss($tmp$$XMMRegister, $tmp2$$XMMRegister);
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x02);
+ __ addss($tmp$$XMMRegister, $tmp2$$XMMRegister);
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x03);
+ __ addss($tmp$$XMMRegister, $tmp2$$XMMRegister);
+ __ movdqu($dst$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvadd4F_reduction_reg(regF dst, regF src1, vecX src2, regF tmp, regF tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (AddReductionVF src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "vaddss $tmp2,$src1,$src2\n\t"
+ "pshufd $tmp,$src2,0x01\n\t"
+ "vaddss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$src2,0x02\n\t"
+ "vaddss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$src2,0x03\n\t"
+ "vaddss $dst,$tmp2,$tmp\t! add reduction4F" %}
+ ins_encode %{
+ __ vaddss($tmp2$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
+ __ vaddss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
+ __ vaddss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
+ __ vaddss($dst$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct radd8F_reduction_reg(regF dst, regF src1, vecY src2, regF tmp, regF tmp2, regF tmp3) %{
+ predicate(UseAVX > 0);
+ match(Set dst (AddReductionVF src1 src2));
+ effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
+ format %{ "vaddss $tmp2,$src1,$src2\n\t"
+ "pshufd $tmp,$src2,0x01\n\t"
+ "vaddss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$src2,0x02\n\t"
+ "vaddss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$src2,0x03\n\t"
+ "vaddss $tmp2,$tmp2,$tmp\n\t"
+ "vextractf128 $tmp3,$src2\n\t"
+ "vaddss $tmp2,$tmp2,$tmp3\n\t"
+ "pshufd $tmp,$tmp3,0x01\n\t"
+ "vaddss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$tmp3,0x02\n\t"
+ "vaddss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$tmp3,0x03\n\t"
+ "vaddss $dst,$tmp2,$tmp\t! add reduction8F" %}
+ ins_encode %{
+ __ vaddss($tmp2$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
+ __ vaddss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
+ __ vaddss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
+ __ vaddss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ vextractf128h($tmp3$$XMMRegister, $src2$$XMMRegister);
+ __ vaddss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $tmp3$$XMMRegister, 0x01);
+ __ vaddss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $tmp3$$XMMRegister, 0x02);
+ __ vaddss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $tmp3$$XMMRegister, 0x03);
+ __ vaddss($dst$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rsadd2D_reduction_reg(regD dst, regD src1, vecX src2, regD tmp) %{
+ predicate(UseSSE >= 1 && UseAVX == 0);
+ match(Set dst (AddReductionVD src1 src2));
+ effect(TEMP tmp, TEMP dst);
+ format %{ "movdqu $tmp,$src1\n\t"
+ "addsd $tmp,$src2\n\t"
+ "pshufd $dst,$src2,0xE\n\t"
+ "addsd $dst,$tmp\t! add reduction2D" %}
+ ins_encode %{
+ __ movdqu($tmp$$XMMRegister, $src1$$XMMRegister);
+ __ addsd($tmp$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($dst$$XMMRegister, $src2$$XMMRegister, 0xE);
+ __ addsd($dst$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvadd2D_reduction_reg(regD dst, regD src1, vecX src2, regD tmp, regD tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (AddReductionVD src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "vaddsd $tmp2,$src1,$src2\n\t"
+ "pshufd $tmp,$src2,0xE\n\t"
+ "vaddsd $dst,$tmp2,$tmp\t! add reduction2D" %}
+ ins_encode %{
+ __ vaddsd($tmp2$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
+ __ vaddsd($dst$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvadd4D_reduction_reg(regD dst, regD src1, vecY src2, regD tmp, regD tmp2, regD tmp3) %{
+ predicate(UseAVX > 0);
+ match(Set dst (AddReductionVD src1 src2));
+ effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
+ format %{ "vaddsd $tmp2,$src1,$src2\n\t"
+ "pshufd $tmp,$src2,0xE\n\t"
+ "vaddsd $tmp2,$tmp2,$tmp\n\t"
+ "vextractf128 $tmp3,$src2\n\t"
+ "vaddsd $tmp2,$tmp2,$tmp3\n\t"
+ "pshufd $tmp,$tmp3,0xE\n\t"
+ "vaddsd $dst,$tmp2,$tmp\t! add reduction4D" %}
+ ins_encode %{
+ __ vaddsd($tmp2$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
+ __ vaddsd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ vextractf128h($tmp3$$XMMRegister, $src2$$XMMRegister);
+ __ vaddsd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $tmp3$$XMMRegister, 0xE);
+ __ vaddsd($dst$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rsmul2I_reduction_reg(rRegI dst, rRegI src1, vecD src2, regF tmp, regF tmp2) %{
+ predicate(UseSSE > 3 && UseAVX == 0);
+ match(Set dst (MulReductionVI src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "pshufd $tmp2,$src2,0x1\n\t"
+ "pmulld $tmp2,$src2\n\t"
+ "movd $tmp,$src1\n\t"
+ "pmulld $tmp2,$tmp\n\t"
+ "movd $dst,$tmp2\t! mul reduction2I" %}
+ ins_encode %{
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
+ __ pmulld($tmp2$$XMMRegister, $src2$$XMMRegister);
+ __ movdl($tmp$$XMMRegister, $src1$$Register);
+ __ pmulld($tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ movdl($dst$$Register, $tmp2$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvmul2I_reduction_reg(rRegI dst, rRegI src1, vecD src2, regF tmp, regF tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (MulReductionVI src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "pshufd $tmp2,$src2,0x1\n\t"
+ "vpmulld $tmp,$src2,$tmp2\n\t"
+ "movd $tmp2,$src1\n\t"
+ "vpmulld $tmp2,$tmp,$tmp2\n\t"
+ "movd $dst,$tmp2\t! mul reduction2I" %}
+ ins_encode %{
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
+ __ vpmulld($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, false);
+ __ movdl($tmp2$$XMMRegister, $src1$$Register);
+ __ vpmulld($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, false);
+ __ movdl($dst$$Register, $tmp2$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rsmul4I_reduction_reg(rRegI dst, rRegI src1, vecX src2, regF tmp, regF tmp2) %{
+ predicate(UseSSE > 3 && UseAVX == 0);
+ match(Set dst (MulReductionVI src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "pshufd $tmp2,$src2,0xE\n\t"
+ "pmulld $tmp2,$src2\n\t"
+ "pshufd $tmp,$tmp2,0x1\n\t"
+ "pmulld $tmp2,$tmp\n\t"
+ "movd $tmp,$src1\n\t"
+ "pmulld $tmp2,$tmp\n\t"
+ "movd $dst,$tmp2\t! mul reduction4I" %}
+ ins_encode %{
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0xE);
+ __ pmulld($tmp2$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x1);
+ __ pmulld($tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ movdl($tmp$$XMMRegister, $src1$$Register);
+ __ pmulld($tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ movdl($dst$$Register, $tmp2$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvmul4I_reduction_reg(rRegI dst, rRegI src1, vecX src2, regF tmp, regF tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (MulReductionVI src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "pshufd $tmp2,$src2,0xE\n\t"
+ "vpmulld $tmp,$src2,$tmp2\n\t"
+ "pshufd $tmp2,$tmp,0x1\n\t"
+ "vpmulld $tmp,$tmp,$tmp2\n\t"
+ "movd $tmp2,$src1\n\t"
+ "vpmulld $tmp2,$tmp,$tmp2\n\t"
+ "movd $dst,$tmp2\t! mul reduction4I" %}
+ ins_encode %{
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0xE);
+ __ vpmulld($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, false);
+ __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0x1);
+ __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, false);
+ __ movdl($tmp2$$XMMRegister, $src1$$Register);
+ __ vpmulld($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, false);
+ __ movdl($dst$$Register, $tmp2$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvmul8I_reduction_reg(rRegI dst, rRegI src1, vecY src2, regF tmp, regF tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (MulReductionVI src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "vextractf128 $tmp,$src2\n\t"
+ "vpmulld $tmp,$tmp,$src2\n\t"
+ "pshufd $tmp2,$tmp,0xE\n\t"
+ "vpmulld $tmp,$tmp,$tmp2\n\t"
+ "pshufd $tmp2,$tmp,0x1\n\t"
+ "vpmulld $tmp,$tmp,$tmp2\n\t"
+ "movd $tmp2,$src1\n\t"
+ "vpmulld $tmp2,$tmp,$tmp2\n\t"
+ "movd $dst,$tmp2\t! mul reduction8I" %}
+ ins_encode %{
+ __ vextractf128h($tmp$$XMMRegister, $src2$$XMMRegister);
+ __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, false);
+ __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
+ __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, false);
+ __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0x1);
+ __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, false);
+ __ movdl($tmp2$$XMMRegister, $src1$$Register);
+ __ vpmulld($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, false);
+ __ movdl($dst$$Register, $tmp2$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
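+// Unlike the FP reductions, integer multiplication is associative, so
+// the 8I case above folds halves in a log2 tree: the upper 128-bit half
+// onto the lower (vextractf128 + vpmulld), then lanes [2,3] onto [0,1],
+// then lane 1 onto lane 0, and finally src1 is multiplied in. E.g. for
+// src2 = [a..h]: [ae,bf,cg,dh] -> [ae*cg, bf*dh] -> ae*cg*bf*dh.
+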
+instruct rsmul2F_reduction_reg(regF dst, regF src1, vecD src2, regF tmp, regF tmp2) %{
+ predicate(UseSSE >= 1 && UseAVX == 0);
+ match(Set dst (MulReductionVF src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "movdqu $tmp,$src1\n\t"
+ "mulss $tmp,$src2\n\t"
+ "pshufd $tmp2,$src2,0x01\n\t"
+ "mulss $tmp,$tmp2\n\t"
+            "movdqu $dst,$tmp\t! mul reduction2F" %}
+ ins_encode %{
+ __ movdqu($tmp$$XMMRegister, $src1$$XMMRegister);
+ __ mulss($tmp$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x01);
+ __ mulss($tmp$$XMMRegister, $tmp2$$XMMRegister);
+ __ movdqu($dst$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvmul2F_reduction_reg(regF dst, regF src1, vecD src2, regF tmp, regF tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (MulReductionVF src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "vmulss $tmp2,$src1,$src2\n\t"
+ "pshufd $tmp,$src2,0x01\n\t"
+            "vmulss $dst,$tmp2,$tmp\t! mul reduction2F" %}
+ ins_encode %{
+ __ vmulss($tmp2$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
+ __ vmulss($dst$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rsmul4F_reduction_reg(regF dst, regF src1, vecX src2, regF tmp, regF tmp2) %{
+ predicate(UseSSE >= 1 && UseAVX == 0);
+ match(Set dst (MulReductionVF src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "movdqu $tmp,$src1\n\t"
+ "mulss $tmp,$src2\n\t"
+ "pshufd $tmp2,$src2,0x01\n\t"
+ "mulss $tmp,$tmp2\n\t"
+ "pshufd $tmp2,$src2,0x02\n\t"
+ "mulss $tmp,$tmp2\n\t"
+ "pshufd $tmp2,$src2,0x03\n\t"
+ "mulss $tmp,$tmp2\n\t"
+            "movdqu $dst,$tmp\t! mul reduction4F" %}
+ ins_encode %{
+ __ movdqu($tmp$$XMMRegister, $src1$$XMMRegister);
+ __ mulss($tmp$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x01);
+ __ mulss($tmp$$XMMRegister, $tmp2$$XMMRegister);
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x02);
+ __ mulss($tmp$$XMMRegister, $tmp2$$XMMRegister);
+ __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x03);
+ __ mulss($tmp$$XMMRegister, $tmp2$$XMMRegister);
+ __ movdqu($dst$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvmul4F_reduction_reg(regF dst, regF src1, vecX src2, regF tmp, regF tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (MulReductionVF src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "vmulss $tmp2,$src1,$src2\n\t"
+ "pshufd $tmp,$src2,0x01\n\t"
+ "vmulss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$src2,0x02\n\t"
+ "vmulss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$src2,0x03\n\t"
+            "vmulss $dst,$tmp2,$tmp\t! mul reduction4F" %}
+ ins_encode %{
+ __ vmulss($tmp2$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
+ __ vmulss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
+ __ vmulss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
+ __ vmulss($dst$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvmul8F_reduction_reg(regF dst, regF src1, vecY src2, regF tmp, regF tmp2, regF tmp3) %{
+ predicate(UseAVX > 0);
+ match(Set dst (MulReductionVF src1 src2));
+ effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
+ format %{ "vmulss $tmp2,$src1,$src2\n\t"
+ "pshufd $tmp,$src2,0x01\n\t"
+ "vmulss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$src2,0x02\n\t"
+ "vmulss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$src2,0x03\n\t"
+ "vmulss $tmp2,$tmp2,$tmp\n\t"
+ "vextractf128 $tmp3,$src2\n\t"
+ "vmulss $tmp2,$tmp2,$tmp3\n\t"
+ "pshufd $tmp,$tmp3,0x01\n\t"
+ "vmulss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$tmp3,0x02\n\t"
+ "vmulss $tmp2,$tmp2,$tmp\n\t"
+ "pshufd $tmp,$tmp3,0x03\n\t"
+ "vmulss $dst,$tmp2,$tmp\t! mul reduction8F" %}
+ ins_encode %{
+ __ vmulss($tmp2$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
+ __ vmulss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
+ __ vmulss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
+ __ vmulss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ vextractf128h($tmp3$$XMMRegister, $src2$$XMMRegister);
+ __ vmulss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $tmp3$$XMMRegister, 0x01);
+ __ vmulss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $tmp3$$XMMRegister, 0x02);
+ __ vmulss($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $tmp3$$XMMRegister, 0x03);
+ __ vmulss($dst$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
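+// As with the FP add reductions, the FP multiply reductions keep a
+// strictly ordered scalar chain (mulss/vmulss) rather than a tree, so
+// the rounding matches a sequential scalar loop.
+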
+instruct rsmul2D_reduction_reg(regD dst, regD src1, vecX src2, regD tmp) %{
+ predicate(UseSSE >= 1 && UseAVX == 0);
+ match(Set dst (MulReductionVD src1 src2));
+ effect(TEMP tmp, TEMP dst);
+ format %{ "movdqu $tmp,$src1\n\t"
+ "mulsd $tmp,$src2\n\t"
+ "pshufd $dst,$src2,0xE\n\t"
+            "mulsd $dst,$tmp\t! mul reduction2D" %}
+ ins_encode %{
+ __ movdqu($tmp$$XMMRegister, $src1$$XMMRegister);
+ __ mulsd($tmp$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($dst$$XMMRegister, $src2$$XMMRegister, 0xE);
+ __ mulsd($dst$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvmul2D_reduction_reg(regD dst, regD src1, vecX src2, regD tmp, regD tmp2) %{
+ predicate(UseAVX > 0);
+ match(Set dst (MulReductionVD src1 src2));
+ effect(TEMP tmp, TEMP tmp2);
+ format %{ "vmulsd $tmp2,$src1,$src2\n\t"
+ "pshufd $tmp,$src2,0xE\n\t"
+ "vmulsd $dst,$tmp2,$tmp\t! mul reduction2D" %}
+ ins_encode %{
+ __ vmulsd($tmp2$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
+ __ vmulsd($dst$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct rvmul4D_reduction_reg(regD dst, regD src1, vecY src2, regD tmp, regD tmp2, regD tmp3) %{
+ predicate(UseAVX > 0);
+ match(Set dst (MulReductionVD src1 src2));
+ effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
+ format %{ "vmulsd $tmp2,$src1,$src2\n\t"
+ "pshufd $tmp,$src2,0xE\n\t"
+ "vmulsd $tmp2,$tmp2,$tmp\n\t"
+ "vextractf128 $tmp3,$src2\n\t"
+ "vmulsd $tmp2,$tmp2,$tmp3\n\t"
+ "pshufd $tmp,$tmp3,0xE\n\t"
+ "vmulsd $dst,$tmp2,$tmp\t! mul reduction4D" %}
+ ins_encode %{
+ __ vmulsd($tmp2$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
+ __ vmulsd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ __ vextractf128h($tmp3$$XMMRegister, $src2$$XMMRegister);
+ __ vmulsd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp3$$XMMRegister);
+ __ pshufd($tmp$$XMMRegister, $tmp3$$XMMRegister, 0xE);
+ __ vmulsd($dst$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
// ====================VECTOR ARITHMETIC=======================================
// --------------------------------- ADD --------------------------------------
diff --git a/hotspot/src/cpu/x86/vm/x86_32.ad b/hotspot/src/cpu/x86/vm/x86_32.ad
index e8b54389258..709c908c3b4 100644
--- a/hotspot/src/cpu/x86/vm/x86_32.ad
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad
@@ -123,50 +123,94 @@ alloc_class chunk0( ECX, EBX, EBP, EDI, EAX, EDX, ESI, ESP,
// 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//
+// Class for no registers (empty set).
+reg_class no_reg();
+
// Class for all registers
-reg_class any_reg(EAX, EDX, EBP, EDI, ESI, ECX, EBX, ESP);
+reg_class any_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, ECX, EBX, ESP);
+// Class for all registers (excluding EBP)
+reg_class any_reg_no_ebp(EAX, EDX, EDI, ESI, ECX, EBX, ESP);
+// Dynamic register class that selects at runtime between register classes
+// any_reg_with_ebp and any_reg_no_ebp (depending on the value of the flag PreserveFramePointer).
+// Equivalent to: return PreserveFramePointer ? any_reg_no_ebp : any_reg_with_ebp;
+reg_class_dynamic any_reg(any_reg_no_ebp, any_reg_with_ebp, %{ PreserveFramePointer %});
+
// Class for general registers
-reg_class int_reg(EAX, EDX, EBP, EDI, ESI, ECX, EBX);
-// Class for general registers which may be used for implicit null checks on win95
-// Also safe for use by tailjump. We don't want to allocate in rbp,
-reg_class int_reg_no_rbp(EAX, EDX, EDI, ESI, ECX, EBX);
+reg_class int_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, ECX, EBX);
+// Class for general registers (excluding EBP).
+// This register class can be used for implicit null checks on win95.
+// It is also safe for use by tailjumps (we don't want to allocate in ebp).
+// Used also if the PreserveFramePointer flag is true.
+reg_class int_reg_no_ebp(EAX, EDX, EDI, ESI, ECX, EBX);
+// Dynamic register class that selects between int_reg and int_reg_no_ebp.
+reg_class_dynamic int_reg(int_reg_no_ebp, int_reg_with_ebp, %{ PreserveFramePointer %});
+
// Class of "X" registers
reg_class int_x_reg(EBX, ECX, EDX, EAX);
+
// Class of registers that can appear in an address with no offset.
// EBP and ESP require an extra instruction byte for zero offset.
// Used in fast-unlock
reg_class p_reg(EDX, EDI, ESI, EBX);
-// Class for general registers not including ECX
-reg_class ncx_reg(EAX, EDX, EBP, EDI, ESI, EBX);
-// Class for general registers not including EAX
+
+// Class for general registers excluding ECX
+reg_class ncx_reg_with_ebp(EAX, EDX, EBP, EDI, ESI, EBX);
+// Class for general registers excluding ECX (and EBP)
+reg_class ncx_reg_no_ebp(EAX, EDX, EDI, ESI, EBX);
+// Dynamic register class that selects between ncx_reg and ncx_reg_no_ebp.
+reg_class_dynamic ncx_reg(ncx_reg_no_ebp, ncx_reg_with_ebp, %{ PreserveFramePointer %});
+
+// Class for general registers excluding EAX
reg_class nax_reg(EDX, EDI, ESI, ECX, EBX);
-// Class for general registers not including EAX or EBX.
-reg_class nabx_reg(EDX, EDI, ESI, ECX, EBP);
+
+// Class for general registers excluding EAX and EBX.
+reg_class nabx_reg_with_ebp(EDX, EDI, ESI, ECX, EBP);
+// Class for general registers excluding EAX and EBX (and EBP)
+reg_class nabx_reg_no_ebp(EDX, EDI, ESI, ECX);
+// Dynamic register class that selects between nabx_reg and nabx_reg_no_ebp.
+reg_class_dynamic nabx_reg(nabx_reg_no_ebp, nabx_reg_with_ebp, %{ PreserveFramePointer %});
+
// Class of EAX (for multiply and divide operations)
reg_class eax_reg(EAX);
+
// Class of EBX (for atomic add)
reg_class ebx_reg(EBX);
+
// Class of ECX (for shift and JCXZ operations and cmpLTMask)
reg_class ecx_reg(ECX);
+
// Class of EDX (for multiply and divide operations)
reg_class edx_reg(EDX);
+
// Class of EDI (for synchronization)
reg_class edi_reg(EDI);
+
// Class of ESI (for synchronization)
reg_class esi_reg(ESI);
-// Singleton class for interpreter's stack pointer
-reg_class ebp_reg(EBP);
+
// Singleton class for stack pointer
reg_class sp_reg(ESP);
+
// Singleton class for instruction pointer
// reg_class ip_reg(EIP);
+
// Class of integer register pairs
-reg_class long_reg( EAX,EDX, ECX,EBX, EBP,EDI );
+reg_class long_reg_with_ebp( EAX,EDX, ECX,EBX, EBP,EDI );
+// Class of integer register pairs (excluding EBP and EDI).
+reg_class long_reg_no_ebp( EAX,EDX, ECX,EBX );
+// Dynamic register class that selects between long_reg and long_reg_no_ebp.
+reg_class_dynamic long_reg(long_reg_no_ebp, long_reg_with_ebp, %{ PreserveFramePointer %});
+
// Class of integer register pairs that aligns with calling convention
reg_class eadx_reg( EAX,EDX );
reg_class ebcx_reg( ECX,EBX );
+
// Not AX or DX, used in divides
-reg_class nadx_reg( EBX,ECX,ESI,EDI,EBP );
+reg_class nadx_reg_with_ebp(EBX, ECX, ESI, EDI, EBP);
+// Not AX or DX (nor EBP), used in divides
+reg_class nadx_reg_no_ebp(EBX, ECX, ESI, EDI);
+// Dynamic register class that selects between nadx_reg and nadx_reg_no_ebp.
+reg_class_dynamic nadx_reg(nadx_reg_no_ebp, nadx_reg_with_ebp, %{ PreserveFramePointer %});
// Floating point registers. Notice FPR0 is not a choice.
// FPR0 is not ever allocated; we use clever encodings to fake
@@ -240,18 +284,11 @@ static int pre_call_resets_size() {
return size;
}
-static int preserve_SP_size() {
- return 2; // op, rm(reg/reg)
-}
-
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
- int offset = 5 + pre_call_resets_size(); // 5 bytes from start of call to where return address points
- if (_method_handle_invoke)
- offset += preserve_SP_size();
- return offset;
+ return 5 + pre_call_resets_size(); // 5 bytes from start of call to where return address points
}
int MachCallDynamicJavaNode::ret_addr_offset() {
@@ -283,15 +320,6 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
return round_to(current_offset, alignment_required()) - current_offset;
}
-// The address of the call instruction needs to be 4-byte aligned to
-// ensure that it does not span a cache line so that it can be patched.
-int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
- current_offset += pre_call_resets_size(); // skip fldcw, if any
- current_offset += preserve_SP_size(); // skip mov rbp, rsp
- current_offset += 1; // skip call opcode byte
- return round_to(current_offset, alignment_required()) - current_offset;
-}
-
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
@@ -523,6 +551,10 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("# stack bang (%d bytes)", bangsize);
st->print("\n\t");
st->print("PUSH EBP\t# Save EBP");
+ if (PreserveFramePointer) {
+ st->print("\n\t");
+ st->print("MOV EBP, ESP\t# Save the caller's SP into EBP");
+ }
if (framesize) {
st->print("\n\t");
st->print("SUB ESP, #%d\t# Create frame",framesize);
@@ -532,6 +564,10 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("\n\t");
framesize -= wordSize;
st->print("MOV [ESP + #%d], EBP\t# Save EBP",framesize);
+ if (PreserveFramePointer) {
+ st->print("\n\t");
+ st->print("MOV EBP, [ESP + #%d]\t# Save the caller's SP into EBP", (framesize + wordSize));
+ }
}
if (VerifyStackAtCalls) {
@@ -1489,7 +1525,7 @@ RegMask Matcher::modL_proj_mask() {
}
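+// Register used to save the SP across method handle invokes. Not used on x86_32.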
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
- return EBP_REG_mask();
+ return NO_REG_mask();
}
// Returns true if the high 32 bits of the value is known to be zero.
@@ -3735,7 +3771,7 @@ operand eRegP() %{
// On windows95, EBP is not safe to use for implicit null tests.
operand eRegP_no_EBP() %{
- constraint(ALLOC_IN_RC(int_reg_no_rbp));
+ constraint(ALLOC_IN_RC(int_reg_no_ebp));
match(RegP);
match(eAXRegP);
match(eBXRegP);
@@ -3824,13 +3860,6 @@ operand eDIRegP(eRegP reg) %{
interface(REG_INTER);
%}
-operand eBPRegP() %{
- constraint(ALLOC_IN_RC(ebp_reg));
- match(RegP);
- format %{ "EBP" %}
- interface(REG_INTER);
-%}
-
operand eRegL() %{
constraint(ALLOC_IN_RC(long_reg));
match(RegL);
@@ -12615,7 +12644,6 @@ instruct cmovFF_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regF dst,
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava);
- predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(300);
@@ -12629,29 +12657,6 @@ instruct CallStaticJavaDirect(method meth) %{
ins_alignment(4);
%}
-// Call Java Static Instruction (method handle version)
-// Note: If this code changes, the corresponding ret_addr_offset() and
-// compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
- match(CallStaticJava);
- predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
- effect(USE meth);
- // EBP is saved by all callees (for interpreter stack correction).
- // We use it here for a similar purpose, in {preserve,restore}_SP.
-
- ins_cost(300);
- format %{ "CALL,static/MethodHandle " %}
- opcode(0xE8); /* E8 cd */
- ins_encode( pre_call_resets,
- preserve_SP,
- Java_Static_Call( meth ),
- restore_SP,
- call_epilog,
- post_call_FPU );
- ins_pipe( pipe_slow );
- ins_alignment(4);
-%}
-
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
diff --git a/hotspot/src/cpu/x86/vm/x86_64.ad b/hotspot/src/cpu/x86/vm/x86_64.ad
index 20569256102..9abe58f5371 100644
--- a/hotspot/src/cpu/x86/vm/x86_64.ad
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad
@@ -166,42 +166,67 @@ alloc_class chunk0(R10, R10_H,
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//
-// Class for all pointer registers (including RSP)
-reg_class any_reg(RAX, RAX_H,
- RDX, RDX_H,
- RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- RBX, RBX_H,
- RSP, RSP_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R12, R12_H,
- R13, R13_H,
- R14, R14_H,
- R15, R15_H);
+// Empty register class.
+reg_class no_reg();
-// Class for all pointer registers except RSP
-reg_class ptr_reg(RAX, RAX_H,
- RDX, RDX_H,
- RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- RBX, RBX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
+// Class for all pointer registers (including RSP and RBP)
+reg_class any_reg_with_rbp(RAX, RAX_H,
+ RDX, RDX_H,
+ RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ RSP, RSP_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R12, R12_H,
+ R13, R13_H,
+ R14, R14_H,
+ R15, R15_H);
-// Class for all pointer registers except RAX and RSP
-reg_class ptr_no_rax_reg(RDX, RDX_H,
- RBP, RBP_H,
+// Class for all pointer registers (including RSP, but excluding RBP)
+reg_class any_reg_no_rbp(RAX, RAX_H,
+ RDX, RDX_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ RSP, RSP_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R12, R12_H,
+ R13, R13_H,
+ R14, R14_H,
+ R15, R15_H);
+
+// Dynamic register class that selects at runtime between register classes
+// any_reg_no_rbp and any_reg_with_rbp (depending on the value of the flag PreserveFramePointer).
+// Equivalent to: return PreserveFramePointer ? any_reg_no_rbp : any_reg_with_rbp;
+reg_class_dynamic any_reg(any_reg_no_rbp, any_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all pointer registers (excluding RSP)
+reg_class ptr_reg_with_rbp(RAX, RAX_H,
+ RDX, RDX_H,
+ RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all pointer registers (excluding RSP and RBP)
+reg_class ptr_reg_no_rbp(RAX, RAX_H,
+ RDX, RDX_H,
RDI, RDI_H,
RSI, RSI_H,
RCX, RCX_H,
@@ -213,31 +238,66 @@ reg_class ptr_no_rax_reg(RDX, RDX_H,
R13, R13_H,
R14, R14_H);
-reg_class ptr_no_rbp_reg(RDX, RDX_H,
- RAX, RAX_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- RBX, RBX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
+// Dynamic register class that selects between ptr_reg_no_rbp and ptr_reg_with_rbp.
+reg_class_dynamic ptr_reg(ptr_reg_no_rbp, ptr_reg_with_rbp, %{ PreserveFramePointer %});
-// Class for all pointer registers except RAX, RBX and RSP
-reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
- RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
+// Class for all pointer registers (excluding RAX and RSP)
+reg_class ptr_no_rax_reg_with_rbp(RDX, RDX_H,
+ RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all pointer registers (excluding RAX, RSP, and RBP)
+reg_class ptr_no_rax_reg_no_rbp(RDX, RDX_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Dynamic register class that selects between ptr_no_rax_reg_no_rbp and ptr_no_rax_reg_with_rbp.
+reg_class_dynamic ptr_no_rax_reg(ptr_no_rax_reg_no_rbp, ptr_no_rax_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all pointer registers (excluding RAX, RBX, and RSP)
+reg_class ptr_no_rax_rbx_reg_with_rbp(RDX, RDX_H,
+ RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all pointer registers (excluding RAX, RBX, RSP, and RBP)
+reg_class ptr_no_rax_rbx_reg_no_rbp(RDX, RDX_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Dynamic register class that selects between ptr_no_rax_rbx_reg_no_rbp and ptr_no_rax_rbx_reg_with_rbp.
+reg_class_dynamic ptr_no_rax_rbx_reg(ptr_no_rax_rbx_reg_no_rbp, ptr_no_rax_rbx_reg_with_rbp, %{ PreserveFramePointer %});
// Singleton class for RAX pointer register
reg_class ptr_rax_reg(RAX, RAX_H);
@@ -251,59 +311,29 @@ reg_class ptr_rsi_reg(RSI, RSI_H);
// Singleton class for RDI pointer register
reg_class ptr_rdi_reg(RDI, RDI_H);
-// Singleton class for RBP pointer register
-reg_class ptr_rbp_reg(RBP, RBP_H);
-
// Singleton class for stack pointer
reg_class ptr_rsp_reg(RSP, RSP_H);
// Singleton class for TLS pointer
reg_class ptr_r15_reg(R15, R15_H);
-// Class for all long registers (except RSP)
-reg_class long_reg(RAX, RAX_H,
- RDX, RDX_H,
- RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- RBX, RBX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
+// Class for all long registers (excluding RSP)
+reg_class long_reg_with_rbp(RAX, RAX_H,
+ RDX, RDX_H,
+ RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
-// Class for all long registers except RAX, RDX (and RSP)
-reg_class long_no_rax_rdx_reg(RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RCX, RCX_H,
- RBX, RBX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
-
-// Class for all long registers except RCX (and RSP)
-reg_class long_no_rcx_reg(RBP, RBP_H,
- RDI, RDI_H,
- RSI, RSI_H,
- RAX, RAX_H,
- RDX, RDX_H,
- RBX, RBX_H,
- R8, R8_H,
- R9, R9_H,
- R10, R10_H,
- R11, R11_H,
- R13, R13_H,
- R14, R14_H);
-
-// Class for all long registers except RAX (and RSP)
-reg_class long_no_rax_reg(RBP, RBP_H,
+// Class for all long registers (excluding RSP and RBP)
+reg_class long_reg_no_rbp(RAX, RAX_H,
RDX, RDX_H,
RDI, RDI_H,
RSI, RSI_H,
@@ -316,6 +346,67 @@ reg_class long_no_rax_reg(RBP, RBP_H,
R13, R13_H,
R14, R14_H);
+// Dynamic register class that selects between long_reg_no_rbp and long_reg_with_rbp.
+reg_class_dynamic long_reg(long_reg_no_rbp, long_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all long registers (excluding RAX, RDX and RSP)
+reg_class long_no_rax_rdx_reg_with_rbp(RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all long registers (excluding RAX, RDX, RSP, and RBP)
+reg_class long_no_rax_rdx_reg_no_rbp(RDI, RDI_H,
+ RSI, RSI_H,
+ RCX, RCX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Dynamic register class that selects between long_no_rax_rdx_reg_no_rbp and long_no_rax_rdx_reg_with_rbp.
+reg_class_dynamic long_no_rax_rdx_reg(long_no_rax_rdx_reg_no_rbp, long_no_rax_rdx_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all long registers (excluding RCX and RSP)
+reg_class long_no_rcx_reg_with_rbp(RBP, RBP_H,
+ RDI, RDI_H,
+ RSI, RSI_H,
+ RAX, RAX_H,
+ RDX, RDX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Class for all long registers (excluding RCX, RSP, and RBP)
+reg_class long_no_rcx_reg_no_rbp(RDI, RDI_H,
+ RSI, RSI_H,
+ RAX, RAX_H,
+ RDX, RDX_H,
+ RBX, RBX_H,
+ R8, R8_H,
+ R9, R9_H,
+ R10, R10_H,
+ R11, R11_H,
+ R13, R13_H,
+ R14, R14_H);
+
+// Dynamic register class that selects between long_no_rcx_reg_no_rbp and long_no_rcx_reg_with_rbp.
+reg_class_dynamic long_no_rcx_reg(long_no_rcx_reg_no_rbp, long_no_rcx_reg_with_rbp, %{ PreserveFramePointer %});
+
// Singleton class for RAX long register
reg_class long_rax_reg(RAX, RAX_H);
@@ -325,27 +416,27 @@ reg_class long_rcx_reg(RCX, RCX_H);
// Singleton class for RDX long register
reg_class long_rdx_reg(RDX, RDX_H);
-// Class for all int registers (except RSP)
-reg_class int_reg(RAX,
- RDX,
- RBP,
- RDI,
- RSI,
- RCX,
- RBX,
- R8,
- R9,
- R10,
- R11,
- R13,
- R14);
+// Class for all int registers (excluding RSP)
+reg_class int_reg_with_rbp(RAX,
+ RDX,
+ RBP,
+ RDI,
+ RSI,
+ RCX,
+ RBX,
+ R8,
+ R9,
+ R10,
+ R11,
+ R13,
+ R14);
-// Class for all int registers except RCX (and RSP)
-reg_class int_no_rcx_reg(RAX,
+// Class for all int registers (excluding RSP and RBP)
+reg_class int_reg_no_rbp(RAX,
RDX,
- RBP,
RDI,
RSI,
+ RCX,
RBX,
R8,
R9,
@@ -354,18 +445,66 @@ reg_class int_no_rcx_reg(RAX,
R13,
R14);
-// Class for all int registers except RAX, RDX (and RSP)
-reg_class int_no_rax_rdx_reg(RBP,
- RDI,
- RSI,
- RCX,
- RBX,
- R8,
- R9,
- R10,
- R11,
- R13,
- R14);
+// Dynamic register class that selects between int_reg_no_rbp and int_reg_with_rbp.
+reg_class_dynamic int_reg(int_reg_no_rbp, int_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all int registers (excluding RCX and RSP)
+reg_class int_no_rcx_reg_with_rbp(RAX,
+ RDX,
+ RBP,
+ RDI,
+ RSI,
+ RBX,
+ R8,
+ R9,
+ R10,
+ R11,
+ R13,
+ R14);
+
+// Class for all int registers (excluding RCX, RSP, and RBP)
+reg_class int_no_rcx_reg_no_rbp(RAX,
+ RDX,
+ RDI,
+ RSI,
+ RBX,
+ R8,
+ R9,
+ R10,
+ R11,
+ R13,
+ R14);
+
+// Dynamic register class that selects between int_no_rcx_reg_no_rbp and int_no_rcx_reg_with_rbp.
+reg_class_dynamic int_no_rcx_reg(int_no_rcx_reg_no_rbp, int_no_rcx_reg_with_rbp, %{ PreserveFramePointer %});
+
+// Class for all int registers (excluding RAX, RDX, and RSP)
+reg_class int_no_rax_rdx_reg_with_rbp(RBP,
+ RDI,
+ RSI,
+ RCX,
+ RBX,
+ R8,
+ R9,
+ R10,
+ R11,
+ R13,
+ R14);
+
+// Class for all int registers (excluding RAX, RDX, RSP, and RBP)
+reg_class int_no_rax_rdx_reg_no_rbp(RDI,
+ RSI,
+ RCX,
+ RBX,
+ R8,
+ R9,
+ R10,
+ R11,
+ R13,
+ R14);
+
+// Dynamic register class that selects between int_no_rax_rdx_reg_no_rbp and int_no_rax_rdx_reg_with_rbp.
+reg_class_dynamic int_no_rax_rdx_reg(int_no_rax_rdx_reg_no_rbp, int_no_rax_rdx_reg_with_rbp, %{ PreserveFramePointer %});
// Singleton class for RAX int register
reg_class int_rax_reg(RAX);
@@ -396,9 +535,6 @@ source %{
#define __ _masm.
-static int preserve_SP_size() {
- return 3; // rex.w, op, rm(reg/reg)
-}
static int clear_avx_size() {
return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
}
@@ -409,9 +545,7 @@ static int clear_avx_size() {
int MachCallStaticJavaNode::ret_addr_offset()
{
int offset = 5; // 5 bytes from start of call to where return address points
- offset += clear_avx_size();
- if (_method_handle_invoke)
- offset += preserve_SP_size();
+ offset += clear_avx_size();
return offset;
}
@@ -448,16 +582,6 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const
return round_to(current_offset, alignment_required()) - current_offset;
}
-// The address of the call instruction needs to be 4-byte aligned to
-// ensure that it does not span a cache line so that it can be patched.
-int CallStaticJavaHandleNode::compute_padding(int current_offset) const
-{
- current_offset += preserve_SP_size(); // skip mov rbp, rsp
- current_offset += clear_avx_size(); // skip vzeroupper
- current_offset += 1; // skip call opcode byte
- return round_to(current_offset, alignment_required()) - current_offset;
-}
-
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
@@ -724,6 +848,10 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("# stack bang (%d bytes)", bangsize);
st->print("\n\t");
st->print("pushq rbp\t# Save rbp");
+ if (PreserveFramePointer) {
+ st->print("\n\t");
+ st->print("movq rbp, rsp\t# Save the caller's SP into rbp");
+ }
if (framesize) {
st->print("\n\t");
st->print("subq rsp, #%d\t# Create frame",framesize);
@@ -732,7 +860,11 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("subq rsp, #%d\t# Create frame",framesize);
st->print("\n\t");
framesize -= wordSize;
- st->print("movq [rsp + #%d], rbp\t# Save rbp",framesize);
+ st->print("movq [rsp + #%d], rbp\t# Save rbp",framesize);
+ if (PreserveFramePointer) {
+ st->print("\n\t");
+ st->print("movq rbp, [rsp + #%d]\t# Save the caller's SP into rbp", (framesize + wordSize));
+ }
}
if (VerifyStackAtCalls) {
@@ -1598,8 +1730,9 @@ RegMask Matcher::modL_proj_mask() {
return LONG_RDX_REG_mask();
}
+// Register used to save the SP across method handle invokes. Not used on x86_64.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
- return PTR_RBP_REG_mask();
+ return NO_REG_mask();
}
%}
@@ -3202,7 +3335,7 @@ operand no_rax_rdx_RegI()
// Pointer Register
operand any_RegP()
%{
- constraint(ALLOC_IN_RC(any_reg));
+ constraint(ALLOC_IN_RC(any_reg));
match(RegP);
match(rax_RegP);
match(rbx_RegP);
@@ -3224,8 +3357,8 @@ operand rRegP()
match(rbx_RegP);
match(rdi_RegP);
match(rsi_RegP);
- match(rbp_RegP);
- match(r15_RegP); // See Q&A below about r15_RegP.
+ match(rbp_RegP); // See Q&A below about
+ match(r15_RegP); // r15_RegP and rbp_RegP.
format %{ %}
interface(REG_INTER);
@@ -3241,11 +3374,14 @@ operand rRegN() %{
// Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
// Answer: Operand match rules govern the DFA as it processes instruction inputs.
-// It's fine for an instruction input which expects rRegP to match a r15_RegP.
+// It's fine for an instruction input that expects rRegP to match a r15_RegP.
// The output of an instruction is controlled by the allocator, which respects
// register class masks, not match rules. Unless an instruction mentions
// r15_RegP or any_RegP explicitly as its output, r15 will not be considered
// by the allocator as an input.
+// The same logic applies to rbp_RegP being a match for rRegP: if PreserveFramePointer == true,
+// RBP is used as a proper frame pointer and is not included in ptr_reg. As a
+// result, RBP is not included in the output of the instruction either.
operand no_rax_RegP()
%{
@@ -3259,9 +3395,11 @@ operand no_rax_RegP()
interface(REG_INTER);
%}
+// This operand is not allowed to use RBP even if
+// RBP is not used to hold the frame pointer.
operand no_rbp_RegP()
%{
- constraint(ALLOC_IN_RC(ptr_no_rbp_reg));
+ constraint(ALLOC_IN_RC(ptr_reg_no_rbp));
match(RegP);
match(rbx_RegP);
match(rsi_RegP);
@@ -3338,16 +3476,6 @@ operand rdi_RegP()
interface(REG_INTER);
%}
-operand rbp_RegP()
-%{
- constraint(ALLOC_IN_RC(ptr_rbp_reg));
- match(RegP);
- match(rRegP);
-
- format %{ %}
- interface(REG_INTER);
-%}
-
operand r15_RegP()
%{
constraint(ALLOC_IN_RC(ptr_r15_reg));
@@ -3604,6 +3732,23 @@ operand indIndexScaleOffset(any_RegP reg, immL32 off, rRegL lreg, immI2 scale)
%}
%}
+// Indirect Memory Plus Positive Index Register Plus Offset Operand
+operand indPosIndexOffset(any_RegP reg, immL32 off, rRegI idx)
+%{
+ constraint(ALLOC_IN_RC(ptr_reg));
+ predicate(n->in(2)->in(3)->as_Type()->type()->is_long()->_lo >= 0);
+ match(AddP (AddP reg (ConvI2L idx)) off);
+
+ op_cost(10);
+ format %{"[$reg + $off + $idx]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index($idx);
+ scale(0x0);
+ disp($off);
+ %}
+%}
+
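+// For example, a pointer access of the form "base + con + (long)i",
+// where the int index i is known to be non-negative, matches this
+// operand and folds into a single addressing mode [base + con + i]
+// (scale 0), avoiding a separate add.
+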
// Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
%{
@@ -3755,6 +3900,23 @@ operand indIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegL lreg, immI2 scale
%}
%}
+// Indirect Memory Plus Positive Index Register Plus Offset Operand
+operand indPosIndexOffsetNarrow(rRegN reg, immL32 off, rRegI idx)
+%{
+ constraint(ALLOC_IN_RC(ptr_reg));
+ predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->as_Type()->type()->is_long()->_lo >= 0);
+ match(AddP (AddP (DecodeN reg) (ConvI2L idx)) off);
+
+ op_cost(10);
+ format %{"[$reg + $off + $idx]" %}
+ interface(MEMORY_INTER) %{
+ base($reg);
+ index($idx);
+ scale(0x0);
+ disp($off);
+ %}
+%}
+
// Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 scale)
%{
@@ -3946,11 +4108,11 @@ operand cmpOpUCF2() %{
// case of this is memory operands.
opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
- indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
+ indIndexScale, indIndexScaleOffset, indPosIndexOffset, indPosIndexScaleOffset,
indCompressedOopOffset,
indirectNarrow, indOffset8Narrow, indOffset32Narrow,
indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
- indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);
+ indIndexScaleOffsetNarrow, indPosIndexOffsetNarrow, indPosIndexScaleOffsetNarrow);
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.
@@ -4984,6 +5146,17 @@ instruct leaPIdxScaleOff(rRegP dst, indIndexScaleOffset mem)
ins_pipe(ialu_reg_reg_fat);
%}
+instruct leaPPosIdxOff(rRegP dst, indPosIndexOffset mem)
+%{
+ match(Set dst mem);
+
+ ins_cost(110);
+ format %{ "leaq $dst, $mem\t# ptr posidxoff" %}
+ opcode(0x8D);
+ ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
+ ins_pipe(ialu_reg_reg_fat);
+%}
+
instruct leaPPosIdxScaleOff(rRegP dst, indPosIndexScaleOffset mem)
%{
match(Set dst mem);
@@ -5068,6 +5241,18 @@ instruct leaPIdxScaleOffNarrow(rRegP dst, indIndexScaleOffsetNarrow mem)
ins_pipe(ialu_reg_reg_fat);
%}
+instruct leaPPosIdxOffNarrow(rRegP dst, indPosIndexOffsetNarrow mem)
+%{
+ predicate(Universe::narrow_oop_shift() == 0);
+ match(Set dst mem);
+
+ ins_cost(110);
+ format %{ "leaq $dst, $mem\t# ptr posidxoffnarrow" %}
+ opcode(0x8D);
+ ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
+ ins_pipe(ialu_reg_reg_fat);
+%}
+
instruct leaPPosIdxScaleOffNarrow(rRegP dst, indPosIndexScaleOffsetNarrow mem)
%{
predicate(Universe::narrow_oop_shift() == 0);
@@ -11353,7 +11538,6 @@ instruct safePoint_poll_far(rFlagsReg cr, rRegP poll)
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava);
- predicate(!((CallStaticJavaNode*) n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(300);
@@ -11364,27 +11548,6 @@ instruct CallStaticJavaDirect(method meth) %{
ins_alignment(4);
%}
-// Call Java Static Instruction (method handle version)
-// Note: If this code changes, the corresponding ret_addr_offset() and
-// compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
- match(CallStaticJava);
- predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
- effect(USE meth);
- // RBP is saved by all callees (for interpreter stack correction).
- // We use it here for a similar purpose, in {preserve,restore}_SP.
-
- ins_cost(300);
- format %{ "call,static/MethodHandle " %}
- opcode(0xE8); /* E8 cd */
- ins_encode(clear_avx, preserve_SP,
- Java_Static_Call(meth),
- restore_SP,
- call_epilog);
- ins_pipe(pipe_slow);
- ins_alignment(4);
-%}
-
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
diff --git a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
index 785baf9b272..4875e109538 100644
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -814,9 +814,9 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
}
#endif // INCLUDE_ALL_GCS
- // If G1 is not enabled then attempt to go through the accessor entry point
- // Reference.get is an accessor
- return generate_accessor_entry();
+  // If G1 is not enabled then attempt to go through the normal entry point,
+  // since Reference.get could be instrumented by JVMTI.
+ return generate_normal_entry(false);
}
address InterpreterGenerator::generate_native_entry(bool synchronized) {
diff --git a/hotspot/src/cpu/zero/vm/frame_zero.cpp b/hotspot/src/cpu/zero/vm/frame_zero.cpp
index 84e727dc7ad..fe1ee4f4f95 100644
--- a/hotspot/src/cpu/zero/vm/frame_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/frame_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -213,7 +213,7 @@ void frame::zero_print_on_error(int frame_index,
valuebuf[buflen - 1] = '\0';
// Print the result
- st->print_cr(" " PTR_FORMAT ": %-21s = %s", addr, fieldbuf, valuebuf);
+ st->print_cr(" " PTR_FORMAT ": %-21s = %s", p2i(addr), fieldbuf, valuebuf);
}
}
diff --git a/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp b/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp
index ea1bc0eedc1..a9c651c81f8 100644
--- a/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,6 +27,7 @@
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
+#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
void MethodHandles::invoke_target(Method* method, TRAPS) {
@@ -144,6 +145,7 @@ int MethodHandles::method_handle_entry_linkToVirtual(Method* method, intptr_t UN
oop recv = STACK_OBJECT(-numArgs);
Klass* clazz = recv->klass();
Klass* klass_part = InstanceKlass::cast(clazz);
+ ResourceMark rm(THREAD);
klassVtable* vtable = klass_part->vtable();
Method* vmtarget = vtable->method_at(vmindex);
diff --git a/hotspot/src/cpu/zero/vm/stubGenerator_zero.cpp b/hotspot/src/cpu/zero/vm/stubGenerator_zero.cpp
index be7f72b2341..9d999a8c805 100644
--- a/hotspot/src/cpu/zero/vm/stubGenerator_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/stubGenerator_zero.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2010 Red Hat, Inc.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2010, 2015 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,18 @@
#include "opto/runtime.hpp"
#endif
+// For SafeFetch we need POSIX tls and setjmp
+#include <setjmp.h>
+#include <pthread.h>
+static pthread_key_t g_jmpbuf_key;
+
+// Returns the currently active jump buffer for this thread,
+// if there is any; NULL otherwise. Called from the
+// zero signal handlers.
+extern sigjmp_buf* get_jmp_buf_for_continuation() {
+ return (sigjmp_buf*) pthread_getspecific(g_jmpbuf_key);
+}
+
// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp
@@ -177,17 +189,55 @@ class StubGenerator: public StubCodeGenerator {
}
static int SafeFetch32(int *adr, int errValue) {
+
+ // set up a jump buffer; anchor the pointer to the jump buffer in tls; then
+  // do the pointer access. If the pointer is invalid, we crash; in the signal
+  // handler, we retrieve the pointer to the jmp buffer from tls and jump back.
+ //
+ // Note: the jump buffer itself - which can get pretty large depending on
+ // the architecture - lives on the stack and that is fine, because we will
+ // not rewind the stack: either we crash, in which case signal handler
+ // frame is below us, or we don't crash, in which case it does not matter.
+ sigjmp_buf jb;
+ if (sigsetjmp(jb, 1)) {
+ // we crashed. clean up tls and return default value.
+ pthread_setspecific(g_jmpbuf_key, NULL);
+ return errValue;
+ } else {
+ // preparation phase
+ pthread_setspecific(g_jmpbuf_key, &jb);
+ }
+
int value = errValue;
value = *adr;
+
+ // all went well. clean tls.
+ pthread_setspecific(g_jmpbuf_key, NULL);
+
return value;
}
static intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
+
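+    // Same sigsetjmp/tls protocol as in SafeFetch32 above.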
+ sigjmp_buf jb;
+ if (sigsetjmp(jb, 1)) {
+ // we crashed. clean up tls and return default value.
+ pthread_setspecific(g_jmpbuf_key, NULL);
+ return errValue;
+ } else {
+ // preparation phase
+ pthread_setspecific(g_jmpbuf_key, &jb);
+ }
+
intptr_t value = errValue;
value = *adr;
- return value;
- }
+ // all went well. clean tls.
+ pthread_setspecific(g_jmpbuf_key, NULL);
+
+ return value;
+
+ }
void generate_initial() {
// Generates all stubs and initializes the entry points
@@ -241,6 +291,7 @@ class StubGenerator: public StubCodeGenerator {
generate_arraycopy_stubs();
// Safefetch stubs.
+ pthread_key_create(&g_jmpbuf_key, NULL);
StubRoutines::_safefetch32_entry = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetch32);
StubRoutines::_safefetch32_fault_pc = NULL;
StubRoutines::_safefetch32_continuation_pc = NULL;
diff --git a/hotspot/src/cpu/zero/vm/vm_version_zero.cpp b/hotspot/src/cpu/zero/vm/vm_version_zero.cpp
index 31ee8ba84e1..c19a21d879d 100644
--- a/hotspot/src/cpu/zero/vm/vm_version_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/vm_version_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -30,4 +30,11 @@
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_zero.hpp"
-// This file is intentionally empty
+
+void VM_Version::initialize() {
+ // This machine does not allow unaligned memory accesses
+ if (! FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
+ warning("Unaligned memory access is not available on this CPU");
+ FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
+ }
+}
diff --git a/hotspot/src/cpu/zero/vm/vm_version_zero.hpp b/hotspot/src/cpu/zero/vm/vm_version_zero.hpp
index 78109b933f4..68a29df3a60 100644
--- a/hotspot/src/cpu/zero/vm/vm_version_zero.hpp
+++ b/hotspot/src/cpu/zero/vm/vm_version_zero.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -34,6 +34,7 @@ class VM_Version : public Abstract_VM_Version {
static const char* cpu_features() {
return "";
}
+ static void initialize();
};
#endif // CPU_ZERO_VM_VM_VERSION_ZERO_HPP
diff --git a/hotspot/src/os/aix/vm/attachListener_aix.cpp b/hotspot/src/os/aix/vm/attachListener_aix.cpp
index 93be23f2db2..b6707aa9cb9 100644
--- a/hotspot/src/os/aix/vm/attachListener_aix.cpp
+++ b/hotspot/src/os/aix/vm/attachListener_aix.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -144,6 +144,10 @@ class ArgumentIterator : public StackObj {
}
char* next() {
if (*_pos == '\0') {
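+      // An empty argument is encoded as a lone '\0'; step over it so
+      // that subsequent calls can still see the arguments that follow.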
+ if (_pos < _end) {
+ _pos += 1;
+ }
+
return NULL;
}
char* res = _pos;
@@ -214,6 +218,7 @@ int AixAttachListener::init() {
// bind socket
struct sockaddr_un addr;
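+  // Zero the sockaddr_un up front so sun_path and any padding bytes
+  // are fully initialized before the bind.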
+ memset((void *)&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
strcpy(addr.sun_path, initial_path);
::unlink(initial_path);
diff --git a/hotspot/src/os/aix/vm/globals_aix.hpp b/hotspot/src/os/aix/vm/globals_aix.hpp
index dc20a6aa95b..4f190f2f136 100644
--- a/hotspot/src/os/aix/vm/globals_aix.hpp
+++ b/hotspot/src/os/aix/vm/globals_aix.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,10 @@
//
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
\
+ /* Use 64K pages for virtual memory (shmat). */ \
+ product(bool, Use64KPages, true, \
+ "Use 64K pages if available.") \
+ \
/* If UseLargePages == true allow or deny usage of 16M pages. 16M pages are */ \
/* a scarce resource and there may be situations where we do not want the VM */ \
/* to run with 16M pages. (Will fall back to 64K pages). */ \
@@ -55,7 +59,7 @@ define_pd_global(intx, AttachListenerTimeout, 1000);
// Defines Aix-specific default values. The flags are available on all
// platforms, but they may have different default values on other platforms.
//
-define_pd_global(bool, UseLargePages, true);
+define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
define_pd_global(bool, UseOSErrorReporting, false);
define_pd_global(bool, UseThreadPriorities, true) ;
diff --git a/hotspot/src/os/aix/vm/interfaceSupport_aix.hpp b/hotspot/src/os/aix/vm/interfaceSupport_aix.hpp
index 62e75c7eb72..8f32db7b810 100644
--- a/hotspot/src/os/aix/vm/interfaceSupport_aix.hpp
+++ b/hotspot/src/os/aix/vm/interfaceSupport_aix.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,8 @@
*
*/
-#ifndef OS_LINUX_VM_INTERFACESUPPORT_LINUX_HPP
-#define OS_LINUX_VM_INTERFACESUPPORT_LINUX_HPP
+#ifndef OS_AIX_VM_INTERFACESUPPORT_AIX_HPP
+#define OS_AIX_VM_INTERFACESUPPORT_AIX_HPP
// Contains inlined functions for class InterfaceSupport
@@ -32,4 +32,4 @@ static inline void serialize_memory(JavaThread *thread) {
os::write_memory_serialize_page(thread);
}
-#endif // OS_LINUX_VM_INTERFACESUPPORT_LINUX_HPP
+#endif // OS_AIX_VM_INTERFACESUPPORT_AIX_HPP
diff --git a/hotspot/src/os/aix/vm/osThread_aix.cpp b/hotspot/src/os/aix/vm/osThread_aix.cpp
index ef6c973e3fe..c5566147f99 100644
--- a/hotspot/src/os/aix/vm/osThread_aix.cpp
+++ b/hotspot/src/os/aix/vm/osThread_aix.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@ void OSThread::pd_initialize() {
_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
Monitor::_safepoint_check_never);
- assert(_startThread_lock !=NULL, "check");
+ assert(_startThread_lock != NULL, "check");
}
void OSThread::pd_destroy() {
diff --git a/hotspot/src/os/aix/vm/os_aix.cpp b/hotspot/src/os/aix/vm/os_aix.cpp
index d77a26ffa66..eee444ea1fb 100644
--- a/hotspot/src/os/aix/vm/os_aix.cpp
+++ b/hotspot/src/os/aix/vm/os_aix.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -113,6 +113,10 @@
#define RUSAGE_THREAD (1) /* only the calling thread */
#endif
+// PPC port
+static const uintx Use64KPagesThreshold = 1*M;
+static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;
+
// Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
@@ -168,8 +172,8 @@ typedef stackslot_t* stackptr_t;
return -1; \
}
-// query dimensions of the stack of the calling thread
-static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
+// Query dimensions of the stack of the calling thread.
+static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
// function to check a given stack pointer against given stack limits
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
@@ -220,9 +224,6 @@ int os::Aix::_page_size = -1;
int os::Aix::_on_pase = -1;
int os::Aix::_os_version = -1;
int os::Aix::_stack_page_size = -1;
-size_t os::Aix::_shm_default_page_size = -1;
-int os::Aix::_can_use_64K_pages = -1;
-int os::Aix::_can_use_16M_pages = -1;
int os::Aix::_xpg_sus_mode = -1;
int os::Aix::_extshm = -1;
int os::Aix::_logical_cpus = -1;
@@ -238,7 +239,63 @@ static bool check_signals = true;
static pid_t _initial_pid = 0;
static int SR_signum = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
-static pthread_mutex_t dl_mutex; // Used to protect dlsym() calls.
+
+// This describes the state of multipage support of the underlying
+// OS. Note that this is of no interest to the outsize world and
+// therefore should not be defined in AIX class.
+//
+// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
+// latter two (16M "large" resp. 16G "huge" pages) require special
+// setup and are normally not available.
+//
+// AIX supports multiple page sizes per process, for:
+// - Stack (of the primordial thread, so not relevant for us)
+// - Data - data, bss, heap, for us also pthread stacks
+// - Text - text code
+// - Shared memory
+//
+// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
+// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
+//
+// For shared memory, page size can be set dynamically via
+// shmctl(). Different shared memory regions can have different page
+// sizes.
+//
+// More information can be found at the IBM info center:
+// http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
+//
+static struct {
+ size_t pagesize; // sysconf _SC_PAGESIZE (4K)
+ size_t datapsize; // default data page size (LDR_CNTRL DATAPSIZE)
+ size_t shmpsize; // default shared memory page size (LDR_CNTRL SHMPSIZE)
+ size_t pthr_stack_pagesize; // stack page size of pthread threads
+  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
+ bool can_use_64K_pages; // True if we can alloc 64K pages dynamically with Sys V shm.
+ bool can_use_16M_pages; // True if we can alloc 16M pages dynamically with Sys V shm.
+ int error; // Error describing if something went wrong at multipage init.
+} g_multipage_support = {
+ (size_t) -1,
+ (size_t) -1,
+ (size_t) -1,
+ (size_t) -1,
+ (size_t) -1,
+ false, false,
+ 0
+};
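The probe that fills this struct (query_multipage_support below) first asks the OS which
page sizes it claims to support via vmgetinfo(VMINFO_GETPSIZES). A standalone sketch of
just that enumeration step, AIX-only, with error handling reduced to the minimum:

    #include <sys/vminfo.h>

    // Fills 'out' with up to 'max' supported page sizes; returns the count,
    // or 0 if vmgetinfo() fails or reports something unexpected.
    static int list_supported_page_sizes(psize_t* out, int max) {
      const int n = ::vmgetinfo(out, VMINFO_GETPSIZES, max);
      return (n > 0 && n <= max) ? n : 0;
    }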
+
+// We must not accidentally allocate memory close to the BRK - even if
+// that would work - because then we prevent the BRK segment from
+// growing, which may result in a malloc OOM even though there is
+// enough memory. The problem only arises if we shmat() or mmap() at
+// a specific wish address, e.g. to place the heap in a
+// compressed-oops-friendly way.
+static bool is_close_to_brk(address a) {
+ address a1 = (address) sbrk(0);
+ if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
+ return true;
+ }
+ return false;
+}
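A minimal standalone restatement of this guard, assuming the same 8G window
(MaxExpectedDataSegmentSize = SIZE_4G*2 above); outside the VM the constant is just an
assumption:

    #include <unistd.h>
    #include <stdint.h>

    static const uint64_t kBrkGuard = 8ULL * 1024 * 1024 * 1024;  // assumed 8G window

    // True if a would-be mapping address sits in the zone the data
    // segment (brk) may still want to grow into.
    static bool is_close_to_brk_sketch(const char* a) {
      const char* brk_now = (const char*) ::sbrk(0);  // current program break
      return a >= brk_now && (uint64_t)(a - brk_now) < kBrkGuard;
    }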
julong os::available_memory() {
return Aix::available_memory();
@@ -257,19 +314,6 @@ julong os::physical_memory() {
return Aix::physical_memory();
}
-////////////////////////////////////////////////////////////////////////////////
-// environment support
-
-bool os::getenv(const char* name, char* buf, int len) {
- const char* val = ::getenv(name);
- if (val != NULL && strlen(val) < (size_t)len) {
- strcpy(buf, val);
- return true;
- }
- if (len > 0) buf[0] = 0; // return a null string
- return false;
-}
-
// Return true if user is running as root.
bool os::have_special_privileges() {
@@ -291,7 +335,7 @@ static bool my_disclaim64(char* addr, size_t size) {
}
// Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
- const unsigned int maxDisclaimSize = 0x80000000;
+ const unsigned int maxDisclaimSize = 0x40000000;
const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
@@ -368,138 +412,131 @@ static const char* describe_pagesize(size_t pagesize) {
case SIZE_64K: return "64K";
case SIZE_16M: return "16M";
case SIZE_16G: return "16G";
+ case -1: return "not set";
default:
assert(false, "surprise");
return "??";
}
}
-// Retrieve information about multipage size support. Will initialize
-// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
-// Aix::_can_use_16M_pages.
+// Probe OS for multipage support.
+// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
-void os::Aix::query_multipage_support() {
+static void query_multipage_support() {
- guarantee(_page_size == -1 &&
- _stack_page_size == -1 &&
- _can_use_64K_pages == -1 &&
- _can_use_16M_pages == -1 &&
- g_multipage_error == -1,
+ guarantee(g_multipage_support.pagesize == -1,
"do not call twice");
- _page_size = ::sysconf(_SC_PAGESIZE);
+ g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
// This really would surprise me.
- assert(_page_size == SIZE_4K, "surprise!");
-
+ assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");
// Query default data page size (default page size for C-Heap, pthread stacks and .bss).
- // Default data page size is influenced either by linker options (-bdatapsize)
+ // Default data page size is defined either by linker options (-bdatapsize)
// or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
// default should be 4K.
- size_t data_page_size = SIZE_4K;
{
- void* p = os::malloc(SIZE_16M, mtInternal);
- guarantee(p != NULL, "malloc failed");
- data_page_size = os::Aix::query_pagesize(p);
- os::free(p);
+ void* p = ::malloc(SIZE_16M);
+ g_multipage_support.datapsize = os::Aix::query_pagesize(p);
+ ::free(p);
}
- // query default shm page size (LDR_CNTRL SHMPSIZE)
+ // Query default shm page size (LDR_CNTRL SHMPSIZE).
{
const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
guarantee(shmid != -1, "shmget failed");
void* p = ::shmat(shmid, NULL, 0);
::shmctl(shmid, IPC_RMID, NULL);
guarantee(p != (void*) -1, "shmat failed");
- _shm_default_page_size = os::Aix::query_pagesize(p);
+ g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
::shmdt(p);
}
- // before querying the stack page size, make sure we are not running as primordial
+ // Before querying the stack page size, make sure we are not running as primordial
// thread (because primordial thread's stack may have different page size than
// pthread thread stacks). Running a VM on the primordial thread won't work for a
- // number of reasons so we may just as well guarantee it here
- guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");
+ // number of reasons so we may just as well guarantee it here.
+ guarantee0(!os::Aix::is_primordial_thread());
- // query stack page size
+ // Query pthread stack page size.
{
int dummy = 0;
- _stack_page_size = os::Aix::query_pagesize(&dummy);
- // everything else would surprise me and should be looked into
- guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
- // also, just for completeness: pthread stacks are allocated from C heap, so
- // stack page size should be the same as data page size
- guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
+ g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
}
- // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
- // for system V shm.
- if (Aix::extshm()) {
- if (Verbose) {
- fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
- "Please make sure EXTSHM is OFF for large page support.\n");
- }
- g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
- _can_use_64K_pages = _can_use_16M_pages = 0;
+ // Query default text page size (LDR_CNTRL TEXTPSIZE).
+ /* PPC port: so far unused.
+ {
+ address any_function =
+ (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
+ g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
+ }
+ */
+
+ // Now probe for support of 64K pages and 16M pages.
+
+ // Before OS/400 V6R1, there is no support for pages other than 4K.
+ if (os::Aix::on_pase_V5R4_or_older()) {
+ Unimplemented();
goto query_multipage_support_end;
}
- // now check which page sizes the OS claims it supports, and of those, which actually can be used.
+ // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
{
const int MAX_PAGE_SIZES = 4;
psize_t sizes[MAX_PAGE_SIZES];
const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
if (num_psizes == -1) {
- if (Verbose) {
- fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
- fprintf(stderr, "disabling multipage support.\n");
- }
- g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
- _can_use_64K_pages = _can_use_16M_pages = 0;
+ trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
+ trc("disabling multipage support.\n");
+ g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
goto query_multipage_support_end;
}
guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
- if (Verbose) {
- fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
- for (int i = 0; i < num_psizes; i ++) {
- fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
- }
- fprintf(stderr, " .\n");
+ trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
+ for (int i = 0; i < num_psizes; i ++) {
+ trcVerbose(" %s ", describe_pagesize(sizes[i]));
}
// Can we use 64K, 16M pages?
- _can_use_64K_pages = 0;
- _can_use_16M_pages = 0;
for (int i = 0; i < num_psizes; i ++) {
- if (sizes[i] == SIZE_64K) {
- _can_use_64K_pages = 1;
- } else if (sizes[i] == SIZE_16M) {
- _can_use_16M_pages = 1;
+ const size_t pagesize = sizes[i];
+ if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
+ continue;
}
- }
-
- if (!_can_use_64K_pages) {
- g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
- }
-
- // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
- // there must be an actual 16M page pool, and we must run with enough rights.
- if (_can_use_16M_pages) {
- const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
- guarantee(shmid != -1, "shmget failed");
+ bool can_use = false;
+ trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
+ const int shmid = ::shmget(IPC_PRIVATE, pagesize,
+ IPC_CREAT | S_IRUSR | S_IWUSR);
+ guarantee0(shmid != -1); // Should always work.
+ // Try to set pagesize.
struct shmid_ds shm_buf = { 0 };
- shm_buf.shm_pagesize = SIZE_16M;
- const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
- const int en = errno;
- ::shmctl(shmid, IPC_RMID, NULL);
- if (!can_set_pagesize) {
- if (Verbose) {
- fprintf(stderr, "Failed to allocate even one misely 16M page. shmctl failed with %d (%s).\n"
- "Will deactivate 16M support.\n", en, strerror(en));
+ shm_buf.shm_pagesize = pagesize;
+ if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
+ const int en = errno;
+ ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
+ // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
+ // PPC port MiscUtils::describe_errno(en));
+ } else {
+      // Attach and double-check pagesize.
+ void* p = ::shmat(shmid, NULL, 0);
+ ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
+ guarantee0(p != (void*) -1); // Should always work.
+ const size_t real_pagesize = os::Aix::query_pagesize(p);
+ if (real_pagesize != pagesize) {
+ trcVerbose("real page size (0x%llX) differs.", real_pagesize);
+ } else {
+ can_use = true;
}
- _can_use_16M_pages = 0;
+ ::shmdt(p);
+ }
+ trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
+ if (pagesize == SIZE_64K) {
+ g_multipage_support.can_use_64K_pages = can_use;
+ } else if (pagesize == SIZE_16M) {
+ g_multipage_support.can_use_16M_pages = can_use;
}
}
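The probing loop above, reduced to a standalone sketch. This keeps only the
shmctl(SHM_PAGESIZE) step; the patch additionally attaches the segment and double-checks
the real page size via os::Aix::query_pagesize(), which is omitted here. AIX-only:
SHM_PAGESIZE and shmid_ds::shm_pagesize exist only there.

    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <sys/stat.h>
    #include <stddef.h>
    #include <string.h>

    // Returns true if a System V shm segment accepts the given page size.
    static bool probe_dynamic_pagesize(size_t pagesize) {
      const int shmid = ::shmget(IPC_PRIVATE, pagesize, IPC_CREAT | S_IRUSR | S_IWUSR);
      if (shmid == -1) return false;
      struct shmid_ds buf;
      memset(&buf, 0, sizeof(buf));
      buf.shm_pagesize = pagesize;            // ask for the page size dynamically
      const bool ok = (::shmctl(shmid, SHM_PAGESIZE, &buf) == 0);
      ::shmctl(shmid, IPC_RMID, NULL);        // never leak the global segment
      return ok;
    }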
@@ -507,23 +544,29 @@ void os::Aix::query_multipage_support() {
query_multipage_support_end:
- guarantee(_page_size != -1 &&
- _stack_page_size != -1 &&
- _can_use_64K_pages != -1 &&
- _can_use_16M_pages != -1, "Page sizes not properly initialized");
+ trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
+ describe_pagesize(g_multipage_support.pagesize));
+ trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
+ describe_pagesize(g_multipage_support.datapsize));
+ trcVerbose("Text page size: %s\n",
+ describe_pagesize(g_multipage_support.textpsize));
+ trcVerbose("Thread stack page size (pthread): %s\n",
+ describe_pagesize(g_multipage_support.pthr_stack_pagesize));
+ trcVerbose("Default shared memory page size: %s\n",
+ describe_pagesize(g_multipage_support.shmpsize));
+ trcVerbose("Can use 64K pages dynamically with shared meory: %s\n",
+ (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
+ trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
+ (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
+ trcVerbose("Multipage error details: %d\n",
+ g_multipage_support.error);
- if (_can_use_64K_pages) {
- g_multipage_error = 0;
- }
-
- if (Verbose) {
- fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
- fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
- fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
- fprintf(stderr, "Can use 64K pages dynamically with shared meory: %s\n", (_can_use_64K_pages ? "yes" :"no"));
- fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" :"no"));
- fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
- }
+ // sanity checks
+ assert0(g_multipage_support.pagesize == SIZE_4K);
+ assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
+  // PPC port: so far unused. assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
+ assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
+ assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
} // end os::Aix::query_multipage_support()
@@ -1225,6 +1268,10 @@ void os::shutdown() {
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
+ abort(dump_core, NULL, NULL);
+}
+
+void os::abort(bool dump_core, void* siginfo, void* context) {
os::shutdown();
if (dump_core) {
#ifndef PRODUCT
@@ -1492,13 +1539,8 @@ void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
return NULL;
}
-// Glibc-2.0 libdl is not MT safe. If you are building with any glibc,
-// chances are you might want to run the generated bits against glibc-2.0
-// libdl.so, so always use locking for any version of glibc.
void* os::dll_lookup(void* handle, const char* name) {
- pthread_mutex_lock(&dl_mutex);
void* res = dlsym(handle, name);
- pthread_mutex_unlock(&dl_mutex);
return res;
}
@@ -1572,9 +1614,12 @@ void os::print_memory_info(outputStream* st) {
st->print_cr(" default page size: %s", describe_pagesize(os::vm_page_size()));
st->print_cr(" default stack page size: %s", describe_pagesize(os::vm_page_size()));
- st->print_cr(" default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
- st->print_cr(" can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" :"no"));
- st->print_cr(" can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" :"no"));
+ st->print_cr(" Default shared memory page size: %s",
+ describe_pagesize(g_multipage_support.shmpsize));
+  st->print_cr("  Can use 64K pages dynamically with shared memory: %s",
+ (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
+ st->print_cr(" Can use 16M pages dynamically with shared memory: %s",
+ (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
if (g_multipage_error != 0) {
st->print_cr(" multipage error: %d", g_multipage_error);
}
@@ -1585,6 +1630,9 @@ void os::print_memory_info(outputStream* st) {
const char* const extshm = ::getenv("EXTSHM");
st->print_cr(" EXTSHM=%s.", extshm ? extshm : "");
+  if (extshm != NULL && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
+ st->print_cr(" *** Unsupported! Please remove EXTSHM from your environment! ***");
+ }
// Call os::Aix::get_meminfo() to retrieve memory statistics.
os::Aix::meminfo_t mi;
@@ -1827,315 +1875,386 @@ int os::signal_wait() {
////////////////////////////////////////////////////////////////////////////////
// Virtual Memory
-// AddrRange describes an immutable address range
-//
-// This is a helper class for the 'shared memory bookkeeping' below.
-class AddrRange {
- friend class ShmBkBlock;
+// We need to keep small, simple bookkeeping for os::reserve_memory and friends.
- char* _start;
- size_t _size;
+#define VMEM_MAPPED 1
+#define VMEM_SHMATED 2
-public:
+struct vmembk_t {
+ int type; // 1 - mmap, 2 - shmat
+ char* addr;
+ size_t size; // Real size, may be larger than usersize.
+ size_t pagesize; // page size of area
+ vmembk_t* next;
- AddrRange(char* start, size_t size)
- : _start(start), _size(size)
- {}
-
- AddrRange(const AddrRange& r)
- : _start(r.start()), _size(r.size())
- {}
-
- char* start() const { return _start; }
- size_t size() const { return _size; }
- char* end() const { return _start + _size; }
- bool is_empty() const { return _size == 0 ? true : false; }
-
- static AddrRange empty_range() { return AddrRange(NULL, 0); }
-
- bool contains(const char* p) const {
- return start() <= p && end() > p;
+ bool contains_addr(char* p) const {
+ return p >= addr && p < (addr + size);
}
- bool contains(const AddrRange& range) const {
- return start() <= range.start() && end() >= range.end();
+ bool contains_range(char* p, size_t s) const {
+ return contains_addr(p) && contains_addr(p + s - 1);
}
- bool intersects(const AddrRange& range) const {
- return (range.start() <= start() && range.end() > start()) ||
- (range.start() < end() && range.end() >= end()) ||
- contains(range);
+ void print_on(outputStream* os) const {
+ os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
+ " bytes, %d %s pages), %s",
+ addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
+ (type == VMEM_SHMATED ? "shmat" : "mmap")
+ );
}
- bool is_same_range(const AddrRange& range) const {
- return start() == range.start() && size() == range.size();
- }
-
- // return the closest inside range consisting of whole pages
- AddrRange find_closest_aligned_range(size_t pagesize) const {
- if (pagesize == 0 || is_empty()) {
- return empty_range();
+  // Check that range is a sub range of memory block (or equal to memory block);
+  // also check that range is fully page aligned to the page size of the block.
+ void assert_is_valid_subrange(char* p, size_t s) const {
+ if (!contains_range(p, s)) {
+ fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
+ "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
+ p, p + s - 1, addr, addr + size - 1);
+ guarantee0(false);
}
- char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
- char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
- if (from > to) {
- return empty_range();
+ if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
+ fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
+ " aligned to pagesize (%s)\n", p, p + s);
+ guarantee0(false);
}
- return AddrRange(from, to - from);
}
};
-////////////////////////////////////////////////////////////////////////////
-// shared memory bookkeeping
-//
-// the os::reserve_memory() API and friends hand out different kind of memory, depending
-// on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
-//
-// But these memory types have to be treated differently. For example, to uncommit
-// mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
-// disclaim64() is needed.
-//
-// Therefore we need to keep track of the allocated memory segments and their
-// properties.
-
-// ShmBkBlock: base class for all blocks in the shared memory bookkeeping
-class ShmBkBlock : public CHeapObj {
-
- ShmBkBlock* _next;
-
-protected:
-
- AddrRange _range;
- const size_t _pagesize;
- const bool _pinned;
-
-public:
-
- ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
- : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
-
- assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
- assert(!_range.is_empty(), "invalid range");
- }
-
- virtual void print(outputStream* st) const {
- st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
- _range.start(), _range.end(), _range.size(),
- _range.size() / _pagesize, describe_pagesize(_pagesize),
- _pinned ? "pinned" : "");
- }
-
- enum Type { MMAP, SHMAT };
- virtual Type getType() = 0;
-
- char* base() const { return _range.start(); }
- size_t size() const { return _range.size(); }
-
- void setAddrRange(AddrRange range) {
- _range = range;
- }
-
- bool containsAddress(const char* p) const {
- return _range.contains(p);
- }
-
- bool containsRange(const char* p, size_t size) const {
- return _range.contains(AddrRange((char*)p, size));
- }
-
- bool isSameRange(const char* p, size_t size) const {
- return _range.is_same_range(AddrRange((char*)p, size));
- }
-
- virtual bool disclaim(char* p, size_t size) = 0;
- virtual bool release() = 0;
-
- // blocks live in a list.
- ShmBkBlock* next() const { return _next; }
- void set_next(ShmBkBlock* blk) { _next = blk; }
-
-}; // end: ShmBkBlock
-
-
-// ShmBkMappedBlock: describes an block allocated with mmap()
-class ShmBkMappedBlock : public ShmBkBlock {
-public:
-
- ShmBkMappedBlock(AddrRange range)
- : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
-
- void print(outputStream* st) const {
- ShmBkBlock::print(st);
- st->print_cr(" - mmap'ed");
- }
-
- Type getType() {
- return MMAP;
- }
-
- bool disclaim(char* p, size_t size) {
-
- AddrRange r(p, size);
-
- guarantee(_range.contains(r), "invalid disclaim");
-
- // only disclaim whole ranges.
- const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
- if (r2.is_empty()) {
- return true;
- }
-
- const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
-
- if (rc != 0) {
- warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
- }
-
- return rc == 0 ? true : false;
- }
-
- bool release() {
- // mmap'ed blocks are released using munmap
- if (::munmap(_range.start(), _range.size()) != 0) {
- warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
- return false;
- }
- return true;
- }
-}; // end: ShmBkMappedBlock
-
-// ShmBkShmatedBlock: describes an block allocated with shmget/shmat()
-class ShmBkShmatedBlock : public ShmBkBlock {
-public:
-
- ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
- : ShmBkBlock(range, pagesize, pinned) {}
-
- void print(outputStream* st) const {
- ShmBkBlock::print(st);
- st->print_cr(" - shmat'ed");
- }
-
- Type getType() {
- return SHMAT;
- }
-
- bool disclaim(char* p, size_t size) {
-
- AddrRange r(p, size);
-
- if (_pinned) {
- return true;
- }
-
- // shmat'ed blocks are disclaimed using disclaim64
- guarantee(_range.contains(r), "invalid disclaim");
-
- // only disclaim whole ranges.
- const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
- if (r2.is_empty()) {
- return true;
- }
-
- const bool rc = my_disclaim64(r2.start(), r2.size());
-
- if (Verbose && !rc) {
- warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
- }
-
- return rc;
- }
-
- bool release() {
- bool rc = false;
- if (::shmdt(_range.start()) != 0) {
- warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
- } else {
- rc = true;
- }
- return rc;
- }
-
-}; // end: ShmBkShmatedBlock
-
-static ShmBkBlock* g_shmbk_list = NULL;
-static volatile jint g_shmbk_table_lock = 0;
-
-// keep some usage statistics
static struct {
- int nodes; // number of nodes in list
- size_t bytes; // reserved - not committed - bytes.
- int reserves; // how often reserve was called
- int lookups; // how often a lookup was made
-} g_shmbk_stats = { 0, 0, 0, 0 };
+ vmembk_t* first;
+ MiscUtils::CritSect cs;
+} vmem;
-// add information about a shared memory segment to the bookkeeping
-static void shmbk_register(ShmBkBlock* p_block) {
- guarantee(p_block, "logic error");
- p_block->set_next(g_shmbk_list);
- g_shmbk_list = p_block;
- g_shmbk_stats.reserves ++;
- g_shmbk_stats.bytes += p_block->size();
- g_shmbk_stats.nodes ++;
-}
-
-// remove information about a shared memory segment by its starting address
-static void shmbk_unregister(ShmBkBlock* p_block) {
- ShmBkBlock* p = g_shmbk_list;
- ShmBkBlock* prev = NULL;
- while (p) {
- if (p == p_block) {
- if (prev) {
- prev->set_next(p->next());
- } else {
- g_shmbk_list = p->next();
- }
- g_shmbk_stats.nodes --;
- g_shmbk_stats.bytes -= p->size();
- return;
- }
- prev = p;
- p = p->next();
+static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
+ vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
+ assert0(p);
+ if (p) {
+ MiscUtils::AutoCritSect lck(&vmem.cs);
+ p->addr = addr; p->size = size;
+ p->pagesize = pagesize;
+ p->type = type;
+ p->next = vmem.first;
+ vmem.first = p;
}
- assert(false, "should not happen");
}
-// given a pointer, return shared memory bookkeeping record for the segment it points into
-// using the returned block info must happen under lock protection
-static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
- g_shmbk_stats.lookups ++;
- ShmBkBlock* p = g_shmbk_list;
- while (p) {
- if (p->containsAddress(addr)) {
+static vmembk_t* vmembk_find(char* addr) {
+ MiscUtils::AutoCritSect lck(&vmem.cs);
+ for (vmembk_t* p = vmem.first; p; p = p->next) {
+ if (p->addr <= addr && (p->addr + p->size) > addr) {
return p;
}
- p = p->next();
}
return NULL;
}
-// dump all information about all memory segments allocated with os::reserve_memory()
-void shmbk_dump_info() {
- tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
- "total reserves: %d total lookups: %d)",
- g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
- const ShmBkBlock* p = g_shmbk_list;
- int i = 0;
- while (p) {
- p->print(tty);
- p = p->next();
- i ++;
+static void vmembk_remove(vmembk_t* p0) {
+ MiscUtils::AutoCritSect lck(&vmem.cs);
+ assert0(p0);
+ assert0(vmem.first); // List should not be empty.
+ for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
+ if (*pp == p0) {
+ *pp = p0->next;
+ ::free(p0);
+ return;
+ }
+ }
+ assert0(false); // Not found?
+}
+
+static void vmembk_print_on(outputStream* os) {
+ MiscUtils::AutoCritSect lck(&vmem.cs);
+ for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
+ vmi->print_on(os);
+ os->cr();
}
}
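The three helpers above implement an intrusive, mutex-guarded singly linked list. A
minimal sketch of the same pattern, with a plain pthread mutex standing in for
MiscUtils::CritSect (all names here are illustrative, not the patch's):

    #include <pthread.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct node_t { char* addr; size_t size; node_t* next; };
    static node_t* g_first = NULL;
    static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

    static void bk_add(char* addr, size_t size) {
      node_t* n = (node_t*) ::malloc(sizeof(node_t));
      if (n == NULL) return;
      n->addr = addr; n->size = size;
      pthread_mutex_lock(&g_lock);
      n->next = g_first; g_first = n;           // push front, like vmembk_add
      pthread_mutex_unlock(&g_lock);
    }

    static void bk_remove(node_t* victim) {
      pthread_mutex_lock(&g_lock);
      // Pointer-to-pointer walk, the same unlink trick vmembk_remove uses.
      for (node_t** pp = &g_first; *pp != NULL; pp = &((*pp)->next)) {
        if (*pp == victim) { *pp = victim->next; ::free(victim); break; }
      }
      pthread_mutex_unlock(&g_lock);
    }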
-#define LOCK_SHMBK { ThreadCritical _LOCK_SHMBK;
-#define UNLOCK_SHMBK }
+// Reserve and attach a section of System V memory.
+// If <requested_addr> is not NULL, the function will attempt to attach the memory at the
+// given address. Failing that, it will attach the memory anywhere.
+// If <requested_addr> is NULL, the function will attach the memory anywhere.
+//
+// <alignment_hint> is being ignored by this function. It is very probable however that the
+// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
+// Should this not be enough, we can put more work into it.
+static char* reserve_shmated_memory (
+ size_t bytes,
+ char* requested_addr,
+ size_t alignment_hint) {
+
+ trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
+ PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
+ bytes, requested_addr, alignment_hint);
+
+ // Either give me wish address or wish alignment but not both.
+ assert0(!(requested_addr != NULL && alignment_hint != 0));
+
+ // We must prevent anyone from attaching too close to the
+ // BRK because that may cause malloc OOM.
+ if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
+ trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
+ "Will attach anywhere.", requested_addr);
+ // Act like the OS refused to attach there.
+ requested_addr = NULL;
+ }
+
+ // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
+  // really supported (max size 4GB), so reserve_mmaped_memory should have been used instead.
+ if (os::Aix::on_pase_V5R4_or_older()) {
+ ShouldNotReachHere();
+ }
+
+ // Align size of shm up to 64K to avoid errors if we later try to change the page size.
+ const size_t size = align_size_up(bytes, SIZE_64K);
+
+ // Reserve the shared segment.
+ int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
+ if (shmid == -1) {
+ trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
+ return NULL;
+ }
+
+ // Important note:
+ // It is very important that we, upon leaving this function, do not leave a shm segment alive.
+  // Right after attaching it, we must remove it from the system. System V shm segments are global and
+ // survive the process.
+ // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
+
+ struct shmid_ds shmbuf;
+ memset(&shmbuf, 0, sizeof(shmbuf));
+ shmbuf.shm_pagesize = SIZE_64K;
+ if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
+ trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
+ size / SIZE_64K, errno);
+ // I want to know if this ever happens.
+ assert(false, "failed to set page size for shmat");
+ }
+
+ // Now attach the shared segment.
+ // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
+ // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
+ // were not a segment boundary.
+ char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
+ const int errno_shmat = errno;
+
+ // (A) Right after shmat and before handing shmat errors delete the shm segment.
+ if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
+ trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
+ assert(false, "failed to remove shared memory segment!");
+ }
+
+ // Handle shmat error. If we failed to attach, just return.
+ if (addr == (char*)-1) {
+ trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
+ return NULL;
+ }
+
+ // Just for info: query the real page size. In case setting the page size did not
+  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
+ const size_t real_pagesize = os::Aix::query_pagesize(addr);
+ if (real_pagesize != shmbuf.shm_pagesize) {
+ trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
+ }
+
+ if (addr) {
+ trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
+ addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
+ } else {
+ if (requested_addr != NULL) {
+ trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
+ } else {
+ trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
+ }
+ }
+
+ // book-keeping
+ vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
+ assert0(is_aligned_to(addr, os::vm_page_size()));
+
+ return addr;
+}
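The one non-obvious detail above is that the segment id is removed with IPC_RMID
immediately after shmat(), at point (A): the attached mapping stays valid until shmdt(),
but nothing global can outlive the process anymore. Reduced to a sketch, with error
handling trimmed to the essentials:

    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <sys/stat.h>
    #include <stddef.h>

    static void* reserve_sysv(size_t size) {
      const int shmid = ::shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
      if (shmid == -1) return NULL;
      void* p = ::shmat(shmid, NULL, 0);   // attach anywhere
      ::shmctl(shmid, IPC_RMID, NULL);     // remove the id right away; the
                                           // mapping survives until shmdt()
      return (p == (void*) -1) ? NULL : p;
    }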
+
+static bool release_shmated_memory(char* addr, size_t size) {
+
+ trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
+ addr, addr + size - 1);
+
+ bool rc = false;
+
+ // TODO: is there a way to verify shm size without doing bookkeeping?
+ if (::shmdt(addr) != 0) {
+ trcVerbose("error (%d).", errno);
+ } else {
+ trcVerbose("ok.");
+ rc = true;
+ }
+ return rc;
+}
+
+static bool uncommit_shmated_memory(char* addr, size_t size) {
+ trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
+ addr, addr + size - 1);
+
+ const bool rc = my_disclaim64(addr, size);
+
+ if (!rc) {
+ trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
+ return false;
+ }
+ return true;
+}
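uncommit_shmated_memory() leans on my_disclaim64(), which (see the maxDisclaimSize logic
earlier in this file) has to split large ranges because disclaim() takes a 32-bit length.
A hedged sketch of that chunking loop, with a generic callback standing in for the real
disclaim call:

    #include <stddef.h>

    typedef bool (*range_op_t)(char* addr, unsigned int size);

    // Applies 'op' to [addr, addr + size) in chunks a 32-bit length can express.
    static bool apply_chunked(range_op_t op, char* addr, size_t size) {
      const unsigned int kMaxChunk = 0x40000000;       // 1G, mirrors maxDisclaimSize
      const size_t full_chunks = size / kMaxChunk;
      const unsigned int rest = (unsigned int)(size % kMaxChunk);
      for (size_t i = 0; i < full_chunks; i++) {
        if (!op(addr, kMaxChunk)) return false;
        addr += kMaxChunk;
      }
      return rest == 0 ? true : op(addr, rest);
    }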
+
+// Reserve memory via mmap.
+// If <requested_addr> is given, an attempt is made to attach at the given address.
+// Failing that, memory is allocated at any address.
+// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
+// allocate at an address aligned with the given alignment. Failing that, memory
+// is allocated at any address.
+static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
+ trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
+ "alignment_hint " UINTX_FORMAT "...",
+ bytes, requested_addr, alignment_hint);
+
+ // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
+  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
+ trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
+ return NULL;
+ }
+
+ // We must prevent anyone from attaching too close to the
+ // BRK because that may cause malloc OOM.
+ if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
+ trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
+ "Will attach anywhere.", requested_addr);
+ // Act like the OS refused to attach there.
+ requested_addr = NULL;
+ }
+
+ // Specify one or the other but not both.
+ assert0(!(requested_addr != NULL && alignment_hint > 0));
+
+ // In 64K mode, we claim the global page size (os::vm_page_size())
+ // is 64K. This is one of the few points where that illusion may
+ // break, because mmap() will always return memory aligned to 4K. So
+ // we must ensure we only ever return memory aligned to 64k.
+ if (alignment_hint) {
+ alignment_hint = lcm(alignment_hint, os::vm_page_size());
+ } else {
+ alignment_hint = os::vm_page_size();
+ }
+
+ // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
+ const size_t size = align_size_up(bytes, os::vm_page_size());
+
+ // alignment: Allocate memory large enough to include an aligned range of the right size and
+ // cut off the leading and trailing waste pages.
+ assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
+ const size_t extra_size = size + alignment_hint;
+
+ // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
+ // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
+ int flags = MAP_ANONYMOUS | MAP_SHARED;
+
+ // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
+ // it means if wishaddress is given but MAP_FIXED is not set.
+ //
+ // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
+ // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
+ // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
+ // get clobbered.
+ if (requested_addr != NULL) {
+ if (!os::Aix::xpg_sus_mode()) { // not SPEC1170 Behaviour
+ flags |= MAP_FIXED;
+ }
+ }
+
+ char* addr = (char*)::mmap(requested_addr, extra_size,
+ PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
+
+ if (addr == MAP_FAILED) {
+ trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
+ return NULL;
+ }
+
+ // Handle alignment.
+ char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
+ const size_t waste_pre = addr_aligned - addr;
+ char* const addr_aligned_end = addr_aligned + size;
+ const size_t waste_post = extra_size - waste_pre - size;
+ if (waste_pre > 0) {
+ ::munmap(addr, waste_pre);
+ }
+ if (waste_post > 0) {
+ ::munmap(addr_aligned_end, waste_post);
+ }
+ addr = addr_aligned;
+
+ if (addr) {
+ trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
+ addr, addr + bytes, bytes);
+ } else {
+ if (requested_addr != NULL) {
+ trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
+ } else {
+ trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
+ }
+ }
+
+ // bookkeeping
+ vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
+
+ // Test alignment, see above.
+ assert0(is_aligned_to(addr, os::vm_page_size()));
+
+ return addr;
+}
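The alignment handling above is the classic over-allocate-and-trim trick: map
size + alignment bytes, then munmap the misaligned head and the trailing waste. A
standalone sketch, assuming 'alignment' is a nonzero power of two and 'size' is page
aligned (MAP_ANONYMOUS is spelled MAP_ANON on some systems):

    #include <sys/mman.h>
    #include <stdint.h>
    #include <stddef.h>

    static void* mmap_aligned(size_t size, size_t alignment) {
      const size_t extra = size + alignment;
      char* raw = (char*) ::mmap(NULL, extra, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (raw == (char*) MAP_FAILED) return NULL;
      char* aligned = (char*) (((uintptr_t) raw + alignment - 1) &
                               ~((uintptr_t) alignment - 1));
      const size_t head = (size_t)(aligned - raw);
      const size_t tail = extra - head - size;
      if (head > 0) ::munmap(raw, head);            // trim misaligned head
      if (tail > 0) ::munmap(aligned + size, tail); // trim trailing waste
      return aligned;
    }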
+
+static bool release_mmaped_memory(char* addr, size_t size) {
+ assert0(is_aligned_to(addr, os::vm_page_size()));
+ assert0(is_aligned_to(size, os::vm_page_size()));
+
+ trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
+ addr, addr + size - 1);
+ bool rc = false;
+
+ if (::munmap(addr, size) != 0) {
+ trcVerbose("failed (%d)\n", errno);
+ rc = false;
+ } else {
+ trcVerbose("ok.");
+ rc = true;
+ }
+
+ return rc;
+}
+
+static bool uncommit_mmaped_memory(char* addr, size_t size) {
+
+ assert0(is_aligned_to(addr, os::vm_page_size()));
+ assert0(is_aligned_to(size, os::vm_page_size()));
+
+ trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
+ addr, addr + size - 1);
+ bool rc = false;
+
+ // Uncommit mmap memory with msync MS_INVALIDATE.
+ if (::msync(addr, size, MS_INVALIDATE) != 0) {
+ trcVerbose("failed (%d)\n", errno);
+ rc = false;
+ } else {
+ trcVerbose("ok.");
+ rc = true;
+ }
+
+ return rc;
+}
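Worth noting why this works at all: reserve_mmaped_memory() maps with MAP_SHARED
precisely so that msync(MS_INVALIDATE) can later throw the pages away while keeping the
address range reserved. Reduced to a sketch:

    #include <sys/mman.h>
    #include <stddef.h>

    // Discards page contents of a MAP_SHARED|MAP_ANONYMOUS range; the
    // reservation itself stays intact.
    static bool uncommit_pages(char* addr, size_t size) {
      return ::msync(addr, size, MS_INVALIDATE) == 0;
    }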
// End: shared memory bookkeeping
////////////////////////////////////////////////////////////////////////////////////////////////////
int os::vm_page_size() {
- // Seems redundant as all get out
+ // Seems redundant as all get out.
assert(os::Aix::page_size() != -1, "must call os::init");
return os::Aix::page_size();
}
@@ -2146,91 +2265,76 @@ int os::vm_allocation_granularity() {
return os::Aix::page_size();
}
-int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {
-
- // Commit is a noop. There is no explicit commit
- // needed on AIX. Memory is committed when touched.
- //
- // Debug : check address range for validity
-#ifdef ASSERT
- LOCK_SHMBK
- ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
- if (!block) {
- fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
- shmbk_dump_info();
- assert(false, "invalid pointer");
- return false;
- } else if (!block->containsRange(addr, size)) {
- fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
- shmbk_dump_info();
- assert(false, "invalid range");
- return false;
- }
- UNLOCK_SHMBK
-#endif // ASSERT
-
- return 0;
-}
-
-bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
- return os::Aix::commit_memory_impl(addr, size, exec) == 0;
+#ifdef PRODUCT
+static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
+ int err) {
+ warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+ ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
+ strerror(err), err);
}
+#endif
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
- os::Aix::commit_memory_impl(addr, size, exec);
+ if (!pd_commit_memory(addr, size, exec)) {
+ // Add extra info in product mode for vm_exit_out_of_memory():
+ PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
+ vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
+ }
}
-int os::Aix::commit_memory_impl(char* addr, size_t size,
- size_t alignment_hint, bool exec) {
- return os::Aix::commit_memory_impl(addr, size, exec);
+bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
+
+ assert0(is_aligned_to(addr, os::vm_page_size()));
+ assert0(is_aligned_to(size, os::vm_page_size()));
+
+ vmembk_t* const vmi = vmembk_find(addr);
+ assert0(vmi);
+ vmi->assert_is_valid_subrange(addr, size);
+
+ trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
+
+ return true;
}
-bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
- bool exec) {
- return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
+bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
+ return pd_commit_memory(addr, size, exec);
}
void os::pd_commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint, bool exec,
const char* mesg) {
- os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
+ // Alignment_hint is ignored on this OS.
+ pd_commit_memory_or_exit(addr, size, exec, mesg);
}
bool os::pd_uncommit_memory(char* addr, size_t size) {
+ assert0(is_aligned_to(addr, os::vm_page_size()));
+ assert0(is_aligned_to(size, os::vm_page_size()));
- // Delegate to ShmBkBlock class which knows how to uncommit its memory.
+ // Dynamically do different things for mmap/shmat.
+ const vmembk_t* const vmi = vmembk_find(addr);
+ assert0(vmi);
+ vmi->assert_is_valid_subrange(addr, size);
- bool rc = false;
- LOCK_SHMBK
- ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
- if (!block) {
- fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
- shmbk_dump_info();
- assert(false, "invalid pointer");
- return false;
- } else if (!block->containsRange(addr, size)) {
- fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
- shmbk_dump_info();
- assert(false, "invalid range");
- return false;
- }
- rc = block->disclaim(addr, size);
- UNLOCK_SHMBK
-
- if (Verbose && !rc) {
- warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
+ if (vmi->type == VMEM_SHMATED) {
+ return uncommit_shmated_memory(addr, size);
+ } else {
+ return uncommit_mmaped_memory(addr, size);
}
- return rc;
}
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
- return os::guard_memory(addr, size);
+ // Do not call this; no need to commit stack pages on AIX.
+ ShouldNotReachHere();
+ return true;
}
bool os::remove_stack_guard_pages(char* addr, size_t size) {
- return os::unguard_memory(addr, size);
+ // Do not call this; no need to commit stack pages on AIX.
+ ShouldNotReachHere();
+ return true;
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
@@ -2273,355 +2377,75 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info
return end;
}
-// Flags for reserve_shmatted_memory:
-#define RESSHM_WISHADDR_OR_FAIL 1
-#define RESSHM_TRY_16M_PAGES 2
-#define RESSHM_16M_PAGES_OR_FAIL 4
-
-// Result of reserve_shmatted_memory:
-struct shmatted_memory_info_t {
- char* addr;
- size_t pagesize;
- bool pinned;
-};
-
-// Reserve a section of shmatted memory.
-// params:
-// bytes [in]: size of memory, in bytes
-// requested_addr [in]: wish address.
-// NULL = no wish.
-// If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
-// be obtained, function will fail. Otherwise wish address is treated as hint and
-// another pointer is returned.
-// flags [in]: some flags. Valid flags are:
-// RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
-// RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
-// (requires UseLargePages and Use16MPages)
-// RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
-// Otherwise any other page size will do.
-// p_info [out] : holds information about the created shared memory segment.
-static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
-
- assert(p_info, "parameter error");
-
- // init output struct.
- p_info->addr = NULL;
-
- // neither should we be here for EXTSHM=ON.
- if (os::Aix::extshm()) {
- ShouldNotReachHere();
- }
-
- // extract flags. sanity checks.
- const bool wishaddr_or_fail =
- flags & RESSHM_WISHADDR_OR_FAIL;
- const bool try_16M_pages =
- flags & RESSHM_TRY_16M_PAGES;
- const bool f16M_pages_or_fail =
- flags & RESSHM_16M_PAGES_OR_FAIL;
-
- // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,
- // shmat will fail anyway, so save some cycles by failing right away
- if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M == 0)) {
- if (wishaddr_or_fail) {
- return false;
- } else {
- requested_addr = NULL;
- }
- }
-
- char* addr = NULL;
-
- // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
- // pagesize dynamically.
- const size_t size = align_size_up(bytes, SIZE_16M);
-
- // reserve the shared segment
- int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
- if (shmid == -1) {
- warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
- return false;
- }
-
- // Important note:
- // It is very important that we, upon leaving this function, do not leave a shm segment alive.
- // We must right after attaching it remove it from the system. System V shm segments are global and
- // survive the process.
- // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
-
- // try forcing the page size
- size_t pagesize = -1; // unknown so far
-
- if (UseLargePages) {
-
- struct shmid_ds shmbuf;
- memset(&shmbuf, 0, sizeof(shmbuf));
-
- // First, try to take from 16M page pool if...
- if (os::Aix::can_use_16M_pages() // we can ...
- && Use16MPages // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
- && try_16M_pages) { // caller wants us to.
- shmbuf.shm_pagesize = SIZE_16M;
- if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
- pagesize = SIZE_16M;
- } else {
- warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
- size / SIZE_16M, errno);
- if (f16M_pages_or_fail) {
- goto cleanup_shm;
- }
- }
- }
-
- // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
- // because the 64K page pool may also be exhausted.
- if (pagesize == -1) {
- shmbuf.shm_pagesize = SIZE_64K;
- if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
- pagesize = SIZE_64K;
- } else {
- warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
- size / SIZE_64K, errno);
- // here I give up. leave page_size -1 - later, after attaching, we will query the
- // real page size of the attached memory. (in theory, it may be something different
- // from 4K if LDR_CNTRL SHM_PSIZE is set)
- }
- }
- }
-
- // sanity point
- assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
-
- // Now attach the shared segment.
- addr = (char*) shmat(shmid, requested_addr, 0);
- if (addr == (char*)-1) {
- // How to handle attach failure:
- // If it failed for a specific wish address, tolerate this: in that case, if wish address was
- // mandatory, fail, if not, retry anywhere.
- // If it failed for any other reason, treat that as fatal error.
- addr = NULL;
- if (requested_addr) {
- if (wishaddr_or_fail) {
- goto cleanup_shm;
- } else {
- addr = (char*) shmat(shmid, NULL, 0);
- if (addr == (char*)-1) { // fatal
- addr = NULL;
- warning("shmat failed (errno: %d)", errno);
- goto cleanup_shm;
- }
- }
- } else { // fatal
- addr = NULL;
- warning("shmat failed (errno: %d)", errno);
- goto cleanup_shm;
- }
- }
-
- // sanity point
- assert(addr && addr != (char*) -1, "wrong address");
-
- // after successful Attach remove the segment - right away.
- if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
- warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
- guarantee(false, "failed to remove shared memory segment!");
- }
- shmid = -1;
-
- // query the real page size. In case setting the page size did not work (see above), the system
- // may have given us something other then 4K (LDR_CNTRL)
- {
- const size_t real_pagesize = os::Aix::query_pagesize(addr);
- if (pagesize != -1) {
- assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
- } else {
- pagesize = real_pagesize;
- }
- }
-
- // Now register the reserved block with internal book keeping.
- LOCK_SHMBK
- const bool pinned = pagesize >= SIZE_16M ? true : false;
- ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
- assert(p_block, "");
- shmbk_register(p_block);
- UNLOCK_SHMBK
-
-cleanup_shm:
-
- // if we have not done so yet, remove the shared memory segment. This is very important.
- if (shmid != -1) {
- if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
- warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
- guarantee(false, "failed to remove shared memory segment!");
- }
- shmid = -1;
- }
-
- // trace
- if (Verbose && !addr) {
- if (requested_addr != NULL) {
- warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
- } else {
- warning("failed to shm-allocate 0x%llX bytes at any address.", size);
- }
- }
-
- // hand info to caller
- if (addr) {
- p_info->addr = addr;
- p_info->pagesize = pagesize;
- p_info->pinned = pagesize == SIZE_16M ? true : false;
- }
-
- // sanity test:
- if (requested_addr && addr && wishaddr_or_fail) {
- guarantee(addr == requested_addr, "shmat error");
- }
-
- // just one more test to really make sure we have no dangling shm segments.
- guarantee(shmid == -1, "dangling shm segments");
-
- return addr ? true : false;
-
-} // end: reserve_shmatted_memory
-
-// Reserve memory using mmap. Behaves the same as reserve_shmatted_memory():
-// will return NULL in case of an error.
-static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
-
- // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
- if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
- warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
- return NULL;
- }
-
- const size_t size = align_size_up(bytes, SIZE_4K);
-
- // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
- // msync(MS_INVALIDATE) (see os::uncommit_memory)
- int flags = MAP_ANONYMOUS | MAP_SHARED;
-
- // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
- // it means if wishaddress is given but MAP_FIXED is not set.
- //
- // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
- // clobbers the address range, which is probably not what the caller wants. That's
- // why I assert here (again) that the SPEC1170 compat mode is off.
- // If we want to be able to run under SPEC1170, we have to do some porting and
- // testing.
- if (requested_addr != NULL) {
- assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
- flags |= MAP_FIXED;
- }
-
- char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
-
- if (addr == MAP_FAILED) {
- // attach failed: tolerate for specific wish addresses. Not being able to attach
- // anywhere is a fatal error.
- if (requested_addr == NULL) {
- // It's ok to fail here if the machine has not enough memory.
- warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
- }
- addr = NULL;
- goto cleanup_mmap;
- }
-
- // If we did request a specific address and that address was not available, fail.
- if (addr && requested_addr) {
- guarantee(addr == requested_addr, "unexpected");
- }
-
- // register this mmap'ed segment with book keeping
- LOCK_SHMBK
- ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
- assert(p_block, "");
- shmbk_register(p_block);
- UNLOCK_SHMBK
-
-cleanup_mmap:
-
- // trace
- if (Verbose) {
- if (addr) {
- fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
- }
- else {
- if (requested_addr != NULL) {
- warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
- } else {
- warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
- }
- }
- }
-
- return addr;
-
-} // end: reserve_mmaped_memory
-
// Reserves and attaches a shared memory segment.
// Will assert if a wish address is given and could not be obtained.
char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
- return os::attempt_reserve_memory_at(bytes, requested_addr);
+
+ // All other Unices do a mmap(MAP_FIXED) if the addr is given,
+ // thereby clobbering old mappings at that place. That is probably
+  // not intended, never used, and almost certainly an error were it
+  // ever to be used this way (to try attaching at a specified address
+  // without clobbering old mappings, an alternate API exists:
+  // os::attempt_reserve_memory_at()).
+  // Instead of mimicking the dangerous coding of the other platforms, here I
+  // just ignore the requested address (release) or assert (debug).
+ assert0(requested_addr == NULL);
+
+ // Always round to os::vm_page_size(), which may be larger than 4K.
+ bytes = align_size_up(bytes, os::vm_page_size());
+ const size_t alignment_hint0 =
+ alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
+
+ // In 4K mode always use mmap.
+ // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
+ if (os::vm_page_size() == SIZE_4K) {
+ return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
+ } else {
+ if (bytes >= Use64KPagesThreshold) {
+ return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
+ } else {
+ return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
+ }
+ }
}
bool os::pd_release_memory(char* addr, size_t size) {
- // delegate to ShmBkBlock class which knows how to uncommit its memory.
+ // Dynamically do different things for mmap/shmat.
+ vmembk_t* const vmi = vmembk_find(addr);
+ assert0(vmi);
+
+ // Always round to os::vm_page_size(), which may be larger than 4K.
+ size = align_size_up(size, os::vm_page_size());
+ addr = (char *)align_ptr_up(addr, os::vm_page_size());
bool rc = false;
- LOCK_SHMBK
- ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
- if (!block) {
- fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
- shmbk_dump_info();
- assert(false, "invalid pointer");
- return false;
+ bool remove_bookkeeping = false;
+ if (vmi->type == VMEM_SHMATED) {
+ // For shmatted memory, we do:
+ // - If user wants to release the whole range, release the memory (shmdt).
+ // - If user only wants to release a partial range, uncommit (disclaim) that
+    //   range. That way, at least, we do not use the memory anymore (but still
+    //   use page table space).
+ vmi->assert_is_valid_subrange(addr, size);
+ if (addr == vmi->addr && size == vmi->size) {
+ rc = release_shmated_memory(addr, size);
+ remove_bookkeeping = true;
+ } else {
+ rc = uncommit_shmated_memory(addr, size);
}
- else if (!block->isSameRange(addr, size)) {
- if (block->getType() == ShmBkBlock::MMAP) {
- // Release only the same range or a the beginning or the end of a range.
- if (block->base() == addr && size < block->size()) {
- ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
- assert(b, "");
- shmbk_register(b);
- block->setAddrRange(AddrRange(addr, size));
- }
- else if (addr > block->base() && addr + size == block->base() + block->size()) {
- ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
- assert(b, "");
- shmbk_register(b);
- block->setAddrRange(AddrRange(addr, size));
- }
- else {
- fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
- shmbk_dump_info();
- assert(false, "invalid mmap range");
- return false;
- }
- }
- else {
- // Release only the same range. No partial release allowed.
- // Soften the requirement a bit, because the user may think he owns a smaller size
- // than the block is due to alignment etc.
- if (block->base() != addr || block->size() < size) {
- fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
- shmbk_dump_info();
- assert(false, "invalid shmget range");
- return false;
- }
- }
- }
- rc = block->release();
- assert(rc, "release failed");
- // remove block from bookkeeping
- shmbk_unregister(block);
- delete block;
- UNLOCK_SHMBK
+ } else {
+ // User may unmap partial regions but region has to be fully contained.
+#ifdef ASSERT
+ vmi->assert_is_valid_subrange(addr, size);
+#endif
+ rc = release_mmaped_memory(addr, size);
+ remove_bookkeeping = true;
+ }
- if (!rc) {
- warning("failed to released %lu bytes at 0x%p", size, addr);
+ // update bookkeeping
+ if (rc && remove_bookkeeping) {
+ vmembk_remove(vmi);
}
return rc;
@@ -2654,7 +2478,7 @@ static bool checked_mprotect(char* addr, size_t size, int prot) {
//
if (!os::Aix::xpg_sus_mode()) {
- if (StubRoutines::SafeFetch32_stub()) {
+ if (CanUseSafeFetch32()) {
const bool read_protected =
(SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
@@ -2702,46 +2526,8 @@ static size_t _large_page_size = 0;
// Enable large page support if OS allows that.
void os::large_page_init() {
-
- // Note: os::Aix::query_multipage_support must run first.
-
- if (!UseLargePages) {
- return;
- }
-
- if (!Aix::can_use_64K_pages()) {
- assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
- UseLargePages = false;
- return;
- }
-
- if (!Aix::can_use_16M_pages() && Use16MPages) {
- fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
- " and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
- }
-
- // Do not report 16M page alignment as part of os::_page_sizes if we are
- // explicitly forbidden from using 16M pages. Doing so would increase the
- // alignment the garbage collector calculates with, slightly increasing
- // heap usage. We should only pay for 16M alignment if we really want to
- // use 16M pages.
- if (Use16MPages && Aix::can_use_16M_pages()) {
- _large_page_size = SIZE_16M;
- _page_sizes[0] = SIZE_16M;
- _page_sizes[1] = SIZE_64K;
- _page_sizes[2] = SIZE_4K;
- _page_sizes[3] = 0;
- } else if (Aix::can_use_64K_pages()) {
- _large_page_size = SIZE_64K;
- _page_sizes[0] = SIZE_64K;
- _page_sizes[1] = SIZE_4K;
- _page_sizes[2] = 0;
- }
-
- if (Verbose) {
- ("Default large page size is 0x%llX.", _large_page_size);
- }
-} // end: os::large_page_init()
+ return; // Nothing to do. See query_multipage_support and friends.
+}
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
// "exec" is passed in but not used. Creating the shared image for
@@ -2751,7 +2537,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
}
bool os::release_memory_special(char* base, size_t bytes) {
- // detaching the SHM segment will also delete it, see reserve_memory_special()
+ // Detaching the SHM segment will also delete it, see reserve_memory_special().
Unimplemented();
return false;
}
@@ -2761,40 +2547,32 @@ size_t os::large_page_size() {
}
bool os::can_commit_large_page_memory() {
- // Well, sadly we cannot commit anything at all (see comment in
- // os::commit_memory) but we claim to so we can make use of large pages
- return true;
+ // Does not matter, we do not support huge pages.
+ return false;
}
bool os::can_execute_large_page_memory() {
- // We can do that
- return true;
+ // Does not matter, we do not support huge pages.
+ return false;
}
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
-
- bool use_mmap = false;
-
- // mmap: smaller graining, no large page support
- // shm: large graining (256M), large page support, limited number of shm segments
- //
- // Prefer mmap wherever we either do not need large page support or have OS limits
-
- if (!UseLargePages || bytes < SIZE_16M) {
- use_mmap = true;
- }
-
char* addr = NULL;
- if (use_mmap) {
- addr = reserve_mmaped_memory(bytes, requested_addr);
+
+ // Always round to os::vm_page_size(), which may be larger than 4K.
+ bytes = align_size_up(bytes, os::vm_page_size());
+
+ // In 4K mode always use mmap.
+ // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
+ if (os::vm_page_size() == SIZE_4K) {
+ return reserve_mmaped_memory(bytes, requested_addr, 0);
} else {
- // shmat: wish address is mandatory, and do not try 16M pages here.
- shmatted_memory_info_t info;
- const int flags = RESSHM_WISHADDR_OR_FAIL;
- if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
- addr = info.addr;
+ if (bytes >= Use64KPagesThreshold) {
+ return reserve_shmated_memory(bytes, requested_addr, 0);
+ } else {
+ return reserve_mmaped_memory(bytes, requested_addr, 0);
}
}
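In words: 4K mode always reserves with mmap; 64K mode reserves requests at or above Use64KPagesThreshold with 64K-paged System V shared memory and smaller ones with mmap, since shm segments have coarse 256M graining and are limited in number. The same policy as a standalone predicate, reusing the names from this patch (structure illustrative):

#include <cstddef>

enum ReserveMethod { use_mmap, use_shm };

// page_size = os::vm_page_size(), threshold = Use64KPagesThreshold.
static ReserveMethod pick_reserve_method(size_t bytes, size_t page_size,
                                         size_t threshold) {
  const size_t SIZE_4K = 4 * 1024;
  if (page_size == SIZE_4K) {
    return use_mmap;                                 // 4K mode: mmap only
  }
  return (bytes >= threshold) ? use_shm : use_mmap;  // 64K mode
}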
@@ -3629,18 +3407,89 @@ extern "C" {
// This is called _before_ the most of global arguments have been parsed.
void os::init(void) {
// This is basic, we want to know if that ever changes.
- // (shared memory boundary is supposed to be a 256M aligned)
+ // (The shared memory boundary is supposed to be 256M aligned.)
assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
// First off, we need to know whether we run on AIX or PASE, and
// the OS level we run on.
os::Aix::initialize_os_info();
- // Scan environment (SPEC1170 behaviour, etc)
+ // Scan environment (SPEC1170 behaviour, etc).
os::Aix::scan_environment();
// Check which pages are supported by AIX.
- os::Aix::query_multipage_support();
+ query_multipage_support();
+
+ // Act like we only have one page size by eliminating corner cases which
+ // we did not support very well anyway.
+ // We have two input conditions:
+ // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
+ // launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
+ // setting.
+ // Data segment page size is important for us because it defines the thread stack page
+ // size, which is needed for guard page handling, stack banging etc.
+ // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
+ // and should be allocated with 64k pages.
+ //
+ // So, we do the following:
+ //  LDR_CNTRL   can_use_64K_pages_dynamically   what we do                      remarks
+ //  4K          no                              4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
+ //  4k          yes                             64k (treat 4k stacks as 64k)    different loader than java and standard settings
+ //  64k         no                              ---  AIX 5.2 ? ---
+ //  64k         yes                             64k                             new systems and standard java loader (we set datapsize=64k when linking)
+
+ // We explicitly leave no option to change page size, because only upgrading would work,
+ // not downgrading (if stack page size is 64k you cannot pretend it's 4k).
+
+ if (g_multipage_support.datapsize == SIZE_4K) {
+ // datapsize = 4K. Data segment, thread stacks are 4K paged.
+ if (g_multipage_support.can_use_64K_pages) {
+ // .. but we are able to use 64K pages dynamically.
+ // This would be typical for java launchers which are not linked
+ // with datapsize=64K (like, any other launcher but our own).
+ //
+ // In this case it would be smart to allocate the java heap with 64K
+ // to get the performance benefit, and to fake 64k pages for the
+ // data segment (when dealing with thread stacks).
+ //
+ // However, leave a possibility to downgrade to 4K, using
+ // -XX:-Use64KPages.
+ if (Use64KPages) {
+ trcVerbose("64K page mode (faked for data segment)");
+ Aix::_page_size = SIZE_64K;
+ } else {
+ trcVerbose("4K page mode (Use64KPages=off)");
+ Aix::_page_size = SIZE_4K;
+ }
+ } else {
+ // .. and not able to allocate 64k pages dynamically. Here, just
+ // fall back to 4K paged mode and use mmap for everything.
+ trcVerbose("4K page mode");
+ Aix::_page_size = SIZE_4K;
+ FLAG_SET_ERGO(bool, Use64KPages, false);
+ }
+ } else {
+ // datapsize = 64k. Data segment, thread stacks are 64k paged.
+ // This normally means that we can allocate 64k pages dynamically.
+ // (There is one special case where this may be false: EXTSHM=on,
+ // but we decided not to support that mode.)
+ assert0(g_multipage_support.can_use_64K_pages);
+ Aix::_page_size = SIZE_64K;
+ trcVerbose("64K page mode");
+ FLAG_SET_ERGO(bool, Use64KPages, true);
+ }
+
+ // Hard-wire stack page size to base page size; if that works, we just remove
+ // that stack page size altogether.
+ Aix::_stack_page_size = Aix::_page_size;
+
+ // For now UseLargePages is just ignored.
+ FLAG_SET_ERGO(bool, UseLargePages, false);
+ _page_sizes[0] = 0;
+ _large_page_size = -1;
+
+ // debug trace
+ trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
// Next, we need to initialize libo4 and libperfstat libraries.
if (os::Aix::on_pase()) {
@@ -3658,34 +3507,6 @@ void os::init(void) {
// need libperfstat etc.
os::Aix::initialize_system_info();
- // Initialize large page support.
- if (UseLargePages) {
- os::large_page_init();
- if (!UseLargePages) {
- // initialize os::_page_sizes
- _page_sizes[0] = Aix::page_size();
- _page_sizes[1] = 0;
- if (Verbose) {
- fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
- }
- }
- } else {
- // initialize os::_page_sizes
- _page_sizes[0] = Aix::page_size();
- _page_sizes[1] = 0;
- }
-
- // debug trace
- if (Verbose) {
- fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
- fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
- fprintf(stderr, "os::_page_sizes = ( ");
- for (int i = 0; _page_sizes[i]; i ++) {
- fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
- }
- fprintf(stderr, ")\n");
- }
-
_initial_pid = getpid();
clock_tics_per_sec = sysconf(_SC_CLK_TCK);
@@ -3698,7 +3519,15 @@ void os::init(void) {
Aix::_main_thread = pthread_self();
initial_time_count = os::elapsed_counter();
- pthread_mutex_init(&dl_mutex, NULL);
+
+ // If the pagesize of the VM is greater than 8K determine the appropriate
+ // number of initial guard pages. The user can change this with the
+ // command line arguments, if needed.
+ if (vm_page_size() > (int)Aix::vm_default_page_size()) {
+ StackYellowPages = 1;
+ StackRedPages = 1;
+ StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
+ }
}
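The guard-page rescaling above keeps the byte size of the shadow area roughly constant once pages are larger than the 8K vm_default_page_size(). A worked example, assuming a StackShadowPages default of 20 (illustrative; the real default is platform-dependent):

#include <cstdio>

// Align x up to a multiple of s (s a power of two), like HotSpot's round_to.
static size_t round_to(size_t x, size_t s) { return (x + s - 1) & ~(s - 1); }

int main() {
  const size_t default_page = 8 * 1024;   // Aix::vm_default_page_size()
  const size_t vm_page      = 64 * 1024;  // os::vm_page_size() in 64K mode
  size_t shadow_pages = 20;               // assumed default, for illustration
  shadow_pages = round_to(shadow_pages * default_page, vm_page) / vm_page;
  printf("%zu\n", shadow_pages);          // 160K rounds up to 192K -> 3 pages
  return 0;
}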
// This is called _after_ the global arguments have been parsed.
@@ -3717,7 +3546,7 @@ jint os::init_2(void) {
const int prot = PROT_READ;
const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
- // use optimized addresses for the polling page,
+ // Use optimized addresses for the polling page,
// e.g. map it to a special 32-bit address.
if (OptimizePollingPageLocation) {
// architecture-specific list of address wishes:
@@ -3739,7 +3568,7 @@ jint os::init_2(void) {
// iterate over the list of address wishes:
for (int i=0; i<
...
tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
+ }
#endif
}
@@ -3797,16 +3627,18 @@ jint os::init_2(void) {
// Add in 2*BytesPerWord times page size to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
- (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
- 2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
+ (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
+ (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
+
+ os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
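Plugging in illustrative numbers (StackYellowPages = StackRedPages = 1 and StackShadowPages = 3 as in the rescaling above, 64K pages, BytesPerWord = 8, C2 present, and ignoring the MAX2 floor): (1+1+3)*64K + (2*8+1)*8K = 320K + 136K = 456K, which align_size_up then rounds to 512K.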
size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&
threadStackSizeInBytes < os::Aix::min_stack_allowed) {
- tty->print_cr("\nThe stack size specified is too small, "
- "Specify at least %dk",
- os::Aix::min_stack_allowed / K);
- return JNI_ERR;
+ tty->print_cr("\nThe stack size specified is too small, "
+ "Specify at least %dk",
+ os::Aix::min_stack_allowed / K);
+ return JNI_ERR;
}
// Make the stack size a multiple of the page size so that
@@ -3817,7 +3649,7 @@ jint os::init_2(void) {
Aix::libpthread_init();
if (MaxFDLimit) {
- // set the number of file descriptors to max. print out error
+ // Set the number of file descriptors to max. Print out error
// if getrlimit/setrlimit fails but continue regardless.
struct rlimit nbr_files;
int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
@@ -3835,12 +3667,12 @@ jint os::init_2(void) {
}
if (PerfAllowAtExitRegistration) {
- // only register atexit functions if PerfAllowAtExitRegistration is set.
- // atexit functions can be delayed until process exit time, which
+ // Only register atexit functions if PerfAllowAtExitRegistration is set.
+ // Atexit functions can be delayed until process exit time, which
// can be problematic for embedded VM situations. Embedded VMs should
// call DestroyJavaVM() to assure that VM resources are released.
- // note: perfMemory_exit_helper atexit function may be removed in
+ // Note: perfMemory_exit_helper atexit function may be removed in
// the future if the appropriate cleanup code can be added to the
// VM_Exit VMOperation's doit method.
if (atexit(perfMemory_exit_helper) != 0) {
@@ -4162,8 +3994,10 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
if (read_only) {
prot = PROT_READ;
+ flags = MAP_SHARED;
} else {
prot = PROT_READ | PROT_WRITE;
+ flags = MAP_PRIVATE;
}
if (allow_exec) {
@@ -4174,7 +4008,12 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
flags |= MAP_FIXED;
}
- char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
+ // Allow anonymous mappings if 'fd' is -1.
+ if (fd == -1) {
+ flags |= MAP_ANONYMOUS;
+ }
+
+ char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
fd, file_offset);
if (mapped_address == MAP_FAILED) {
return NULL;
@@ -4432,7 +4271,7 @@ void os::Aix::scan_environment() {
if (Verbose) {
fprintf(stderr, "EXTSHM=%s.\n", p ? p : "");
}
- if (p && strcmp(p, "ON") == 0) {
+ if (p && strcasecmp(p, "ON") == 0) {
fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
_extshm = 1;
} else {
@@ -4493,16 +4332,13 @@ void os::Aix::initialize_libperfstat() {
/////////////////////////////////////////////////////////////////////////////
// thread stack
-// function to query the current stack size using pthread_getthrds_np
-//
-// ! do not change anything here unless you know what you are doing !
-static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
-
+// Function to query the current stack size using pthread_getthrds_np.
+static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
// This only works when invoked on a pthread. As we agreed not to use
- // primordial threads anyway, I assert here
+ // primordial threads anyway, I assert here.
guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
- // information about this api can be found (a) in the pthread.h header and
+ // Information about this api can be found (a) in the pthread.h header and
// (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
//
// The use of this API to find out the current stack is kind of undefined.
@@ -4513,57 +4349,72 @@ static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size)
pthread_t tid = pthread_self();
struct __pthrdsinfo pinfo;
- char dummy[1]; // we only need this to satisfy the api and to not get E
+ char dummy[1]; // We only need this to satisfy the api and to not get E.
int dummy_size = sizeof(dummy);
memset(&pinfo, 0, sizeof(pinfo));
- const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
- sizeof(pinfo), dummy, &dummy_size);
+ const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
+ sizeof(pinfo), dummy, &dummy_size);
if (rc != 0) {
- fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
- guarantee(0, "pthread_getthrds_np failed");
+ assert0(false);
+ trcVerbose("pthread_getthrds_np failed (%d)", rc);
+ return false;
}
+ guarantee0(pinfo.__pi_stackend);
- guarantee(pinfo.__pi_stackend, "returned stack base invalid");
-
- // the following can happen when invoking pthread_getthrds_np on a pthread running on a user provided stack
- // (when handing down a stack to pthread create, see pthread_attr_setstackaddr).
+ // The following can happen when invoking pthread_getthrds_np on a pthread running
+ // on a user provided stack (when handing down a stack to pthread create, see
+ // pthread_attr_setstackaddr).
// Not sure what to do here - I feel inclined to forbid this use case completely.
- guarantee(pinfo.__pi_stacksize, "returned stack size invalid");
+ guarantee0(pinfo.__pi_stacksize);
+
+ // Note: the pthread stack on AIX seems to look like this:
+ //
+ // --------------------- real base ? at page border ?
+ //
+ // pthread internal data, like ~2K, see also
+ // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
+ //
+ // --------------------- __pi_stackend - not page aligned, (xxxxF890)
+ //
+ // stack
+ // ....
+ //
+ // stack
+ //
+ // --------------------- __pi_stackend - __pi_stacksize
+ //
+ // padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
+ // --------------------- __pi_stackaddr (page aligned if AIXTHREAD_GUARDPAGES > 0)
+ //
+ // AIX guard pages (?)
+ //
+
+ // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
+ // __pi_stackend however is almost never page aligned.
+ //
- // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
if (p_stack_base) {
- (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
+ (*p_stack_base) = (address) (pinfo.__pi_stackend);
}
if (p_stack_size) {
- (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
+ (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
}
-#ifndef PRODUCT
- if (Verbose) {
- fprintf(stderr,
- "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
- ", real stack_size=" INTPTR_FORMAT
- ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
- (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
- (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
- pinfo.__pi_stacksize - os::Aix::stack_page_size());
- }
-#endif
+ return true;
+}
-} // end query_stack_dimensions
-
-// get the current stack base from the OS (actually, the pthread library)
+// Get the current stack base from the OS (actually, the pthread library).
address os::current_stack_base() {
address p;
query_stack_dimensions(&p, 0);
return p;
}
-// get the current stack size from the OS (actually, the pthread library)
+// Get the current stack size from the OS (actually, the pthread library).
size_t os::current_stack_size() {
size_t s;
query_stack_dimensions(0, &s);
diff --git a/hotspot/src/os/aix/vm/os_aix.hpp b/hotspot/src/os/aix/vm/os_aix.hpp
index 01831fb7cbc..faba5c2b45e 100644
--- a/hotspot/src/os/aix/vm/os_aix.hpp
+++ b/hotspot/src/os/aix/vm/os_aix.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013 SAP AG. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2013, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,9 +35,9 @@ class Aix {
friend class os;
// For signal-chaining
- // highest so far (AIX 5.2) is SIGSAK (63)
+ // highest so far (AIX 5.2 - 6.1) is SIGSAK (63)
#define MAXSIGNUM 63
- // length of strings included in the libperfstat structures
+ // Length of strings included in the libperfstat structures.
#define IDENTIFIER_LENGTH 64
static struct sigaction sigact[MAXSIGNUM]; // saved preinstalled sigactions
@@ -111,22 +111,6 @@ class Aix {
// (should be LDR_CNTRL DATAPSIZE because stack is allocated on heap by pthread lib)
static int _stack_page_size;
- // Default shm page size. Read: what page size shared memory will be backed
- // with if no page size was set explicitly using shmctl(SHM_PAGESIZE).
- // Should be LDR_CNTRL SHMPSIZE.
- static size_t _shm_default_page_size;
-
- // True if sys V shm can be used with 64K pages dynamically.
- // (via shmctl(.. SHM_PAGESIZE..). Should be true for AIX 53 and
- // newer / PASE V6R1 and newer. (0 or 1, -1 if not initialized)
- static int _can_use_64K_pages;
-
- // True if sys V shm can be used with 16M pages dynamically.
- // (via shmctl(.. SHM_PAGESIZE..). Only true on AIX 5.3 and
- // newer, if the system was set up to use 16M pages and the
- // jvm has enough user rights. (0 or 1, -1 if not initialized)
- static int _can_use_16M_pages;
-
static julong available_memory();
static julong physical_memory() { return _physical_memory; }
static void initialize_system_info();
@@ -135,10 +119,6 @@ class Aix {
// one of Aix::on_pase(), Aix::os_version().
static void initialize_os_info();
- static int commit_memory_impl(char* addr, size_t bytes, bool exec);
- static int commit_memory_impl(char* addr, size_t bytes,
- size_t alignment_hint, bool exec);
-
// Scan environment for important settings which might affect the
// VM. Trace out settings. Warn about invalid settings and/or
// correct them.
@@ -146,10 +126,6 @@ class Aix {
// Must run after os::Aix::initialize_os_info().
static void scan_environment();
- // Retrieve information about multipage size support. Will initialize
- // _page_size, _stack_page_size, _can_use_64K_pages/_can_use_16M_pages
- static void query_multipage_support();
-
// Initialize libo4 (on PASE) and libperfstat (on AIX). Call this
// before relying on functions from either lib, e.g. Aix::get_meminfo().
static void initialize_libo4();
@@ -187,27 +163,8 @@ class Aix {
return _stack_page_size;
}
- // default shm page size. Read: what page size shared memory
- // will be backed with if no page size was set explicitly using shmctl(SHM_PAGESIZE).
- // Should be LDR_CNTRL SHMPSIZE.
- static int shm_default_page_size(void) {
- assert(_shm_default_page_size != -1, "not initialized");
- return _shm_default_page_size;
- }
-
- // Return true if sys V shm can be used with 64K pages dynamically
- // (via shmctl(.. SHM_PAGESIZE..).
- static bool can_use_64K_pages () {
- assert(_can_use_64K_pages != -1, "not initialized");
- return _can_use_64K_pages == 1 ? true : false;
- }
-
- // Return true if sys V shm can be used with 16M pages dynamically.
- // (via shmctl(.. SHM_PAGESIZE..).
- static bool can_use_16M_pages () {
- assert(_can_use_16M_pages != -1, "not initialized");
- return _can_use_16M_pages == 1 ? true : false;
- }
+ // This is used to scale stack space (guard pages etc.). The name is somewhat misleading.
+ static int vm_default_page_size(void) { return 8*K; }
static address ucontext_get_pc(const ucontext_t* uc);
static intptr_t* ucontext_get_sp(ucontext_t* uc);
@@ -269,6 +226,11 @@ class Aix {
return _os_version;
}
+ // Convenience method: returns true if running on PASE V5R4 or older.
+ static bool on_pase_V5R4_or_older() {
+ return on_pase() && os_version() <= 0x0504;
+ }
+
// Convenience method: returns true if running on AIX 5.3 or older.
static bool on_aix_53_or_older() {
return on_aix() && os_version() <= 0x0503;
diff --git a/hotspot/src/os/aix/vm/os_aix.inline.hpp b/hotspot/src/os/aix/vm/os_aix.inline.hpp
index 5602342b4ff..7de0626dae6 100644
--- a/hotspot/src/os/aix/vm/os_aix.inline.hpp
+++ b/hotspot/src/os/aix/vm/os_aix.inline.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@ inline void* os::thread_local_storage_at(int index) {
return pthread_getspecific((pthread_key_t)index);
}
-// File names are case-sensitive on windows only
+// File names are case-sensitive on windows only.
inline int os::file_name_strcmp(const char* s1, const char* s2) {
return strcmp(s1, s2);
}
@@ -53,18 +53,19 @@ inline bool os::uses_stack_guard_pages() {
return true;
}
+// Whether or not calling code should/can commit/uncommit stack pages
+// before guarding them. The answer for AIX is definitely no, because memory
+// is automatically committed on touch.
inline bool os::allocate_stack_guard_pages() {
assert(uses_stack_guard_pages(), "sanity check");
- return true;
+ return false;
}
-
// On Aix, reservations are made on a page by page basis, nothing to do.
inline void os::pd_split_reserved_memory(char *base, size_t size,
size_t split, bool realloc) {
}
-
// Bang the shadow pages if they need to be touched to be mapped.
inline void os::bang_stack_shadow_pages() {
}
@@ -75,15 +76,13 @@ inline void os::dll_unload(void *lib) {
inline const int os::default_file_open_flags() { return 0;}
-inline DIR* os::opendir(const char* dirname)
-{
+inline DIR* os::opendir(const char* dirname) {
assert(dirname != NULL, "just checking");
return ::opendir(dirname);
}
-inline int os::readdir_buf_size(const char *path)
-{
- // according to aix sys/limits, NAME_MAX must be retrieved at runtime. */
+inline int os::readdir_buf_size(const char *path) {
+ // According to aix sys/limits, NAME_MAX must be retrieved at runtime.
const long my_NAME_MAX = pathconf(path, _PC_NAME_MAX);
return my_NAME_MAX + sizeof(dirent) + 1;
}
@@ -104,8 +103,7 @@ inline int os::ftruncate(int fd, jlong length) {
return ::ftruncate64(fd, length);
}
-inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf)
-{
+inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf) {
dirent* p;
int status;
assert(dirp != NULL, "just checking");
@@ -174,11 +172,11 @@ inline int os::send(int fd, char* buf, size_t nBytes, uint flags) {
RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
}
-inline int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
+inline int os::raw_send(int fd, char *buf, size_t nBytes, uint flags) {
return os::send(fd, buf, nBytes, flags);
}
-inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
+inline int os::connect(int fd, struct sockaddr *him, socklen_t len) {
RESTARTABLE_RETURN_INT(::connect(fd, him, len));
}
diff --git a/hotspot/src/os/aix/vm/perfMemory_aix.cpp b/hotspot/src/os/aix/vm/perfMemory_aix.cpp
index c9dd76b7881..c9d6e41e6bb 100644
--- a/hotspot/src/os/aix/vm/perfMemory_aix.cpp
+++ b/hotspot/src/os/aix/vm/perfMemory_aix.cpp
@@ -797,7 +797,7 @@ static void cleanup_sharedmem_resources(const char* dirname) {
// Close the directory and reset the current working directory.
close_directory_secure_cwd(dirp, saved_cwd_fd);
- FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
+ FREE_C_HEAP_ARRAY(char, dbuf);
}
// Make the user specific temporary directory. Returns true if
@@ -1164,9 +1164,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
// store file, we don't follow them when attaching either.
//
if (!is_directory_secure(dirname)) {
- FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+ FREE_C_HEAP_ARRAY(char, dirname);
if (luser != user) {
- FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+ FREE_C_HEAP_ARRAY(char, luser);
}
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
diff --git a/hotspot/src/os/aix/vm/porting_aix.hpp b/hotspot/src/os/aix/vm/porting_aix.hpp
index 53f9939937f..2b467ccd858 100644
--- a/hotspot/src/os/aix/vm/porting_aix.hpp
+++ b/hotspot/src/os/aix/vm/porting_aix.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,8 +22,18 @@
*
*/
+#ifndef OS_AIX_VM_PORTING_AIX_HPP
+#define OS_AIX_VM_PORTING_AIX_HPP
+
#include <stddef.h>
+// PPC port only:
+#define assert0(b) assert( (b), "" )
+#define guarantee0(b) assert( (b), "" )
+template <class T1, class T2> bool is_aligned_to(T1 what, T2 alignment) {
+ return ( ((uintx)(what)) & (((uintx)(alignment)) - 1) ) == 0 ? true : false;
+}
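is_aligned_to widens both operands to uintx, so it accepts pointers and integers alike; the bit trick is only valid for power-of-two alignments. A self-contained check, approximating uintx by uintptr_t:

#include <cassert>
#include <cstdint>

typedef uintptr_t uintx;  // stand-in for HotSpot's uintx in this sketch

template <class T1, class T2> bool is_aligned_to(T1 what, T2 alignment) {
  return (((uintx)(what)) & (((uintx)(alignment)) - 1)) == 0;
}

int main() {
  assert(is_aligned_to((void*)0x20000, 64 * 1024));  // 64K-aligned address
  assert(!is_aligned_to(0x20200, 4 * 1024));         // 0x200 into a 4K page
  return 0;
}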
+
// Header file to contain porting-relevant code which does not have a
// home anywhere else and which can not go into os_<platform>.h because
// that header is included inside the os class definition, hence all
@@ -79,3 +89,62 @@ int getFuncName(
const struct tbtable** p_tb, // [out] optional: ptr to traceback table to get further information
char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
);
+
+// -------------------------------------------------------------------------
+
+// A simple critical section which shall be based upon OS critical
+// sections (CRITICAL_SECTION resp. Posix Mutex) and nothing else.
+
+#include <pthread.h>
+
+namespace MiscUtils {
+ typedef pthread_mutex_t critsect_t;
+
+ inline void init_critsect(MiscUtils::critsect_t* cs) {
+ pthread_mutex_init(cs, NULL);
+ }
+ inline void free_critsect(MiscUtils::critsect_t* cs) {
+ pthread_mutex_destroy(cs);
+ }
+ inline void enter_critsect(MiscUtils::critsect_t* cs) {
+ pthread_mutex_lock(cs);
+ }
+ inline void leave_critsect(MiscUtils::critsect_t* cs) {
+ pthread_mutex_unlock(cs);
+ }
+
+ // Need to wrap this in an object because we need to dynamically initialize
+ // the critical section (because of Windows, where there is no way to
+ // initialize a CRITICAL_SECTION statically; on Unix, we could use
+ // PTHREAD_MUTEX_INITIALIZER).
+
+ // Note: The critical section does NOT get cleaned up in the destructor. That is
+ // by design: the CritSect class is only ever used as global objects whose
+ // lifetime spans the whole VM life; in that context we don't want the lock to
+ // be cleaned up when global C++ objects are destroyed, but to continue to work
+ // correctly right to the very end of the process life.
+ class CritSect {
+ critsect_t _cs;
+ public:
+ CritSect() { init_critsect(&_cs); }
+ //~CritSect() { free_critsect(&_cs); }
+ void enter() { enter_critsect(&_cs); }
+ void leave() { leave_critsect(&_cs); }
+ };
+
+ class AutoCritSect {
+ CritSect* const _pcsobj;
+ public:
+ AutoCritSect(CritSect* pcsobj)
+ : _pcsobj(pcsobj)
+ {
+ _pcsobj->enter();
+ }
+ ~AutoCritSect() {
+ _pcsobj->leave();
+ }
+ };
+
+}
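Typical use of the scoped lock, matching the intended pattern of process-lifetime globals (the global and function below are hypothetical):

#include <pthread.h>
// Assumes the MiscUtils declarations above.

static MiscUtils::CritSect g_bookkeeping_lock;  // never destroyed, by design

void update_bookkeeping() {
  MiscUtils::AutoCritSect lock(&g_bookkeeping_lock);  // enter() now
  // ... mutate shared state ...
}                                                     // leave() on scope exit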
+
+#endif // OS_AIX_VM_PORTING_AIX_HPP
diff --git a/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp b/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp
index 598fe5e5096..a4fa0d560c7 100644
--- a/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp
+++ b/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,12 +45,12 @@
#include "memory/heap.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
+#include "memory/virtualspace.hpp"
#include "oops/constMethod.hpp"
#include "oops/klass.hpp"
#include "oops/method.hpp"
#include "oops/oop.hpp"
#include "oops/symbol.hpp"
-#include "runtime/virtualspace.hpp"
#include "runtime/vmStructs.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/globalDefinitions.hpp"
diff --git a/hotspot/src/os/bsd/dtrace/libjvm_db.c b/hotspot/src/os/bsd/dtrace/libjvm_db.c
index a0b5413cf49..b483733f9ef 100644
--- a/hotspot/src/os/bsd/dtrace/libjvm_db.c
+++ b/hotspot/src/os/bsd/dtrace/libjvm_db.c
@@ -582,13 +582,14 @@ name_for_methodPtr(jvm_agent_t* J, uint64_t methodPtr, char * result, size_t siz
CHECK_FAIL(err);
result[0] = '\0';
- strncat(result, klassString, size);
- size -= strlen(klassString);
- strncat(result, ".", size);
- size -= 1;
- strncat(result, nameString, size);
- size -= strlen(nameString);
- strncat(result, signatureString, size);
+ if (snprintf(result, size,
+ "%s.%s%s",
+ klassString,
+ nameString,
+ signatureString) >= size) {
+ // truncation
+ goto fail;
+ }
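The rewrite leans on snprintf's contract: the return value is the length the fully formatted string would have needed, so a result >= size signals truncation, and the output is NUL-terminated either way (the old chained strncat calls also mis-tracked the remaining space). The idiom in isolation, as a hedged sketch:

#include <stdio.h>

/* Returns 0 on success, -1 if the name would not fit into out[size]. */
static int format_name(char *out, size_t size, const char *klass,
                       const char *name, const char *sig) {
  int n = snprintf(out, size, "%s.%s%s", klass, name, sig);
  return (n < 0 || (size_t)n >= size) ? -1 : 0;
}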
if (nameString != NULL) free(nameString);
if (klassString != NULL) free(klassString);
@@ -1095,9 +1096,9 @@ name_for_nmethod(jvm_agent_t* J,
CHECK_FAIL(err);
}
if (deoptimized) {
- strncat(result + 1, " [deoptimized frame]; ", size-1);
+ strncat(result, " [deoptimized frame]; ", size - strlen(result) - 1);
} else {
- strncat(result + 1, " [compiled] ", size-1);
+ strncat(result, " [compiled] ", size - strlen(result) - 1);
}
if (debug)
fprintf(stderr, "name_for_nmethod: END: method name: %s, vf_cnt: %d\n\n",
diff --git a/hotspot/src/os/bsd/vm/decoder_machO.cpp b/hotspot/src/os/bsd/vm/decoder_machO.cpp
index 6ef6314a1d1..5026ea83471 100644
--- a/hotspot/src/os/bsd/vm/decoder_machO.cpp
+++ b/hotspot/src/os/bsd/vm/decoder_machO.cpp
@@ -97,6 +97,7 @@ bool MachODecoder::decode(address addr, char *buf,
char * symname = mach_find_in_stringtable((char*) ((uintptr_t)mach_base + stroff), strsize, found_strx);
if (symname) {
strncpy(buf, symname, buflen);
+ buf[buflen - 1] = '\0';
return true;
}
DEBUG_ONLY(tty->print_cr("no string or null string found."));
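The added terminator matters because strncpy does not NUL-terminate when the source is at least buflen characters long. The same pattern as a tiny helper:

#include <cstring>
#include <cstddef>

// Copy at most buflen-1 characters and always NUL-terminate.
static void copy_symbol(char* buf, size_t buflen, const char* symname) {
  strncpy(buf, symname, buflen);
  buf[buflen - 1] = '\0';
}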
diff --git a/hotspot/src/os/bsd/vm/os_bsd.cpp b/hotspot/src/os/bsd/vm/os_bsd.cpp
index 445ff225f8e..d725b35350b 100644
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp
@@ -190,20 +190,6 @@ julong os::physical_memory() {
return Bsd::physical_memory();
}
-////////////////////////////////////////////////////////////////////////////////
-// environment support
-
-bool os::getenv(const char* name, char* buf, int len) {
- const char* val = ::getenv(name);
- if (val != NULL && strlen(val) < (size_t)len) {
- strcpy(buf, val);
- return true;
- }
- if (len > 0) buf[0] = 0; // return a null string
- return false;
-}
-
-
// Return true if user is running as root.
bool os::have_special_privileges() {
@@ -1146,6 +1132,10 @@ void os::shutdown() {
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
+ abort(dump_core, NULL, NULL);
+}
+
+void os::abort(bool dump_core, void* siginfo, void* context) {
os::shutdown();
if (dump_core) {
#ifndef PRODUCT
@@ -1195,12 +1185,18 @@ pid_t os::Bsd::gettid() {
guarantee(retval != 0, "just checking");
return retval;
-#elif __FreeBSD__
+#else
+ #ifdef __FreeBSD__
retval = syscall(SYS_thr_self);
-#elif __OpenBSD__
+ #else
+ #ifdef __OpenBSD__
retval = syscall(SYS_getthrid);
-#elif __NetBSD__
+ #else
+ #ifdef __NetBSD__
retval = (pid_t) syscall(SYS__lwp_self);
+ #endif
+ #endif
+ #endif
#endif
if (retval == -1) {
diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp
index c3403d49e4d..8270f6e3659 100644
--- a/hotspot/src/os/linux/vm/os_linux.cpp
+++ b/hotspot/src/os/linux/vm/os_linux.cpp
@@ -158,9 +158,6 @@ static pid_t _initial_pid = 0;
static int SR_signum = SIGUSR2;
sigset_t SR_sigset;
-// Used to protect dlsym() calls
-static pthread_mutex_t dl_mutex;
-
// Declarations
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
@@ -184,20 +181,6 @@ julong os::physical_memory() {
return Linux::physical_memory();
}
-////////////////////////////////////////////////////////////////////////////////
-// environment support
-
-bool os::getenv(const char* name, char* buf, int len) {
- const char* val = ::getenv(name);
- if (val != NULL && strlen(val) < (size_t)len) {
- strcpy(buf, val);
- return true;
- }
- if (len > 0) buf[0] = 0; // return a null string
- return false;
-}
-
-
// Return true if user is running as root.
bool os::have_special_privileges() {
@@ -215,14 +198,20 @@ bool os::have_special_privileges() {
// i386: 224, ia64: 1105, amd64: 186, sparc 143
#ifdef __ia64__
#define SYS_gettid 1105
- #elif __i386__
- #define SYS_gettid 224
- #elif __amd64__
- #define SYS_gettid 186
- #elif __sparc__
- #define SYS_gettid 143
#else
- #error define gettid for the arch
+ #ifdef __i386__
+ #define SYS_gettid 224
+ #else
+ #ifdef __amd64__
+ #define SYS_gettid 186
+ #else
+ #ifdef __sparc__
+ #define SYS_gettid 143
+ #else
+ #error define gettid for the arch
+ #endif
+ #endif
+ #endif
#endif
#endif
@@ -1490,6 +1479,10 @@ void os::shutdown() {
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
+ abort(dump_core, NULL, NULL);
+}
+
+void os::abort(bool dump_core, void* siginfo, void* context) {
os::shutdown();
if (dump_core) {
#ifndef PRODUCT
@@ -2039,14 +2032,8 @@ void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
return result;
}
-// glibc-2.0 libdl is not MT safe. If you are building with any glibc,
-// chances are you might want to run the generated bits against glibc-2.0
-// libdl.so, so always use locking for any version of glibc.
-//
void* os::dll_lookup(void* handle, const char* name) {
- pthread_mutex_lock(&dl_mutex);
void* res = dlsym(handle, name);
- pthread_mutex_unlock(&dl_mutex);
return res;
}
@@ -4655,8 +4642,6 @@ void os::init(void) {
}
// else it defaults to CLOCK_REALTIME
- pthread_mutex_init(&dl_mutex, NULL);
-
// If the pagesize of the VM is greater than 8K determine the appropriate
// number of initial guard pages. The user can change this with the
// command line arguments, if needed.
diff --git a/hotspot/src/os/posix/vm/os_posix.cpp b/hotspot/src/os/posix/vm/os_posix.cpp
index d64aa0ae156..8a7cc2945ae 100644
--- a/hotspot/src/os/posix/vm/os_posix.cpp
+++ b/hotspot/src/os/posix/vm/os_posix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
#define IS_VALID_PID(p) (p > 0 && p < MAX_PID)
// Check core dump limit and report possible place where core can be found
-void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
+void os::check_dump_limit(char* buffer, size_t bufferSize) {
int n;
struct rlimit rlim;
bool success;
@@ -82,7 +82,7 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
}
}
- VMError::report_coredump_status(buffer, success);
+ VMError::record_coredump_status(buffer, success);
}
int os::get_native_stack(address* stack, int frames, int toSkip) {
diff --git a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp
index 36bfff1a82f..c23810915e1 100644
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,12 +45,12 @@
#include "memory/heap.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
+#include "memory/virtualspace.hpp"
#include "oops/constMethod.hpp"
#include "oops/klass.hpp"
#include "oops/method.hpp"
#include "oops/oop.hpp"
#include "oops/symbol.hpp"
-#include "runtime/virtualspace.hpp"
#include "runtime/vmStructs.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/globalDefinitions.hpp"
diff --git a/hotspot/src/os/solaris/dtrace/libjvm_db.c b/hotspot/src/os/solaris/dtrace/libjvm_db.c
index 67ece5e21e6..1f24b8719ad 100644
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c
@@ -582,13 +582,14 @@ name_for_methodPtr(jvm_agent_t* J, uint64_t methodPtr, char * result, size_t siz
CHECK_FAIL(err);
result[0] = '\0';
- strncat(result, klassString, size);
- size -= strlen(klassString);
- strncat(result, ".", size);
- size -= 1;
- strncat(result, nameString, size);
- size -= strlen(nameString);
- strncat(result, signatureString, size);
+ if (snprintf(result, size,
+ "%s.%s%s",
+ klassString,
+ nameString,
+ signatureString) >= size) {
+ // truncation
+ goto fail;
+ }
if (nameString != NULL) free(nameString);
if (klassString != NULL) free(klassString);
@@ -1095,9 +1096,9 @@ name_for_nmethod(jvm_agent_t* J,
CHECK_FAIL(err);
}
if (deoptimized) {
- strncat(result + 1, " [deoptimized frame]; ", size-1);
+ strncat(result, " [deoptimized frame]; ", size - strlen(result) - 1);
} else {
- strncat(result + 1, " [compiled] ", size-1);
+ strncat(result, " [compiled] ", size - strlen(result) - 1);
}
if (debug)
fprintf(stderr, "name_for_nmethod: END: method name: %s, vf_cnt: %d\n\n",
diff --git a/hotspot/src/os/solaris/vm/os_solaris.cpp b/hotspot/src/os/solaris/vm/os_solaris.cpp
index 47c0f8a9253..befdb29b5cc 100644
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp
@@ -555,17 +555,6 @@ bool os::bind_to_processor(uint processor_id) {
return (bind_result == 0);
}
-bool os::getenv(const char* name, char* buffer, int len) {
- char* val = ::getenv(name);
- if (val == NULL || strlen(val) + 1 > len) {
- if (len > 0) buffer[0] = 0; // return a null string
- return false;
- }
- strcpy(buffer, val);
- return true;
-}
-
-
// Return true if user is running as root.
bool os::have_special_privileges() {
@@ -1532,6 +1521,10 @@ void os::shutdown() {
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
+ abort(dump_core, NULL, NULL);
+}
+
+void os::abort(bool dump_core, void* siginfo, void* context) {
os::shutdown();
if (dump_core) {
#ifndef PRODUCT
diff --git a/hotspot/src/os/windows/vm/os_windows.cpp b/hotspot/src/os/windows/vm/os_windows.cpp
index 76f6384b7b9..97c73ac471c 100644
--- a/hotspot/src/os/windows/vm/os_windows.cpp
+++ b/hotspot/src/os/windows/vm/os_windows.cpp
@@ -114,10 +114,12 @@ static FILETIME process_kernel_time;
#ifdef _M_IA64
#define __CPU__ ia64
-#elif _M_AMD64
- #define __CPU__ amd64
#else
- #define __CPU__ i486
+ #ifdef _M_AMD64
+ #define __CPU__ amd64
+ #else
+ #define __CPU__ i486
+ #endif
#endif
// save DLL module handle, used by GetModuleFileName
@@ -153,11 +155,6 @@ static inline double fileTimeAsDouble(FILETIME* time) {
// Implementation of os
-bool os::getenv(const char* name, char* buffer, int len) {
- int result = GetEnvironmentVariable(name, buffer, len);
- return result > 0 && result < len;
-}
-
bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (SetEnvironmentVariable(name, NULL) == TRUE);
@@ -188,9 +185,13 @@ void os::init_system_properties_values() {
char *dll_path;
char *pslash;
char *bin = "\\bin";
- char home_dir[MAX_PATH];
+ char home_dir[MAX_PATH + 1];
+ char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
- if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
+ if (alt_home_dir != NULL) {
+ strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
+ home_dir[MAX_PATH] = '\0';
+ } else {
os::jvm_path(home_dir, sizeof(home_dir));
// Found the full path to jvm.dll.
// Now cut the path to /jre if we can.
@@ -988,7 +989,34 @@ static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
PMINIDUMP_USER_STREAM_INFORMATION,
PMINIDUMP_CALLBACK_INFORMATION);
-void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
+static HANDLE dumpFile = NULL;
+
+// Check if dump file can be created.
+void os::check_dump_limit(char* buffer, size_t buffsz) {
+ bool status = true;
+ if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
+ jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
+ status = false;
+ } else {
+ const char* cwd = get_current_directory(NULL, 0);
+ int pid = current_process_id();
+ if (cwd != NULL) {
+ jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
+ } else {
+ jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
+ }
+
+ if (dumpFile == NULL &&
+ (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
+ == INVALID_HANDLE_VALUE) {
+ jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
+ status = false;
+ }
+ }
+ VMError::record_coredump_status(buffer, status);
+}
+
+void os::abort(bool dump_core, void* siginfo, void* context) {
HINSTANCE dbghelp;
EXCEPTION_POINTERS ep;
MINIDUMP_EXCEPTION_INFORMATION mei;
@@ -996,33 +1024,22 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
HANDLE hProcess = GetCurrentProcess();
DWORD processId = GetCurrentProcessId();
- HANDLE dumpFile;
MINIDUMP_TYPE dumpType;
- static const char* cwd;
-// Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
-#ifndef ASSERT
- // If running on a client version of Windows and user has not explicitly enabled dumping
- if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
- VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
- return;
- // If running on a server version of Windows and user has explictly disabled dumping
- } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
- VMError::report_coredump_status("Minidump has been disabled from the command line", false);
- return;
+ shutdown();
+ if (!dump_core || dumpFile == NULL) {
+ if (dumpFile != NULL) {
+ CloseHandle(dumpFile);
+ }
+ win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}
-#else
- if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
- VMError::report_coredump_status("Minidump has been disabled from the command line", false);
- return;
- }
-#endif
dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
if (dbghelp == NULL) {
- VMError::report_coredump_status("Failed to load dbghelp.dll", false);
- return;
+ jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
+ CloseHandle(dumpFile);
+ win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}
_MiniDumpWriteDump =
@@ -1034,30 +1051,23 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
"MiniDumpWriteDump"));
if (_MiniDumpWriteDump == NULL) {
- VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
- return;
+ jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
+ CloseHandle(dumpFile);
+ win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}
dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);
-// Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
-// API_VERSION_NUMBER 11 or higher contains the ones we want though
+ // Older versions of dbghelp.h do not contain all the dumptypes we want, dbghelp.h with
+ // API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
MiniDumpWithUnloadedModules);
#endif
- cwd = get_current_directory(NULL, 0);
- jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp", cwd, current_process_id());
- dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
-
- if (dumpFile == INVALID_HANDLE_VALUE) {
- VMError::report_coredump_status("Failed to create file for dumping", false);
- return;
- }
- if (exceptionRecord != NULL && contextRecord != NULL) {
- ep.ContextRecord = (PCONTEXT) contextRecord;
- ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;
+ if (siginfo != NULL && context != NULL) {
+ ep.ContextRecord = (PCONTEXT) context;
+ ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
mei.ThreadId = GetCurrentThreadId();
mei.ExceptionPointers = &ep;
@@ -1066,38 +1076,18 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
pmei = NULL;
}
-
// Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
// the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
_MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
- DWORD error = GetLastError();
- LPTSTR msgbuf = NULL;
-
- if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
- FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {
-
- jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
- LocalFree(msgbuf);
- } else {
- // Call to FormatMessage failed, just include the result from GetLastError
- jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
- }
- VMError::report_coredump_status(buffer, false);
- } else {
- VMError::report_coredump_status(buffer, true);
+ jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
}
-
CloseHandle(dumpFile);
+ win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}
-
void os::abort(bool dump_core) {
- os::shutdown();
- // no core dump on Windows
- win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
+ abort(dump_core, NULL, NULL);
}
// Die immediately, no exit hook, no abort hook, no cleanup.
@@ -2102,20 +2092,22 @@ LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
// at the beginning of the target bundle.
exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
-#elif _M_AMD64
+#else
+ #ifdef _M_AMD64
// Do not blow up if no thread info available.
if (thread) {
thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
}
// Set pc to handler
exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
-#else
+ #else
// Do not blow up if no thread info available.
if (thread) {
thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
}
// Set pc to handler
exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
+ #endif
#endif
// Continue the execution
@@ -2214,7 +2206,8 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
// (division by zero is handled explicitly)
#ifdef _M_IA64
assert(0, "Fix Handle_IDiv_Exception");
-#elif _M_AMD64
+#else
+ #ifdef _M_AMD64
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Rip;
assert(pc[0] == 0xF7, "not an idiv opcode");
@@ -2225,7 +2218,7 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
ctx->Rax = (DWORD)min_jint; // result
ctx->Rdx = (DWORD)0; // remainder
// Continue the execution
-#else
+ #else
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Eip;
assert(pc[0] == 0xF7, "not an idiv opcode");
@@ -2236,6 +2229,7 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
ctx->Eax = (DWORD)min_jint; // result
ctx->Edx = (DWORD)0; // remainder
// Continue the execution
+ #endif
#endif
return EXCEPTION_CONTINUE_EXECUTION;
}
@@ -2308,10 +2302,12 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
// information is saved in the Unix format.
address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
-#elif _M_AMD64
- address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
+ #ifdef _M_AMD64
+ address pc = (address) exceptionInfo->ContextRecord->Rip;
+ #else
address pc = (address) exceptionInfo->ContextRecord->Eip;
+ #endif
#endif
Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady
@@ -2696,17 +2692,6 @@ address os::win32::fast_jni_accessor_wrapper(BasicType type) {
}
#endif
-void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
- // Install a win32 structured exception handler around the test
- // function call so the VM can generate an error dump if needed.
- __try {
- (*funcPtr)();
- } __except(topLevelExceptionFilter(
- (_EXCEPTION_POINTERS*)_exception_info())) {
- // Nothing to do.
- }
-}
-
// Virtual Memory
int os::vm_page_size() { return os::win32::vm_page_size(); }
@@ -5930,4 +5915,3 @@ void TestReserveMemorySpecial_test() {
UseNUMAInterleaving = old_use_numa_interleaving;
}
#endif // PRODUCT
-
diff --git a/hotspot/src/os/windows/vm/os_windows.hpp b/hotspot/src/os/windows/vm/os_windows.hpp
index 9ef4d1d50b4..08be4d06f70 100644
--- a/hotspot/src/os/windows/vm/os_windows.hpp
+++ b/hotspot/src/os/windows/vm/os_windows.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -115,8 +115,6 @@ class win32 {
static address fast_jni_accessor_wrapper(BasicType);
#endif
- static void call_test_func_with_wrapper(void (*funcPtr)(void));
-
// filter function to ignore faults on serializations page
static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
};
diff --git a/hotspot/src/os/windows/vm/os_windows.inline.hpp b/hotspot/src/os/windows/vm/os_windows.inline.hpp
index deb8821ac70..3227e069c38 100644
--- a/hotspot/src/os/windows/vm/os_windows.inline.hpp
+++ b/hotspot/src/os/windows/vm/os_windows.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -104,7 +104,4 @@ inline void os::exit(int num) {
win32::exit_process_or_thread(win32::EPT_PROCESS, num);
}
-#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
- os::win32::call_test_func_with_wrapper(f)
-
#endif // OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
diff --git a/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp b/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp
index 4e9309955bd..0ed05eb9a76 100644
--- a/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp
+++ b/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp
@@ -59,6 +59,10 @@
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
+// See stubGenerator_zero.cpp
+#include <setjmp.h>
+extern sigjmp_buf* get_jmp_buf_for_continuation();
+
address os::current_stack_pointer() {
address dummy = (address) &dummy;
return dummy;
@@ -134,6 +138,14 @@ JVM_handle_bsd_signal(int sig,
SignalHandlerMark shm(t);
+ // handle SafeFetch faults
+ if (sig == SIGSEGV || sig == SIGBUS) {
+ sigjmp_buf* const pjb = get_jmp_buf_for_continuation();
+ if (pjb) {
+ siglongjmp(*pjb, 1);
+ }
+ }
+
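On Zero there is no generated SafeFetch stub, so the fault is caught with sigsetjmp/siglongjmp: the handler above asks stubGenerator_zero.cpp for the jump buffer of an in-flight SafeFetch and jumps back if one exists. A simplified sketch of the other half (names and the per-thread storage are assumptions, not the exact stub code):

#include <setjmp.h>
#include <stddef.h>

// One buffer per thread; non-NULL only while a SafeFetch is in flight.
static __thread sigjmp_buf* _jmp_buf = NULL;

sigjmp_buf* get_jmp_buf_for_continuation() { return _jmp_buf; }

int SafeFetch32(int* adr, int errValue) {
  sigjmp_buf jb;
  if (sigsetjmp(jb, 1)) {  // nonzero: the signal handler jumped back here
    _jmp_buf = NULL;
    return errValue;
  }
  _jmp_buf = &jb;
  int value = *adr;        // may fault; if so, the handler siglongjmps
  _jmp_buf = NULL;
  return value;
}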
// Note: it's not uncommon that JNI code uses signal/sigset to
// install then restore certain signal handler (e.g. to temporarily
// block SIGPIPE, or have a SIGILL handler when detecting CPU
diff --git a/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp b/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp
index c742b8a0074..326fd326de4 100644
--- a/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp
+++ b/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -298,6 +298,7 @@ JVM_handle_linux_signal(int sig,
goto report_and_die;
}
+ CodeBlob *cb = NULL;
// Handle signal from NativeJump::patch_verified_entry().
if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
(!TrapBasedNotEntrantChecks && sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
@@ -313,7 +314,10 @@ JVM_handle_linux_signal(int sig,
// especially when we try to read from the safepoint polling page. So the check
// (address)info->si_addr == os::get_standard_polling_page()
// doesn't work for us. We use:
- ((NativeInstruction*)pc)->is_safepoint_poll()) {
+ ((NativeInstruction*)pc)->is_safepoint_poll() &&
+ CodeCache::contains((void*) pc) &&
+ ((cb = CodeCache::find_blob(pc)) != NULL) &&
+ cb->is_nmethod()) {
if (TraceTraps) {
tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc));
}
diff --git a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
index a518ab88d44..3e388568342 100644
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
@@ -54,6 +54,10 @@
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
+// See stubGenerator_zero.cpp
+#include <setjmp.h>
+extern sigjmp_buf* get_jmp_buf_for_continuation();
+
address os::current_stack_pointer() {
address dummy = (address) &dummy;
return dummy;
@@ -125,6 +129,14 @@ JVM_handle_linux_signal(int sig,
SignalHandlerMark shm(t);
+ // handle SafeFetch faults
+ if (sig == SIGSEGV || sig == SIGBUS) {
+ sigjmp_buf* const pjb = get_jmp_buf_for_continuation();
+ if (pjb) {
+ siglongjmp(*pjb, 1);
+ }
+ }
+
// Note: it's not uncommon that JNI code uses signal/sigset to
// install then restore certain signal handler (e.g. to temporarily
// block SIGPIPE, or have a SIGILL handler when detecting CPU
diff --git a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp
index 2b1033496d7..03b2d4b1b29 100644
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp
@@ -129,7 +129,7 @@ class PICL {
bool is_inconsistent() { return _state == INCONSISTENT; }
void set_inconsistent() { _state = INCONSISTENT; }
- void visit(picl_nodehdl_t nodeh, const char* name) {
+ bool visit(picl_nodehdl_t nodeh, const char* name) {
assert(!is_inconsistent(), "Precondition");
int curr;
if (_picl->get_int_property(nodeh, name, &curr) == PICL_SUCCESS) {
@@ -138,7 +138,9 @@ class PICL {
} else if (curr != value()) { // following iterations
set_inconsistent();
}
+ return true;
}
+ return false;
}
};
@@ -155,8 +157,19 @@ class PICL {
if (!l1_visitor->is_inconsistent()) {
l1_visitor->visit(nodeh, "l1-dcache-line-size");
}
- if (!l2_visitor->is_inconsistent()) {
- l2_visitor->visit(nodeh, "l2-cache-line-size");
+ static const char* l2_data_cache_line_property_name = NULL;
+ // On the first visit, determine the name of the l2 cache line size property and memoize it.
+ if (l2_data_cache_line_property_name == NULL) {
+ assert(!l2_visitor->is_inconsistent(), "First iteration cannot be inconsistent");
+ l2_data_cache_line_property_name = "l2-cache-line-size";
+ if (!l2_visitor->visit(nodeh, l2_data_cache_line_property_name)) {
+ l2_data_cache_line_property_name = "l2-dcache-line-size";
+ l2_visitor->visit(nodeh, l2_data_cache_line_property_name);
+ }
+ } else {
+ if (!l2_visitor->is_inconsistent()) {
+ l2_visitor->visit(nodeh, l2_data_cache_line_property_name);
+ }
}
if (l1_visitor->is_inconsistent() && l2_visitor->is_inconsistent()) {
@@ -172,13 +185,13 @@ class PICL {
UniqueValueVisitor* l2_visitor() { return &_l2_visitor; }
};
int _L1_data_cache_line_size;
- int _L2_cache_line_size;
+ int _L2_data_cache_line_size;
public:
static int visit_cpu(picl_nodehdl_t nodeh, void *state) {
return CPUVisitor::visit(nodeh, state);
}
- PICL(bool is_fujitsu) : _L1_data_cache_line_size(0), _L2_cache_line_size(0), _dl_handle(NULL) {
+ PICL(bool is_fujitsu) : _L1_data_cache_line_size(0), _L2_data_cache_line_size(0), _dl_handle(NULL) {
if (!open_library()) {
return;
}
@@ -196,7 +209,7 @@ public:
_L1_data_cache_line_size = cpu_visitor.l1_visitor()->value();
}
if (cpu_visitor.l2_visitor()->is_assigned()) {
- _L2_cache_line_size = cpu_visitor.l2_visitor()->value();
+ _L2_data_cache_line_size = cpu_visitor.l2_visitor()->value();
}
}
_picl_shutdown();
@@ -205,7 +218,7 @@ public:
}
unsigned int L1_data_cache_line_size() const { return _L1_data_cache_line_size; }
- unsigned int L2_cache_line_size() const { return _L2_cache_line_size; }
+ unsigned int L2_data_cache_line_size() const { return _L2_data_cache_line_size; }
};
@@ -431,7 +444,7 @@ int VM_Version::platform_features(int features) {
// Figure out cache line sizes using PICL
PICL picl((features & sparc64_family_m) != 0);
_L1_data_cache_line_size = picl.L1_data_cache_line_size();
- _L2_cache_line_size = picl.L2_cache_line_size();
+ _L2_data_cache_line_size = picl.L2_data_cache_line_size();
return features;
}
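
Newer SPARC machine descriptions publish the L2 line size as "l2-dcache-line-size" instead of "l2-cache-line-size", so the CPU visitor above probes the legacy name on the first node and memoizes whichever property answered. The same memoized-fallback idiom in isolation, assuming a bool-returning lookup like the patched visit():

    // Sketch: probe the legacy key once, remember whichever key exists,
    // and reuse it for every subsequent CPU node.
    static const char* resolve_l2_property(bool (*probe)(const char* key)) {
      static const char* key = NULL;
      if (key == NULL) {
        key = "l2-cache-line-size";
        if (!probe(key)) {
          key = "l2-dcache-line-size";  // fall back to the newer property name
          probe(key);
        }
      } else {
        probe(key);
      }
      return key;
    }
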
diff --git a/hotspot/src/share/tools/hsdis/hsdis.c b/hotspot/src/share/tools/hsdis/hsdis.c
index acc90f88366..e18a94492d2 100644
--- a/hotspot/src/share/tools/hsdis/hsdis.c
+++ b/hotspot/src/share/tools/hsdis/hsdis.c
@@ -410,6 +410,7 @@ static void parse_caller_options(struct hsdis_app_data* app_data, const char* ca
}
p = q;
}
+ *iop = '\0';
}
static void print_help(struct hsdis_app_data* app_data,
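
The one-line hsdis fix terminates the option buffer that parse_caller_options fills through the output pointer iop; without the trailing '\0', later readers of the buffer could run into stale bytes. The pattern in miniature:

    // Sketch: building a string through an output pointer requires an
    // explicit terminator once the copy loop finishes.
    static void copy_options(char* dst, const char* src) {
      char* iop = dst;
      for (const char* p = src; *p != '\0'; p++) {
        if (*p != ' ') {
          *iop++ = *p;   // illustrative filter while copying
        }
      }
      *iop = '\0';       // the fix: terminate the assembled string
    }
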
diff --git a/hotspot/src/share/vm/adlc/adlparse.cpp b/hotspot/src/share/vm/adlc/adlparse.cpp
index 9e3091cff61..54bff63ad6d 100644
--- a/hotspot/src/share/vm/adlc/adlparse.cpp
+++ b/hotspot/src/share/vm/adlc/adlparse.cpp
@@ -800,6 +800,7 @@ void ADLParser::reg_parse(void) {
}
if (strcmp(token,"reg_def")==0) { reg_def_parse(); }
else if (strcmp(token,"reg_class")==0) { reg_class_parse(); }
+ else if (strcmp(token, "reg_class_dynamic") == 0) { reg_class_dynamic_parse(); }
else if (strcmp(token,"alloc_class")==0) { alloc_class_parse(); }
else if (strcmp(token,"#define")==0) { preproc_define(); }
else { parse_err(SYNERR, "bad token %s inside register block.\n", token); break; }
@@ -2323,11 +2324,12 @@ void ADLParser::reg_class_parse(void) {
// Debug Stuff
if (_AD._adl_debug >1) fprintf(stderr,"Register Class: %s\n", cname);
- RegClass *reg_class = _AD._register->addRegClass(cname);
-
- // Collect registers in class
skipws();
if (_curchar == '(') {
+ // A register list is defined for the register class.
+ // Collect registers into a generic RegClass register class.
+ RegClass* reg_class = _AD._register->addRegClass<RegClass>(cname);
+
next_char(); // Skip '('
skipws();
while (_curchar != ')') {
@@ -2352,12 +2354,15 @@ void ADLParser::reg_class_parse(void) {
}
next_char(); // Skip closing ')'
} else if (_curchar == '%') {
+ // A code snippet is defined for the register class.
+ // Collect the code snippet into a CodeSnippetRegClass register class.
+ CodeSnippetRegClass* reg_class = _AD._register->addRegClass<CodeSnippetRegClass>(cname);
char *code = find_cpp_block("reg class");
if (code == NULL) {
parse_err(SYNERR, "missing code declaration for reg class.\n");
return;
}
- reg_class->_user_defined = code;
+ reg_class->set_code_snippet(code);
return;
}
@@ -2374,6 +2379,87 @@ void ADLParser::reg_class_parse(void) {
return;
}
+//------------------------------reg_class_dynamic_parse------------------------
+void ADLParser::reg_class_dynamic_parse(void) {
+ char *cname; // Name of dynamic register class being defined
+
+ // Get register class name
+ skipws();
+ cname = get_ident();
+ if (cname == NULL) {
+ parse_err(SYNERR, "missing dynamic register class name after 'reg_class_dynamic'\n");
+ return;
+ }
+
+ if (_AD._adl_debug > 1) {
+ fprintf(stdout, "Dynamic Register Class: %s\n", cname);
+ }
+
+ skipws();
+ if (_curchar != '(') {
+ parse_err(SYNERR, "missing '(' at the beginning of reg_class_dynamic definition\n");
+ return;
+ }
+ next_char();
+ skipws();
+
+ // Collect two register classes and the C++ code representing the condition code used to
+ // select between the two classes into a ConditionalRegClass register class.
+ ConditionalRegClass* reg_class = _AD._register->addRegClass<ConditionalRegClass>(cname);
+ int i;
+ for (i = 0; i < 2; i++) {
+ char* name = get_ident();
+ if (name == NULL) {
+ parse_err(SYNERR, "missing class identifier inside reg_class_dynamic list.\n");
+ return;
+ }
+ RegClass* rc = _AD._register->getRegClass(name);
+ if (rc == NULL) {
+ parse_err(SEMERR, "unknown identifier %s inside reg_class_dynamic list.\n", name);
+ } else {
+ reg_class->set_rclass_at_index(i, rc);
+ }
+
+ skipws();
+ if (_curchar == ',') {
+ next_char();
+ skipws();
+ } else {
+ parse_err(SYNERR, "missing separator ',' inside reg_class_dynamic list.\n");
+ }
+ }
+
+ // Collect the condition code.
+ skipws();
+ if (_curchar == '%') {
+ char* code = find_cpp_block("reg class dynamic");
+ if (code == NULL) {
+ parse_err(SYNERR, "missing code declaration for reg_class_dynamic.\n");
+ return;
+ }
+ reg_class->set_condition_code(code);
+ } else {
+ parse_err(SYNERR, "missing %% at the beginning of code block in reg_class_dynamic definition\n");
+ return;
+ }
+
+ skipws();
+ if (_curchar != ')') {
+ parse_err(SYNERR, "missing ')' at the end of reg_class_dynamic definition\n");
+ return;
+ }
+ next_char();
+
+ skipws();
+ if (_curchar != ';') {
+ parse_err(SYNERR, "missing ';' at the end of reg_class_dynamic definition.\n");
+ return;
+ }
+ next_char(); // Skip trailing ';'
+
+ return;
+}
+
//------------------------------alloc_class_parse------------------------------
void ADLParser::alloc_class_parse(void) {
char *name; // Name of allocation class being defined
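
The reg_class_dynamic_parse routine added above enforces the shape reg_class_dynamic name(rc_true, rc_false, %{ condition }%); note that it expects a ',' after each of the two class identifiers, i.e. also before the code block. A compressed sketch of the token walk, with a hypothetical Lexer standing in for the ADLParser primitives:

    // Sketch of the accepted grammar:
    //   reg_class_dynamic <ident> ( <ident> , <ident> , %{ C++ }% ) ;
    static bool parse_reg_class_dynamic(Lexer& lx) {  // Lexer is invented for illustration
      if (!lx.ident())       return false;            // dynamic class name
      if (!lx.expect('('))   return false;
      for (int i = 0; i < 2; i++) {                   // the two existing register classes
        if (!lx.ident())     return false;
        if (!lx.expect(',')) return false;            // comma required after each one
      }
      if (!lx.code_block())  return false;            // %{ ... }% condition snippet
      return lx.expect(')') && lx.expect(';');
    }
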
diff --git a/hotspot/src/share/vm/adlc/adlparse.hpp b/hotspot/src/share/vm/adlc/adlparse.hpp
index 8907cf1754d..8c5442b4c95 100644
--- a/hotspot/src/share/vm/adlc/adlparse.hpp
+++ b/hotspot/src/share/vm/adlc/adlparse.hpp
@@ -53,6 +53,8 @@ class ConstructRule;
// ***** Register Section *****
class RegDef;
class RegClass;
+class CodeSnippetRegClass;
+class ConditionalRegClass;
class AllocClass;
class ResourceForm;
// ***** Pipeline Section *****
@@ -125,6 +127,7 @@ protected:
// Parse components of the register section
void reg_def_parse(void); // Parse register definition
void reg_class_parse(void); // Parse register class definition
+ void reg_class_dynamic_parse(void); // Parse dynamic register class definition
void alloc_class_parse(void); // Parse allocation class definition
// Parse components of the definition section
diff --git a/hotspot/src/share/vm/adlc/archDesc.cpp b/hotspot/src/share/vm/adlc/archDesc.cpp
index af3e0ffaf2a..7ff04fee3fd 100644
--- a/hotspot/src/share/vm/adlc/archDesc.cpp
+++ b/hotspot/src/share/vm/adlc/archDesc.cpp
@@ -908,7 +908,7 @@ char *ArchDesc::stack_or_reg_mask(OperandForm &opForm) {
void ArchDesc::set_stack_or_reg(const char *reg_class_name) {
if( _register ) {
RegClass *reg_class = _register->getRegClass(reg_class_name);
- reg_class->_stack_or_reg = true;
+ reg_class->set_stack_version(true);
}
}
diff --git a/hotspot/src/share/vm/adlc/forms.hpp b/hotspot/src/share/vm/adlc/forms.hpp
index 63e367dd730..073ca4c810d 100644
--- a/hotspot/src/share/vm/adlc/forms.hpp
+++ b/hotspot/src/share/vm/adlc/forms.hpp
@@ -68,6 +68,8 @@ class Opcode;
class InsEncode;
class RegDef;
class RegClass;
+class CodeSnippetRegClass;
+class ConditionalRegClass;
class AllocClass;
class ResourceForm;
class PipeClassForm;
diff --git a/hotspot/src/share/vm/adlc/formsopt.cpp b/hotspot/src/share/vm/adlc/formsopt.cpp
index 07d70fc6c6e..ac8ffff2960 100644
--- a/hotspot/src/share/vm/adlc/formsopt.cpp
+++ b/hotspot/src/share/vm/adlc/formsopt.cpp
@@ -47,13 +47,19 @@ void RegisterForm::addRegDef(char *name, char *callingConv, char *c_conv,
}
// record a new register class
-RegClass *RegisterForm::addRegClass(const char *className) {
- RegClass *regClass = new RegClass(className);
+template <typename T>
+T* RegisterForm::addRegClass(const char* className) {
+ T* regClass = new T(className);
_rclasses.addName(className);
- _regClass.Insert(className,regClass);
+ _regClass.Insert(className, regClass);
return regClass;
}
+// Explicit instantiation for all supported register classes.
+template RegClass* RegisterForm::addRegClass<RegClass>(const char* className);
+template CodeSnippetRegClass* RegisterForm::addRegClass<CodeSnippetRegClass>(const char* className);
+template ConditionalRegClass* RegisterForm::addRegClass<ConditionalRegClass>(const char* className);
+
// record a new register class
AllocClass *RegisterForm::addAllocClass(char *className) {
AllocClass *allocClass = new AllocClass(className);
@@ -67,9 +73,9 @@ AllocClass *RegisterForm::addAllocClass(char *className) {
void RegisterForm::addSpillRegClass() {
// Stack slots start at the next available even register number.
_reg_ctr = (_reg_ctr+7) & ~7;
- const char *rc_name = "stack_slots";
- RegClass *reg_class = new RegClass(rc_name);
- reg_class->_stack_or_reg = true;
+ const char *rc_name = "stack_slots";
+ RegClass* reg_class = new RegClass(rc_name);
+ reg_class->set_stack_version(true);
_rclasses.addName(rc_name);
_regClass.Insert(rc_name,reg_class);
}
@@ -224,9 +230,11 @@ void RegDef::output(FILE *fp) { // Write info to output files
//------------------------------RegClass---------------------------------------
// Construct a register class into which registers will be inserted
-RegClass::RegClass(const char *classid) : _stack_or_reg(false), _classid(classid), _regDef(cmpstr,hashstr, Form::arena),
- _user_defined(NULL)
-{
+RegClass::RegClass(const char* classid) : _stack_or_reg(false), _classid(classid), _regDef(cmpstr, hashstr, Form::arena) {
+}
+
+RegClass::~RegClass() {
+ delete _classid;
}
// record a register in this class
@@ -305,6 +313,91 @@ void RegClass::output(FILE *fp) { // Write info to output files
fprintf(fp,"--- done with entries for reg_class %s\n\n",_classid);
}
+void RegClass::declare_register_masks(FILE* fp) {
+ const char* prefix = "";
+ const char* rc_name_to_upper = toUpper(_classid);
+ fprintf(fp, "extern const RegMask _%s%s_mask;\n", prefix, rc_name_to_upper);
+ fprintf(fp, "inline const RegMask &%s%s_mask() { return _%s%s_mask; }\n", prefix, rc_name_to_upper, prefix, rc_name_to_upper);
+ if (_stack_or_reg) {
+ fprintf(fp, "extern const RegMask _%sSTACK_OR_%s_mask;\n", prefix, rc_name_to_upper);
+ fprintf(fp, "inline const RegMask &%sSTACK_OR_%s_mask() { return _%sSTACK_OR_%s_mask; }\n", prefix, rc_name_to_upper, prefix, rc_name_to_upper);
+ }
+ delete[] rc_name_to_upper;
+}
+
+void RegClass::build_register_masks(FILE* fp) {
+ int len = RegisterForm::RegMask_Size();
+ const char *prefix = "";
+ const char* rc_name_to_upper = toUpper(_classid);
+ fprintf(fp, "const RegMask _%s%s_mask(", prefix, rc_name_to_upper);
+
+ int i;
+ for(i = 0; i < len - 1; i++) {
+ fprintf(fp," 0x%x,", regs_in_word(i, false));
+ }
+ fprintf(fp," 0x%x );\n", regs_in_word(i, false));
+
+ if (_stack_or_reg) {
+ fprintf(fp, "const RegMask _%sSTACK_OR_%s_mask(", prefix, rc_name_to_upper);
+ for(i = 0; i < len - 1; i++) {
+ fprintf(fp," 0x%x,", regs_in_word(i, true));
+ }
+ fprintf(fp," 0x%x );\n", regs_in_word(i, true));
+ }
+ delete[] rc_name_to_upper;
+}
+
+//------------------------------CodeSnippetRegClass---------------------------
+CodeSnippetRegClass::CodeSnippetRegClass(const char* classid) : RegClass(classid), _code_snippet(NULL) {
+}
+
+CodeSnippetRegClass::~CodeSnippetRegClass() {
+ delete _code_snippet;
+}
+
+void CodeSnippetRegClass::declare_register_masks(FILE* fp) {
+ const char* prefix = "";
+ const char* rc_name_to_upper = toUpper(_classid);
+ fprintf(fp, "inline const RegMask &%s%s_mask() { %s }\n", prefix, rc_name_to_upper, _code_snippet);
+ delete[] rc_name_to_upper;
+}
+
+//------------------------------ConditionalRegClass---------------------------
+ConditionalRegClass::ConditionalRegClass(const char *classid) : RegClass(classid), _condition_code(NULL) {
+}
+
+ConditionalRegClass::~ConditionalRegClass() {
+ delete _condition_code;
+}
+
+void ConditionalRegClass::declare_register_masks(FILE* fp) {
+ const char* prefix = "";
+ const char* rc_name_to_upper = toUpper(_classid);
+ const char* rclass_0_to_upper = toUpper(_rclasses[0]->_classid);
+ const char* rclass_1_to_upper = toUpper(_rclasses[1]->_classid);
+ fprintf(fp, "inline const RegMask &%s%s_mask() {"
+ " return (%s) ?"
+ " %s%s_mask() :"
+ " %s%s_mask(); }\n",
+ prefix, rc_name_to_upper,
+ _condition_code,
+ prefix, rclass_0_to_upper,
+ prefix, rclass_1_to_upper);
+ if (_stack_or_reg) {
+ fprintf(fp, "inline const RegMask &%sSTACK_OR_%s_mask() {"
+ " return (%s) ?"
+ " %sSTACK_OR_%s_mask() :"
+ " %sSTACK_OR_%s_mask(); }\n",
+ prefix, rc_name_to_upper,
+ _condition_code,
+ prefix, rclass_0_to_upper,
+ prefix, rclass_1_to_upper);
+ }
+ delete[] rc_name_to_upper;
+ delete[] rclass_0_to_upper;
+ delete[] rclass_1_to_upper;
+ return;
+}
//------------------------------AllocClass-------------------------------------
AllocClass::AllocClass(char *classid) : _classid(classid), _regDef(cmpstr,hashstr, Form::arena) {
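
With declare_register_masks() and build_register_masks() now virtual on RegClass, the ADLC output passes no longer special-case _user_defined; each subclass prints its own flavor. For a plain register class with a stack version, say a hypothetical ptr_reg, the base-class printer above would emit roughly:

    // Approximate header output of RegClass::declare_register_masks() for "ptr_reg":
    extern const RegMask _PTR_REG_mask;
    inline const RegMask &PTR_REG_mask() { return _PTR_REG_mask; }
    extern const RegMask _STACK_OR_PTR_REG_mask;
    inline const RegMask &STACK_OR_PTR_REG_mask() { return _STACK_OR_PTR_REG_mask; }
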
diff --git a/hotspot/src/share/vm/adlc/formsopt.hpp b/hotspot/src/share/vm/adlc/formsopt.hpp
index b6108d48843..8ba8e64d99c 100644
--- a/hotspot/src/share/vm/adlc/formsopt.hpp
+++ b/hotspot/src/share/vm/adlc/formsopt.hpp
@@ -60,6 +60,8 @@ class Opcode;
class InsEncode;
class RegDef;
class RegClass;
+class CodeSnippetRegClass;
+class ConditionalRegClass;
class AllocClass;
class ResourceForm;
class PipeClassForm;
@@ -98,7 +100,8 @@ public:
void addRegDef(char *regName, char *callingConv, char *c_conv,
char * idealtype, char *encoding, char* concreteName);
- RegClass *addRegClass(const char *className);
+ template <typename T> T* addRegClass(const char* className);
+
AllocClass *addAllocClass(char *allocName);
void addSpillRegClass();
@@ -154,17 +157,28 @@ public:
};
//------------------------------RegClass---------------------------------------
+// Generic register class. This register class is the internal representation
+// for the following .ad file format:
+//
+// reg_class ptr(RAX, RBX, ...);
+//
+// where ptr is the name of the register class, RAX and RBX are registers.
+//
+// This register class allows registers to be spilled onto the stack. Spilling
+// is allowed if the field _stack_or_reg is true.
class RegClass : public Form {
public:
// Public Data
const char *_classid; // Name of class
NameList _regDefs; // List of registers in class
Dict _regDef; // Dictionary of registers in class
+protected:
bool _stack_or_reg; // Allowed on any stack slot
- char* _user_defined;
+public:
// Public Methods
RegClass(const char *classid);// Constructor
+ virtual ~RegClass();
void addReg(RegDef *regDef); // Add a register to this class
@@ -183,6 +197,115 @@ public:
void dump(); // Debug printer
void output(FILE *fp); // Write info to output files
+
+ virtual bool has_stack_version() {
+ return _stack_or_reg;
+ }
+ virtual void set_stack_version(bool flag) {
+ _stack_or_reg = flag;
+ }
+
+ virtual void declare_register_masks(FILE* fp);
+ virtual void build_register_masks(FILE* fp);
+};
+
+//------------------------------CodeSnippetRegClass----------------------------
+// Register class that has a user-defined C++ code snippet attached to it
+// to determine at runtime which register class to use. This register class is
+// the internal representation for the following .ad file format:
+//
+// reg_class actual_dflt_reg %{
+// if (VM_Version::has_vfp3_32()) {
+// return DFLT_REG_mask();
+// } else {
+// return DFLT_LOW_REG_mask();
+// }
+// %}
+//
+// where DFLT_REG_mask() and DFLT_LOW_REG_mask() are the internal names of the
+// masks of register classes dflt_reg and dflt_low_reg.
+//
+// The attached code snippet can also select between more than two register classes.
+// However, this register class can only be used if the register class is not
+// cisc-spillable (i.e., the registers of this class are not allowed on the stack,
+// which is equivalent to _stack_or_reg being false).
+class CodeSnippetRegClass : public RegClass {
+protected:
+ char* _code_snippet;
+public:
+ CodeSnippetRegClass(const char* classid);// Constructor
+ ~CodeSnippetRegClass();
+
+ void set_code_snippet(char* code) {
+ _code_snippet = code;
+ }
+ char* code_snippet() {
+ return _code_snippet;
+ }
+ void set_stack_version(bool flag) {
+ assert(false, "User defined register classes are not allowed to spill to the stack.");
+ }
+ void declare_register_masks(FILE* fp);
+ void build_register_masks(FILE* fp) {
+ // We do not need to generate register masks because we select at runtime
+ // between register masks generated for other register classes.
+ return;
+ }
+};
+
+//------------------------------ConditionalRegClass----------------------------
+// Register class that has two register classes and a runtime condition attached
+// to it. The condition is evaluated at runtime and one of the two attached
+// register classes is selected. This register class is the internal
+// representation for the following .ad format:
+//
+// reg_class_dynamic actual_dflt_reg(dflt_reg, low_reg,
+// %{ VM_Version::has_vfp3_32() }%
+// );
+//
+// This example is equivalent to the example used with the CodeSnippetRegClass
+// register class. A ConditionalRegClass also works if a register class is cisc-spillable
+// (i.e., _stack_or_reg is true), but it can select only between two register classes.
+class ConditionalRegClass : public RegClass {
+protected:
+ // reference to condition code
+ char* _condition_code; // C++ condition code to dynamically determine which register class to use.
+
+ // Example syntax (equivalent to previous example):
+ //
+ // reg_class actual_dflt_reg(dflt_reg, low_reg,
+ // %{ VM_Version::has_vfp3_32() }%
+ // );
+ // reference to conditional register classes
+ RegClass* _rclasses[2]; // 0 is the register class selected if the condition code returns true
+ // 1 is the register class selected if the condition code returns false
+public:
+ ConditionalRegClass(const char* classid);// Constructor
+ ~ConditionalRegClass();
+
+ virtual void set_stack_version(bool flag) {
+ RegClass::set_stack_version(flag);
+ assert((_rclasses[0] != NULL), "Register class NULL for condition code == true");
+ assert((_rclasses[1] != NULL), "Register class NULL for condition code == false");
+ _rclasses[0]->set_stack_version(flag);
+ _rclasses[1]->set_stack_version(flag);
+ }
+ void declare_register_masks(FILE* fp);
+ void build_register_masks(FILE* fp) {
+ // We do not need to generate register masks because we select at runtime
+ // between register masks generated for other register classes.
+ return;
+ }
+ void set_rclass_at_index(int index, RegClass* rclass) {
+ assert((0 <= index && index < 2), "Condition code can select only between two register classes");
+ _rclasses[index] = rclass;
+ }
+ void set_condition_code(char* code) {
+ _condition_code = code;
+ }
+ char* condition_code() {
+ return _condition_code;
+ }
};
//------------------------------AllocClass-------------------------------------
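
For a ConditionalRegClass the declared accessor dispatches between the two underlying masks at runtime. Using the actual_dflt_reg example from the comment above, declare_register_masks() would emit approximately:

    // Approximate output for
    //   reg_class_dynamic actual_dflt_reg(dflt_reg, low_reg, %{ VM_Version::has_vfp3_32() }%);
    inline const RegMask &ACTUAL_DFLT_REG_mask() {
      return (VM_Version::has_vfp3_32()) ? DFLT_REG_mask()
                                         : LOW_REG_mask();
    }
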
diff --git a/hotspot/src/share/vm/adlc/formssel.cpp b/hotspot/src/share/vm/adlc/formssel.cpp
index 4a68da6cd1c..92ccb3b2620 100644
--- a/hotspot/src/share/vm/adlc/formssel.cpp
+++ b/hotspot/src/share/vm/adlc/formssel.cpp
@@ -3392,7 +3392,7 @@ const char *MatchNode::reduce_left(FormDict &globals) const {
// Count occurrences of operands names in the leaves of the instruction
// match rule.
void MatchNode::count_instr_names( Dict &names ) {
- if( !this ) return;
+ if( this == NULL ) return;
if( _lChild ) _lChild->count_instr_names(names);
if( _rChild ) _rChild->count_instr_names(names);
if( !_lChild && !_rChild ) {
@@ -4043,6 +4043,13 @@ int MatchRule::is_expensive() const {
strcmp(opType,"ReplicateL")==0 ||
strcmp(opType,"ReplicateF")==0 ||
strcmp(opType,"ReplicateD")==0 ||
+ strcmp(opType,"AddReductionVI")==0 ||
+ strcmp(opType,"AddReductionVL")==0 ||
+ strcmp(opType,"AddReductionVF")==0 ||
+ strcmp(opType,"AddReductionVD")==0 ||
+ strcmp(opType,"MulReductionVI")==0 ||
+ strcmp(opType,"MulReductionVF")==0 ||
+ strcmp(opType,"MulReductionVD")==0 ||
0 /* 0 to line up columns nicely */ )
return 1;
}
@@ -4135,6 +4142,10 @@ bool MatchRule::is_vector() const {
"MulVS","MulVI","MulVF","MulVD",
"DivVF","DivVD",
"AndV" ,"XorV" ,"OrV",
+ "AddReductionVI", "AddReductionVL",
+ "AddReductionVF", "AddReductionVD",
+ "MulReductionVI",
+ "MulReductionVF", "MulReductionVD",
"LShiftCntV","RShiftCntV",
"LShiftVB","LShiftVS","LShiftVI","LShiftVL",
"RShiftVB","RShiftVS","RShiftVI","RShiftVL",
diff --git a/hotspot/src/share/vm/adlc/formssel.hpp b/hotspot/src/share/vm/adlc/formssel.hpp
index 46366f12197..1ed9157a480 100644
--- a/hotspot/src/share/vm/adlc/formssel.hpp
+++ b/hotspot/src/share/vm/adlc/formssel.hpp
@@ -59,6 +59,8 @@ class Opcode;
class InsEncode;
class RegDef;
class RegClass;
+class CodeSnippetRegClass;
+class ConditionalRegClass;
class AllocClass;
class ResourceForm;
class PipeDesc;
diff --git a/hotspot/src/share/vm/adlc/output_c.cpp b/hotspot/src/share/vm/adlc/output_c.cpp
index 79a12a003ce..cc98ef59af4 100644
--- a/hotspot/src/share/vm/adlc/output_c.cpp
+++ b/hotspot/src/share/vm/adlc/output_c.cpp
@@ -138,26 +138,9 @@ void ArchDesc::declare_register_masks(FILE *fp_hpp) {
fprintf(fp_hpp,"// Register masks, one for each register class.\n");
_register->_rclasses.reset();
for (rc_name = NULL; (rc_name = _register->_rclasses.iter()) != NULL;) {
- const char *prefix = "";
RegClass *reg_class = _register->getRegClass(rc_name);
assert(reg_class, "Using an undefined register class");
-
- const char* rc_name_to_upper = toUpper(rc_name);
-
- if (reg_class->_user_defined == NULL) {
- fprintf(fp_hpp, "extern const RegMask _%s%s_mask;\n", prefix, rc_name_to_upper);
- fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { return _%s%s_mask; }\n", prefix, rc_name_to_upper, prefix, rc_name_to_upper);
- } else {
- fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { %s }\n", prefix, rc_name_to_upper, reg_class->_user_defined);
- }
-
- if (reg_class->_stack_or_reg) {
- assert(reg_class->_user_defined == NULL, "no user defined reg class here");
- fprintf(fp_hpp, "extern const RegMask _%sSTACK_OR_%s_mask;\n", prefix, rc_name_to_upper);
- fprintf(fp_hpp, "inline const RegMask &%sSTACK_OR_%s_mask() { return _%sSTACK_OR_%s_mask; }\n", prefix, rc_name_to_upper, prefix, rc_name_to_upper);
- }
- delete[] rc_name_to_upper;
-
+ reg_class->declare_register_masks(fp_hpp);
}
}
}
@@ -173,35 +156,9 @@ void ArchDesc::build_register_masks(FILE *fp_cpp) {
fprintf(fp_cpp,"// Register masks, one for each register class.\n");
_register->_rclasses.reset();
for (rc_name = NULL; (rc_name = _register->_rclasses.iter()) != NULL;) {
- const char *prefix = "";
RegClass *reg_class = _register->getRegClass(rc_name);
assert(reg_class, "Using an undefined register class");
-
- if (reg_class->_user_defined != NULL) {
- continue;
- }
-
- int len = RegisterForm::RegMask_Size();
- const char* rc_name_to_upper = toUpper(rc_name);
- fprintf(fp_cpp, "const RegMask _%s%s_mask(", prefix, rc_name_to_upper);
-
- {
- int i;
- for(i = 0; i < len - 1; i++) {
- fprintf(fp_cpp," 0x%x,", reg_class->regs_in_word(i, false));
- }
- fprintf(fp_cpp," 0x%x );\n", reg_class->regs_in_word(i, false));
- }
-
- if (reg_class->_stack_or_reg) {
- int i;
- fprintf(fp_cpp, "const RegMask _%sSTACK_OR_%s_mask(", prefix, rc_name_to_upper);
- for(i = 0; i < len - 1; i++) {
- fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i, true));
- }
- fprintf(fp_cpp," 0x%x );\n",reg_class->regs_in_word(i, true));
- }
- delete[] rc_name_to_upper;
+ reg_class->build_register_masks(fp_cpp);
}
}
}
@@ -1548,8 +1505,8 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
// Iterate over the instructions 'node' expands into
ExpandRule *expand = node->_exprule;
NameAndList *expand_instr = NULL;
- for(expand->reset_instructions();
- (expand_instr = expand->iter_instructions()) != NULL; cnt++) {
+ for (expand->reset_instructions();
+ (expand_instr = expand->iter_instructions()) != NULL; cnt++) {
new_id = expand_instr->name();
InstructForm* expand_instruction = (InstructForm*)globalAD->globalNames()[new_id];
@@ -1560,30 +1517,25 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
continue;
}
- if (expand_instruction->has_temps()) {
- globalAD->syntax_err(node->_linenum, "In %s: expand rules using instructs with TEMPs aren't supported: %s",
- node->_ident, new_id);
- }
-
// Build the node for the instruction
fprintf(fp,"\n %sNode *n%d = new %sNode();\n", new_id, cnt, new_id);
// Add control edge for this node
fprintf(fp," n%d->add_req(_in[0]);\n", cnt);
// Build the operand for the value this node defines.
Form *form = (Form*)_globalNames[new_id];
- assert( form, "'new_id' must be a defined form name");
+ assert(form, "'new_id' must be a defined form name");
// Grab the InstructForm for the new instruction
new_inst = form->is_instruction();
- assert( new_inst, "'new_id' must be an instruction name");
- if( node->is_ideal_if() && new_inst->is_ideal_if() ) {
- fprintf(fp, " ((MachIfNode*)n%d)->_prob = _prob;\n",cnt);
- fprintf(fp, " ((MachIfNode*)n%d)->_fcnt = _fcnt;\n",cnt);
+ assert(new_inst, "'new_id' must be an instruction name");
+ if (node->is_ideal_if() && new_inst->is_ideal_if()) {
+ fprintf(fp, " ((MachIfNode*)n%d)->_prob = _prob;\n", cnt);
+ fprintf(fp, " ((MachIfNode*)n%d)->_fcnt = _fcnt;\n", cnt);
}
- if( node->is_ideal_fastlock() && new_inst->is_ideal_fastlock() ) {
- fprintf(fp, " ((MachFastLockNode*)n%d)->_counters = _counters;\n",cnt);
- fprintf(fp, " ((MachFastLockNode*)n%d)->_rtm_counters = _rtm_counters;\n",cnt);
- fprintf(fp, " ((MachFastLockNode*)n%d)->_stack_rtm_counters = _stack_rtm_counters;\n",cnt);
+ if (node->is_ideal_fastlock() && new_inst->is_ideal_fastlock()) {
+ fprintf(fp, " ((MachFastLockNode*)n%d)->_counters = _counters;\n", cnt);
+ fprintf(fp, " ((MachFastLockNode*)n%d)->_rtm_counters = _rtm_counters;\n", cnt);
+ fprintf(fp, " ((MachFastLockNode*)n%d)->_stack_rtm_counters = _stack_rtm_counters;\n", cnt);
}
// Fill in the bottom_type where requested
diff --git a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
index 328919cd13f..9a7c291b886 100644
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -3462,6 +3462,24 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
case vmIntrinsics::_putFloat : return append_unsafe_put_obj(callee, T_FLOAT, false);
case vmIntrinsics::_putDouble : return append_unsafe_put_obj(callee, T_DOUBLE, false);
+ case vmIntrinsics::_getShortUnaligned :
+ return UseUnalignedAccesses ? append_unsafe_get_obj(callee, T_SHORT, false) : false;
+ case vmIntrinsics::_getCharUnaligned :
+ return UseUnalignedAccesses ? append_unsafe_get_obj(callee, T_CHAR, false) : false;
+ case vmIntrinsics::_getIntUnaligned :
+ return UseUnalignedAccesses ? append_unsafe_get_obj(callee, T_INT, false) : false;
+ case vmIntrinsics::_getLongUnaligned :
+ return UseUnalignedAccesses ? append_unsafe_get_obj(callee, T_LONG, false) : false;
+
+ case vmIntrinsics::_putShortUnaligned :
+ return UseUnalignedAccesses ? append_unsafe_put_obj(callee, T_SHORT, false) : false;
+ case vmIntrinsics::_putCharUnaligned :
+ return UseUnalignedAccesses ? append_unsafe_put_obj(callee, T_CHAR, false) : false;
+ case vmIntrinsics::_putIntUnaligned :
+ return UseUnalignedAccesses ? append_unsafe_put_obj(callee, T_INT, false) : false;
+ case vmIntrinsics::_putLongUnaligned :
+ return UseUnalignedAccesses ? append_unsafe_put_obj(callee, T_LONG, false) : false;
+
case vmIntrinsics::_getObjectVolatile : return append_unsafe_get_obj(callee, T_OBJECT, true);
case vmIntrinsics::_getBooleanVolatile: return append_unsafe_get_obj(callee, T_BOOLEAN, true);
case vmIntrinsics::_getByteVolatile : return append_unsafe_get_obj(callee, T_BYTE, true);
@@ -4065,7 +4083,7 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
ValueType* type = apop()->type();
if (type->is_constant()) {
ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget();
- // If the target is another method handle invoke try recursivly to get
+ // If the target is another method handle invoke, try to recursively get
// a better target.
if (target->is_method_handle_intrinsic()) {
if (try_method_handle_inline(target)) {
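
The first GraphBuilder hunk wires the new Unsafe get/put*Unaligned intrinsics into C1, but only when the platform reports UseUnalignedAccesses; returning false leaves the call to be compiled as an ordinary invocation. The guard pattern in miniature, with names from the hunk:

    // Sketch: gate intrinsic inlining on a platform capability flag.
    bool try_inline_unaligned_get(ciMethod* callee, BasicType t /* e.g. T_INT */) {
      if (!UseUnalignedAccesses) {
        return false;  // not inlined; fall back to the regular call path
      }
      return append_unsafe_get_obj(callee, t, /*is_volatile=*/ false);
    }
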
diff --git a/hotspot/src/share/vm/c1/c1_LIR.cpp b/hotspot/src/share/vm/c1/c1_LIR.cpp
index d58e3c85b42..7d2b4f3e883 100644
--- a/hotspot/src/share/vm/c1/c1_LIR.cpp
+++ b/hotspot/src/share/vm/c1/c1_LIR.cpp
@@ -458,7 +458,7 @@ void LIR_OpRTCall::verify() const {
//-------------------visits--------------------------
// complete rework of LIR instruction visitor.
-// The virtual calls for each instruction type is replaced by a big
+// The virtual call for each instruction type is replaced by a big
// switch that adds the operands for each instruction
void LIR_OpVisitState::visit(LIR_Op* op) {
@@ -825,7 +825,8 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
}
if (opJavaCall->_info) do_info(opJavaCall->_info);
- if (opJavaCall->is_method_handle_invoke()) {
+ if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr &&
+ opJavaCall->is_method_handle_invoke()) {
opJavaCall->_method_handle_invoke_SP_save_opr = FrameMap::method_handle_invoke_SP_save_opr();
do_temp(opJavaCall->_method_handle_invoke_SP_save_opr);
}
diff --git a/hotspot/src/share/vm/c1/c1_LIR.hpp b/hotspot/src/share/vm/c1/c1_LIR.hpp
index 90a47c8b47e..4affbfb0826 100644
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp
@@ -1219,10 +1219,8 @@ class LIR_OpJavaCall: public LIR_OpCall {
// JSR 292 support.
bool is_invokedynamic() const { return code() == lir_dynamic_call; }
bool is_method_handle_invoke() const {
- return
- method()->is_compiled_lambda_form() // Java-generated adapter
- ||
- method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic
+ return method()->is_compiled_lambda_form() || // Java-generated lambda form
+ method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic
}
intptr_t vtable_offset() const {
diff --git a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
index d040ccadd6b..cc35fc3ae30 100644
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -1606,13 +1606,26 @@ void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc*
} else {
__ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
}
+
+ LIR_Address* card_addr;
if (can_inline_as_constant(card_table_base)) {
- __ move(LIR_OprFact::intConst(0),
- new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
+ card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
} else {
- __ move(LIR_OprFact::intConst(0),
- new LIR_Address(tmp, load_constant(card_table_base),
- T_BYTE));
+ card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
+ }
+
+ LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
+ if (UseCondCardMark) {
+ LIR_Opr cur_value = new_register(T_INT);
+ __ move(card_addr, cur_value);
+
+ LabelObj* L_already_dirty = new LabelObj();
+ __ cmp(lir_cond_equal, cur_value, dirty);
+ __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
+ __ move(dirty, card_addr);
+ __ branch_destination(L_already_dirty->label());
+ } else {
+ __ move(dirty, card_addr);
}
#endif
}
@@ -2862,7 +2875,7 @@ LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
// g) lock result registers and emit call operation
//
// Before issuing a call, we must spill-save all values on stack
-// that are in caller-save register. "spill-save" moves thos registers
+// that are in caller-save register. "spill-save" moves those registers
// either in a free callee-save register or spills them if no free
// callee save register is available.
//
@@ -2870,7 +2883,7 @@ LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
// - if invoked between e) and f), we may lock callee save
// register in "spill-save" that destroys the receiver register
// before f) is executed
-// - if we rearange the f) to be earlier, by loading %o0, it
+// - if we rearrange f) to be earlier (by loading %o0) it
// may destroy a value on the stack that is currently in %o0
// and is waiting to be spilled
// - if we keep the receiver locked while doing spill-save,
@@ -2903,14 +2916,16 @@ void LIRGenerator::do_Invoke(Invoke* x) {
assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
// JSR 292
- // Preserve the SP over MethodHandle call sites.
+ // Preserve the SP over MethodHandle call sites, if needed.
ciMethod* target = x->target();
bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
target->is_method_handle_intrinsic() ||
target->is_compiled_lambda_form());
if (is_method_handle_invoke) {
info->set_is_method_handle_invoke(true);
- __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
+ if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
+ __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
+ }
}
switch (x->code()) {
@@ -2950,8 +2965,9 @@ void LIRGenerator::do_Invoke(Invoke* x) {
}
// JSR 292
- // Restore the SP after MethodHandle call sites.
- if (is_method_handle_invoke) {
+ // Restore the SP after MethodHandle call sites, if needed.
+ if (is_method_handle_invoke
+ && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
__ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
}
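
The CardTableModRef_post_barrier hunk above implements conditional card marking for C1: under UseCondCardMark the card byte is read first and only written when it is not already dirty, trading a load and branch for avoided stores (which helps against false sharing on hot card lines). The emitted code behaves like:

    // Sketch of the per-store logic the generated code performs;
    // 'dirty' corresponds to CardTableModRefBS::dirty_card_val().
    static void mark_card(volatile char* card_addr, char dirty, bool use_cond_card_mark) {
      if (use_cond_card_mark && *card_addr == dirty) {
        return;            // card already dirty: skip the write
      }
      *card_addr = dirty;  // dirty the card
    }
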
diff --git a/hotspot/src/share/vm/ci/ciCallSite.cpp b/hotspot/src/share/vm/ci/ciCallSite.cpp
index fb222fe3f5a..028a4ed724f 100644
--- a/hotspot/src/share/vm/ci/ciCallSite.cpp
+++ b/hotspot/src/share/vm/ci/ciCallSite.cpp
@@ -49,6 +49,25 @@ ciMethodHandle* ciCallSite::get_target() const {
return CURRENT_ENV->get_object(method_handle_oop)->as_method_handle();
}
+// ------------------------------------------------------------------
+// ciCallSite::get_context
+//
+// Return the context klass of this CallSite, installing the default context if none is set yet.
+ciKlass* ciCallSite::get_context() {
+ assert(!is_constant_call_site(), "");
+
+ VM_ENTRY_MARK;
+ oop call_site_oop = get_oop();
+ InstanceKlass* ctxk = MethodHandles::get_call_site_context(call_site_oop);
+ if (ctxk == NULL) {
+ // The call site doesn't have an associated context. Set it to the default context.
+ oop def_context_oop = java_lang_invoke_CallSite::default_context();
+ java_lang_invoke_CallSite::set_context_cas(call_site_oop, def_context_oop, /*expected=*/NULL);
+ ctxk = MethodHandles::get_call_site_context(call_site_oop);
+ }
+ return (CURRENT_ENV->get_metadata(ctxk))->as_klass();
+}
+
// ------------------------------------------------------------------
// ciCallSite::print
//
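
ciCallSite::get_context lazily installs the shared default context the first time a site without one is observed, using a CAS so racing compiler threads converge on a single value. The initialize-once idiom it relies on, restated with the accessors this patch introduces:

    // Sketch: publish a default value exactly once; set_context_cas stores
    // 'def' only if the field still holds the expected NULL.
    static InstanceKlass* context_of(oop call_site_oop) {
      InstanceKlass* ctxk = MethodHandles::get_call_site_context(call_site_oop);
      if (ctxk == NULL) {
        oop def = java_lang_invoke_CallSite::default_context();
        java_lang_invoke_CallSite::set_context_cas(call_site_oop, def, /*expected=*/ NULL);
        ctxk = MethodHandles::get_call_site_context(call_site_oop);  // ours or the winner's
      }
      return ctxk;
    }
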
diff --git a/hotspot/src/share/vm/ci/ciCallSite.hpp b/hotspot/src/share/vm/ci/ciCallSite.hpp
index 063f1e3a5fe..040e894d0df 100644
--- a/hotspot/src/share/vm/ci/ciCallSite.hpp
+++ b/hotspot/src/share/vm/ci/ciCallSite.hpp
@@ -43,6 +43,7 @@ public:
// Return the target MethodHandle of this CallSite.
ciMethodHandle* get_target() const;
+ ciKlass* get_context();
void print();
};
diff --git a/hotspot/src/share/vm/ci/ciMethod.cpp b/hotspot/src/share/vm/ci/ciMethod.cpp
index f06a6e654d2..859bd3eef4c 100644
--- a/hotspot/src/share/vm/ci/ciMethod.cpp
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp
@@ -688,7 +688,8 @@ bool ciMethod::parameter_profiled_type(int i, ciKlass*& type, bool& maybe_null)
// via assert_unique_concrete_method or assert_leaf_type.
ciMethod* ciMethod::find_monomorphic_target(ciInstanceKlass* caller,
ciInstanceKlass* callee_holder,
- ciInstanceKlass* actual_recv) {
+ ciInstanceKlass* actual_recv,
+ bool check_access) {
check_is_loaded();
if (actual_recv->is_interface()) {
@@ -696,7 +697,7 @@ ciMethod* ciMethod::find_monomorphic_target(ciInstanceKlass* caller,
return NULL;
}
- ciMethod* root_m = resolve_invoke(caller, actual_recv);
+ ciMethod* root_m = resolve_invoke(caller, actual_recv, check_access);
if (root_m == NULL) {
// Something went wrong looking up the actual receiver method.
return NULL;
@@ -775,7 +776,7 @@ ciMethod* ciMethod::find_monomorphic_target(ciInstanceKlass* caller,
//
// Given a known receiver klass, find the target for the call.
// Return NULL if the call has no target or the target is abstract.
-ciMethod* ciMethod::resolve_invoke(ciKlass* caller, ciKlass* exact_receiver) {
+ciMethod* ciMethod::resolve_invoke(ciKlass* caller, ciKlass* exact_receiver, bool check_access) {
check_is_loaded();
VM_ENTRY_MARK;
@@ -792,9 +793,9 @@ ciMethod* ciMethod::resolve_invoke(ciKlass* caller, ciKlass* exact_receiver) {
||
InstanceKlass::cast(h_recv())->is_linked() && !exact_receiver->is_interface()) {
if (holder()->is_interface()) {
- m = LinkResolver::resolve_interface_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass);
+ m = LinkResolver::resolve_interface_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass, check_access);
} else {
- m = LinkResolver::resolve_virtual_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass);
+ m = LinkResolver::resolve_virtual_call_or_null(h_recv, h_resolved, h_name, h_signature, caller_klass, check_access);
}
}
diff --git a/hotspot/src/share/vm/ci/ciMethod.hpp b/hotspot/src/share/vm/ci/ciMethod.hpp
index d0c664d6142..ead6a962589 100644
--- a/hotspot/src/share/vm/ci/ciMethod.hpp
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp
@@ -255,11 +255,12 @@ class ciMethod : public ciMetadata {
// its calling environment.
ciMethod* find_monomorphic_target(ciInstanceKlass* caller,
ciInstanceKlass* callee_holder,
- ciInstanceKlass* actual_receiver);
+ ciInstanceKlass* actual_receiver,
+ bool check_access = true);
// Given a known receiver klass, find the target for the call.
// Return NULL if the call has no target or is abstract.
- ciMethod* resolve_invoke(ciKlass* caller, ciKlass* exact_receiver);
+ ciMethod* resolve_invoke(ciKlass* caller, ciKlass* exact_receiver, bool check_access = true);
// Find the proper vtable index to invoke this method.
int resolve_vtable_index(ciKlass* caller, ciKlass* receiver);
diff --git a/hotspot/src/share/vm/classfile/classFileParser.cpp b/hotspot/src/share/vm/classfile/classFileParser.cpp
index 748e76da495..52532918aeb 100644
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp
@@ -4838,20 +4838,21 @@ void ClassFileParser::verify_legal_method_modifiers(
}
}
} else { // not interface
- if (is_initializer) {
- if (is_static || is_final || is_synchronized || is_native ||
- is_abstract || (major_gte_15 && is_bridge)) {
- is_illegal = true;
- }
- } else { // not initializer
- if (is_abstract) {
- if ((is_final || is_native || is_private || is_static ||
- (major_gte_15 && (is_synchronized || is_strict)))) {
+ if (has_illegal_visibility(flags)) {
+ is_illegal = true;
+ } else {
+ if (is_initializer) {
+ if (is_static || is_final || is_synchronized || is_native ||
+ is_abstract || (major_gte_15 && is_bridge)) {
is_illegal = true;
}
- }
- if (has_illegal_visibility(flags)) {
- is_illegal = true;
+ } else { // not initializer
+ if (is_abstract) {
+ if ((is_final || is_native || is_private || is_static ||
+ (major_gte_15 && (is_synchronized || is_strict)))) {
+ is_illegal = true;
+ }
+ }
}
}
}
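
The classFileParser change hoists the visibility test so that an illegal combination such as ACC_PUBLIC|ACC_PRIVATE is rejected for constructors too; the old nesting only reached it for non-initializers. The reordered decision, sketched (abstract-method combinations elided):

    bool has_illegal_visibility(int flags);  // provided elsewhere in classFileParser

    static bool is_illegal_method_modifiers(int flags, bool is_initializer,
                                            bool is_static, bool is_final,
                                            bool is_synchronized, bool is_native,
                                            bool is_abstract) {
      if (has_illegal_visibility(flags)) {
        return true;  // now checked first, for every non-interface method
      }
      if (is_initializer) {
        return is_static || is_final || is_synchronized || is_native || is_abstract;
      }
      return false;   // abstract/strict combinations omitted in this sketch
    }
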
diff --git a/hotspot/src/share/vm/classfile/javaClasses.cpp b/hotspot/src/share/vm/classfile/javaClasses.cpp
index 42ea9b8230f..9ac84cc631f 100644
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp
@@ -102,21 +102,22 @@ InjectedField* JavaClasses::get_injected(Symbol* class_name, int* field_count) {
static bool find_field(InstanceKlass* ik,
Symbol* name_symbol, Symbol* signature_symbol,
fieldDescriptor* fd,
- bool allow_super = false) {
- if (allow_super)
- return ik->find_field(name_symbol, signature_symbol, fd) != NULL;
- else
+ bool is_static = false, bool allow_super = false) {
+ if (allow_super || is_static) {
+ return ik->find_field(name_symbol, signature_symbol, is_static, fd) != NULL;
+ } else {
return ik->find_local_field(name_symbol, signature_symbol, fd);
+ }
}
// Helpful routine for computing field offsets at run time rather than hardcoding them
static void
compute_offset(int &dest_offset,
Klass* klass_oop, Symbol* name_symbol, Symbol* signature_symbol,
- bool allow_super = false) {
+ bool is_static = false, bool allow_super = false) {
fieldDescriptor fd;
InstanceKlass* ik = InstanceKlass::cast(klass_oop);
- if (!find_field(ik, name_symbol, signature_symbol, &fd, allow_super)) {
+ if (!find_field(ik, name_symbol, signature_symbol, &fd, is_static, allow_super)) {
ResourceMark rm;
tty->print_cr("Invalid layout of %s at %s", ik->external_name(), name_symbol->as_C_string());
#ifndef PRODUCT
@@ -126,7 +127,7 @@ compute_offset(int &dest_offset,
tty->print_cr(" name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int());
}
#endif //PRODUCT
- fatal("Invalid layout of preloaded class");
+ vm_exit_during_initialization("Invalid layout of preloaded class: use -XX:+TraceClassLoading to see the origin of the problem class");
}
dest_offset = fd.offset();
}
@@ -1313,7 +1314,8 @@ static inline int version_at(unsigned int merged) {
}
static inline bool version_matches(Method* method, int version) {
- return (method->constants()->version() == version && version < MAX_VERSION);
+ assert(version < MAX_VERSION, "version is too big");
+ return method != NULL && (method->constants()->version() == version);
}
static inline int get_line_number(Method* method, int bci) {
@@ -1343,6 +1345,7 @@ class BacktraceBuilder: public StackObj {
typeArrayOop _methods;
typeArrayOop _bcis;
objArrayOop _mirrors;
+ typeArrayOop _cprefs; // needed to insulate method name against redefinition
int _index;
No_Safepoint_Verifier _nsv;
@@ -1350,8 +1353,9 @@ class BacktraceBuilder: public StackObj {
enum {
trace_methods_offset = java_lang_Throwable::trace_methods_offset,
- trace_bcis_offset = java_lang_Throwable::trace_bcis_offset,
+ trace_bcis_offset = java_lang_Throwable::trace_bcis_offset,
trace_mirrors_offset = java_lang_Throwable::trace_mirrors_offset,
+ trace_cprefs_offset = java_lang_Throwable::trace_cprefs_offset,
trace_next_offset = java_lang_Throwable::trace_next_offset,
trace_size = java_lang_Throwable::trace_size,
trace_chunk_size = java_lang_Throwable::trace_chunk_size
@@ -1373,9 +1377,14 @@ class BacktraceBuilder: public StackObj {
assert(mirrors != NULL, "mirror array should be initialized in backtrace");
return mirrors;
}
+ static typeArrayOop get_cprefs(objArrayHandle chunk) {
+ typeArrayOop cprefs = typeArrayOop(chunk->obj_at(trace_cprefs_offset));
+ assert(cprefs != NULL, "cprefs array should be initialized in backtrace");
+ return cprefs;
+ }
// constructor for new backtrace
- BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL) {
+ BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL), _cprefs(NULL) {
expand(CHECK);
_backtrace = _head;
_index = 0;
@@ -1385,6 +1394,7 @@ class BacktraceBuilder: public StackObj {
_methods = get_methods(backtrace);
_bcis = get_bcis(backtrace);
_mirrors = get_mirrors(backtrace);
+ _cprefs = get_cprefs(backtrace);
assert(_methods->length() == _bcis->length() &&
_methods->length() == _mirrors->length(),
"method and source information arrays should match");
@@ -1410,17 +1420,22 @@ class BacktraceBuilder: public StackObj {
objArrayOop mirrors = oopFactory::new_objectArray(trace_chunk_size, CHECK);
objArrayHandle new_mirrors(THREAD, mirrors);
+ typeArrayOop cprefs = oopFactory::new_shortArray(trace_chunk_size, CHECK);
+ typeArrayHandle new_cprefs(THREAD, cprefs);
+
if (!old_head.is_null()) {
old_head->obj_at_put(trace_next_offset, new_head());
}
new_head->obj_at_put(trace_methods_offset, new_methods());
new_head->obj_at_put(trace_bcis_offset, new_bcis());
new_head->obj_at_put(trace_mirrors_offset, new_mirrors());
+ new_head->obj_at_put(trace_cprefs_offset, new_cprefs());
_head = new_head();
_methods = new_methods();
_bcis = new_bcis();
_mirrors = new_mirrors();
+ _cprefs = new_cprefs();
_index = 0;
}
@@ -1440,8 +1455,9 @@ class BacktraceBuilder: public StackObj {
method = mhandle();
}
- _methods->short_at_put(_index, method->method_idnum());
+ _methods->short_at_put(_index, method->orig_method_idnum());
_bcis->int_at_put(_index, merge_bci_and_version(bci, method->constants()->version()));
+ _cprefs->short_at_put(_index, method->name_index());
// We need to save the mirrors in the backtrace to keep the class
// from being unloaded while we still have this stack trace.
@@ -1454,27 +1470,26 @@ class BacktraceBuilder: public StackObj {
// Print stack trace element to resource allocated buffer
char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
- int method_id, int version, int bci) {
+ int method_id, int version, int bci, int cpref) {
// Get strings and string lengths
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
const char* klass_name = holder->external_name();
int buf_len = (int)strlen(klass_name);
- // The method id may point to an obsolete method, can't get more stack information
- Method* method = holder->method_with_idnum(method_id);
- if (method == NULL) {
- char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
- // This is what the java code prints in this case - added Redefined
- sprintf(buf, "\tat %s.null (Redefined)", klass_name);
- return buf;
- }
+ Method* method = holder->method_with_orig_idnum(method_id, version);
- char* method_name = method->name()->as_C_string();
+ // The method can be NULL if the requested class version is gone
+ Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
+ char* method_name = sym->as_C_string();
buf_len += (int)strlen(method_name);
+ // Use specific ik version as a holder since the mirror might
+ // refer to version that is now obsolete and no longer accessible
+ // via the previous versions list.
+ holder = holder->get_klass_version(version);
char* source_file_name = NULL;
- if (version_matches(method, version)) {
+ if (holder != NULL) {
Symbol* source = holder->source_file_name();
if (source != NULL) {
source_file_name = source->as_C_string();
@@ -1516,17 +1531,18 @@ char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
}
void java_lang_Throwable::print_stack_element(outputStream *st, Handle mirror,
- int method_id, int version, int bci) {
+ int method_id, int version, int bci, int cpref) {
ResourceMark rm;
- char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci);
+ char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci, cpref);
st->print_cr("%s", buf);
}
void java_lang_Throwable::print_stack_element(outputStream *st, methodHandle method, int bci) {
Handle mirror = method->method_holder()->java_mirror();
- int method_id = method->method_idnum();
+ int method_id = method->orig_method_idnum();
int version = method->constants()->version();
- print_stack_element(st, mirror, method_id, version, bci);
+ int cpref = method->name_index();
+ print_stack_element(st, mirror, method_id, version, bci, cpref);
}
const char* java_lang_Throwable::no_stack_trace_message() {
@@ -1551,6 +1567,7 @@ void java_lang_Throwable::print_stack_trace(oop throwable, outputStream* st) {
typeArrayHandle methods (THREAD, BacktraceBuilder::get_methods(result));
typeArrayHandle bcis (THREAD, BacktraceBuilder::get_bcis(result));
objArrayHandle mirrors (THREAD, BacktraceBuilder::get_mirrors(result));
+ typeArrayHandle cprefs (THREAD, BacktraceBuilder::get_cprefs(result));
int length = methods()->length();
for (int index = 0; index < length; index++) {
@@ -1560,7 +1577,8 @@ void java_lang_Throwable::print_stack_trace(oop throwable, outputStream* st) {
int method = methods->short_at(index);
int version = version_at(bcis->int_at(index));
int bci = bci_at(bcis->int_at(index));
- print_stack_element(st, mirror, method, version, bci);
+ int cpref = cprefs->short_at(index);
+ print_stack_element(st, mirror, method, version, bci, cpref);
}
result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset)));
}
@@ -1837,29 +1855,30 @@ oop java_lang_Throwable::get_stack_trace_element(oop throwable, int index, TRAPS
if (chunk == NULL) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
- // Get method id, bci, version and mirror from chunk
+ // Get method id, bci, version, mirror and cpref from chunk
typeArrayOop methods = BacktraceBuilder::get_methods(chunk);
typeArrayOop bcis = BacktraceBuilder::get_bcis(chunk);
objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk);
+ typeArrayOop cprefs = BacktraceBuilder::get_cprefs(chunk);
assert(methods != NULL && bcis != NULL && mirrors != NULL, "sanity check");
int method = methods->short_at(chunk_index);
int version = version_at(bcis->int_at(chunk_index));
int bci = bci_at(bcis->int_at(chunk_index));
+ int cpref = cprefs->short_at(chunk_index);
Handle mirror(THREAD, mirrors->obj_at(chunk_index));
// Chunk can be partially full
if (mirror.is_null()) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
-
- oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, CHECK_0);
+ oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, cpref, CHECK_0);
return element;
}
oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
- int version, int bci, TRAPS) {
+ int version, int bci, int cpref, TRAPS) {
// Allocate java.lang.StackTraceElement instance
Klass* k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
@@ -1876,17 +1895,13 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
oop classname = StringTable::intern((char*) str, CHECK_0);
java_lang_StackTraceElement::set_declaringClass(element(), classname);
- Method* method = holder->method_with_idnum(method_id);
- // Method on stack may be obsolete because it was redefined so cannot be
- // found by idnum.
- if (method == NULL) {
- // leave name and fileName null
- java_lang_StackTraceElement::set_lineNumber(element(), -1);
- return element();
- }
+ Method* method = holder->method_with_orig_idnum(method_id, version);
+
+ // The method can be NULL if the requested class version is gone
+ Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
// Fill in method name
- oop methodname = StringTable::intern(method->name(), CHECK_0);
+ oop methodname = StringTable::intern(sym, CHECK_0);
java_lang_StackTraceElement::set_methodName(element(), methodname);
if (!version_matches(method, version)) {
@@ -1895,6 +1910,11 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
java_lang_StackTraceElement::set_lineNumber(element(), -1);
} else {
// Fill in source file name and line number.
+ // Use specific ik version as a holder since the mirror might
+ // refer to version that is now obsolete and no longer accessible
+ // via the previous versions list.
+ holder = holder->get_klass_version(version);
+ assert(holder != NULL, "sanity check");
Symbol* source = holder->source_file_name();
if (ShowHiddenFrames && source == NULL)
source = vmSymbols::unknown_class_name();
@@ -1909,8 +1929,9 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
Handle mirror (THREAD, method->method_holder()->java_mirror());
- int method_id = method->method_idnum();
- return create(mirror, method_id, method->constants()->version(), bci, THREAD);
+ int method_id = method->orig_method_idnum();
+ int cpref = method->name_index();
+ return create(mirror, method_id, method->constants()->version(), bci, cpref, THREAD);
}
void java_lang_reflect_AccessibleObject::compute_offsets() {
@@ -2811,33 +2832,6 @@ bool java_lang_invoke_MemberName::is_method(oop mname) {
return (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0;
}
-#if INCLUDE_JVMTI
-// Can be executed on VM thread only
-void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Method* old_method,
- Method* new_method, bool* trace_name_printed) {
- assert(is_method(mname), "wrong type");
- assert(Thread::current()->is_VM_thread(), "not VM thread");
-
- Method* target = (Method*)mname->address_field(_vmtarget_offset);
- if (target == old_method) {
- mname->address_field_put(_vmtarget_offset, (address)new_method);
-
- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
- if (!(*trace_name_printed)) {
- // RC_TRACE_MESG macro has an embedded ResourceMark
- RC_TRACE_MESG(("adjust: name=%s",
- old_method->method_holder()->external_name()));
- *trace_name_printed = true;
- }
- // RC_TRACE macro has an embedded ResourceMark
- RC_TRACE(0x00400000, ("MemberName method update: %s(%s)",
- new_method->name()->as_C_string(),
- new_method->signature()->as_C_string()));
- }
- }
-}
-#endif // INCLUDE_JVMTI
-
void java_lang_invoke_MemberName::set_vmtarget(oop mname, Metadata* ref) {
assert(is_instance(mname), "wrong type");
// check the type of the vmtarget
@@ -2972,14 +2966,49 @@ int java_lang_invoke_MethodType::rtype_slot_count(oop mt) {
// Support for java_lang_invoke_CallSite
int java_lang_invoke_CallSite::_target_offset;
+int java_lang_invoke_CallSite::_context_offset;
+int java_lang_invoke_CallSite::_default_context_offset;
void java_lang_invoke_CallSite::compute_offsets() {
Klass* k = SystemDictionary::CallSite_klass();
if (k != NULL) {
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature());
+ compute_offset(_context_offset, k, vmSymbols::context_name(), vmSymbols::sun_misc_Cleaner_signature());
+ compute_offset(_default_context_offset, k,
+ vmSymbols::DEFAULT_CONTEXT_name(), vmSymbols::sun_misc_Cleaner_signature(),
+ /*is_static=*/true, /*allow_super=*/false);
}
}
+oop java_lang_invoke_CallSite::context_volatile(oop call_site) {
+ assert(java_lang_invoke_CallSite::is_instance(call_site), "");
+
+ oop dep_oop = call_site->obj_field_volatile(_context_offset);
+ return dep_oop;
+}
+
+void java_lang_invoke_CallSite::set_context_volatile(oop call_site, oop context) {
+ assert(java_lang_invoke_CallSite::is_instance(call_site), "");
+ call_site->obj_field_put_volatile(_context_offset, context);
+}
+
+bool java_lang_invoke_CallSite::set_context_cas(oop call_site, oop context, oop expected) {
+ assert(java_lang_invoke_CallSite::is_instance(call_site), "");
+ HeapWord* context_addr = call_site->obj_field_addr<HeapWord>(_context_offset);
+ oop res = oopDesc::atomic_compare_exchange_oop(context, context_addr, expected, true);
+ bool success = (res == expected);
+ if (success) {
+ update_barrier_set((void*)context_addr, context);
+ }
+ return success;
+}
+
+oop java_lang_invoke_CallSite::default_context() {
+ InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::CallSite_klass());
+ oop def_context_oop = ik->java_mirror()->obj_field(_default_context_offset);
+ assert(!oopDesc::is_null(def_context_oop), "");
+ return def_context_oop;
+}
// Support for java_security_AccessControlContext
@@ -3606,7 +3635,7 @@ int InjectedField::compute_offset() {
tty->print_cr(" name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int());
}
#endif //PRODUCT
- fatal("Invalid layout of preloaded class");
+ vm_exit_during_initialization("Invalid layout of preloaded class: use -XX:+TraceClassLoading to see the origin of the problem class");
return -1;
}
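
The javaClasses.cpp changes make backtraces robust against class redefinition: each frame now records the method's constant-pool name index (cpref) next to its original idnum, so a name can still be printed when the Method* for a redefined version is gone. The fallback, condensed from the patched code:

    // Sketch: resolve a frame's method name even if the class was redefined.
    static Symbol* frame_method_name(InstanceKlass* holder, int method_id,
                                     int version, int cpref) {
      Method* method = holder->method_with_orig_idnum(method_id, version);
      return (method != NULL)
          ? method->name()                         // method still reachable
          : holder->constants()->symbol_at(cpref); // redefined: use the saved index
    }
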
diff --git a/hotspot/src/share/vm/classfile/javaClasses.hpp b/hotspot/src/share/vm/classfile/javaClasses.hpp
index 4c2759466f8..700cf4afab5 100644
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp
@@ -485,8 +485,9 @@ class java_lang_Throwable: AllStatic {
trace_methods_offset = 0,
trace_bcis_offset = 1,
trace_mirrors_offset = 2,
- trace_next_offset = 3,
- trace_size = 4,
+ trace_cprefs_offset = 3,
+ trace_next_offset = 4,
+ trace_size = 5,
trace_chunk_size = 32
};
@@ -497,7 +498,7 @@ class java_lang_Throwable: AllStatic {
static int static_unassigned_stacktrace_offset;
// Printing
- static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci);
+ static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci, int cpref);
// StackTrace (programmatic access, new since 1.4)
static void clear_stacktrace(oop throwable);
// No stack trace available
@@ -519,7 +520,7 @@ class java_lang_Throwable: AllStatic {
static void set_message(oop throwable, oop value);
static Symbol* detail_message(oop throwable);
static void print_stack_element(outputStream *st, Handle mirror, int method,
- int version, int bci);
+ int version, int bci, int cpref);
static void print_stack_element(outputStream *st, methodHandle method, int bci);
static void print_stack_usage(Handle stream);
@@ -961,7 +962,6 @@ class java_lang_ref_SoftReference: public java_lang_ref_Reference {
static void set_clock(jlong value);
};
-
// Interface to java.lang.invoke.MethodHandle objects
class MethodHandleEntry;
@@ -1091,10 +1091,6 @@ class java_lang_invoke_MemberName: AllStatic {
static Metadata* vmtarget(oop mname);
static void set_vmtarget(oop mname, Metadata* target);
-#if INCLUDE_JVMTI
- static void adjust_vmtarget(oop mname, Method* old_method, Method* new_method,
- bool* trace_name_printed);
-#endif // INCLUDE_JVMTI
static intptr_t vmindex(oop mname);
static void set_vmindex(oop mname, intptr_t index);
@@ -1173,16 +1169,23 @@ class java_lang_invoke_CallSite: AllStatic {
private:
static int _target_offset;
+ static int _context_offset;
+ static int _default_context_offset;
+
static void compute_offsets();
public:
// Accessors
- static oop target( oop site);
- static void set_target( oop site, oop target);
+ static oop target( oop site);
+ static void set_target( oop site, oop target);
+ static void set_target_volatile( oop site, oop target);
- static volatile oop target_volatile(oop site);
- static void set_target_volatile(oop site, oop target);
+ static oop context_volatile(oop site);
+ static void set_context_volatile(oop site, oop context);
+ static bool set_context_cas (oop site, oop context, oop expected);
+
+ static oop default_context();
// Testers
static bool is_subclass(Klass* klass) {
@@ -1194,7 +1197,6 @@ public:
static int target_offset_in_bytes() { return _target_offset; }
};
-
// Interface to java.security.AccessControlContext objects
class java_security_AccessControlContext: AllStatic {
@@ -1314,7 +1316,7 @@ class java_lang_StackTraceElement: AllStatic {
static void set_lineNumber(oop element, int value);
// Create an instance of StackTraceElement
- static oop create(Handle mirror, int method, int version, int bci, TRAPS);
+ static oop create(Handle mirror, int method, int version, int bci, int cpref, TRAPS);
static oop create(methodHandle method, int bci, TRAPS);
// Debugging
diff --git a/hotspot/src/share/vm/classfile/javaClasses.inline.hpp b/hotspot/src/share/vm/classfile/javaClasses.inline.hpp
index 961d6efdd83..c5b2f32066d 100644
--- a/hotspot/src/share/vm/classfile/javaClasses.inline.hpp
+++ b/hotspot/src/share/vm/classfile/javaClasses.inline.hpp
@@ -29,10 +29,6 @@
#include "oops/oop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
-inline volatile oop java_lang_invoke_CallSite::target_volatile(oop site) {
- return oop((oopDesc *)(site->obj_field_volatile(_target_offset)));
-}
-
inline void java_lang_invoke_CallSite::set_target_volatile(oop site, oop target) {
site->obj_field_put_volatile(_target_offset, target);
}
diff --git a/hotspot/src/share/vm/classfile/verifier.cpp b/hotspot/src/share/vm/classfile/verifier.cpp
index aaf33d55079..2283ff921ca 100644
--- a/hotspot/src/share/vm/classfile/verifier.cpp
+++ b/hotspot/src/share/vm/classfile/verifier.cpp
@@ -657,6 +657,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
bool this_uninit = false; // Set to true when invokespecial initialized 'this'
+ bool verified_exc_handlers = false;
// Merge with the next instruction
{
@@ -688,6 +689,18 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
}
}
+ // Look for possible jump target in exception handlers and see if it
+ // matches current_frame. Do this check here for astore*, dstore*,
+ // fstore*, istore*, and lstore* opcodes because they can change the type
+ // state by adding a local. JVM Spec says that the incoming type state
+ // should be used for this check. So, do the check here before a possible
+ // local is added to the type state.
+ if (Bytecodes::is_store_into_local(opcode) && bci >= ex_min && bci < ex_max) {
+ verify_exception_handler_targets(
+ bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
+ verified_exc_handlers = true;
+ }
+
switch (opcode) {
case Bytecodes::_nop :
no_control_flow = false; break;
@@ -1669,9 +1682,13 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
} // end switch
} // end Merge with the next instruction
- // Look for possible jump target in exception handlers and see if it
- // matches current_frame
- if (bci >= ex_min && bci < ex_max) {
+ // Look for possible jump target in exception handlers and see if it matches
+ // current_frame. Don't do this check if it has already been done (for
+ // [a,d,f,i,l]store* opcodes). This check cannot be done earlier because
+ // opcodes, such as invokespecial, may set the this_uninit flag.
+ assert(!(verified_exc_handlers && this_uninit),
+ "Exception handler targets got verified before this_uninit got set");
+ if (!verified_exc_handlers && bci >= ex_min && bci < ex_max) {
verify_exception_handler_targets(
bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
}
@@ -2236,14 +2253,20 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
}
// Look at the method's handlers. If the bci is in the handler's try block
-// then check if the handler_pc is already on the stack. If not, push it.
+// then check if the handler_pc is already on the stack. If not, push it
+// unless the handler has already been scanned.
void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
+ GrowableArray<u4>* handler_list,
GrowableArray<u4>* handler_stack,
u4 bci) {
int exlength = exhandlers->length();
for(int x = 0; x < exlength; x++) {
if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
- handler_stack->append_if_missing(exhandlers->handler_pc(x));
+ u4 exhandler_pc = exhandlers->handler_pc(x);
+ if (!handler_list->contains(exhandler_pc)) {
+ handler_stack->append_if_missing(exhandler_pc);
+ handler_list->append(exhandler_pc);
+ }
}
}
}
@@ -2261,6 +2284,10 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
// Create stack for handlers for try blocks containing this handler.
GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
+ // Create list of handlers that have been pushed onto the handler_stack
+ // so that handlers embedded inside of their own TRY blocks only get
+ // scanned once.
+ GrowableArray<u4>* handler_list = new GrowableArray<u4>(30);
// Create list of visited branch opcodes (goto* and if*).
GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
ExceptionTable exhandlers(_method());
@@ -2279,7 +2306,7 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
// If the bytecode is in a TRY block, push its handlers so they
// will get parsed.
- push_handlers(&exhandlers, handler_stack, bci);
+ push_handlers(&exhandlers, handler_list, handler_stack, bci);
switch (opcode) {
case Bytecodes::_if_icmpeq:
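
The push_handlers change above adds a handler_list so that a handler nested inside its own TRY range is scanned at most once. The same push-if-not-yet-seen idea in a minimal sketch using standard containers (std::vector standing in for GrowableArray; names illustrative, not HotSpot's):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

typedef std::uint32_t u4;

// Push handler_pc onto the work stack unless it was pushed before;
// handler_list remembers every pc ever pushed, handler_stack holds work.
static void push_handler_once(std::vector<u4>& handler_list,
                              std::vector<u4>& handler_stack,
                              u4 handler_pc) {
  if (std::find(handler_list.begin(), handler_list.end(), handler_pc) ==
      handler_list.end()) {
    handler_stack.push_back(handler_pc);
    handler_list.push_back(handler_pc);
  }
}

int main() {
  std::vector<u4> list, stack;
  push_handler_once(list, stack, 17);
  push_handler_once(list, stack, 17);              // second push is ignored
  std::printf("stack size: %zu\n", stack.size());  // 1
  return 0;
}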
diff --git a/hotspot/src/share/vm/classfile/verifier.hpp b/hotspot/src/share/vm/classfile/verifier.hpp
index 113e404e42e..012418abe0f 100644
--- a/hotspot/src/share/vm/classfile/verifier.hpp
+++ b/hotspot/src/share/vm/classfile/verifier.hpp
@@ -305,9 +305,10 @@ class ClassVerifier : public StackObj {
bool* this_uninit, constantPoolHandle cp, StackMapTable* stackmap_table,
TRAPS);
- // Used by ends_in_athrow() to push all handlers that contain bci onto
- // the handler_stack, if the handler is not already on the stack.
+ // Used by ends_in_athrow() to push all handlers that contain bci onto the
+ // handler_stack, if the handler has not already been pushed on the stack.
void push_handlers(ExceptionTable* exhandlers,
+ GrowableArray<u4>* handler_list,
GrowableArray<u4>* handler_stack,
u4 bci);
diff --git a/hotspot/src/share/vm/classfile/vmSymbols.hpp b/hotspot/src/share/vm/classfile/vmSymbols.hpp
index 64e717bbc10..85cec9601cf 100644
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp
@@ -292,6 +292,7 @@
template(setTargetNormal_name, "setTargetNormal") \
template(setTargetVolatile_name, "setTargetVolatile") \
template(setTarget_signature, "(Ljava/lang/invoke/MethodHandle;)V") \
+ template(DEFAULT_CONTEXT_name, "DEFAULT_CONTEXT") \
NOT_LP64( do_alias(intptr_signature, int_signature) ) \
LP64_ONLY( do_alias(intptr_signature, long_signature) ) \
\
@@ -501,6 +502,7 @@
template(class_signature, "Ljava/lang/Class;") \
template(string_signature, "Ljava/lang/String;") \
template(reference_signature, "Ljava/lang/ref/Reference;") \
+ template(sun_misc_Cleaner_signature, "Lsun/misc/Cleaner;") \
template(executable_signature, "Ljava/lang/reflect/Executable;") \
template(concurrenthashmap_signature, "Ljava/util/concurrent/ConcurrentHashMap;") \
template(String_StringBuilder_signature, "(Ljava/lang/String;)Ljava/lang/StringBuilder;") \
@@ -531,18 +533,17 @@
template(java_lang_management_ThreadState, "java/lang/management/ThreadState") \
template(java_lang_management_MemoryUsage, "java/lang/management/MemoryUsage") \
template(java_lang_management_ThreadInfo, "java/lang/management/ThreadInfo") \
- template(sun_management_ManagementFactory, "sun/management/ManagementFactory") \
template(sun_management_Sensor, "sun/management/Sensor") \
template(sun_management_Agent, "sun/management/Agent") \
- template(sun_management_DiagnosticCommandImpl, "sun/management/DiagnosticCommandImpl") \
- template(sun_management_GarbageCollectorImpl, "sun/management/GarbageCollectorImpl") \
+ template(com_sun_management_internal_DiagnosticCommandImpl, "com/sun/management/internal/DiagnosticCommandImpl") \
+ template(com_sun_management_internal_GarbageCollectorExtImpl,"com/sun/management/internal/GarbageCollectorExtImpl") \
template(sun_management_ManagementFactoryHelper, "sun/management/ManagementFactoryHelper") \
template(getDiagnosticCommandMBean_name, "getDiagnosticCommandMBean") \
template(getDiagnosticCommandMBean_signature, "()Lcom/sun/management/DiagnosticCommandMBean;") \
template(getGcInfoBuilder_name, "getGcInfoBuilder") \
- template(getGcInfoBuilder_signature, "()Lsun/management/GcInfoBuilder;") \
+ template(getGcInfoBuilder_signature, "()Lcom/sun/management/internal/GcInfoBuilder;") \
template(com_sun_management_GcInfo, "com/sun/management/GcInfo") \
- template(com_sun_management_GcInfo_constructor_signature, "(Lsun/management/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \
+ template(com_sun_management_GcInfo_constructor_signature, "(Lcom/sun/management/internal/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \
template(createGCNotification_name, "createGCNotification") \
template(createGCNotification_signature, "(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/sun/management/GcInfo;)V") \
template(createDiagnosticFrameworkNotification_name, "createDiagnosticFrameworkNotification") \
@@ -554,11 +555,12 @@
template(createGarbageCollectorMBean_signature, "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/management/GarbageCollectorMBean;") \
template(trigger_name, "trigger") \
template(clear_name, "clear") \
- template(trigger_method_signature, "(ILjava/lang/management/MemoryUsage;)V") \
+ template(trigger_method_signature, "(ILjava/lang/management/MemoryUsage;)V") \
template(startAgent_name, "startAgent") \
template(startRemoteAgent_name, "startRemoteManagementAgent") \
template(startLocalAgent_name, "startLocalManagementAgent") \
template(stopRemoteAgent_name, "stopRemoteManagementAgent") \
+ template(getAgentStatus_name, "getManagementAgentStatus") \
template(java_lang_management_ThreadInfo_constructor_signature, "(Ljava/lang/Thread;ILjava/lang/Object;Ljava/lang/Thread;JJJJ[Ljava/lang/StackTraceElement;)V") \
template(java_lang_management_ThreadInfo_with_locks_constructor_signature, "(Ljava/lang/Thread;ILjava/lang/Object;Ljava/lang/Thread;JJJJ[Ljava/lang/StackTraceElement;[Ljava/lang/Object;[I[Ljava/lang/Object;)V") \
template(long_long_long_long_void_signature, "(JJJJ)V") \
@@ -868,9 +870,12 @@
\
/* Custom branch frequencies profiling support for JSR292 */ \
do_class(java_lang_invoke_MethodHandleImpl, "java/lang/invoke/MethodHandleImpl") \
- do_intrinsic(_profileBoolean, java_lang_invoke_MethodHandleImpl, profileBoolean_name, profileBoolean_signature, F_S) \
- do_name( profileBoolean_name, "profileBoolean") \
- do_signature(profileBoolean_signature, "(Z[I)Z") \
+ do_intrinsic(_profileBoolean, java_lang_invoke_MethodHandleImpl, profileBoolean_name, profileBoolean_signature, F_S) \
+ do_name( profileBoolean_name, "profileBoolean") \
+ do_signature(profileBoolean_signature, "(Z[I)Z") \
+ do_intrinsic(_isCompileConstant, java_lang_invoke_MethodHandleImpl, isCompileConstant_name, isCompileConstant_signature, F_S) \
+ do_name( isCompileConstant_name, "isCompileConstant") \
+ do_alias( isCompileConstant_signature, object_boolean_signature) \
\
/* unsafe memory references (there are a lot of them...) */ \
do_signature(getObject_signature, "(Ljava/lang/Object;J)Ljava/lang/Object;") \
@@ -950,6 +955,20 @@
do_intrinsic(_putFloatVolatile, sun_misc_Unsafe, putFloatVolatile_name, putFloat_signature, F_RN) \
do_intrinsic(_putDoubleVolatile, sun_misc_Unsafe, putDoubleVolatile_name, putDouble_signature, F_RN) \
\
+ do_name(getShortUnaligned_name,"getShortUnaligned") do_name(putShortUnaligned_name,"putShortUnaligned") \
+ do_name(getCharUnaligned_name,"getCharUnaligned") do_name(putCharUnaligned_name,"putCharUnaligned") \
+ do_name(getIntUnaligned_name,"getIntUnaligned") do_name(putIntUnaligned_name,"putIntUnaligned") \
+ do_name(getLongUnaligned_name,"getLongUnaligned") do_name(putLongUnaligned_name,"putLongUnaligned") \
+ \
+ do_intrinsic(_getShortUnaligned, sun_misc_Unsafe, getShortUnaligned_name, getShort_signature, F_R) \
+ do_intrinsic(_getCharUnaligned, sun_misc_Unsafe, getCharUnaligned_name, getChar_signature, F_R) \
+ do_intrinsic(_getIntUnaligned, sun_misc_Unsafe, getIntUnaligned_name, getInt_signature, F_R) \
+ do_intrinsic(_getLongUnaligned, sun_misc_Unsafe, getLongUnaligned_name, getLong_signature, F_R) \
+ do_intrinsic(_putShortUnaligned, sun_misc_Unsafe, putShortUnaligned_name, putShort_signature, F_R) \
+ do_intrinsic(_putCharUnaligned, sun_misc_Unsafe, putCharUnaligned_name, putChar_signature, F_R) \
+ do_intrinsic(_putIntUnaligned, sun_misc_Unsafe, putIntUnaligned_name, putInt_signature, F_R) \
+ do_intrinsic(_putLongUnaligned, sun_misc_Unsafe, putLongUnaligned_name, putLong_signature, F_R) \
+ \
/* %%% these are redundant except perhaps for getAddress, but Unsafe has native methods for them */ \
do_signature(getByte_raw_signature, "(J)B") \
do_signature(putByte_raw_signature, "(JB)V") \
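
The *Unaligned intrinsics registered above let the JIT issue plain loads and stores on hardware that tolerates misalignment. In portable C++ the equivalent is usually spelled with memcpy, which optimizers fold into a single move where legal — a sketch, not the Unsafe implementation:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Read/write a 32-bit value at a possibly misaligned address.
static std::uint32_t get_int_unaligned(const void* p) {
  std::uint32_t v;
  std::memcpy(&v, p, sizeof v);   // folded to one mov on lenient CPUs
  return v;
}

static void put_int_unaligned(void* p, std::uint32_t v) {
  std::memcpy(p, &v, sizeof v);
}

int main() {
  unsigned char buf[8] = {0};
  put_int_unaligned(buf + 1, 0xDEADBEEFu);   // deliberately misaligned
  std::printf("%x\n", (unsigned)get_int_unaligned(buf + 1));
  return 0;
}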
diff --git a/hotspot/src/share/vm/code/codeCache.cpp b/hotspot/src/share/vm/code/codeCache.cpp
index 25323c595f6..7e831bf3249 100644
--- a/hotspot/src/share/vm/code/codeCache.cpp
+++ b/hotspot/src/share/vm/code/codeCache.cpp
@@ -1067,8 +1067,11 @@ void CodeCache::flush_dependents_on(Handle call_site, Handle method_handle) {
int marked = 0;
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
- marked = call_site_klass->mark_dependent_nmethods(changes);
+ InstanceKlass* ctxk = MethodHandles::get_call_site_context(call_site());
+ if (ctxk == NULL) {
+ return; // No dependencies to invalidate yet.
+ }
+ marked = ctxk->mark_dependent_nmethods(changes);
}
if (marked > 0) {
// At least one nmethod has been marked for deoptimization
diff --git a/hotspot/src/share/vm/code/dependencies.cpp b/hotspot/src/share/vm/code/dependencies.cpp
index c9b0cb63b28..1f56103be5b 100644
--- a/hotspot/src/share/vm/code/dependencies.cpp
+++ b/hotspot/src/share/vm/code/dependencies.cpp
@@ -117,8 +117,9 @@ void Dependencies::assert_has_no_finalizable_subclasses(ciKlass* ctxk) {
}
void Dependencies::assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle) {
- check_ctxk(call_site->klass());
- assert_common_2(call_site_target_value, call_site, method_handle);
+ ciKlass* ctxk = call_site->get_context();
+ check_ctxk(ctxk);
+ assert_common_3(call_site_target_value, ctxk, call_site, method_handle);
}
// Helper function. If we are adding a new dep. under ctxk2,
@@ -388,7 +389,7 @@ int Dependencies::_dep_args[TYPE_LIMIT] = {
3, // unique_concrete_subtypes_2 ctxk, k1, k2
3, // unique_concrete_methods_2 ctxk, m1, m2
1, // no_finalizable_subclasses ctxk
- 2 // call_site_target_value call_site, method_handle
+ 3 // call_site_target_value ctxk, call_site, method_handle
};
const char* Dependencies::dep_name(Dependencies::DepType dept) {
@@ -594,7 +595,7 @@ void Dependencies::DepStream::log_dependency(Klass* witness) {
const int nargs = argument_count();
GrowableArray<DepArgument>* args = new GrowableArray<DepArgument>(nargs);
for (int j = 0; j < nargs; j++) {
- if (type() == call_site_target_value) {
+ if (is_oop_argument(j)) {
args->push(argument_oop(j));
} else {
args->push(argument(j));
@@ -614,7 +615,7 @@ void Dependencies::DepStream::print_dependency(Klass* witness, bool verbose) {
int nargs = argument_count();
GrowableArray<DepArgument>* args = new GrowableArray<DepArgument>(nargs);
for (int j = 0; j < nargs; j++) {
- if (type() == call_site_target_value) {
+ if (is_oop_argument(j)) {
args->push(argument_oop(j));
} else {
args->push(argument(j));
@@ -710,7 +711,7 @@ Metadata* Dependencies::DepStream::argument(int i) {
* Returns a unique identifier for each dependency argument.
*/
uintptr_t Dependencies::DepStream::get_identifier(int i) {
- if (has_oop_argument()) {
+ if (is_oop_argument(i)) {
return (uintptr_t)(oopDesc*)argument_oop(i);
} else {
return (uintptr_t)argument(i);
@@ -737,7 +738,7 @@ Klass* Dependencies::DepStream::context_type() {
}
// Some dependencies are using the klass of the first object
- // argument as implicit context type (e.g. call_site_target_value).
+ // argument as implicit context type.
{
int ctxkj = dep_implicit_context_arg(type());
if (ctxkj >= 0) {
@@ -845,7 +846,13 @@ class ClassHierarchyWalker {
assert((uint)n <= (uint)_num_participants, "oob");
Method* fm = _found_methods[n];
assert(n == _num_participants || fm != NULL, "proper usage");
- assert(fm == NULL || fm->method_holder() == _participants[n], "sanity");
+ if (fm != NULL && fm->method_holder() != _participants[n]) {
+ // Default methods from interfaces can be added to classes. In
+ // that case the holder of the method is not the class but the
+ // interface where it's defined.
+ assert(fm->is_default_method(), "sanity");
+ return NULL;
+ }
return fm;
}
@@ -1508,9 +1515,16 @@ Klass* Dependencies::check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepCh
return find_finalizable_subclass(search_at);
}
-Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
- assert(call_site ->is_a(SystemDictionary::CallSite_klass()), "sanity");
- assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "sanity");
+Klass* Dependencies::check_call_site_target_value(Klass* recorded_ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes) {
+ assert(call_site->is_a(SystemDictionary::CallSite_klass()), "sanity");
+ assert(!oopDesc::is_null(method_handle), "sanity");
+
+ Klass* call_site_ctxk = MethodHandles::get_call_site_context(call_site);
+ assert(!Klass::is_null(call_site_ctxk), "call site context should be initialized already");
+ if (recorded_ctxk != call_site_ctxk) {
+ // Stale context
+ return recorded_ctxk;
+ }
if (changes == NULL) {
// Validate all CallSites
if (java_lang_invoke_CallSite::target(call_site) != method_handle)
@@ -1525,7 +1539,6 @@ Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_hand
return NULL; // assertion still valid
}
-
void Dependencies::DepStream::trace_and_log_witness(Klass* witness) {
if (witness != NULL) {
if (TraceDependencies) {
@@ -1586,7 +1599,7 @@ Klass* Dependencies::DepStream::check_call_site_dependency(CallSiteDepChange* ch
Klass* witness = NULL;
switch (type()) {
case call_site_target_value:
- witness = check_call_site_target_value(argument_oop(0), argument_oop(1), changes);
+ witness = check_call_site_target_value(context_type(), argument_oop(1), argument_oop(2), changes);
break;
default:
witness = NULL;
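
With the dependency reshaped above, validation first compares the recorded context klass against the call site's current one and treats a mismatch as a stale assertion. Stripped of VM types, the check has roughly this shape (a sketch; all names and types hypothetical):

#include <cstdio>

struct Klass { int id; };
struct CallSite { Klass* context; void* target; };

// Returns a witness on failure, nullptr while the assertion still holds.
static Klass* check_call_site_target_value(Klass* recorded_ctxk,
                                           const CallSite* cs,
                                           void* recorded_target) {
  if (recorded_ctxk != cs->context) return recorded_ctxk;  // stale context
  if (cs->target != recorded_target) return cs->context;   // target changed
  return nullptr;                                          // still valid
}

int main() {
  Klass ctx{1}, other{2};
  int target;
  CallSite cs{&ctx, &target};
  std::printf("valid: %d\n", check_call_site_target_value(&ctx,   &cs, &target) == nullptr);
  std::printf("stale: %d\n", check_call_site_target_value(&other, &cs, &target) != nullptr);
  return 0;
}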
diff --git a/hotspot/src/share/vm/code/dependencies.hpp b/hotspot/src/share/vm/code/dependencies.hpp
index 421ecc767d1..cacc5cac095 100644
--- a/hotspot/src/share/vm/code/dependencies.hpp
+++ b/hotspot/src/share/vm/code/dependencies.hpp
@@ -174,7 +174,7 @@ class Dependencies: public ResourceObj {
klass_types = all_types & ~non_klass_types,
non_ctxk_types = (1 << evol_method),
- implicit_ctxk_types = (1 << call_site_target_value),
+ implicit_ctxk_types = 0,
explicit_ctxk_types = all_types & ~(non_ctxk_types | implicit_ctxk_types),
max_arg_count = 3, // current maximum number of arguments (incl. ctxk)
@@ -330,7 +330,7 @@ class Dependencies: public ResourceObj {
static Klass* check_exclusive_concrete_methods(Klass* ctxk, Method* m1, Method* m2,
KlassDepChange* changes = NULL);
static Klass* check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepChange* changes = NULL);
- static Klass* check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
+ static Klass* check_call_site_target_value(Klass* recorded_ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
// A returned Klass* is NULL if the dependency assertion is still
// valid. A non-NULL Klass* is a 'witness' to the assertion
// failure, a point in the class hierarchy where the assertion has
@@ -496,7 +496,7 @@ class Dependencies: public ResourceObj {
bool next();
DepType type() { return _type; }
- bool has_oop_argument() { return type() == call_site_target_value; }
+ bool is_oop_argument(int i) { return type() == call_site_target_value && i > 0; }
uintptr_t get_identifier(int i);
int argument_count() { return dep_args(type()); }
@@ -682,7 +682,7 @@ class CallSiteDepChange : public DepChange {
_method_handle(method_handle)
{
assert(_call_site() ->is_a(SystemDictionary::CallSite_klass()), "must be");
- assert(_method_handle()->is_a(SystemDictionary::MethodHandle_klass()), "must be");
+ assert(_method_handle.is_null() || _method_handle()->is_a(SystemDictionary::MethodHandle_klass()), "must be");
}
// What kind of DepChange is this?
diff --git a/hotspot/src/share/vm/code/nmethod.cpp b/hotspot/src/share/vm/code/nmethod.cpp
index a8f6ad18940..03868e2d603 100644
--- a/hotspot/src/share/vm/code/nmethod.cpp
+++ b/hotspot/src/share/vm/code/nmethod.cpp
@@ -504,7 +504,7 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
basic_lock_owner_sp_offset,
basic_lock_sp_offset, oop_maps);
NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm));
- if (PrintAssembly && nm != NULL) {
+ if ((PrintAssembly || CompilerOracle::should_print(method)) && nm != NULL) {
Disassembler::decode(nm);
}
}
@@ -2325,6 +2325,7 @@ void nmethod::check_all_dependencies(DepChange& changes) {
// Dependency checking failed. Print out information about the failed
// dependency and finally fail with an assert. We can fail here, since
// dependency checking is never done in a product build.
+ tty->print_cr("Failed dependency:");
changes.print();
nm->print();
nm->print_dependencies();
@@ -2837,11 +2838,21 @@ const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
st.print(")");
return st.as_string();
}
+ case relocInfo::runtime_call_type: {
+ stringStream st;
+ st.print("runtime_call");
+ runtime_call_Relocation* r = iter.runtime_call_reloc();
+ address dest = r->destination();
+ CodeBlob* cb = CodeCache::find_blob(dest);
+ if (cb != NULL) {
+ st.print(" %s", cb->name());
+ }
+ return st.as_string();
+ }
case relocInfo::virtual_call_type: return "virtual_call";
case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
case relocInfo::static_call_type: return "static_call";
case relocInfo::static_stub_type: return "static_stub";
- case relocInfo::runtime_call_type: return "runtime_call";
case relocInfo::external_word_type: return "external_word";
case relocInfo::internal_word_type: return "internal_word";
case relocInfo::section_word_type: return "section_word";
diff --git a/hotspot/src/share/vm/code/pcDesc.cpp b/hotspot/src/share/vm/code/pcDesc.cpp
index 7f27cc0c08c..5f7ba80f07c 100644
--- a/hotspot/src/share/vm/code/pcDesc.cpp
+++ b/hotspot/src/share/vm/code/pcDesc.cpp
@@ -54,12 +54,7 @@ void PcDesc::print(nmethod* code) {
for (ScopeDesc* sd = code->scope_desc_at(real_pc(code));
sd != NULL;
sd = sd->sender()) {
- tty->print(" ");
- sd->method()->print_short_name(tty);
- tty->print(" @%d", sd->bci());
- if (sd->should_reexecute())
- tty->print(" reexecute=true");
- tty->cr();
+ sd->print_on(tty);
}
#endif
}
diff --git a/hotspot/src/share/vm/code/scopeDesc.cpp b/hotspot/src/share/vm/code/scopeDesc.cpp
index 7e557afcd35..6f19c3ab869 100644
--- a/hotspot/src/share/vm/code/scopeDesc.cpp
+++ b/hotspot/src/share/vm/code/scopeDesc.cpp
@@ -157,14 +157,18 @@ ScopeDesc* ScopeDesc::sender() const {
#ifndef PRODUCT
void ScopeDesc::print_value_on(outputStream* st) const {
- tty->print(" ");
+ st->print(" ");
method()->print_short_name(st);
int lineno = method()->line_number_from_bci(bci());
if (lineno != -1) {
- st->print_cr("@%d (line %d)", bci(), lineno);
+ st->print("@%d (line %d)", bci(), lineno);
} else {
- st->print_cr("@%d", bci());
+ st->print("@%d", bci());
}
+ if (should_reexecute()) {
+ st->print(" reexecute=true");
+ }
+ st->cr();
}
void ScopeDesc::print_on(outputStream* st) const {
@@ -174,7 +178,7 @@ void ScopeDesc::print_on(outputStream* st) const {
void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const {
// header
if (pd != NULL) {
- tty->print_cr("ScopeDesc(pc=" PTR_FORMAT " offset=%x):", pd->real_pc(_code), pd->pc_offset());
+ st->print_cr("ScopeDesc(pc=" PTR_FORMAT " offset=%x):", pd->real_pc(_code), pd->pc_offset());
}
print_value_on(st);
@@ -192,7 +196,7 @@ void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const {
// locals
{ GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->locals();
if (l != NULL) {
- tty->print_cr(" Locals");
+ st->print_cr(" Locals");
for (int index = 0; index < l->length(); index++) {
st->print(" - l%d: ", index);
l->at(index)->print_on(st);
@@ -205,7 +209,7 @@ void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const {
if (l != NULL) {
st->print_cr(" Expression stack");
for (int index = 0; index < l->length(); index++) {
- st->print(" - @%d: ", index);
+ st->print(" - @%d: ", index);
l->at(index)->print_on(st);
st->cr();
}
@@ -225,12 +229,12 @@ void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const {
#ifdef COMPILER2
if (DoEscapeAnalysis && is_top() && _objects != NULL) {
- tty->print_cr("Objects");
+ st->print_cr(" Objects");
for (int i = 0; i < _objects->length(); i++) {
ObjectValue* sv = (ObjectValue*) _objects->at(i);
- tty->print(" - %d: ", sv->id());
- sv->print_fields_on(tty);
- tty->cr();
+ st->print(" - %d: ", sv->id());
+ sv->print_fields_on(st);
+ st->cr();
}
}
#endif // COMPILER2
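
The scopeDesc.cpp fixes above all correct the same slip: print_on(st) methods were writing to the global tty instead of the stream argument, so output could not be redirected. A sketch of the intended discipline, with std::ostream standing in for outputStream:

#include <iostream>
#include <sstream>

struct ScopeInfo {
  int bci;
  bool reexecute;
  // Print to the stream the caller chose -- never to std::cout directly,
  // or redirection (e.g. capturing into a log buffer) silently breaks.
  void print_on(std::ostream& st) const {
    st << "@" << bci;
    if (reexecute) st << " reexecute=true";
    st << '\n';
  }
};

int main() {
  ScopeInfo s{42, true};
  s.print_on(std::cout);            // console
  std::ostringstream buf;
  s.print_on(buf);                  // captured -- works only if st is honored
  std::cout << "captured: " << buf.str();
  return 0;
}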
diff --git a/hotspot/src/share/vm/compiler/compileBroker.hpp b/hotspot/src/share/vm/compiler/compileBroker.hpp
index b35d3766d5f..d7cb49bae0d 100644
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp
@@ -172,7 +172,8 @@ class CompilerCounters : public CHeapObj<mtCompiler> {
// these methods should be called in a thread safe context
void set_current_method(const char* method) {
- strncpy(_current_method, method, (size_t)cmname_buffer_length);
+ strncpy(_current_method, method, (size_t)cmname_buffer_length-1);
+ _current_method[cmname_buffer_length-1] = '\0';
if (UsePerfData) _perf_current_method->set_value(method);
}
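
The compileBroker.hpp fix above addresses the classic strncpy pitfall: when the source string reaches the limit, strncpy leaves the destination unterminated. A minimal demonstration of the corrected copy-then-terminate pattern (buffer size illustrative, not HotSpot's):

#include <cstdio>
#include <cstring>

enum { cmname_buffer_length = 8 };   // illustrative, not HotSpot's value

static void set_current_method(char (&dst)[cmname_buffer_length],
                               const char* method) {
  // Copy at most length-1 bytes, then terminate explicitly: strncpy
  // writes no NUL when the source is >= the limit.
  std::strncpy(dst, method, cmname_buffer_length - 1);
  dst[cmname_buffer_length - 1] = '\0';
}

int main() {
  char buf[cmname_buffer_length];
  set_current_method(buf, "averyLongMethodName");
  std::printf("%s\n", buf);          // prints "averyLo", safely terminated
  return 0;
}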
diff --git a/hotspot/src/share/vm/compiler/compilerOracle.cpp b/hotspot/src/share/vm/compiler/compilerOracle.cpp
index 8299da22f0f..21586cdc998 100644
--- a/hotspot/src/share/vm/compiler/compilerOracle.cpp
+++ b/hotspot/src/share/vm/compiler/compilerOracle.cpp
@@ -673,9 +673,7 @@ static MethodMatcher* scan_flag_and_value(const char* type, const char* line, in
// so read integer and fraction part of double value separately.
if (sscanf(line, "%*[ \t]%255[0-9]%*[ /\t]%255[0-9]%n", buffer[0], buffer[1], &bytes_read) == 2) {
char value[512] = "";
- strncat(value, buffer[0], 255);
- strcat(value, ".");
- strncat(value, buffer[1], 255);
+ jio_snprintf(value, sizeof(value), "%s.%s", buffer[0], buffer[1]);
total_bytes_read += bytes_read;
return add_option_string(c_name, c_match, m_name, m_match, signature, flag, atof(value));
} else {
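
Collapsing the strncat/strcat chain into one jio_snprintf call bounds the entire write and self-terminates. The portable equivalent, with std::snprintf standing in for HotSpot's jio_ wrapper:

#include <cstdio>
#include <cstdlib>

int main() {
  const char* int_part  = "3";       // integer digits parsed by sscanf
  const char* frac_part = "14159";   // fraction digits parsed by sscanf
  char value[512];
  // One bounded, self-terminating call instead of strncat + strcat + strncat.
  std::snprintf(value, sizeof value, "%s.%s", int_part, frac_part);
  std::printf("%s -> %f\n", value, std::atof(value));
  return 0;
}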
diff --git a/hotspot/src/share/vm/compiler/disassembler.cpp b/hotspot/src/share/vm/compiler/disassembler.cpp
index 46cd7fb1f25..fc8758a3b00 100644
--- a/hotspot/src/share/vm/compiler/disassembler.cpp
+++ b/hotspot/src/share/vm/compiler/disassembler.cpp
@@ -300,6 +300,7 @@ address decode_env::handle_event(const char* event, address arg) {
strlen((const char*)arg) > sizeof(buffer) - 1) {
// Only print this when the mach changes
strncpy(buffer, (const char*)arg, sizeof(buffer) - 1);
+ buffer[sizeof(buffer) - 1] = '\0';
output()->print_cr("[Disassembling for mach='%s']", arg);
}
} else if (match(event, "format bytes-per-line")) {
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp
index bb2cb5a639f..47b90ff5a40 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp
@@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/freeBlockDictionary.hpp"
-#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.cpp
new file mode 100644
index 00000000000..7cacb77aaf1
--- /dev/null
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/specialized_oop_closures.hpp"
+
+// Generate CMS specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
index a64ca88cd7b..a0df138f804 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -32,6 +32,7 @@
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
+#include "memory/genCollectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
@@ -673,10 +674,10 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
- bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
+ bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0; \
if (is_par) { \
- assert(SharedHeap::heap()->n_par_threads() == \
- SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
+ assert(GenCollectedHeap::heap()->n_par_threads() == \
+ GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
walk_mem_region_with_cl_par(mr, bottom, top, cl); \
} else { \
walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
@@ -1907,11 +1908,11 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
assert(chunk->is_free() && ffc->is_free(), "Error");
_bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
if (rem_sz < SmallForDictionary) {
- bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
+ bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
assert(!is_par ||
- (SharedHeap::heap()->n_par_threads() ==
- SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+ (GenCollectedHeap::heap()->n_par_threads() ==
+ GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
returnChunkToFreeList(ffc);
split(size, rem_sz);
if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
@@ -1982,7 +1983,7 @@ void CompactibleFreeListSpace::save_marks() {
bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
assert(_promoInfo.tracking(), "No preceding save_marks?");
- assert(SharedHeap::heap()->n_par_threads() == 0,
+ assert(GenCollectedHeap::heap()->n_par_threads() == 0,
"Shouldn't be called if using parallel gc.");
return _promoInfo.noPromotions();
}
@@ -1991,7 +1992,7 @@ bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
\
void CompactibleFreeListSpace:: \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
- assert(SharedHeap::heap()->n_par_threads() == 0, \
+ assert(GenCollectedHeap::heap()->n_par_threads() == 0, \
"Shouldn't be called (yet) during parallel part of gc."); \
_promoInfo.promoted_oops_iterate##nv_suffix(blk); \
/* \
@@ -2442,11 +2443,10 @@ void CompactibleFreeListSpace::verify() const {
{
VerifyAllOopsClosure cl(_collector, this, span, past_remark,
_collector->markBitMap());
- CollectedHeap* ch = Universe::heap();
// Iterate over all oops in the heap. Uses the _no_header version
// since we are not interested in following the klass pointers.
- ch->oop_iterate_no_header(&cl);
+ GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
}
if (VerifyObjectStartArray) {
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
index 4c6fb3c5b1b..72b0e9e8564 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
@@ -28,7 +28,7 @@
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "memory/binaryTreeDictionary.hpp"
-#include "memory/blockOffsetTable.inline.hpp"
+#include "memory/blockOffsetTable.hpp"
#include "memory/freeList.hpp"
#include "memory/space.hpp"
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index 037abd9ffa6..6729f0b23c2 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -53,6 +53,7 @@
#include "memory/padded.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
+#include "memory/strongRootsScope.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
@@ -64,6 +65,7 @@
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
+#include "utilities/stack.inline.hpp"
// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
@@ -208,10 +210,6 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
use_adaptive_freelists,
dictionaryChoice);
NOT_PRODUCT(debug_cms_space = _cmsSpace;)
- if (_cmsSpace == NULL) {
- vm_exit_during_initialization(
- "CompactibleFreeListSpace allocation failure");
- }
_cmsSpace->_gen = this;
_gc_stats = new CMSGCStats();
@@ -230,14 +228,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
_par_gc_thread_states =
NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
- if (_par_gc_thread_states == NULL) {
- vm_exit_during_initialization("Could not allocate par gc structs");
- }
for (uint i = 0; i < ParallelGCThreads; i++) {
_par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
- if (_par_gc_thread_states[i] == NULL) {
- vm_exit_during_initialization("Could not allocate par gc structs");
- }
}
} else {
_par_gc_thread_states = NULL;
@@ -308,8 +300,6 @@ void CMSCollector::ref_processor_init() {
AdaptiveSizePolicy* CMSCollector::size_policy() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
- assert(gch->kind() == CollectedHeap::GenCollectedHeap,
- "Wrong type of heap");
return gch->gen_policy()->size_policy();
}
@@ -586,11 +576,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
return;
}
_hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
- if (_hash_seed == NULL) {
- warning("_hash_seed array allocation failure");
- return;
- }
-
typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
for (i = 0; i < num_queues; i++) {
PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
@@ -633,12 +618,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_eden_chunk_index = 0;
_eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
_eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
- if (_eden_chunk_array == NULL) {
- _eden_chunk_capacity = 0;
- warning("GC/CMS: _eden_chunk_array allocation failure");
- }
}
- assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
// Support for parallelizing survivor space rescan
if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
@@ -648,52 +628,15 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
_survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
_cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
- if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
- || _cursor == NULL) {
- warning("Failed to allocate survivor plab/chunk array");
- if (_survivor_plab_array != NULL) {
- FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
- _survivor_plab_array = NULL;
- }
- if (_survivor_chunk_array != NULL) {
- FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
- _survivor_chunk_array = NULL;
- }
- if (_cursor != NULL) {
- FREE_C_HEAP_ARRAY(size_t, _cursor);
- _cursor = NULL;
- }
- } else {
- _survivor_chunk_capacity = 2*max_plab_samples;
- for (uint i = 0; i < ParallelGCThreads; i++) {
- HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
- if (vec == NULL) {
- warning("Failed to allocate survivor plab array");
- for (int j = i; j > 0; j--) {
- FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
- }
- FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
- FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
- _survivor_plab_array = NULL;
- _survivor_chunk_array = NULL;
- _survivor_chunk_capacity = 0;
- break;
- } else {
- ChunkArray* cur =
- ::new (&_survivor_plab_array[i]) ChunkArray(vec,
- max_plab_samples);
- assert(cur->end() == 0, "Should be 0");
- assert(cur->array() == vec, "Should be vec");
- assert(cur->capacity() == max_plab_samples, "Error");
- }
- }
+ _survivor_chunk_capacity = 2*max_plab_samples;
+ for (uint i = 0; i < ParallelGCThreads; i++) {
+ HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
+ ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
+ assert(cur->end() == 0, "Should be 0");
+ assert(cur->array() == vec, "Should be vec");
+ assert(cur->capacity() == max_plab_samples, "Error");
}
}
- assert( ( _survivor_plab_array != NULL
- && _survivor_chunk_array != NULL)
- || ( _survivor_chunk_capacity == 0
- && _survivor_chunk_index == 0),
- "Error");
NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
_gc_counters = new CollectorCounters("CMS", 1);
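
The null checks deleted in this hunk rely on a contract: NEW_C_HEAP_ARRAY exits the VM on allocation failure rather than returning NULL, so caller-side recovery was dead code. A sketch of that abort-on-failure contract in plain C++ (names and exit path illustrative):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Allocation that never returns NULL: on failure it terminates the
// process, so callers need no (and get no) NULL handling.
static void* alloc_or_die(std::size_t bytes) {
  void* p = std::malloc(bytes);
  if (p == nullptr) {
    std::fputs("out of memory during VM initialization\n", stderr);
    std::exit(1);                    // analogous to vm_exit_out_of_memory
  }
  return p;
}

int main() {
  int* a = static_cast<int*>(alloc_or_die(16 * sizeof(int)));
  a[0] = 42;                         // no NULL check required by contract
  std::printf("%d\n", a[0]);
  std::free(a);
  return 0;
}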
@@ -1037,7 +980,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
assert_lock_strong(freelistLock());
#ifndef PRODUCT
- if (Universe::heap()->promotion_should_fail()) {
+ if (GenCollectedHeap::heap()->promotion_should_fail()) {
return NULL;
}
#endif // #ifndef PRODUCT
@@ -1114,7 +1057,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
oop old, markOop m,
size_t word_sz) {
#ifndef PRODUCT
- if (Universe::heap()->promotion_should_fail()) {
+ if (GenCollectedHeap::heap()->promotion_should_fail()) {
return NULL;
}
#endif // #ifndef PRODUCT
@@ -2524,7 +2467,7 @@ void CMSCollector::verify_after_remark_work_1() {
verification_mark_bm()->iterate(&vcl);
if (vcl.failed()) {
gclog_or_tty->print("Verification failed");
- Universe::heap()->print_on(gclog_or_tty);
+ gch->print_on(gclog_or_tty);
fatal("CMS: failed marking verification after remark");
}
}
@@ -3071,10 +3014,10 @@ void CMSCollector::checkpointRootsInitialWork() {
gch->set_par_threads(n_workers);
initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
if (n_workers > 1) {
- GenCollectedHeap::StrongRootsScope srs(gch);
+ StrongRootsScope srs;
workers->run_task(&tsk);
} else {
- GenCollectedHeap::StrongRootsScope srs(gch);
+ StrongRootsScope srs;
tsk.work(0);
}
gch->set_par_threads(0);
@@ -5169,11 +5112,11 @@ void CMSCollector::do_remark_parallel() {
// necessarily be so, since it's possible that we are doing
// ST marking.
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
- GenCollectedHeap::StrongRootsScope srs(gch);
+ StrongRootsScope srs;
workers->run_task(&tsk);
} else {
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
- GenCollectedHeap::StrongRootsScope srs(gch);
+ StrongRootsScope srs;
tsk.work(0);
}
@@ -5241,7 +5184,7 @@ void CMSCollector::do_remark_non_parallel() {
verify_work_stacks_empty();
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
- GenCollectedHeap::StrongRootsScope srs(gch);
+ StrongRootsScope srs;
gch->gen_process_roots(_cmsGen->level(),
true, // younger gens as roots
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
index c316db870e3..ab8be960f74 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
@@ -35,11 +35,11 @@
#include "memory/freeBlockDictionary.hpp"
#include "memory/iterator.hpp"
#include "memory/space.hpp"
+#include "memory/virtualspace.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
-#include "utilities/bitMap.inline.hpp"
-#include "utilities/stack.inline.hpp"
+#include "utilities/bitMap.hpp"
+#include "utilities/stack.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp
index f3e40f61d48..2c41ffeba7f 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp
@@ -27,7 +27,7 @@
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/shared/concurrentGCThread.hpp"
-#include "runtime/thread.inline.hpp"
+#include "runtime/thread.hpp"
class ConcurrentMarkSweepGeneration;
class CMSCollector;
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
index dfddda7cf2f..5c1d2650a19 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "memory/genOopClosures.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "oops/markOop.inline.hpp"
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp
index 39d86cd475a..423c21ca1ff 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@ void VM_CMS_Operation::verify_before_gc() {
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
- Universe::heap()->prepare_for_verify();
+ GenCollectedHeap::heap()->prepare_for_verify();
Universe::verify();
}
}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index 60ae0b4cd91..efae52e60b2 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -34,6 +34,7 @@
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
@@ -46,6 +47,7 @@
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
+#include "memory/strongRootsScope.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@@ -115,7 +117,7 @@ void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
}
size_t CMBitMap::compute_size(size_t heap_size) {
- return heap_size / mark_distance();
+ return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}
size_t CMBitMap::mark_distance() {
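
compute_size now rounds the bitmap size up to the reservation granularity before the space is reserved. For a power-of-two alignment that rounding is the standard mask trick, as in this sketch (allocation_align_size_up itself is VM code):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Round size up to the next multiple of a power-of-two alignment.
static std::size_t align_size_up(std::size_t size, std::size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  std::printf("%zu\n", align_size_up(1000, 4096));  // 4096
  std::printf("%zu\n", align_size_up(8192, 4096));  // 8192
  return 0;
}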
@@ -941,13 +943,6 @@ void ConcurrentMark::checkpointRootsInitialPre() {
_has_aborted = false;
-#ifndef PRODUCT
- if (G1PrintReachableAtInitialMark) {
- print_reachable("at-cycle-start",
- VerifyOption_G1UsePrevMarking, true /* all */);
- }
-#endif
-
// Initialize marking structures. This has to be done in a STW phase.
reset();
@@ -1325,7 +1320,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
if (VerifyDuringGC) {
HandleMark hm; // handle scope
- Universe::heap()->prepare_for_verify();
+ g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(before)");
}
@@ -1352,7 +1347,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
// Verify the heap w.r.t. the previous marking bitmap.
if (VerifyDuringGC) {
HandleMark hm; // handle scope
- Universe::heap()->prepare_for_verify();
+ g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(overflow)");
}
@@ -1378,7 +1373,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
if (VerifyDuringGC) {
HandleMark hm; // handle scope
- Universe::heap()->prepare_for_verify();
+ g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UseNextMarking,
" VerifyDuringGC:(after)");
}
@@ -1986,13 +1981,13 @@ void ConcurrentMark::cleanup() {
if (VerifyDuringGC) {
HandleMark hm; // handle scope
- Universe::heap()->prepare_for_verify();
+ g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(before)");
}
g1h->check_bitmaps("Cleanup Start");
- G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
+ G1CollectorPolicy* g1p = g1h->g1_policy();
g1p->record_concurrent_mark_cleanup_start();
double start = os::elapsedTime();
@@ -2097,7 +2092,7 @@ void ConcurrentMark::cleanup() {
if (VerifyDuringGC) {
HandleMark hm; // handle scope
- Universe::heap()->prepare_for_verify();
+ g1h->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(after)");
}
@@ -2650,7 +2645,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
g1h->ensure_parsability(false);
- G1CollectedHeap::StrongRootsScope srs(g1h);
+ StrongRootsScope srs;
// this is remark, so we'll use up all active threads
uint active_workers = g1h->workers()->active_workers();
if (active_workers == 0) {
@@ -2682,166 +2677,6 @@ void ConcurrentMark::checkpointRootsFinalWork() {
print_stats();
}
-#ifndef PRODUCT
-
-class PrintReachableOopClosure: public OopClosure {
-private:
- G1CollectedHeap* _g1h;
- outputStream* _out;
- VerifyOption _vo;
- bool _all;
-
-public:
- PrintReachableOopClosure(outputStream* out,
- VerifyOption vo,
- bool all) :
- _g1h(G1CollectedHeap::heap()),
- _out(out), _vo(vo), _all(all) { }
-
- void do_oop(narrowOop* p) { do_oop_work(p); }
- void do_oop( oop* p) { do_oop_work(p); }
-
- template <class T> void do_oop_work(T* p) {
- oop obj = oopDesc::load_decode_heap_oop(p);
- const char* str = NULL;
- const char* str2 = "";
-
- if (obj == NULL) {
- str = "";
- } else if (!_g1h->is_in_g1_reserved(obj)) {
- str = " O";
- } else {
- HeapRegion* hr = _g1h->heap_region_containing(obj);
- bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
- bool marked = _g1h->is_marked(obj, _vo);
-
- if (over_tams) {
- str = " >";
- if (marked) {
- str2 = " AND MARKED";
- }
- } else if (marked) {
- str = " M";
- } else {
- str = " NOT";
- }
- }
-
- _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
- p2i(p), p2i((void*) obj), str, str2);
- }
-};
-
-class PrintReachableObjectClosure : public ObjectClosure {
-private:
- G1CollectedHeap* _g1h;
- outputStream* _out;
- VerifyOption _vo;
- bool _all;
- HeapRegion* _hr;
-
-public:
- PrintReachableObjectClosure(outputStream* out,
- VerifyOption vo,
- bool all,
- HeapRegion* hr) :
- _g1h(G1CollectedHeap::heap()),
- _out(out), _vo(vo), _all(all), _hr(hr) { }
-
- void do_object(oop o) {
- bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
- bool marked = _g1h->is_marked(o, _vo);
- bool print_it = _all || over_tams || marked;
-
- if (print_it) {
- _out->print_cr(" "PTR_FORMAT"%s",
- p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
- PrintReachableOopClosure oopCl(_out, _vo, _all);
- o->oop_iterate_no_header(&oopCl);
- }
- }
-};
-
-class PrintReachableRegionClosure : public HeapRegionClosure {
-private:
- G1CollectedHeap* _g1h;
- outputStream* _out;
- VerifyOption _vo;
- bool _all;
-
-public:
- bool doHeapRegion(HeapRegion* hr) {
- HeapWord* b = hr->bottom();
- HeapWord* e = hr->end();
- HeapWord* t = hr->top();
- HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
- _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
- "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
- _out->cr();
-
- HeapWord* from = b;
- HeapWord* to = t;
-
- if (to > from) {
- _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
- _out->cr();
- PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
- hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
- _out->cr();
- }
-
- return false;
- }
-
- PrintReachableRegionClosure(outputStream* out,
- VerifyOption vo,
- bool all) :
- _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
-};
-
-void ConcurrentMark::print_reachable(const char* str,
- VerifyOption vo,
- bool all) {
- gclog_or_tty->cr();
- gclog_or_tty->print_cr("== Doing heap dump... ");
-
- if (G1PrintReachableBaseFile == NULL) {
- gclog_or_tty->print_cr(" #### error: no base file defined");
- return;
- }
-
- if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
- (JVM_MAXPATHLEN - 1)) {
- gclog_or_tty->print_cr(" #### error: file name too long");
- return;
- }
-
- char file_name[JVM_MAXPATHLEN];
- sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
- gclog_or_tty->print_cr(" dumping to file %s", file_name);
-
- fileStream fout(file_name);
- if (!fout.is_open()) {
- gclog_or_tty->print_cr(" #### error: could not open file");
- return;
- }
-
- outputStream* out = &fout;
- out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
- out->cr();
-
- out->print_cr("--- ITERATING OVER REGIONS");
- out->cr();
- PrintReachableRegionClosure rcl(out, vo, all);
- _g1h->heap_region_iterate(&rcl);
- out->cr();
-
- gclog_or_tty->print_cr(" done");
- gclog_or_tty->flush();
-}
-
-#endif // PRODUCT
-
void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
// Note we are overriding the read-only view of the prev map here, via
// the cast.
@@ -2958,9 +2793,7 @@ ConcurrentMark::claim_region(uint worker_id) {
#ifndef PRODUCT
enum VerifyNoCSetOopsPhase {
VerifyNoCSetOopsStack,
- VerifyNoCSetOopsQueues,
- VerifyNoCSetOopsSATBCompleted,
- VerifyNoCSetOopsSATBThread
+ VerifyNoCSetOopsQueues
};
class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
@@ -2973,8 +2806,6 @@ private:
switch (_phase) {
case VerifyNoCSetOopsStack: return "Stack";
case VerifyNoCSetOopsQueues: return "Queue";
- case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
- case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
default: ShouldNotReachHere();
}
return NULL;
@@ -3001,7 +2832,7 @@ public:
virtual void do_oop(narrowOop* p) {
// We should not come across narrow oops while scanning marking
- // stacks and SATB buffers.
+ // stacks
ShouldNotReachHere();
}
@@ -3010,10 +2841,7 @@ public:
}
};
-void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
- bool verify_enqueued_buffers,
- bool verify_thread_buffers,
- bool verify_fingers) {
+void ConcurrentMark::verify_no_cset_oops() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
if (!G1CollectedHeap::heap()->mark_in_progress()) {
return;
@@ -3021,65 +2849,47 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
VerifyNoCSetOopsClosure cl;
- if (verify_stacks) {
- // Verify entries on the global mark stack
- cl.set_phase(VerifyNoCSetOopsStack);
- _markStack.oops_do(&cl);
+ // Verify entries on the global mark stack
+ cl.set_phase(VerifyNoCSetOopsStack);
+ _markStack.oops_do(&cl);
- // Verify entries on the task queues
- for (uint i = 0; i < _max_worker_id; i += 1) {
- cl.set_phase(VerifyNoCSetOopsQueues, i);
- CMTaskQueue* queue = _task_queues->queue(i);
- queue->oops_do(&cl);
- }
+ // Verify entries on the task queues
+ for (uint i = 0; i < _max_worker_id; i += 1) {
+ cl.set_phase(VerifyNoCSetOopsQueues, i);
+ CMTaskQueue* queue = _task_queues->queue(i);
+ queue->oops_do(&cl);
}
- SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
-
- // Verify entries on the enqueued SATB buffers
- if (verify_enqueued_buffers) {
- cl.set_phase(VerifyNoCSetOopsSATBCompleted);
- satb_qs.iterate_completed_buffers_read_only(&cl);
+ // Verify the global finger
+ HeapWord* global_finger = finger();
+ if (global_finger != NULL && global_finger < _heap_end) {
+ // The global finger always points to a heap region boundary. We
+ // use heap_region_containing_raw() to get the containing region
+ // given that the global finger could be pointing to a free region
+    // which subsequently becomes a continues humongous region. If
+    // that happens, heap_region_containing() will return the bottom of
+    // the corresponding starts humongous region and the check below
+    // will no longer hold.
+ // Since we always iterate over all regions, we might get a NULL HeapRegion
+ // here.
+ HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
+ guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
+ err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
+ p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
}
- // Verify entries on the per-thread SATB buffers
- if (verify_thread_buffers) {
- cl.set_phase(VerifyNoCSetOopsSATBThread);
- satb_qs.iterate_thread_buffers_read_only(&cl);
- }
-
- if (verify_fingers) {
- // Verify the global finger
- HeapWord* global_finger = finger();
- if (global_finger != NULL && global_finger < _heap_end) {
- // The global finger always points to a heap region boundary. We
- // use heap_region_containing_raw() to get the containing region
- // given that the global finger could be pointing to a free region
- // which subsequently becomes continues humongous. If that
- // happens, heap_region_containing() will return the bottom of the
- // corresponding starts humongous region and the check below will
- // not hold any more.
- // Since we always iterate over all regions, we might get a NULL HeapRegion
- // here.
- HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
- guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
- err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
- p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
- }
-
- // Verify the task fingers
- assert(parallel_marking_threads() <= _max_worker_id, "sanity");
- for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
- CMTask* task = _tasks[i];
- HeapWord* task_finger = task->finger();
- if (task_finger != NULL && task_finger < _heap_end) {
- // See above note on the global finger verification.
- HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
- guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
- !task_hr->in_collection_set(),
- err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
- p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
- }
+ // Verify the task fingers
+ assert(parallel_marking_threads() <= _max_worker_id, "sanity");
+ for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
+ CMTask* task = _tasks[i];
+ HeapWord* task_finger = task->finger();
+ if (task_finger != NULL && task_finger < _heap_end) {
+ // See above note on the global finger verification.
+ HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
+ guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
+ !task_hr->in_collection_set(),
+ err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
+ p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
}
}
}
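
The slimmed-down verifier above unconditionally checks the global mark stack, the per-worker task queues, and both kinds of fingers; the SATB phases are gone along with their enum values. A minimal sketch of the closure shape it builds on, using only the obj_in_cs() predicate from G1CollectedHeap (illustrative only, not the VerifyNoCSetOopsClosure from this file):

    class VerifyNotInCSetOopClosure : public OopClosure {
      void check(oop obj) {
        // Once the collection set is finalized, nothing reachable from a
        // marking data structure may point into it.
        guarantee(!G1CollectedHeap::heap()->obj_in_cs(obj),
                  "marking entry points into the collection set");
      }
    public:
      virtual void do_oop(oop* p)       { if (*p != NULL) check(*p); }
      virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } // no narrow oops on mark stacks
    };
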
@@ -3392,22 +3202,29 @@ void ConcurrentMark::print_finger() {
}
#endif
-void CMTask::scan_object(oop obj) {
+template<bool scan>
+inline void CMTask::process_grey_object(oop obj) {
+ assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
if (_cm->verbose_high()) {
- gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
+ gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
_worker_id, p2i((void*) obj));
}
size_t obj_size = obj->size();
_words_scanned += obj_size;
- obj->oop_iterate(_cm_oop_closure);
+ if (scan) {
+ obj->oop_iterate(_cm_oop_closure);
+ }
statsOnly( ++_objs_scanned );
check_limits();
}
+template void CMTask::process_grey_object<true>(oop);
+template void CMTask::process_grey_object<false>(oop);
+
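
The two explicit instantiations above are the only variants the template ever takes; the parameter simply gates the oop_iterate() call. What the two in-class call sites expand to (sketch):

    process_grey_object<true>(obj);   // scan_object(): bookkeeping plus reference scan
    process_grey_object<false>(obj);  // deal_with_reference() typeArray path: bookkeeping only
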
// Closure for iteration over bitmaps
class CMBitMapClosure : public BitMapClosure {
private:
@@ -3878,12 +3695,11 @@ void CMTask::drain_satb_buffers() {
CMObjectClosure oc(this);
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
- satb_mq_set.set_closure(_worker_id, &oc);
// This keeps claiming and applying the closure to completed buffers
// until we run out of buffers or we need to abort.
while (!has_aborted() &&
- satb_mq_set.apply_closure_to_completed_buffer(_worker_id)) {
+ satb_mq_set.apply_closure_to_completed_buffer(&oc)) {
if (_cm->verbose_medium()) {
gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
}
@@ -3897,8 +3713,6 @@ void CMTask::drain_satb_buffers() {
concurrent() ||
satb_mq_set.completed_buffers_num() == 0, "invariant");
- satb_mq_set.set_closure(_worker_id, NULL);
-
// again, this was a potentially expensive operation, decrease the
// limits to get the regular clock call early
decrease_limits();
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp
index be9773b7991..3702273c959 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp
@@ -785,14 +785,9 @@ public:
}
// Verify that there are no CSet oops on the stacks (taskqueues /
- // global mark stack), enqueued SATB buffers, per-thread SATB
- // buffers, and fingers (global / per-task). The boolean parameters
- // decide which of the above data structures to verify. If marking
- // is not in progress, it's a no-op.
- void verify_no_cset_oops(bool verify_stacks,
- bool verify_enqueued_buffers,
- bool verify_thread_buffers,
- bool verify_fingers) PRODUCT_RETURN;
+ // global mark stack) and fingers (global / per-task).
+ // If marking is not in progress, it's a no-op.
+ void verify_no_cset_oops() PRODUCT_RETURN;
bool isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
@@ -1100,6 +1095,12 @@ private:
void regular_clock_call();
bool concurrent() { return _concurrent; }
+ // Test whether objAddr might have already been passed over by the
+ // mark bitmap scan, and so needs to be pushed onto the mark stack.
+ bool is_below_finger(HeapWord* objAddr, HeapWord* global_finger) const;
+
+  template<bool scan> void process_grey_object(oop obj);
+
public:
// It resets the task; it should be called right at the beginning of
// a marking phase.
@@ -1152,7 +1153,7 @@ public:
inline void deal_with_reference(oop obj);
// It scans an object and visits its children.
- void scan_object(oop obj);
+  void scan_object(oop obj) { process_grey_object<true>(obj); }
// It pushes an object on the local queue.
inline void push(oop obj);
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp
index 0b32d13a167..a96c2dc584a 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp
@@ -259,14 +259,35 @@ inline void CMTask::push(oop obj) {
++_local_pushes );
}
-// This determines whether the method below will check both the local
-// and global fingers when determining whether to push on the stack a
-// gray object (value 1) or whether it will only check the global one
-// (value 0). The tradeoffs are that the former will be a bit more
-// accurate and possibly push less on the stack, but it might also be
-// a little bit slower.
+inline bool CMTask::is_below_finger(HeapWord* objAddr,
+ HeapWord* global_finger) const {
+ // If objAddr is above the global finger, then the mark bitmap scan
+ // will find it later, and no push is needed. Similarly, if we have
+ // a current region and objAddr is between the local finger and the
+ // end of the current region, then no push is needed. The tradeoff
+ // of checking both vs only checking the global finger is that the
+ // local check will be more accurate and so result in fewer pushes,
+ // but may also be a little slower.
+ if (_finger != NULL) {
+ // We have a current region.
-#define _CHECK_BOTH_FINGERS_ 1
+ // Finger and region values are all NULL or all non-NULL. We
+ // use _finger to check since we immediately use its value.
+ assert(_curr_region != NULL, "invariant");
+ assert(_region_limit != NULL, "invariant");
+ assert(_region_limit <= global_finger, "invariant");
+
+ // True if objAddr is less than the local finger, or is between
+ // the region limit and the global finger.
+ if (objAddr < _finger) {
+ return true;
+ } else if (objAddr < _region_limit) {
+ return false;
+ } // Else check global finger.
+ }
+ // Check global finger.
+ return objAddr < global_finger;
+}
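
A standalone model of the predicate above, with a worked example; it uses uintptr_t in place of HeapWord* (an assumption for illustration, since the real code compares HeapWord pointers):

    static bool is_below_finger_model(uintptr_t addr, uintptr_t local_finger,
                                      uintptr_t region_limit, uintptr_t global_finger,
                                      bool has_region) {
      if (has_region) {
        if (addr < local_finger) return true;   // this task's scan already passed it
        if (addr < region_limit) return false;  // this task's scan will still reach it
      }
      return addr < global_finger;
    }
    // With local_finger = 0x1000, region_limit = 0x2000, global_finger = 0x8000:
    //   0x0800 -> true, 0x1800 -> false, 0x4000 -> true, 0x9000 -> false
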
inline void CMTask::deal_with_reference(oop obj) {
if (_cm->verbose_high()) {
@@ -297,50 +318,43 @@ inline void CMTask::deal_with_reference(oop obj) {
// CAS done in CMBitMap::parMark() call in the routine above.
HeapWord* global_finger = _cm->finger();
-#if _CHECK_BOTH_FINGERS_
- // we will check both the local and global fingers
-
- if (_finger != NULL && objAddr < _finger) {
- if (_cm->verbose_high()) {
- gclog_or_tty->print_cr("[%u] below the local finger ("PTR_FORMAT"), "
- "pushing it", _worker_id, p2i(_finger));
+ // We only need to push a newly grey object on the mark
+ // stack if it is in a section of memory the mark bitmap
+ // scan has already examined. Mark bitmap scanning
+ // maintains progress "fingers" for determining that.
+ //
+ // Notice that the global finger might be moving forward
+ // concurrently. This is not a problem. In the worst case, we
+ // mark the object while it is above the global finger and, by
+ // the time we read the global finger, it has moved forward
+ // past this object. In this case, the object will probably
+ // be visited when a task is scanning the region and will also
+ // be pushed on the stack. So, some duplicate work, but no
+ // correctness problems.
+ if (is_below_finger(objAddr, global_finger)) {
+ if (obj->is_typeArray()) {
+ // Immediately process arrays of primitive types, rather
+ // than pushing on the mark stack. This keeps us from
+ // adding humongous objects to the mark stack that might
+ // be reclaimed before the entry is processed - see
+ // selection of candidates for eager reclaim of humongous
+ // objects. The cost of the additional type test is
+ // mitigated by avoiding a trip through the mark stack,
+ // by only doing a bookkeeping update and avoiding the
+ // actual scan of the object - a typeArray contains no
+ // references, and the metadata is built-in.
+        process_grey_object<false>(obj);
+ } else {
+ if (_cm->verbose_high()) {
+ gclog_or_tty->print_cr("[%u] below a finger (local: " PTR_FORMAT
+ ", global: " PTR_FORMAT ") pushing "
+ PTR_FORMAT " on mark stack",
+ _worker_id, p2i(_finger),
+ p2i(global_finger), p2i(objAddr));
+ }
+ push(obj);
}
- push(obj);
- } else if (_curr_region != NULL && objAddr < _region_limit) {
- // do nothing
- } else if (objAddr < global_finger) {
- // Notice that the global finger might be moving forward
- // concurrently. This is not a problem. In the worst case, we
- // mark the object while it is above the global finger and, by
- // the time we read the global finger, it has moved forward
- // passed this object. In this case, the object will probably
- // be visited when a task is scanning the region and will also
- // be pushed on the stack. So, some duplicate work, but no
- // correctness problems.
-
- if (_cm->verbose_high()) {
- gclog_or_tty->print_cr("[%u] below the global finger "
- "("PTR_FORMAT"), pushing it",
- _worker_id, p2i(global_finger));
- }
- push(obj);
- } else {
- // do nothing
}
-#else // _CHECK_BOTH_FINGERS_
- // we will only check the global finger
-
- if (objAddr < global_finger) {
- // see long comment above
-
- if (_cm->verbose_high()) {
- gclog_or_tty->print_cr("[%u] below the global finger "
- "("PTR_FORMAT"), pushing it",
- _worker_id, p2i(global_finger));
- }
- push(obj);
- }
-#endif // _CHECK_BOTH_FINGERS_
}
}
}
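
Condensed, the resulting decision for a newly-marked object reads as below (a model of the control flow in the hunk above, not the verbatim method body):

    if (is_below_finger(objAddr, global_finger)) {
      if (obj->is_typeArray()) {
        // No references to scan and the metadata is built-in, so a
        // bookkeeping-only pass avoids parking a possibly humongous
        // array on the mark stack where it could block eager reclaim.
        process_grey_object<false>(obj);
      } else {
        push(obj);  // scanned later, when popped from the task queue
      }
    }
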
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp
index 4e199a6c28e..d4f2f190690 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp
@@ -110,15 +110,15 @@ void G1DefaultAllocator::abandon_gc_alloc_regions() {
_retained_old_gc_alloc_region = NULL;
}
-G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
- ParGCAllocBuffer(gclab_word_size), _retired(true) { }
+G1PLAB::G1PLAB(size_t gclab_word_size) :
+ PLAB(gclab_word_size), _retired(true) { }
HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
size_t gclab_word_size = _g1h->desired_plab_sz(dest);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
- G1ParGCAllocBuffer* alloc_buf = alloc_buffer(dest, context);
+ G1PLAB* alloc_buf = alloc_buffer(dest, context);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire();
@@ -151,7 +151,7 @@ G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
void G1DefaultParGCAllocator::retire_alloc_buffers() {
for (uint state = 0; state < InCSetState::Num; state++) {
- G1ParGCAllocBuffer* const buf = _alloc_buffers[state];
+ G1PLAB* const buf = _alloc_buffers[state];
if (buf != NULL) {
add_to_alloc_buffer_waste(buf->words_remaining());
buf->flush_and_retire_stats(_g1h->alloc_buffer_stats(state));
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp
index f14b1df4d8c..78741e33a1e 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp
@@ -28,7 +28,10 @@
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/plab.hpp"
+#include "gc_interface/collectedHeap.hpp"
+
+class EvacuationInfo;
// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
@@ -144,18 +147,18 @@ public:
}
};
-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
+class G1PLAB: public PLAB {
private:
bool _retired;
public:
- G1ParGCAllocBuffer(size_t gclab_word_size);
- virtual ~G1ParGCAllocBuffer() {
+ G1PLAB(size_t gclab_word_size);
+ virtual ~G1PLAB() {
guarantee(_retired, "Allocation buffer has not been retired");
}
virtual void set_buf(HeapWord* buf) {
- ParGCAllocBuffer::set_buf(buf);
+ PLAB::set_buf(buf);
_retired = false;
}
@@ -163,7 +166,12 @@ public:
if (_retired) {
return;
}
- ParGCAllocBuffer::retire();
+ PLAB::retire();
+ _retired = true;
+ }
+
+ virtual void flush_and_retire_stats(PLABStats* stats) {
+ PLAB::flush_and_retire_stats(stats);
_retired = true;
}
};
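
A usage sketch of the retire guard above; the buffer start and word size are hypothetical, and PLAB internals are elided. Note that flush_and_retire_stats() also sets _retired, so a stats-flushing retirement satisfies the destructor guarantee as well:

    G1PLAB plab(1024 /* gclab word size, hypothetical */);
    plab.set_buf(bottom);   // bottom: a HeapWord* buffer start (hypothetical); _retired flips to false
    plab.retire();          // forwards to PLAB::retire() and sets _retired
    plab.retire();          // second call is a no-op thanks to the early return
    // ~G1PLAB(): guarantee(_retired, ...) holds whichever path retired it
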
@@ -187,7 +195,7 @@ protected:
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
virtual void retire_alloc_buffers() = 0;
- virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
+ virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
// Calculate the survivor space object alignment in bytes. Returns that or 0 if
// there are no restrictions on survivor alignment.
@@ -208,6 +216,7 @@ public:
_g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
_alloc_buffer_waste(0), _undo_waste(0) {
}
+ virtual ~G1ParGCAllocator() { }
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
@@ -226,7 +235,7 @@ public:
HeapWord* plab_allocate(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
- G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
+ G1PLAB* buffer = alloc_buffer(dest, context);
if (_survivor_alignment_bytes == 0) {
return buffer->allocate(word_sz);
} else {
@@ -256,14 +265,14 @@ public:
};
class G1DefaultParGCAllocator : public G1ParGCAllocator {
- G1ParGCAllocBuffer _surviving_alloc_buffer;
- G1ParGCAllocBuffer _tenured_alloc_buffer;
- G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
+ G1PLAB _surviving_alloc_buffer;
+ G1PLAB _tenured_alloc_buffer;
+ G1PLAB* _alloc_buffers[InCSetState::Num];
public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
- virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
+ virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
assert(dest.is_valid(),
err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
assert(_alloc_buffers[dest.value()] != NULL,
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
index 7f728cd78bb..50a8782a286 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
@@ -303,9 +304,9 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
assert(blk_start <= threshold, "blk_start should be at or before threshold");
assert(pointer_delta(threshold, blk_start) <= N_words,
"offset should be <= BlockOffsetSharedArray::N");
- assert(Universe::heap()->is_in_reserved(blk_start),
+ assert(G1CollectedHeap::heap()->is_in_reserved(blk_start),
"reference must be into the heap");
- assert(Universe::heap()->is_in_reserved(blk_end-1),
+ assert(G1CollectedHeap::heap()->is_in_reserved(blk_end-1),
"limit must be within the heap");
assert(threshold == _array->_reserved.start() + index*N_words,
"index must agree with threshold");
@@ -458,7 +459,7 @@ G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
}
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
- assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+ assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
_next_offset_index = _array->index_for_raw(_bottom);
_next_offset_index++;
@@ -468,7 +469,7 @@ HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
}
void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
- assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+ assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
size_t bottom_index = _array->index_for_raw(_bottom);
assert(_array->address_for_index_raw(bottom_index) == _bottom,
@@ -477,7 +478,7 @@ void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
}
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
- assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+ assert(!G1CollectedHeap::heap()->is_in_reserved(_array->_offset_array),
"just checking");
_next_offset_index = _array->index_for(_bottom);
_next_offset_index++;
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp
index 3b81b08894a..9b2a2442dfb 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/memRegion.hpp"
-#include "runtime/virtualspace.hpp"
+#include "memory/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"
// The CollectedHeap type requires subtypes to implement a method
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp
index 6922818a790..cfcb084637a 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp
@@ -27,7 +27,7 @@
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.hpp"
-#include "runtime/virtualspace.hpp"
+#include "memory/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"
class CardTableModRefBS;
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 8733e4714a9..68e4815d732 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -22,11 +22,6 @@
*
*/
-#if !defined(__clang_major__) && defined(__GNUC__)
-// FIXME, formats have issues. Disable this macro definition, compile, and study warnings for more information.
-#define ATTRIBUTE_PRINTF(x,y)
-#endif
-
#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/stringTable.hpp"
@@ -70,6 +65,7 @@
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/stack.inline.hpp"
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
@@ -200,7 +196,7 @@ bool YoungList::check_list_well_formed() {
if (!curr->is_young()) {
gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
"incorrectly tagged (y: %d, surv: %d)",
- curr->bottom(), curr->end(),
+ p2i(curr->bottom()), p2i(curr->end()),
curr->is_young(), curr->is_survivor());
ret = false;
}
@@ -329,8 +325,8 @@ void YoungList::print() {
while (curr != NULL) {
gclog_or_tty->print_cr(" "HR_FORMAT", P: "PTR_FORMAT ", N: "PTR_FORMAT", age: %4d",
HR_FORMAT_PARAMS(curr),
- curr->prev_top_at_mark_start(),
- curr->next_top_at_mark_start(),
+ p2i(curr->prev_top_at_mark_start()),
+ p2i(curr->next_top_at_mark_start()),
curr->age_in_surv_rate_group_cond());
curr = curr->get_next_young_region();
}
@@ -409,10 +405,6 @@ bool G1CollectedHeap::is_scavengable(const void* p) {
return !hr->is_humongous();
}
-// Private class members.
-
-G1CollectedHeap* G1CollectedHeap::_g1h;
-
// Private methods.
HeapRegion*
@@ -1728,7 +1720,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
- SharedHeap(),
+ CollectedHeap(),
_g1_policy(policy_),
_dirty_card_queue_set(false),
_into_cset_dirty_card_queue_set(false),
@@ -1746,7 +1738,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
- _humongous_is_live(),
+ _humongous_reclaim_candidates(),
_has_humongous_reclaim_candidates(false),
_free_regions_coming(false),
_young_list(new YoungList(this)),
@@ -1768,9 +1760,12 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
- _g1h = this;
+ _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
+ /* are_GC_task_threads */true,
+ /* are_ConcurrentGC_threads */false);
+ _workers->initialize_workers();
- _allocator = G1Allocator::create_allocator(_g1h);
+ _allocator = G1Allocator::create_allocator(this);
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
int n_queues = MAX2((int)ParallelGCThreads, 1);
@@ -1797,6 +1792,26 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
guarantee(_task_queues != NULL, "task_queues allocation failure.");
}
+G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
+ size_t size,
+ size_t translation_factor) {
+ size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
+ // Allocate a new reserved space, preferring to use large pages.
+ ReservedSpace rs(size, preferred_page_size);
+ G1RegionToSpaceMapper* result =
+ G1RegionToSpaceMapper::create_mapper(rs,
+ size,
+ rs.alignment(),
+ HeapRegion::GrainBytes,
+ translation_factor,
+ mtGC);
+ if (TracePageSizes) {
+ gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
+ description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
+ }
+ return result;
+}
+
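
The helper is then used for every auxiliary table, as the initialize() hunk below shows. A condensed usage sketch; heap_words is a hypothetical word count, where the real calls derive it from g1_rs.size():

    size_t heap_words = max_byte_size / HeapWordSize;  // hypothetical
    G1RegionToSpaceMapper* bot_storage =
      create_aux_memory_mapper("Block offset table",
                               G1BlockOffsetSharedArray::compute_size(heap_words),
                               G1BlockOffsetSharedArray::N_bytes);
    // size is the table's own footprint; the translation factor relates
    // table bytes to the heap bytes they describe, so commits can track
    // the heap region by region.
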
jint G1CollectedHeap::initialize() {
CollectedHeap::pre_initialize();
os::enable_vtime();
@@ -1864,57 +1879,35 @@ jint G1CollectedHeap::initialize() {
ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_mapper(g1_rs,
+ g1_rs.size(),
UseLargePages ? os::large_page_size() : os::vm_page_size(),
HeapRegion::GrainBytes,
1,
mtJavaHeap);
heap_storage->set_mapping_changed_listener(&_listener);
- // Reserve space for the block offset table. We do not support automatic uncommit
- // for the card table at this time. BOT only.
- ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+ // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
G1RegionToSpaceMapper* bot_storage =
- G1RegionToSpaceMapper::create_mapper(bot_rs,
- os::vm_page_size(),
- HeapRegion::GrainBytes,
- G1BlockOffsetSharedArray::N_bytes,
- mtGC);
+ create_aux_memory_mapper("Block offset table",
+ G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
+ G1BlockOffsetSharedArray::N_bytes);
ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* cardtable_storage =
- G1RegionToSpaceMapper::create_mapper(cardtable_rs,
- os::vm_page_size(),
- HeapRegion::GrainBytes,
- G1BlockOffsetSharedArray::N_bytes,
- mtGC);
+ create_aux_memory_mapper("Card table",
+ G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
+ G1BlockOffsetSharedArray::N_bytes);
- // Reserve space for the card counts table.
- ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* card_counts_storage =
- G1RegionToSpaceMapper::create_mapper(card_counts_rs,
- os::vm_page_size(),
- HeapRegion::GrainBytes,
- G1BlockOffsetSharedArray::N_bytes,
- mtGC);
+ create_aux_memory_mapper("Card counts table",
+ G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
+ G1BlockOffsetSharedArray::N_bytes);
- // Reserve space for prev and next bitmap.
size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
-
- ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
G1RegionToSpaceMapper* prev_bitmap_storage =
- G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
- os::vm_page_size(),
- HeapRegion::GrainBytes,
- CMBitMap::mark_distance(),
- mtGC);
-
- ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+ create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance());
G1RegionToSpaceMapper* next_bitmap_storage =
- G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
- os::vm_page_size(),
- HeapRegion::GrainBytes,
- CMBitMap::mark_distance(),
- mtGC);
+ create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance());
_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
g1_barrier_set()->initialize(cardtable_storage);
@@ -1935,10 +1928,14 @@ jint G1CollectedHeap::initialize() {
_bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
- _g1h = this;
+ {
+ HeapWord* start = _hrm.reserved().start();
+ HeapWord* end = _hrm.reserved().end();
+ size_t granularity = HeapRegion::GrainBytes;
- _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
- _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
+ _in_cset_fast_test.initialize(start, end, granularity);
+ _humongous_reclaim_candidates.initialize(start, end, granularity);
+ }
// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
@@ -2026,15 +2023,15 @@ void G1CollectedHeap::stop() {
}
}
-void G1CollectedHeap::clear_humongous_is_live_table() {
- guarantee(G1EagerReclaimHumongousObjects, "Should only be called if true");
- _humongous_is_live.clear();
-}
-
size_t G1CollectedHeap::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
+void G1CollectedHeap::post_initialize() {
+ CollectedHeap::post_initialize();
+ ref_processing_init();
+}
+
void G1CollectedHeap::ref_processing_init() {
// Reference processing in G1 currently works as follows:
//
@@ -2071,7 +2068,6 @@ void G1CollectedHeap::ref_processing_init() {
// * Discovery is atomic - i.e. not concurrent.
// * Reference discovery will not need a barrier.
- SharedHeap::ref_processing_init();
MemRegion mr = reserved_region();
// Concurrent Mark ref processor
@@ -2128,6 +2124,7 @@ void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
}
#ifndef PRODUCT
+
class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
private:
unsigned _gc_time_stamp;
@@ -2462,11 +2459,6 @@ public:
}
};
-void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
- IterateOopClosureRegionClosure blk(cl);
- heap_region_iterate(&blk);
-}
-
// Iterates an ObjectClosure over all objects within a HeapRegion.
class IterateObjectClosureRegionClosure: public HeapRegionClosure {
@@ -2486,23 +2478,6 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
heap_region_iterate(&blk);
}
-// Calls a SpaceClosure on a HeapRegion.
-
-class SpaceClosureRegionClosure: public HeapRegionClosure {
- SpaceClosure* _cl;
-public:
- SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
- bool doHeapRegion(HeapRegion* r) {
- _cl->do_space(r);
- return false;
- }
-};
-
-void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
- SpaceClosureRegionClosure blk(cl);
- heap_region_iterate(&blk);
-}
-
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
_hrm.iterate(cl);
}
@@ -2639,23 +2614,19 @@ HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) cons
return result;
}
-Space* G1CollectedHeap::space_containing(const void* addr) const {
- return heap_region_containing(addr);
-}
-
HeapWord* G1CollectedHeap::block_start(const void* addr) const {
- Space* sp = space_containing(addr);
- return sp->block_start(addr);
+ HeapRegion* hr = heap_region_containing(addr);
+ return hr->block_start(addr);
}
size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
- Space* sp = space_containing(addr);
- return sp->block_size(addr);
+ HeapRegion* hr = heap_region_containing(addr);
+ return hr->block_size(addr);
}
bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
- Space* sp = space_containing(addr);
- return sp->block_is_obj(addr);
+ HeapRegion* hr = heap_region_containing(addr);
+ return hr->block_is_obj(addr);
}
bool G1CollectedHeap::supports_tlab_allocation() const {
@@ -2776,9 +2747,9 @@ public:
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (_g1h->is_obj_dead_cond(obj, _vo)) {
gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
- "points to dead obj "PTR_FORMAT, p, (void*) obj);
+ "points to dead obj "PTR_FORMAT, p2i(p), p2i(obj));
if (_vo == VerifyOption_G1UseMarkWord) {
- gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark()));
+ gclog_or_tty->print_cr(" Mark word: "INTPTR_FORMAT, (intptr_t)obj->mark());
}
obj->print_on(gclog_or_tty);
_failures = true;
@@ -2826,9 +2797,9 @@ class G1VerifyCodeRootOopClosure: public OopClosure {
// contains the nmethod
if (!hrrs->strong_code_roots_list_contains(_nm)) {
gclog_or_tty->print_cr("Code root location "PTR_FORMAT" "
- "from nmethod "PTR_FORMAT" not in strong "
- "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
- p, _nm, hr->bottom(), hr->end());
+ "from nmethod "PTR_FORMAT" not in strong "
+ "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
+ p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
_failures = true;
}
}
@@ -2884,7 +2855,7 @@ class VerifyKlassClosure: public KlassClosure {
_young_ref_counter_closure.reset_count();
k->oops_do(&_young_ref_counter_closure);
if (_young_ref_counter_closure.count() > 0) {
- guarantee(k->has_modified_oops(), err_msg("Klass " PTR_FORMAT ", has young refs but is not dirty.", k));
+ guarantee(k->has_modified_oops(), err_msg("Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k)));
}
}
};
@@ -2945,35 +2916,6 @@ public:
size_t live_bytes() { return _live_bytes; }
};
-class PrintObjsInRegionClosure : public ObjectClosure {
- HeapRegion *_hr;
- G1CollectedHeap *_g1;
-public:
- PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
- _g1 = G1CollectedHeap::heap();
- };
-
- void do_object(oop o) {
- if (o != NULL) {
- HeapWord *start = (HeapWord *) o;
- size_t word_sz = o->size();
- gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
- " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
- (void*) o, word_sz,
- _g1->isMarkedPrev(o),
- _g1->isMarkedNext(o),
- _hr->obj_allocated_since_prev_marking(o));
- HeapWord *end = start + word_sz;
- HeapWord *cur;
- int *val;
- for (cur = start; cur < end; cur++) {
- val = (int *) cur;
- gclog_or_tty->print("\t "PTR_FORMAT":%d\n", val, *val);
- }
- }
- }
-};
-
class VerifyRegionClosure: public HeapRegionClosure {
private:
bool _par;
@@ -3006,7 +2948,7 @@ public:
gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
"max_live_bytes "SIZE_FORMAT" "
"< calculated "SIZE_FORMAT,
- r->bottom(), r->end(),
+ p2i(r->bottom()), p2i(r->end()),
r->max_live_bytes(),
not_dead_yet_cl.live_bytes());
_failures = true;
@@ -3125,12 +3067,6 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
// print_extended_on() instead of print_on().
print_extended_on(gclog_or_tty);
gclog_or_tty->cr();
-#ifndef PRODUCT
- if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
- concurrent_mark()->print_reachable("at-verification-failure",
- vo, false /* all */);
- }
-#endif
gclog_or_tty->flush();
}
guarantee(!failures, "there should not have been any failures");
@@ -3210,10 +3146,10 @@ void G1CollectedHeap::print_on(outputStream* st) const {
st->print(" %-20s", "garbage-first heap");
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
capacity()/K, used_unlocked()/K);
- st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
- _hrm.reserved().start(),
- _hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords,
- _hrm.reserved().end());
+ st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
+ p2i(_hrm.reserved().start()),
+ p2i(_hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords),
+ p2i(_hrm.reserved().end()));
st->cr();
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
uint young_regions = _young_list->length();
@@ -3336,9 +3272,10 @@ void G1CollectedHeap::print_all_rsets() {
#endif // PRODUCT
G1CollectedHeap* G1CollectedHeap::heap() {
- assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
- "not a garbage-first heap");
- return _g1h;
+ CollectedHeap* heap = Universe::heap();
+ assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
+ assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
+ return (G1CollectedHeap*)heap;
}
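
The accessor now derives the singleton from Universe::heap() with a checked downcast instead of the removed _g1h static, so there is no second copy of the pointer to keep in sync during initialization. Callers are unchanged (sketch; some_closure is hypothetical):

    G1CollectedHeap* g1h = G1CollectedHeap::heap();  // asserts the heap kind in debug builds
    g1h->heap_region_iterate(&some_closure);
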
void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
@@ -3434,12 +3371,6 @@ size_t G1CollectedHeap::cards_scanned() {
return g1_rem_set()->cardsScanned();
}
-bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
- HeapRegion* region = region_at(index);
- assert(region->is_starts_humongous(), "Must start a humongous object");
- return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
-}
-
class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
private:
size_t _total_humongous;
@@ -3447,14 +3378,59 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
DirtyCardQueue _dcq;
- bool humongous_region_is_candidate(uint index) {
- HeapRegion* region = G1CollectedHeap::heap()->region_at(index);
- assert(region->is_starts_humongous(), "Must start a humongous object");
+ // We don't nominate objects with many remembered set entries, on
+ // the assumption that such objects are likely still live.
+ bool is_remset_small(HeapRegion* region) const {
HeapRegionRemSet* const rset = region->rem_set();
- bool const allow_stale_refs = G1EagerReclaimHumongousObjectsWithStaleRefs;
- return !oop(region->bottom())->is_objArray() &&
- ((allow_stale_refs && rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)) ||
- (!allow_stale_refs && rset->is_empty()));
+ return G1EagerReclaimHumongousObjectsWithStaleRefs
+ ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
+ : rset->is_empty();
+ }
+
+ bool is_typeArray_region(HeapRegion* region) const {
+ return oop(region->bottom())->is_typeArray();
+ }
+
+ bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
+ assert(region->is_starts_humongous(), "Must start a humongous object");
+
+ // Candidate selection must satisfy the following constraints
+ // while concurrent marking is in progress:
+ //
+ // * In order to maintain SATB invariants, an object must not be
+ // reclaimed if it was allocated before the start of marking and
+ // has not had its references scanned. Such an object must have
+ // its references (including type metadata) scanned to ensure no
+ // live objects are missed by the marking process. Objects
+ // allocated after the start of concurrent marking don't need to
+ // be scanned.
+ //
+ // * An object must not be reclaimed if it is on the concurrent
+ // mark stack. Objects allocated after the start of concurrent
+ // marking are never pushed on the mark stack.
+ //
+ // Nominating only objects allocated after the start of concurrent
+ // marking is sufficient to meet both constraints. This may miss
+ // some objects that satisfy the constraints, but the marking data
+ // structures don't support efficiently performing the needed
+ // additional tests or scrubbing of the mark stack.
+ //
+ // However, we presently only nominate is_typeArray() objects.
+ // A humongous object containing references induces remembered
+ // set entries on other regions. In order to reclaim such an
+ // object, those remembered sets would need to be cleaned up.
+ //
+ // We also treat is_typeArray() objects specially, allowing them
+ // to be reclaimed even if allocated before the start of
+ // concurrent mark. For this we rely on mark stack insertion to
+    // exclude is_typeArray() objects, which prevents reclaiming an object
+    // that is still on the mark stack. We also rely on the metadata for
+ // such objects to be built-in and so ensured to be kept live.
+ // Frequent allocation and drop of large binary blobs is an
+ // important use case for eager reclaim, and this special handling
+ // may reduce needed headroom.
+
+ return is_typeArray_region(region) && is_remset_small(region);
}
public:
@@ -3470,14 +3446,17 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
}
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- uint region_idx = r->hrm_index();
- bool is_candidate = humongous_region_is_candidate(region_idx);
- // Is_candidate already filters out humongous object with large remembered sets.
- // If we have a humongous object with a few remembered sets, we simply flush these
- // remembered set entries into the DCQS. That will result in automatic
- // re-evaluation of their remembered set entries during the following evacuation
- // phase.
+ bool is_candidate = humongous_region_is_candidate(g1h, r);
+ uint rindex = r->hrm_index();
+ g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
if (is_candidate) {
+ _candidate_humongous++;
+ g1h->register_humongous_region_with_cset(rindex);
+      // Is_candidate already filters out humongous objects with large remembered sets.
+      // If we have a humongous object with a few remembered set entries, we simply
+      // flush these entries into the DCQS. That will result in automatic
+ // re-evaluation of their remembered set entries during the following evacuation
+ // phase.
if (!r->rem_set()->is_empty()) {
guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
"Found a not-small remembered set here. This is inconsistent with previous assumptions.");
@@ -3499,8 +3478,6 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
r->rem_set()->clear_locked();
}
assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
- g1h->register_humongous_region_with_cset(region_idx);
- _candidate_humongous++;
}
_total_humongous++;
@@ -3520,6 +3497,7 @@ void G1CollectedHeap::register_humongous_regions_with_cset() {
}
double time = os::elapsed_counter();
+ // Collect reclaim candidate information and register candidates with cset.
RegisterHumongousWithInCSetFastTestClosure cl;
heap_region_iterate(&cl);
@@ -3529,10 +3507,6 @@ void G1CollectedHeap::register_humongous_regions_with_cset() {
cl.candidate_humongous());
_has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
- if (_has_humongous_reclaim_candidates || G1TraceEagerReclaimHumongousObjects) {
- clear_humongous_is_live_table();
- }
-
// Finally flush all remembered set entries to re-check into the global DCQS.
cl.flush_rem_set_entries();
}
@@ -3832,14 +3806,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
_cm->note_start_of_gc();
- // We should not verify the per-thread SATB buffers given that
- // we have not filtered them yet (we'll do so during the
- // GC). We also call this after finalize_cset() to
+ // We call this after finalize_cset() to
// ensure that the CSet has been finalized.
- _cm->verify_no_cset_oops(true /* verify_stacks */,
- true /* verify_enqueued_buffers */,
- false /* verify_thread_buffers */,
- true /* verify_fingers */);
+ _cm->verify_no_cset_oops();
if (_hr_printer.is_active()) {
HeapRegion* hr = g1_policy()->collection_set();
@@ -3862,16 +3831,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// Actually do the work...
evacuate_collection_set(evacuation_info);
- // We do this to mainly verify the per-thread SATB buffers
- // (which have been filtered by now) since we didn't verify
- // them earlier. No point in re-checking the stacks / enqueued
- // buffers given that the CSet has not changed since last time
- // we checked.
- _cm->verify_no_cset_oops(false /* verify_stacks */,
- false /* verify_enqueued_buffers */,
- true /* verify_thread_buffers */,
- true /* verify_fingers */);
-
free_collection_set(g1_policy()->collection_set(), evacuation_info);
eagerly_reclaim_humongous_regions();
@@ -3954,10 +3913,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// We redo the verification but now wrt to the new CSet which
// has just got initialized after the previous CSet was freed.
- _cm->verify_no_cset_oops(true /* verify_stacks */,
- true /* verify_enqueued_buffers */,
- true /* verify_thread_buffers */,
- true /* verify_fingers */);
+ _cm->verify_no_cset_oops();
_cm->note_end_of_gc();
// This timing is only used by the ergonomics to handle our pause target.
@@ -4116,7 +4072,7 @@ G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_s
oop old) {
assert(obj_in_cs(old),
err_msg("obj: "PTR_FORMAT" should still be in the CSet",
- (HeapWord*) old));
+ p2i(old)));
markOop m = old->mark();
oop forward_ptr = old->forward_to_atomic(old);
if (forward_ptr == NULL) {
@@ -4151,7 +4107,7 @@ G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_s
assert(old == forward_ptr || !obj_in_cs(forward_ptr),
err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
"should not be in the CSet",
- (HeapWord*) old, (HeapWord*) forward_ptr));
+ p2i(old), p2i(forward_ptr)));
return forward_ptr;
}
}
@@ -4856,7 +4812,7 @@ void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
bool process_strings, bool process_symbols) {
{
- uint n_workers = _g1h->workers()->active_workers();
+ uint n_workers = workers()->active_workers();
G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
set_par_threads(n_workers);
workers()->run_task(&g1_unlink_task);
@@ -4888,7 +4844,7 @@ class G1RedirtyLoggedCardsTask : public AbstractGangTask {
void G1CollectedHeap::redirty_logged_cards() {
double redirty_logged_cards_start = os::elapsedTime();
- uint n_workers = _g1h->workers()->active_workers();
+ uint n_workers = workers()->active_workers();
G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
dirty_card_queue_set().reset_for_par_iteration();
@@ -5001,8 +4957,7 @@ public:
_par_scan_state->push_on_queue(p);
} else {
assert(!Metaspace::contains((const void*)p),
- err_msg("Unexpectedly found a pointer from metadata: "
- PTR_FORMAT, p));
+ err_msg("Unexpectedly found a pointer from metadata: " PTR_FORMAT, p2i(p)));
_copy_non_heap_obj_cl->do_oop(p);
}
}
@@ -5321,7 +5276,7 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
- if (_g1h->g1_policy()->during_initial_mark_pause()) {
+ if (g1_policy()->during_initial_mark_pause()) {
// We also need to mark copied objects.
      copy_non_heap_cl = &copy_mark_non_heap_cl;
}
@@ -5667,14 +5622,14 @@ void G1CollectedHeap::verify_dirty_young_regions() {
bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
HeapWord* tams, HeapWord* end) {
guarantee(tams <= end,
- err_msg("tams: "PTR_FORMAT" end: "PTR_FORMAT, tams, end));
+ err_msg("tams: "PTR_FORMAT" end: "PTR_FORMAT, p2i(tams), p2i(end)));
HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
if (result < end) {
gclog_or_tty->cr();
gclog_or_tty->print_cr("## wrong marked address on %s bitmap: "PTR_FORMAT,
- bitmap_name, result);
+ bitmap_name, p2i(result));
gclog_or_tty->print_cr("## %s tams: "PTR_FORMAT" end: "PTR_FORMAT,
- bitmap_name, tams, end);
+ bitmap_name, p2i(tams), p2i(end));
return false;
}
return true;
@@ -5994,41 +5949,42 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
// required because stale remembered sets might reference locations that
// are currently allocated into.
uint region_idx = r->hrm_index();
- if (g1h->humongous_is_live(region_idx) ||
- g1h->humongous_region_is_always_live(region_idx)) {
+ if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
+ !r->rem_set()->is_empty()) {
if (G1TraceEagerReclaimHumongousObjects) {
- gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
+ gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length %u with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
region_idx,
- obj->size()*HeapWordSize,
- r->bottom(),
+ (size_t)obj->size() * HeapWordSize,
+ p2i(r->bottom()),
r->region_num(),
r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(),
next_bitmap->isMarked(r->bottom()),
- g1h->humongous_is_live(region_idx),
- obj->is_objArray()
+ g1h->is_humongous_reclaim_candidate(region_idx),
+ obj->is_typeArray()
);
}
return false;
}
- guarantee(!obj->is_objArray(),
- err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
- r->bottom()));
+ guarantee(obj->is_typeArray(),
+ err_msg("Only eagerly reclaiming type arrays is supported, but the object "
+ PTR_FORMAT " is not.",
+ p2i(r->bottom())));
if (G1TraceEagerReclaimHumongousObjects) {
- gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
+ gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length %u with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
region_idx,
- obj->size()*HeapWordSize,
- r->bottom(),
+ (size_t)obj->size() * HeapWordSize,
+ p2i(r->bottom()),
r->region_num(),
r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(),
next_bitmap->isMarked(r->bottom()),
- g1h->humongous_is_live(region_idx),
- obj->is_objArray()
+ g1h->is_humongous_reclaim_candidate(region_idx),
+ obj->is_typeArray()
);
}
// Need to clear mark bit of the humongous object if already set.
@@ -6075,12 +6031,12 @@ void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
HeapRegionSetCount empty_set;
remove_from_old_sets(empty_set, cl.humongous_free_count());
- G1HRPrinter* hr_printer = _g1h->hr_printer();
- if (hr_printer->is_active()) {
+ G1HRPrinter* hrp = hr_printer();
+ if (hrp->is_active()) {
FreeRegionListIterator iter(&local_cleanup_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
- hr_printer->cleanup(hr);
+ hrp->cleanup(hr);
}
}
@@ -6163,8 +6119,6 @@ void G1CollectedHeap::wait_while_free_regions_coming() {
}
void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
- assert(heap_lock_held_for_gc(),
- "the heap lock should already be held by or for this thread");
_young_list->push_region(hr);
}
@@ -6176,7 +6130,7 @@ public:
bool doHeapRegion(HeapRegion* r) {
if (r->is_young()) {
gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
- r->bottom(), r->end());
+ p2i(r->bottom()), p2i(r->end()));
_success = false;
}
return false;
@@ -6526,7 +6480,7 @@ class RegisterNMethodOopClosure: public OopClosure {
assert(!hr->is_continues_humongous(),
err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
" starting at "HR_FORMAT,
- _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
+ p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
// HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
hr->add_strong_code_root_locked(_nm);
@@ -6553,7 +6507,7 @@ class UnregisterNMethodOopClosure: public OopClosure {
assert(!hr->is_continues_humongous(),
err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
" starting at "HR_FORMAT,
- _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
+ p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
hr->remove_strong_code_root(_nm);
}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 1a3be8c2a9a..f3f0338a087 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -39,10 +39,9 @@
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
-#include "memory/sharedHeap.hpp"
#include "utilities/stack.hpp"
// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
@@ -76,6 +75,7 @@ class G1OldTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;
+class FlexibleWorkGang;
typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@@ -177,7 +177,7 @@ class G1RegionMappingChangedListener : public G1MappingChangedListener {
virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};
-class G1CollectedHeap : public SharedHeap {
+class G1CollectedHeap : public CollectedHeap {
friend class VM_CollectForMetadataAllocation;
friend class VM_G1CollectForAllocation;
friend class VM_G1CollectFull;
@@ -201,8 +201,7 @@ class G1CollectedHeap : public SharedHeap {
friend class G1CheckCSetFastTableClosure;
private:
- // The one and only G1CollectedHeap, so static functions can find it.
- static G1CollectedHeap* _g1h;
+ FlexibleWorkGang* _workers;
static size_t _humongous_object_threshold_in_words;
@@ -217,7 +216,6 @@ private:
// It keeps track of the humongous regions.
HeapRegionSet _humongous_set;
- void clear_humongous_is_live_table();
void eagerly_reclaim_humongous_regions();
// The number of regions we could create by expansion.
@@ -287,22 +285,26 @@ private:
// Helper for monitoring and management support.
G1MonitoringSupport* _g1mm;
- // Records whether the region at the given index is kept live by roots or
- // references from the young generation.
-  class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
+ // Records whether the region at the given index is (still) a
+ // candidate for eager reclaim. Only valid for humongous start
+ // regions; other regions have unspecified values. Humongous start
+ // regions are initialized at start of collection pause, with
+ // candidates removed from the set as they are found reachable from
+ // roots or the young generation.
+  class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
protected:
bool default_value() const { return false; }
public:
void clear() { G1BiasedMappedArray::clear(); }
- void set_live(uint region) {
- set_by_index(region, true);
+ void set_candidate(uint region, bool value) {
+ set_by_index(region, value);
}
- bool is_live(uint region) {
+ bool is_candidate(uint region) {
return get_by_index(region);
}
};
- HumongousIsLiveBiasedMappedArray _humongous_is_live;
+ HumongousReclaimCandidates _humongous_reclaim_candidates;
// Stores whether during humongous object registration we found candidate regions.
// If not, we can skip a few steps.
bool _has_humongous_reclaim_candidates;
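
A usage model for the candidate set above; region index 17 is hypothetical:

    _humongous_reclaim_candidates.clear();                  // start of pause: nothing nominated
    _humongous_reclaim_candidates.set_candidate(17, true);  // nominate a starts-humongous region
    if (_humongous_reclaim_candidates.is_candidate(17)) {
      // not yet found referenced from roots or young gen: still reclaimable
    }
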
@@ -351,6 +353,12 @@ private:
// heap after a compaction.
void print_hrm_post_compaction();
+ // Create a memory mapper for auxiliary data structures of the given size and
+ // translation factor.
+ static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
+ size_t size,
+ size_t translation_factor);
+
double verify(bool guard, const char* msg);
void verify_before_gc();
void verify_after_gc();
@@ -605,6 +613,7 @@ protected:
void enqueue_discovered_references(uint no_of_gc_workers);
public:
+ FlexibleWorkGang* workers() const { return _workers; }
G1Allocator* allocator() {
return _allocator;
@@ -630,21 +639,18 @@ public:
inline AllocationContextStats& allocation_context_stats();
// Do anything common to GC's.
- virtual void gc_prologue(bool full);
- virtual void gc_epilogue(bool full);
+ void gc_prologue(bool full);
+ void gc_epilogue(bool full);
+ // Modify the reclaim candidate set and test for presence.
+ // These are only valid for starts_humongous regions.
+ inline void set_humongous_reclaim_candidate(uint region, bool value);
+ inline bool is_humongous_reclaim_candidate(uint region);
+
+ // Remove from the reclaim candidate set. Also remove from the
+ // collection set so that later encounters avoid the slow path.
inline void set_humongous_is_live(oop obj);
- bool humongous_is_live(uint region) {
- return _humongous_is_live.is_live(region);
- }
-
- // Returns whether the given region (which must be a humongous (start) region)
- // is to be considered conservatively live regardless of any other conditions.
- bool humongous_region_is_always_live(uint index);
- // Returns whether the given region (which must be a humongous (start) region)
- // is considered a candidate for eager reclamation.
- bool humongous_region_is_candidate(uint index);
// Register the given region to be part of the collection set.
inline void register_humongous_region_with_cset(uint index);
// Register regions with humongous objects (actually on the start region) in
@@ -1000,11 +1006,14 @@ public:
// Return the (conservative) maximum heap alignment for any G1 heap
static size_t conservative_max_heap_alignment();
+ // Performs operations required after initialization has been done.
+ void post_initialize();
+
// Initialize weak reference processing.
- virtual void ref_processing_init();
+ void ref_processing_init();
// Explicitly import set_par_threads into this scope
- using SharedHeap::set_par_threads;
+ using CollectedHeap::set_par_threads;
// Set _n_par_threads according to a policy TBD.
void set_par_threads();
@@ -1251,10 +1260,6 @@ public:
// Iteration functions.
- // Iterate over all the ref-containing fields of all objects, calling
- // "cl.do_oop" on each.
- virtual void oop_iterate(ExtendedOopClosure* cl);
-
// Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl);
@@ -1262,9 +1267,6 @@ public:
object_iterate(cl);
}
- // Iterate over all spaces in use in the heap, in ascending address order.
- virtual void space_iterate(SpaceClosure* cl);
-
// Iterate over heap regions, in address order, terminating the
// iteration early if the "doHeapRegion" method returns "true".
void heap_region_iterate(HeapRegionClosure* blk) const;
@@ -1307,10 +1309,6 @@ public:
HeapRegion* next_compaction_region(const HeapRegion* from) const;
- // A CollectedHeap will contain some number of spaces. This finds the
- // space containing a given address, or else returns NULL.
- virtual Space* space_containing(const void* addr) const;
-
// Returns the HeapRegion that contains addr. addr must not be NULL.
template <class T>
inline HeapRegion* heap_region_containing_raw(const T addr) const;
@@ -1344,9 +1342,6 @@ public:
// the block is an object.
virtual bool block_is_obj(const HeapWord* addr) const;
- // Does this heap support heap inspection? (+PrintClassHistogram)
- virtual bool supports_heap_inspection() const { return true; }
-
// Section on thread-local allocation buffers (TLABs)
// See CollectedHeap for semantics.
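
The new HumongousReclaimCandidates table above is a per-region boolean map: entries are valid only for humongous start regions, are initialized at the start of a collection pause, and are cleared as regions are found reachable. As a rough illustration, here is a minimal standalone sketch of that idea, using a flat std::vector<bool> in place of the real G1BiasedMappedArray<bool> (class and method names here are hypothetical):

    #include <cstdint>
    #include <vector>

    // Sketch only: one candidate flag per heap region, defaulting to false,
    // mirroring default_value() in the patch above.
    class HumongousReclaimCandidatesSketch {
      std::vector<bool> _candidate;
    public:
      explicit HumongousReclaimCandidatesSketch(uint32_t num_regions)
        : _candidate(num_regions, false) {}
      void clear() { _candidate.assign(_candidate.size(), false); }
      void set_candidate(uint32_t region, bool value) { _candidate[region] = value; }
      bool is_candidate(uint32_t region) const { return _candidate[region]; }
    };

The real table is biased-mapped so a heap address translates directly to its region index; the sketch keeps only the set/test semantics.
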
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
index d029e08a854..fd4f138fbcb 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
@@ -352,20 +352,30 @@ inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
return is_obj_ill(obj, heap_region_containing(obj));
}
+inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
+ assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
+ _humongous_reclaim_candidates.set_candidate(region, value);
+}
+
+inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
+ assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
+ return _humongous_reclaim_candidates.is_candidate(region);
+}
+
inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
uint region = addr_to_region((HeapWord*)obj);
- // We not only set the "live" flag in the humongous_is_live table, but also
+ // Clear the flag in the humongous_reclaim_candidates table. Also
// reset the entry in the _in_cset_fast_test table so that subsequent references
// to the same humongous object do not go into the slow path again.
// This is racy, as multiple threads may at the same time enter here, but this
// is benign.
- // During collection we only ever set the "live" flag, and only ever clear the
+ // During collection we only ever clear the "candidate" flag, and only ever clear the
// entry in the in_cset_fast_table.
// We only ever evaluate the contents of these tables (in the VM thread) after
// having synchronized the worker threads with the VM thread, or in the same
// thread (i.e. within the VM thread).
- if (!_humongous_is_live.is_live(region)) {
- _humongous_is_live.set_live(region);
+ if (is_humongous_reclaim_candidate(region)) {
+ set_humongous_reclaim_candidate(region, false);
_in_cset_fast_test.clear_humongous(region);
}
}
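
The comment block above argues that the racy check-then-clear in set_humongous_is_live() is benign: during a pause, workers only ever clear the candidate flag, and the tables are read only after the workers have synchronized with the VM thread. A small sketch of that pattern, with hypothetical types standing in for the G1 tables:

    #include <atomic>
    #include <cstdint>

    struct ReclaimFlagsSketch {
      std::atomic<bool> candidate[1024] = {};  // hypothetical fixed region count

      // Racy but benign: every racing thread writes the same value (false),
      // so any interleaving produces the same final state.
      void mark_live(uint32_t region) {
        if (candidate[region].load(std::memory_order_relaxed)) {
          candidate[region].store(false, std::memory_order_relaxed);
          // The real code also clears the in_cset_fast_test entry here so
          // later references to the same object skip the slow path.
        }
      }
    };
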
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index 0be44bd25c5..41943dd1ca0 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -22,11 +22,6 @@
*
*/
-#ifndef __clang_major__
-// FIXME, formats have issues. Disable this macro definition, compile, and study warnings for more information.
-#define ATTRIBUTE_PRINTF(x,y)
-#endif
-
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
@@ -302,7 +297,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
if (reserve_perc > 50) {
reserve_perc = 50;
warning("G1ReservePercent is set to a value that is too large, "
- "it's been updated to %u", reserve_perc);
+ "it's been updated to " UINTX_FORMAT, reserve_perc);
}
_reserve_factor = (double) reserve_perc / 100.0;
// This will be set when the heap is expanded
@@ -1460,7 +1455,7 @@ void G1CollectorPolicy::update_survivors_policy() {
_max_survivor_regions = (uint) ceil(max_survivor_regions_d);
_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
- HeapRegion::GrainWords * _max_survivor_regions);
+ HeapRegion::GrainWords * _max_survivor_regions, counters());
}
bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
@@ -1800,7 +1795,7 @@ void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream
assert(csr->in_collection_set(), "bad CS");
st->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
HR_FORMAT_PARAMS(csr),
- csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
+ p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
csr->age_in_surv_rate_group_cond());
csr = next;
}
@@ -2166,7 +2161,7 @@ void TraceYoungGenTimeData::print_summary(const char* str,
void TraceYoungGenTimeData::print_summary_sd(const char* str,
const NumberSeq* seq) const {
print_summary(str, seq);
- gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
+ gclog_or_tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
"(num", seq->num(), seq->sd(), seq->maximum());
}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp
index eff119625be..92df3a0d291 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp
@@ -160,40 +160,43 @@ public:
} while (0)
-#define ergo_verbose(_tag_, _action_) \
- ergo_verbose_common(_tag_, _action_, "", 0, 0, 0, 0, 0, 0)
-
-#define ergo_verbose0(_tag_, _action_, _extra_format_) \
- ergo_verbose_common(_tag_, _action_, _extra_format_, 0, 0, 0, 0, 0, 0)
-
-#define ergo_verbose1(_tag_, _action_, _extra_format_, \
- _arg0_) \
- ergo_verbose_common(_tag_, _action_, _extra_format_, \
- _arg0_, 0, 0, 0, 0, 0)
-
-#define ergo_verbose2(_tag_, _action_, _extra_format_, \
- _arg0_, _arg1_) \
- ergo_verbose_common(_tag_, _action_, _extra_format_, \
- _arg0_, _arg1_, 0, 0, 0, 0)
-
-#define ergo_verbose3(_tag_, _action_, _extra_format_, \
- _arg0_, _arg1_, _arg2_) \
- ergo_verbose_common(_tag_, _action_, _extra_format_, \
- _arg0_, _arg1_, _arg2_, 0, 0, 0)
-
-#define ergo_verbose4(_tag_, _action_, _extra_format_, \
- _arg0_, _arg1_, _arg2_, _arg3_) \
- ergo_verbose_common(_tag_, _action_, _extra_format_, \
- _arg0_, _arg1_, _arg2_, _arg3_, 0, 0)
-
-#define ergo_verbose5(_tag_, _action_, _extra_format_, \
- _arg0_, _arg1_, _arg2_, _arg3_, _arg4_) \
- ergo_verbose_common(_tag_, _action_, _extra_format_, \
- _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, 0)
-
#define ergo_verbose6(_tag_, _action_, _extra_format_, \
_arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_) \
ergo_verbose_common(_tag_, _action_, _extra_format_, \
_arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_)
+#define ergo_verbose5(_tag_, _action_, _extra_format_, \
+ _arg0_, _arg1_, _arg2_, _arg3_, _arg4_) \
+ ergo_verbose6(_tag_, _action_, _extra_format_ "%s", \
+ _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, "")
+
+#define ergo_verbose4(_tag_, _action_, _extra_format_, \
+ _arg0_, _arg1_, _arg2_, _arg3_) \
+ ergo_verbose5(_tag_, _action_, _extra_format_ "%s", \
+ _arg0_, _arg1_, _arg2_, _arg3_, "")
+
+#define ergo_verbose3(_tag_, _action_, _extra_format_, \
+ _arg0_, _arg1_, _arg2_) \
+ ergo_verbose4(_tag_, _action_, _extra_format_ "%s", \
+ _arg0_, _arg1_, _arg2_, "")
+
+#define ergo_verbose2(_tag_, _action_, _extra_format_, \
+ _arg0_, _arg1_) \
+ ergo_verbose3(_tag_, _action_, _extra_format_ "%s", \
+ _arg0_, _arg1_, "")
+
+#define ergo_verbose1(_tag_, _action_, _extra_format_, \
+ _arg0_) \
+ ergo_verbose2(_tag_, _action_, _extra_format_ "%s", \
+ _arg0_, "")
+
+
+#define ergo_verbose0(_tag_, _action_, _extra_format_) \
+ ergo_verbose1(_tag_, _action_, _extra_format_ "%s", \
+ "")
+
+#define ergo_verbose(_tag_, _action_) \
+ ergo_verbose0(_tag_, _action_, "")
+
+
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ERGOVERBOSE_HPP
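
The rewrite above inverts the macro stack: only ergo_verbose6 calls ergo_verbose_common directly, and each lower arity forwards to the next one up by appending "%s" to the format and "" to the arguments, so the common printer always receives exactly six argument slots. A self-contained sketch of the same trick with printf as the sink:

    #include <cstdio>

    #define LOG3(fmt, a0, a1, a2) printf("ergo: " fmt "\n", a0, a1, a2)
    #define LOG2(fmt, a0, a1)     LOG3(fmt "%s", a0, a1, "")
    #define LOG1(fmt, a0)         LOG2(fmt "%s", a0, "")
    #define LOG0(fmt)             LOG1(fmt "%s", "")

    int main() {
      LOG0("start pause");
      LOG2("regions: %d young, %d humongous", 10, 2);  // pads one "" slot
      return 0;
    }

Each padded slot consumes its matching "%s" with an empty string, so the output is unchanged while every expansion bottoms out in the same fixed-arity call.
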
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
index 3b7a12b320b..8f51050a7e8 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -263,7 +263,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms)", true, G1Log::LevelFinest, 3);
- _gc_par_phases[CodeCacheRoots] = new WorkerDataArray<double>(max_gc_threads, "CodeCache Roots (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[WaitForStrongCLD] = new WorkerDataArray(max_gc_threads, "Wait For Strong CLD (ms)", true, G1Log::LevelFinest, 3);
_gc_par_phases[WeakCLDRoots] = new WorkerDataArray(max_gc_threads, "Weak CLD Roots (ms)", true, G1Log::LevelFinest, 3);
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
index 54165cafd3b..323ecf691d3 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
SystemDictionaryRoots,
CLDGRoots,
JVMTIRoots,
- CodeCacheRoots,
CMRefRoots,
WaitForStrongCLD,
WeakCLDRoots,
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
index e588988cc7c..ee329a77ef1 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
@@ -29,7 +29,7 @@
#include "gc_implementation/g1/g1CardCounts.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
-#include "runtime/thread.inline.hpp"
+#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"
class DirtyCardQueue;
@@ -123,7 +123,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {
// Resets the hot card cache and discards the entries.
void reset_hot_cache() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
- assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
+ assert(Thread::current_noinline()->is_VM_thread(), "Current thread should be the VMthread");
if (default_use_cache()) {
reset_hot_cache_internal();
}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
index 38ec4a96c7e..64af265d3ec 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
@@ -61,9 +61,8 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
bool clear_all_softrefs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
- SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
- if (sh->collector_policy()->should_clear_all_soft_refs()) {
+ if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
assert(clear_all_softrefs, "Policy should have been checked earler");
}
#endif
@@ -102,11 +101,6 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
BiasedLocking::restore_marks();
GenMarkSweep::deallocate_stacks();
- // "free at last gc" is calculated from these.
- // CHF: cheating for now!!!
- // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
- // Universe::set_heap_used_at_last_gc(Universe::heap()->used());
-
CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
@@ -125,7 +119,6 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
- GenMarkSweep::trace(" 1");
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -168,12 +161,12 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
// Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
- G1CollectedHeap::heap()->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
+ g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
if (VerifyDuringGC) {
HandleMark hm; // handle scope
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
- Universe::heap()->prepare_for_verify();
+ g1h->prepare_for_verify();
// Note: we can verify only the heap here. When an object is
// marked, the previous value of the mark word (including
// identity hash values, ages, etc) is preserved, and the mark
@@ -187,7 +180,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
if (!VerifySilently) {
gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
}
- Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
+ g1h->verify(VerifySilently, VerifyOption_G1UseMarkWord);
if (!VerifySilently) {
gclog_or_tty->print_cr("]");
}
@@ -205,7 +198,6 @@ void G1MarkSweep::mark_sweep_phase2() {
// tracking expects us to do so. See comment under phase4.
GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
- GenMarkSweep::trace("2");
prepare_compaction();
}
@@ -239,7 +231,6 @@ void G1MarkSweep::mark_sweep_phase3() {
// Adjust the pointers to reflect the new locations
GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
- GenMarkSweep::trace("3");
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();
@@ -301,7 +292,6 @@ void G1MarkSweep::mark_sweep_phase4() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
- GenMarkSweep::trace("4");
G1SpaceCompactClosure blk;
g1h->heap_region_iterate(&blk);
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
index e8d8c307676..58a84ce49a2 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/generation.hpp"
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp
index 2bdbca58686..e2b22d5a142 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp
@@ -23,9 +23,12 @@
*/
#include "precompiled.hpp"
+#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.hpp"
+#include "memory/iterator.inline.hpp"
+#include "utilities/stack.inline.hpp"
G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
@@ -50,3 +53,6 @@ void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan
assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
}
+
+// Generate G1 specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
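
The added SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(ALL_KLASS_OOP_OOP_ITERATE_DEFN) line stamps out the G1-specialized oop_oop_iterate bodies in this translation unit instead of relying on them being generated elsewhere. Schematically, the pattern is one macro that enumerates closure types and another that defines a function per type; a compilable sketch with hypothetical closure names (the real lists live in g1_specialized_oop_closures.hpp and iterator.inline.hpp):

    struct ScanClosureA { void do_oop(int** p) { (void)p; } };
    struct ScanClosureB { void do_oop(int** p) { (void)p; } };

    #define ALL_SPECIALIZED_CLOSURES(f) \
      f(ScanClosureA)                   \
      f(ScanClosureB)

    #define DEFINE_ITERATE(ClosureType)                 \
      void oop_iterate_##ClosureType(ClosureType* cl) { \
        (void)cl; /* walk the fields, calling cl->do_oop on each */ \
      }

    ALL_SPECIALIZED_CLOSURES(DEFINE_ITERATE)
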
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
index 6afda16da20..11c22ca004b 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,7 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
- assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
+ assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT
assert(_from != NULL, "from region must be non-NULL");
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp
index 008002566da..9144d3c7246 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,37 +44,45 @@
#endif
#include "utilities/bitMap.inline.hpp"
-G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
- _high_boundary(NULL), _committed(), _page_size(0), _special(false),
+G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
+ _low_boundary(NULL), _high_boundary(NULL), _committed(), _page_size(0), _special(false),
_dirty(), _executable(false) {
+ initialize_with_page_size(rs, used_size, page_size);
}
-bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
- if (!rs.is_reserved()) {
- return false; // Allocation failed.
- }
- assert(_low_boundary == NULL, "VirtualSpace already initialized");
- assert(page_size > 0, "Granularity must be non-zero.");
+void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
+ guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
+
+ vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
+ vmassert(page_size > 0, "Page size must be non-zero.");
+
+ guarantee(is_ptr_aligned(rs.base(), page_size),
+ err_msg("Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size));
+ guarantee(is_size_aligned(used_size, os::vm_page_size()),
+ err_msg("Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size));
+ guarantee(used_size <= rs.size(),
+ err_msg("Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size()));
+ guarantee(is_size_aligned(rs.size(), page_size),
+ err_msg("Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size));
_low_boundary = rs.base();
- _high_boundary = _low_boundary + rs.size();
+ _high_boundary = _low_boundary + used_size;
_special = rs.special();
_executable = rs.executable();
_page_size = page_size;
- assert(_committed.size() == 0, "virtual space initialized more than once");
- uintx size_in_bits = rs.size() / page_size;
- _committed.resize(size_in_bits, /* in_resource_area */ false);
+ vmassert(_committed.size() == 0, "virtual space initialized more than once");
+ BitMap::idx_t size_in_pages = rs.size() / page_size;
+ _committed.resize(size_in_pages, /* in_resource_area */ false);
if (_special) {
- _dirty.resize(size_in_bits, /* in_resource_area */ false);
+ _dirty.resize(size_in_pages, /* in_resource_area */ false);
}
- return true;
+ _tail_size = used_size % _page_size;
}
-
G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
release();
}
@@ -87,12 +95,18 @@ void G1PageBasedVirtualSpace::release() {
_special = false;
_executable = false;
_page_size = 0;
+ _tail_size = 0;
_committed.resize(0, false);
_dirty.resize(0, false);
}
size_t G1PageBasedVirtualSpace::committed_size() const {
- return _committed.count_one_bits() * _page_size;
+ size_t result = _committed.count_one_bits() * _page_size;
+ // The last page might not be committed in full.
+ if (is_last_page_partial() && _committed.at(_committed.size() - 1)) {
+ result -= _page_size - _tail_size;
+ }
+ return result;
}
size_t G1PageBasedVirtualSpace::reserved_size() const {
@@ -103,65 +117,134 @@ size_t G1PageBasedVirtualSpace::uncommitted_size() const {
return reserved_size() - committed_size();
}
-uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
+size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
return (addr - _low_boundary) / _page_size;
}
-bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
- uintptr_t end = start + size_in_pages;
- return _committed.get_next_zero_offset(start, end) >= end;
+bool G1PageBasedVirtualSpace::is_area_committed(size_t start_page, size_t size_in_pages) const {
+ size_t end_page = start_page + size_in_pages;
+ return _committed.get_next_zero_offset(start_page, end_page) >= end_page;
}
-bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
- uintptr_t end = start + size_in_pages;
- return _committed.get_next_one_offset(start, end) >= end;
+bool G1PageBasedVirtualSpace::is_area_uncommitted(size_t start_page, size_t size_in_pages) const {
+ size_t end_page = start_page + size_in_pages;
+ return _committed.get_next_one_offset(start_page, end_page) >= end_page;
}
-char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
+char* G1PageBasedVirtualSpace::page_start(size_t index) const {
return _low_boundary + index * _page_size;
}
-size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
- return num * _page_size;
+bool G1PageBasedVirtualSpace::is_after_last_page(size_t index) const {
+ guarantee(index <= _committed.size(),
+ err_msg("Given boundary page " SIZE_FORMAT " is beyond managed page count " SIZE_FORMAT, index, _committed.size()));
+ return index == _committed.size();
}
-bool G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
+void G1PageBasedVirtualSpace::commit_preferred_pages(size_t start, size_t num_pages) {
+ vmassert(num_pages > 0, "No full pages to commit");
+ vmassert(start + num_pages <= _committed.size(),
+ err_msg("Tried to commit area from page " SIZE_FORMAT " to page " SIZE_FORMAT " "
+ "that is outside of managed space of " SIZE_FORMAT " pages",
+ start, start + num_pages, _committed.size()));
+
+ char* start_addr = page_start(start);
+ size_t size = num_pages * _page_size;
+
+ os::commit_memory_or_exit(start_addr, size, _page_size, _executable,
+ err_msg("Failed to commit area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
+ p2i(start_addr), p2i(start_addr + size), size));
+}
+
+void G1PageBasedVirtualSpace::commit_tail() {
+ vmassert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");
+
+ char* const aligned_end_address = (char*)align_ptr_down(_high_boundary, _page_size);
+ os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable,
+ err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
+ p2i(aligned_end_address), p2i(_high_boundary), _tail_size));
+}
+
+void G1PageBasedVirtualSpace::commit_internal(size_t start_page, size_t end_page) {
+ guarantee(start_page < end_page,
+ err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
+ guarantee(end_page <= _committed.size(),
+ err_msg("Given end page " SIZE_FORMAT " is beyond end of managed page amount of " SIZE_FORMAT, end_page, _committed.size()));
+
+ size_t pages = end_page - start_page;
+ bool need_to_commit_tail = is_after_last_page(end_page) && is_last_page_partial();
+
+ // If we have to commit some (partial) tail area, decrease the amount of pages to avoid
+ // committing that in the full-page commit code.
+ if (need_to_commit_tail) {
+ pages--;
+ }
+
+ if (pages > 0) {
+ commit_preferred_pages(start_page, pages);
+ }
+
+ if (need_to_commit_tail) {
+ commit_tail();
+ }
+}
+
+char* G1PageBasedVirtualSpace::bounded_end_addr(size_t end_page) const {
+ return MIN2(_high_boundary, page_start(end_page));
+}
+
+void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_page) {
+ guarantee(start_page < end_page,
+ err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
+
+ os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page));
+}
+
+bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
// We need to make sure to commit all pages covered by the given area.
- guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
+ guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");
bool zero_filled = true;
- uintptr_t end = start + size_in_pages;
+ size_t end_page = start_page + size_in_pages;
if (_special) {
// Check for dirty pages and update zero_filled if any found.
- if (_dirty.get_next_one_offset(start,end) < end) {
+ if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
zero_filled = false;
- _dirty.clear_range(start, end);
+ _dirty.clear_range(start_page, end_page);
}
} else {
- os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
- err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
+ commit_internal(start_page, end_page);
}
- _committed.set_range(start, end);
+ _committed.set_range(start_page, end_page);
if (AlwaysPreTouch) {
- os::pretouch_memory(page_start(start), page_start(end));
+ pretouch_internal(start_page, end_page);
}
return zero_filled;
}
-void G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
- guarantee(is_area_committed(start, size_in_pages), "checking");
+void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
+ guarantee(start_page < end_page,
+ err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
+ char* start_addr = page_start(start_page);
+ os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
+}
+
+void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
+ guarantee(is_area_committed(start_page, size_in_pages), "checking");
+
+ size_t end_page = start_page + size_in_pages;
if (_special) {
// Mark that memory is dirty. If committed again the memory might
// need to be cleared explicitly.
- _dirty.set_range(start, start + size_in_pages);
+ _dirty.set_range(start_page, end_page);
} else {
- os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
+ uncommit_internal(start_page, end_page);
}
- _committed.clear_range(start, start + size_in_pages);
+ _committed.clear_range(start_page, end_page);
}
bool G1PageBasedVirtualSpace::contains(const void* p) const {
@@ -175,7 +258,8 @@ void G1PageBasedVirtualSpace::print_on(outputStream* out) {
out->cr();
out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
- out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
+ out->print_cr(" - preferred page size: " SIZE_FORMAT, _page_size);
+ out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
}
void G1PageBasedVirtualSpace::print() {
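
committed_size() above now corrects for a partially used last page: the bitmap counts whole pages, so when the tail page is committed the raw count over-reports by page_size - tail_size. A worked example with assumed sizes (2M preferred pages, 5M used):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t page_size = 2 * M;       // assumed preferred page size
      const size_t used_size = 5 * M;       // assumed used reservation

      size_t tail_size = used_size % page_size;                 // 1M partial tail
      size_t pages = (used_size + page_size - 1) / page_size;   // 3 tracked pages

      size_t raw = pages * page_size;                    // 6M from the bitmap
      size_t committed = raw - (page_size - tail_size);  // 6M - 1M = 5M
      assert(committed == used_size);
      return 0;
    }
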
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp
index fb2c78415f9..edf4d6972a8 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,13 +27,19 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
-#include "runtime/virtualspace.hpp"
+#include "memory/virtualspace.hpp"
#include "utilities/bitMap.hpp"
// Virtual space management helper for a virtual space with an OS page allocation
// granularity.
// (De-)Allocation requests are always OS page aligned by passing a page index
// and multiples of pages.
+// For systems that only commit memory in a given size (always greater than
+// the page size), the base address is required to be aligned to that page size.
+// The actual size requested need not be aligned to that page size, but the size
+// of the reservation passed may be rounded up to this page size. Any fragment
+// (less than the page size) of the actual size at the tail of the request will
+// be committed using OS small pages.
// The implementation gives an error when trying to commit or uncommit pages that
// have already been committed or uncommitted.
class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
@@ -43,7 +49,11 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
char* _low_boundary;
char* _high_boundary;
- // The commit/uncommit granularity in bytes.
+ // The size of the tail in bytes of the handled space that needs to be committed
+ // using small pages.
+ size_t _tail_size;
+
+ // The preferred page size used for commit/uncommit in bytes.
size_t _page_size;
// Bitmap used for verification of commit/uncommit operations.
@@ -62,30 +72,55 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
// Indicates whether the committed space should be executable.
bool _executable;
+ // Helper function for committing memory. Commit the given memory range by using
+ // _page_size pages as much as possible and the remainder with small sized pages.
+ void commit_internal(size_t start_page, size_t end_page);
+ // Commit num_pages pages of _page_size size starting from start. All argument
+ // checking has been performed.
+ void commit_preferred_pages(size_t start_page, size_t num_pages);
+ // Commit space at the high end of the space that needs to be committed with small
+ // sized pages.
+ void commit_tail();
+
+ // Uncommit the given memory range.
+ void uncommit_internal(size_t start_page, size_t end_page);
+
+ // Pretouch the given memory range.
+ void pretouch_internal(size_t start_page, size_t end_page);
+
// Returns the index of the page which contains the given address.
uintptr_t addr_to_page_index(char* addr) const;
// Returns the address of the given page index.
- char* page_start(uintptr_t index);
- // Returns the byte size of the given number of pages.
- size_t byte_size_for_pages(size_t num);
+ char* page_start(size_t index) const;
+
+ // Is the given page index the last page?
+ bool is_last_page(size_t index) const { return index == (_committed.size() - 1); }
+ // Is the given page index the first after last page?
+ bool is_after_last_page(size_t index) const;
+ // Is the last page only partially covered by this space?
+ bool is_last_page_partial() const { return !is_ptr_aligned(_high_boundary, _page_size); }
+ // Returns the end address of the given page bounded by the reserved space.
+ char* bounded_end_addr(size_t end_page) const;
// Returns true if the entire area is backed by committed memory.
- bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
+ bool is_area_committed(size_t start_page, size_t size_in_pages) const;
// Returns true if the entire area is not backed by committed memory.
- bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
+ bool is_area_uncommitted(size_t start_page, size_t size_in_pages) const;
+ void initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size);
public:
// Commit the given area of pages starting at start being size_in_pages large.
// Returns true if the given area is zero filled upon completion.
- bool commit(uintptr_t start, size_t size_in_pages);
+ bool commit(size_t start_page, size_t size_in_pages);
// Uncommit the given area of pages starting at start being size_in_pages large.
- void uncommit(uintptr_t start, size_t size_in_pages);
+ void uncommit(size_t start_page, size_t size_in_pages);
- // Initialization
- G1PageBasedVirtualSpace();
- bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
+ // Initialize the given reserved space with the given base address and the size
+ // actually used.
+ // Prefer to commit in page_size chunks.
+ G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size);
// Destruction
~G1PageBasedVirtualSpace();
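
The commit path documented above splits a request into whole preferred-size pages plus, when the range reaches the end of the space, a sub-page tail committed with OS small pages. A sketch of that split with stand-in commit helpers (both hypothetical):

    #include <cstddef>

    static void commit_large(char*, size_t) { /* assumed preferred-page commit */ }
    static void commit_small(char*, size_t) { /* assumed small-page commit */ }

    void commit_range(char* base, size_t page_size, size_t num_pages,
                      size_t tail_size, bool range_ends_at_tail) {
      size_t full_pages = num_pages;
      if (range_ends_at_tail && tail_size > 0) {
        full_pages--;  // the partial page is not committed at preferred size
      }
      if (full_pages > 0) {
        commit_large(base, full_pages * page_size);
      }
      if (range_ends_at_tail && tail_size > 0) {
        commit_small(base + full_pages * page_size, tail_size);
      }
    }
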
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
index 6ffb5fe5902..e7f6e18c4da 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp
@@ -26,8 +26,10 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
+#include "gc_implementation/g1/g1StringDedup.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
+#include "utilities/stack.inline.hpp"
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
: _g1h(g1h),
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
index 1b03f8caae2..81c79806a1a 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,7 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
assert(has_partial_array_mask(p), "invariant");
oop from_obj = clear_partial_array_mask(p);
- assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+ assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
assert(from_obj->is_objArray(), "must be obj array");
objArrayOop from_obj_array = objArrayOop(from_obj);
// The from-space object contains the real length.
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp
index 68d967c764a..b78f37d8cc9 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,22 +26,21 @@
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.inline.hpp"
-#include "runtime/virtualspace.hpp"
+#include "memory/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/bitMap.inline.hpp"
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
- size_t commit_granularity,
+ size_t used_size,
+ size_t page_size,
size_t region_granularity,
MemoryType type) :
- _storage(),
- _commit_granularity(commit_granularity),
+ _storage(rs, used_size, page_size),
_region_granularity(region_granularity),
_listener(NULL),
_commit_map() {
- guarantee(is_power_of_2(commit_granularity), "must be");
+ guarantee(is_power_of_2(page_size), "must be");
guarantee(is_power_of_2(region_granularity), "must be");
- _storage.initialize_with_granularity(rs, commit_granularity);
MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
@@ -55,25 +54,26 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
public:
G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
- size_t os_commit_granularity,
+ size_t actual_size,
+ size_t page_size,
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
- G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
- _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
+ G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
+ _pages_per_region(alloc_granularity / (page_size * commit_factor)) {
- guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
+ guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
_commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
}
- virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
- bool zero_filled = _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+ virtual void commit_regions(uint start_idx, size_t num_regions) {
+ bool zero_filled = _storage.commit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
_commit_map.set_range(start_idx, start_idx + num_regions);
fire_on_commit(start_idx, num_regions, zero_filled);
}
- virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
- _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+ virtual void uncommit_regions(uint start_idx, size_t num_regions) {
+ _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
_commit_map.clear_range(start_idx, start_idx + num_regions);
}
};
@@ -98,22 +98,23 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
public:
G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
- size_t os_commit_granularity,
+ size_t actual_size,
+ size_t page_size,
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
- G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
- _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
+ G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
+ _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
- guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
- _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
+ guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
+ _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
_commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
}
- virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
- for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
- assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
- uintptr_t idx = region_idx_to_page_idx(i);
+ virtual void commit_regions(uint start_idx, size_t num_regions) {
+ for (uint i = start_idx; i < start_idx + num_regions; i++) {
+ assert(!_commit_map.at(i), err_msg("Trying to commit storage at region %u that is already committed", i));
+ size_t idx = region_idx_to_page_idx(i);
uint old_refcount = _refcounts.get_by_index(idx);
bool zero_filled = false;
if (old_refcount == 0) {
@@ -125,10 +126,10 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
}
}
- virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
- for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
- assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
- uintptr_t idx = region_idx_to_page_idx(i);
+ virtual void uncommit_regions(uint start_idx, size_t num_regions) {
+ for (uint i = start_idx; i < start_idx + num_regions; i++) {
+ assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region %u that is not committed", i));
+ size_t idx = region_idx_to_page_idx(i);
uint old_refcount = _refcounts.get_by_index(idx);
assert(old_refcount > 0, "must be");
if (old_refcount == 1) {
@@ -147,14 +148,15 @@ void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, b
}
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
- size_t os_commit_granularity,
+ size_t actual_size,
+ size_t page_size,
size_t region_granularity,
size_t commit_factor,
MemoryType type) {
- if (region_granularity >= (os_commit_granularity * commit_factor)) {
- return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+ if (region_granularity >= (page_size * commit_factor)) {
+ return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
} else {
- return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+ return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
}
}
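
create_mapper() above picks between the two strategies by comparing the region size against the effective commit unit: when every region spans one or more whole pages, commits can be done per region; when several regions share a page, each page needs a reference count so it is committed on first use and uncommitted on last. The index arithmetic, with assumed sizes:

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t region_granularity = 1024 * 1024;  // assumed 1M regions
      const size_t commit_factor = 1;

      size_t page_size = 4 * 1024;                    // small pages
      if (region_granularity >= page_size * commit_factor) {
        size_t pages_per_region = region_granularity / (page_size * commit_factor);
        printf("larger-than mapper: %zu pages per region\n", pages_per_region);
      }

      page_size = 2 * 1024 * 1024;                    // 2M large pages
      if (region_granularity < page_size * commit_factor) {
        size_t regions_per_page = (page_size * commit_factor) / region_granularity;
        printf("smaller-than mapper: %zu regions share a page\n", regions_per_page);
      }
      return 0;
    }
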
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp
index e46877785d7..6623a37f9d0 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp
@@ -46,12 +46,12 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
protected:
// Backing storage.
G1PageBasedVirtualSpace _storage;
- size_t _commit_granularity;
+
size_t _region_granularity;
// Mapping management
BitMap _commit_map;
- G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
+ G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, MemoryType type);
void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
public:
@@ -70,16 +70,20 @@ class G1RegionToSpaceMapper : public CHeapObj {
return _commit_map.at(idx);
}
- virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
- virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+ virtual void commit_regions(uint start_idx, size_t num_regions = 1) = 0;
+ virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
// Creates an appropriate G1RegionToSpaceMapper for the given parameters.
+ // The actual space to be used within the given reservation is given by actual_size.
+ // This is because some OSes need to round up the reservation size to guarantee
+ // alignment of page_size.
// The byte_translation_factor defines how many bytes in a region correspond to
// a single byte in the data structure this mapper is for.
// Eg. in the card table, this value corresponds to the size a single card
- // table entry corresponds to.
+ // table entry corresponds to in the heap.
static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
- size_t os_commit_granularity,
+ size_t actual_size,
+ size_t page_size,
size_t region_granularity,
size_t byte_translation_factor,
MemoryType type);
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp
index b452f9adca2..1bc162f328d 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp
@@ -38,6 +38,7 @@
#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
+#include "utilities/stack.inline.hpp"
#define CARD_REPEAT_HISTO 0
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp
index 523817d4994..ee32c64a426 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@ inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
- assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
+ assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT
assert(from == NULL || from->is_in_reserved(p), "p is not in from");
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp
index 0a31dc72111..84d57271000 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp
@@ -116,7 +116,7 @@ void G1RootProcessor::wait_until_all_strong_classes_discovered() {
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) :
_g1h(g1h),
_process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)),
- _srs(g1h),
+ _srs(),
_lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
_n_workers_discovered_strong_classes(0) {}
@@ -253,7 +253,8 @@ void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
{
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
- Threads::possibly_parallel_oops_do(strong_roots, thread_stack_clds, strong_code);
+ bool is_par = _g1h->n_par_threads() > 0;
+ Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
}
}
@@ -323,10 +324,6 @@ void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
OopClosure* scan_non_heap_weak_roots,
uint worker_i) {
- G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
- G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CodeCacheRoots, worker_i);
-
- // Now scan the complement of the collection set.
G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
_g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp
index ee7b00f22ae..38287e784ba 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp
@@ -26,7 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_ROOTPROCESSOR_HPP
#include "memory/allocation.hpp"
-#include "memory/sharedHeap.hpp"
+#include "memory/strongRootsScope.hpp"
#include "runtime/mutex.hpp"
class CLDClosure;
@@ -46,7 +46,7 @@ class SubTasksDone;
class G1RootProcessor : public StackObj {
G1CollectedHeap* _g1h;
SubTasksDone* _process_strong_tasks;
- SharedHeap::StrongRootsScope _srs;
+ StrongRootsScope _srs;
// Used to implement the Thread work barrier.
Monitor _lock;
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
index 829faeb4f87..1778abd10a7 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
@@ -206,7 +206,7 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
if (new_val == NULL) return;
// Otherwise, log it.
G1SATBCardTableLoggingModRefBS* g1_bs =
- barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+ barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
g1_bs->write_ref_field_work(field, new_val);
}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp
index 80ad7983939..1e2c5e4970f 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1StringDedupQueue.hpp"
#include "memory/gcLocker.hpp"
@@ -163,7 +164,7 @@ void G1StringDedupQueue::verify() {
while (!iter.is_empty()) {
oop obj = iter.next();
if (obj != NULL) {
- guarantee(Universe::heap()->is_in_reserved(obj), "Object must be on the heap");
+ guarantee(G1CollectedHeap::heap()->is_in_reserved(obj), "Object must be on the heap");
guarantee(!obj->is_forwarded(), "Object must not be forwarded");
guarantee(java_lang_String::is_instance(obj), "Object must be a String");
}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp
index a9d2d790e32..7c998fa51e8 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp
@@ -27,6 +27,7 @@
#include "classfile/javaClasses.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1StringDedupTable.hpp"
#include "memory/gcLocker.hpp"
#include "memory/padded.inline.hpp"
@@ -519,7 +520,7 @@ void G1StringDedupTable::verify() {
while (*entry != NULL) {
typeArrayOop value = (*entry)->obj();
guarantee(value != NULL, "Object must not be NULL");
- guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap");
+ guarantee(G1CollectedHeap::heap()->is_in_reserved(value), "Object must be on the heap");
guarantee(!value->is_forwarded(), "Object must not be forwarded");
guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
unsigned int hash = hash_code(value);
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.hpp
index f357523c513..75f25f3db0c 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.hpp
@@ -29,6 +29,7 @@
#include "runtime/mutexLocker.hpp"
class G1StringDedupEntryCache;
+class G1StringDedupUnlinkOrOopsDoClosure;
//
// Table entry in the deduplication hashtable. Points weakly to the
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
index d1da538bacc..859cfc2fdbe 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -41,15 +41,6 @@
develop(intx, G1MarkingVerboseLevel, 0, \
"Level (0-4) of verboseness of the marking code") \
\
- develop(bool, G1PrintReachableAtInitialMark, false, \
- "Reachable object dump at the initial mark pause") \
- \
- develop(bool, G1VerifyDuringGCPrintReachable, false, \
- "If conc mark verification fails, dump reachable objects") \
- \
- develop(ccstr, G1PrintReachableBaseFile, NULL, \
- "The base file name for the reachable object dumps") \
- \
develop(bool, G1TraceMarkStackOverflow, false, \
"If true, extra debugging code for CM restart for ovflw.") \
\
@@ -99,9 +90,6 @@
"the buffer will be enqueued for processing. A value of 0 " \
"specifies that mutator threads should not do such filtering.") \
\
- develop(bool, G1SATBPrintStubs, false, \
- "If true, print generated stubs for the SATB barrier") \
- \
experimental(intx, G1ExpandByPercentOfAvailable, 20, \
"When expanding, % of uncommitted space to claim.") \
\
diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
index ec5fb14e116..2e971906650 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -31,7 +31,6 @@
#include "gc_implementation/g1/survRateGroup.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
-#include "memory/space.inline.hpp"
#include "memory/watermark.hpp"
#include "utilities/macros.hpp"
@@ -45,6 +44,7 @@
// The solution is to remove this method from the definition
// of a Space.
+class G1CollectedHeap;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;
diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp
index a6c70ef1613..1c28a8e1f5d 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionManager.cpp
@@ -330,8 +330,12 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
assert(!hrclaimer->is_region_claimed(ch_index),
"Must not have been claimed yet because claiming of humongous continuation first claims the start region");
- // There's no need to actually claim the continues humongous region, but we can do it in an assert as an extra precaution.
- assert(hrclaimer->claim_region(ch_index), "We should always be able to claim the continuesHumongous part of the humongous object");
+ // Claim the region so no other worker tries to process the region. When a worker processes a
+ // starts_humongous region it may also process the associated continues_humongous regions.
+ // The continues_humongous regions can be changed to free regions. Unless this worker claims
+ // all of these regions, other workers might try to claim and process these newly free regions.
+ bool claim_result = hrclaimer->claim_region(ch_index);
+ guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
bool res2 = blk->doHeapRegion(chr);
if (res2) {
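The claim-before-process scheme above can be shown in isolation. A minimal sketch, assuming one CAS-claimed flag per region; SimpleClaimer and its methods are hypothetical stand-ins, not the HotSpot HeapRegionClaimer API:

#include <atomic>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for a region claimer: one flag per region, so a
// region is claimed by exactly one worker via compare-and-swap.
class SimpleClaimer {
  std::vector<std::atomic<bool> > _claimed;
public:
  explicit SimpleClaimer(size_t num_regions) : _claimed(num_regions) {
    for (std::atomic<bool>& flag : _claimed) {
      flag.store(false, std::memory_order_relaxed);
    }
  }
  // Returns true only for the single caller that wins the claim.
  bool claim_region(size_t index) {
    bool expected = false;
    return _claimed[index].compare_exchange_strong(expected, true);
  }
  bool is_region_claimed(size_t index) const {
    return _claimed[index].load(std::memory_order_relaxed);
  }
};

A worker that claims the starts_humongous region and then each continues_humongous region before turning them into free regions guarantees that no other worker can win claim_region() on a slot that is about to change state, which is the race the rewritten comment describes.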
diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp
index 9657356c3bb..7f55ac0540d 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp
@@ -419,6 +419,7 @@ void FreeRegionList_test() {
ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
G1RegionToSpaceMapper* bot_storage =
G1RegionToSpaceMapper::create_mapper(bot_rs,
+ bot_rs.size(),
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,
diff --git a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp
index 4da322feb7a..b3bbf570cbe 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,38 +25,75 @@
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/satbQueue.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
void ObjPtrQueue::flush() {
- // The buffer might contain refs into the CSet. We have to filter it
- // first before we flush it, otherwise we might end up with an
- // enqueued buffer with refs into the CSet which breaks our invariants.
+ // Filter now to possibly save work later. If filtering empties the
+ // buffer then flush_impl can deallocate the buffer.
filter();
flush_impl();
}
-// This method removes entries from an SATB buffer that will not be
-// useful to the concurrent marking threads. An entry is removed if it
-// satisfies one of the following conditions:
+// Return true if a SATB buffer entry refers to an object that
+// requires marking.
//
-// * it points to an object outside the G1 heap (G1's concurrent
-// marking only visits objects inside the G1 heap),
-// * it points to an object that has been allocated since marking
-// started (according to SATB those objects do not need to be
-// visited during marking), or
-// * it points to an object that has already been marked (no need to
-// process it again).
+// The entry must point into the G1 heap. In particular, it must not
+// be a NULL pointer. NULL pointers are pre-filtered and never
+// inserted into a SATB buffer.
//
-// The rest of the entries will be retained and are compacted towards
-// the top of the buffer. Note that, because we do not allow old
-// regions in the CSet during marking, all objects on the CSet regions
-// are young (eden or survivors) and therefore implicitly live. So any
-// references into the CSet will be removed during filtering.
+// An entry that is below the NTAMS pointer for the containing heap
+// region requires marking. Such an entry must point to a valid object.
+//
+// An entry that is at least the NTAMS pointer for the containing heap
+// region might be any of the following, none of which should be marked.
+//
+// * A reference to an object allocated since marking started.
+// According to SATB, such objects are implicitly kept live and do
+// not need to be dealt with via SATB buffer processing.
+//
+// * A reference to a young generation object. Young objects are
+// handled separately and are not marked by concurrent marking.
+//
+// * A stale reference to a young generation object. If a young
+// generation object reference is recorded and not filtered out
+// before being moved by a young collection, the reference becomes
+// stale.
+//
+// * A stale reference to an eagerly reclaimed humongous object. If a
+// humongous object is recorded and then reclaimed, the reference
+// becomes stale.
+//
+// The stale reference cases are implicitly handled by the NTAMS
+// comparison. Because of the possibility of stale references, buffer
+// processing must be somewhat circumspect and not assume entries
+// in an unfiltered buffer refer to valid objects.
+
+inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
+ // Includes rejection of NULL pointers.
+ assert(heap->is_in_reserved(entry),
+ err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)));
+
+ HeapRegion* region = heap->heap_region_containing_raw(entry);
+ assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry)));
+ if (entry >= region->next_top_at_mark_start()) {
+ return false;
+ }
+
+ assert(((oop)entry)->is_oop(true /* ignore mark word */),
+ err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)));
+
+ return true;
+}
+
+// This method removes entries from a SATB buffer that will not be
+// useful to the concurrent marking threads. Entries are retained if
+// they require marking and are not already marked. Retained entries
+// are compacted toward the top of the buffer.
void ObjPtrQueue::filter() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -78,26 +115,25 @@ void ObjPtrQueue::filter() {
assert(i > 0, "we should have at least one more entry to process");
i -= oopSize;
debug_only(entries += 1;)
- oop* p = (oop*) &buf[byte_index_to_index((int) i)];
- oop obj = *p;
+ void** p = &buf[byte_index_to_index((int) i)];
+ void* entry = *p;
// NULL the entry so that unused parts of the buffer contain NULLs
// at the end. If we are going to retain it we will copy it to its
// final place. If we have retained all entries we have visited so
// far, we'll just end up copying it to the same place.
*p = NULL;
- bool retain = g1h->is_obj_ill(obj);
- if (retain) {
+ if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
assert(new_index > 0, "we should not have already filled up the buffer");
new_index -= oopSize;
assert(new_index >= i,
"new_index should never be below i, as we always compact 'up'");
- oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
+ void** new_p = &buf[byte_index_to_index((int) new_index)];
assert(new_p >= p, "the destination location should never be below "
"the source as we always compact 'up'");
assert(*new_p == NULL,
"we should have already cleared the destination location");
- *new_p = obj;
+ *new_p = entry;
debug_only(retained += 1;)
}
}
@@ -144,12 +180,6 @@ bool ObjPtrQueue::should_enqueue_buffer() {
return should_enqueue;
}
-void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
- if (_buf != NULL) {
- apply_closure_to_buffer(cl, _buf, _index, _sz);
- }
-}
-
void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) {
if (_buf != NULL) {
apply_closure_to_buffer(cl, _buf, _index, _sz);
@@ -184,23 +214,12 @@ void ObjPtrQueue::print(const char* name,
}
#endif // PRODUCT
-#ifdef ASSERT
-void ObjPtrQueue::verify_oops_in_buffer() {
- if (_buf == NULL) return;
- for (size_t i = _index; i < _sz; i += oopSize) {
- oop obj = (oop)_buf[byte_index_to_index((int)i)];
- assert(obj != NULL && obj->is_oop(true /* ignore mark word */),
- "Not an oop");
- }
-}
-#endif
-
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
SATBMarkQueueSet::SATBMarkQueueSet() :
- PtrQueueSet(), _closures(NULL),
+ PtrQueueSet(),
_shared_satb_queue(this, true /*perm*/) { }
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
@@ -208,11 +227,9 @@ void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
Mutex* lock) {
PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
_shared_satb_queue.set_lock(lock);
- _closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads, mtGC);
}
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
- DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
t->satb_mark_queue().handle_zero_index();
}
@@ -272,13 +289,7 @@ void SATBMarkQueueSet::filter_thread_buffers() {
shared_satb_queue()->filter();
}
-void SATBMarkQueueSet::set_closure(uint worker, ObjectClosure* closure) {
- assert(_closures != NULL, "Precondition");
- assert(worker < ParallelGCThreads, "Worker index must be in range [0...ParallelGCThreads)");
- _closures[worker] = closure;
-}
-
-bool SATBMarkQueueSet::apply_closure_to_completed_buffer(uint worker) {
+bool SATBMarkQueueSet::apply_closure_to_completed_buffer(ObjectClosure* cl) {
BufferNode* nd = NULL;
{
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
@@ -290,7 +301,6 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(uint worker) {
if (_n_completed_buffers == 0) _process_completed = false;
}
}
- ObjectClosure* cl = _closures[worker];
if (nd != NULL) {
void **buf = BufferNode::make_buffer_from_node(nd);
ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
@@ -301,28 +311,6 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(uint worker) {
}
}
-void SATBMarkQueueSet::iterate_completed_buffers_read_only(ObjectClosure* cl) {
- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- assert(cl != NULL, "pre-condition");
-
- BufferNode* nd = _completed_buffers_head;
- while (nd != NULL) {
- void** buf = BufferNode::make_buffer_from_node(nd);
- ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
- nd = nd->next();
- }
-}
-
-void SATBMarkQueueSet::iterate_thread_buffers_read_only(ObjectClosure* cl) {
- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- assert(cl != NULL, "pre-condition");
-
- for (JavaThread* t = Threads::first(); t; t = t->next()) {
- t->satb_mark_queue().apply_closure(cl);
- }
- shared_satb_queue()->apply_closure(cl);
-}
-
#ifndef PRODUCT
// Helpful for debugging
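The compacting loop in ObjPtrQueue::filter() can be exercised standalone. A minimal sketch, assuming the same layout: used entries sit in buf[0 .. count), retained entries end up packed against the top, and freed slots are NULLed. The retain predicate stands in for requires_marking() && !isMarkedNext():

#include <cstddef>

template <typename Pred>
size_t filter_buffer(void** buf, size_t count, Pred retain) {
  size_t retained = 0;
  for (size_t i = count; i > 0; i--) {        // scan from the last slot down
    void* entry = buf[i - 1];
    buf[i - 1] = nullptr;                     // unused slots end up NULL
    if (entry != nullptr && retain(entry)) {
      // The destination index never drops below the source index, so we
      // only overwrite slots that were already visited and NULLed, matching
      // the "we always compact 'up'" assertions in the hunk above.
      buf[count - ++retained] = entry;
    }
  }
  return retained;                            // entry count now at the top
}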
diff --git a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp
index 89c42c8947e..596904d06df 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,9 +41,6 @@ private:
// Filter out unwanted entries from the buffer.
void filter();
- // Apply the closure to all elements.
- void apply_closure(ObjectClosure* cl);
-
// Apply the closure to all elements and empty the buffer;
void apply_closure_and_empty(ObjectClosure* cl);
@@ -72,13 +69,9 @@ public:
void print(const char* name);
static void print(const char* name, void** buf, size_t index, size_t sz);
#endif // PRODUCT
-
- void verify_oops_in_buffer() NOT_DEBUG_RETURN;
};
class SATBMarkQueueSet: public PtrQueueSet {
- ObjectClosure** _closures; // One per ParGCThread.
-
ObjPtrQueue _shared_satb_queue;
#ifdef ASSERT
@@ -104,22 +97,10 @@ public:
// Filter all the currently-active SATB buffers.
void filter_thread_buffers();
- // Register closure for the given worker thread. The "apply_closure_to_completed_buffer"
- // method will apply this closure to a completed buffer, and "iterate_closure_all_threads"
- // applies it to partially-filled buffers (the latter should only be done
- // with the world stopped).
- void set_closure(uint worker, ObjectClosure* closure);
-
// If there exists some completed buffer, pop it, then apply the
- // registered closure to all its elements, and return true. If no
+ // closure to all its elements, and return true. If no
// completed buffers exist, return false.
- bool apply_closure_to_completed_buffer(uint worker);
-
- // Apply the given closure on enqueued and currently-active buffers
- // respectively. Both methods are read-only, i.e., they do not
- // modify any of the buffers.
- void iterate_completed_buffers_read_only(ObjectClosure* cl);
- void iterate_thread_buffers_read_only(ObjectClosure* cl);
+ bool apply_closure_to_completed_buffer(ObjectClosure* closure);
#ifndef PRODUCT
// Helpful for debugging
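The new signature makes the draining loop self-contained: each worker constructs its own closure and passes it on every call, instead of registering it in the removed _closures array. A hedged usage sketch with simplified stand-in types, not the HotSpot declarations:

#include <utility>
#include <vector>

struct ObjectClosureSketch {
  virtual void do_object(void* obj) = 0;
  virtual ~ObjectClosureSketch() {}
};

class CompletedBufferSet {
  std::vector<std::vector<void*> > _completed;
public:
  void add_buffer(std::vector<void*> buf) { _completed.push_back(std::move(buf)); }
  // Pop one completed buffer, apply cl to every entry, and return true;
  // return false when no completed buffers remain.
  bool apply_closure_to_completed_buffer(ObjectClosureSketch* cl) {
    if (_completed.empty()) return false;
    std::vector<void*> buf = std::move(_completed.back());
    _completed.pop_back();
    for (size_t i = 0; i < buf.size(); i++) cl->do_object(buf[i]);
    return true;
  }
};

// Each worker drains with its own closure; no shared registration is needed.
void drain(CompletedBufferSet& set, ObjectClosureSketch* cl) {
  while (set.apply_closure_to_completed_buffer(cl)) {}
}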
diff --git a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
index 00238e177f9..58f70a363bd 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
@@ -26,8 +26,8 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
#include "gc_implementation/g1/heapRegion.hpp"
-#include "gc_implementation/g1/heapRegionManager.inline.hpp"
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
#define VM_STRUCTS_G1(nonstatic_field, static_field) \
\
@@ -70,7 +70,7 @@
\
declare_toplevel_type(G1HeapRegionTable) \
\
- declare_type(G1CollectedHeap, SharedHeap) \
+ declare_type(G1CollectedHeap, CollectedHeap) \
\
declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
declare_type(HeapRegion, G1OffsetTableContigSpace) \
diff --git a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
index 47267f21d53..c4ffabc3f7e 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
@@ -225,15 +225,10 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
void VM_CGC_Operation::doit() {
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
- GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
- SharedHeap* sh = SharedHeap::heap();
- // This could go away if CollectedHeap gave access to _gc_is_active...
- if (sh != NULL) {
- IsGCActiveMark x;
- _cl->do_void();
- } else {
- _cl->do_void();
- }
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm(), g1h->concurrent_mark()->concurrent_gc_id());
+ IsGCActiveMark x;
+ _cl->do_void();
}
bool VM_CGC_Operation::doit_prologue() {
@@ -244,14 +239,12 @@ bool VM_CGC_Operation::doit_prologue() {
}
Heap_lock->lock();
- SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
return true;
}
void VM_CGC_Operation::doit_epilogue() {
// Note the relative order of the unlocks must match that in
// VM_GC_Operation::doit_epilogue()
- SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
Heap_lock->unlock();
if (_needs_pll) {
release_and_notify_pending_list_lock();
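The rewritten doit() takes IsGCActiveMark unconditionally, presumably because the _gc_is_active flag the removed comment mentions is now reachable through CollectedHeap itself. An illustrative RAII shape (names are stand-ins, not the HotSpot definitions):

struct HeapState {
  bool gc_active;
  HeapState() : gc_active(false) {}
};

class ScopedGCActiveMark {
  HeapState& _state;
public:
  explicit ScopedGCActiveMark(HeapState& s) : _state(s) { _state.gc_active = true; }
  ~ScopedGCActiveMark() { _state.gc_active = false; }
};

void run_concurrent_phase(HeapState& heap) {
  ScopedGCActiveMark mark(heap);  // flag is set for the whole phase
  // ... do_void() body runs with gc_active == true ...
}                                 // destructor clears the flag on any exit path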
diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp
index 9ab76383957..24e7564d941 100644
--- a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp
@@ -23,17 +23,17 @@
*/
#include "precompiled.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
-#include "memory/sharedHeap.hpp"
+#include "memory/genCollectedHeap.hpp"
#include "memory/space.inline.hpp"
-#include "memory/universe.hpp"
+#include "memory/virtualspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
-#include "runtime/virtualspace.hpp"
#include "runtime/vmThread.hpp"
void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
@@ -449,7 +449,7 @@ get_LNC_array_for_space(Space* sp,
// Do a dirty read here. If we pass the conditional then take the rare
// event lock and do the read again in case some other thread had already
// succeeded and done the resize.
- int cur_collection = Universe::heap()->total_collections();
+ int cur_collection = GenCollectedHeap::heap()->total_collections();
if (_last_LNC_resizing_collection[i] != cur_collection) {
MutexLocker x(ParGCRareEvent_lock);
if (_last_LNC_resizing_collection[i] != cur_collection) {
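The "dirty read, then re-check under the rare-event lock" pattern above can be sketched standalone. std::atomic makes this version well-defined on its own; the HotSpot original uses a plain field plus ParGCRareEvent_lock, and resize_lnc_array() is a hypothetical placeholder for the actual resize work:

#include <atomic>
#include <mutex>

std::atomic<int> last_resizing_collection(-1);
std::mutex rare_event_lock;

void resize_lnc_array() { /* grow the last-non-clean card arrays */ }

void maybe_resize(int cur_collection) {
  // Cheap unsynchronized read first; almost every caller bails out here.
  if (last_resizing_collection.load(std::memory_order_relaxed) != cur_collection) {
    std::lock_guard<std::mutex> guard(rare_event_lock);
    // Re-check: another thread may have resized while we waited for the lock.
    if (last_resizing_collection.load(std::memory_order_relaxed) != cur_collection) {
      resize_lnc_array();
      last_resizing_collection.store(cur_collection, std::memory_order_relaxed);
    }
  }
}

The resize therefore runs at most once per collection even when many workers race past the first check.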
diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
index 775a6a513c6..68b04be6b79 100644
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -34,7 +34,7 @@
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.inline.hpp"
+#include "gc_implementation/shared/plab.inline.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
@@ -42,7 +42,7 @@
#include "memory/generation.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/sharedHeap.hpp"
+#include "memory/strongRootsScope.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
@@ -53,6 +53,7 @@
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/stack.inline.hpp"
#include "utilities/workgroup.hpp"
#ifdef _MSC_VER
@@ -117,7 +118,7 @@ bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) c
void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
assert(old->is_objArray(), "must be obj array");
assert(old->is_forwarded(), "must be forwarded");
- assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
+ assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
assert(!old_gen()->is_in(old), "must be in young generation.");
objArrayOop obj = objArrayOop(old->forwardee());
@@ -199,9 +200,9 @@ bool ParScanThreadState::take_from_overflow_stack() {
for (size_t i = 0; i != num_take_elems; i++) {
oop cur = of_stack->pop();
oop obj_to_push = cur->forwardee();
- assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
+ assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
- assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
+ assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
if (should_be_partially_scanned(obj_to_push, cur)) {
assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
obj_to_push = cur;
@@ -225,7 +226,7 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
// buffer.
HeapWord* obj = NULL;
if (!_to_space_full) {
- ParGCAllocBuffer* const plab = to_space_alloc_buffer();
+ PLAB* const plab = to_space_alloc_buffer();
Space* const sp = to_space();
if (word_sz * 100 <
ParallelGCBufferWastePct * plab->word_sz()) {
@@ -235,7 +236,7 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
HeapWord* buf_space = sp->par_allocate(buf_size);
if (buf_space == NULL) {
const size_t min_bytes =
- ParGCAllocBuffer::min_size() << LogHeapWordSize;
+ PLAB::min_size() << LogHeapWordSize;
size_t free_bytes = sp->free();
while(buf_space == NULL && free_bytes >= min_bytes) {
buf_size = free_bytes >> LogHeapWordSize;
@@ -251,7 +252,7 @@ HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
record_survivor_plab(buf_space, buf_size);
obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
// Note that we cannot compare buf_size < word_sz below
- // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
+ // because of AlignmentReserve (see PLAB::allocate()).
assert(obj != NULL || plab->words_remaining() < word_sz,
"Else should have been able to allocate");
// It's conceivable that we may be able to use the
@@ -596,8 +597,6 @@ void ParNewGenTask::work(uint worker_id) {
// and handle marks.
ResourceMark rm;
HandleMark hm;
- // We would need multiple old-gen queues otherwise.
- assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");
ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
assert(_state_set->is_valid(worker_id), "Should not have been called");
@@ -697,7 +696,7 @@ void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
_par_cl->do_oop_nv(p);
- if (Universe::heap()->is_in_reserved(p)) {
+ if (GenCollectedHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
_rs->write_ref_field_gc_par(p, obj);
}
@@ -724,7 +723,7 @@ void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
_cl->do_oop_nv(p);
- if (Universe::heap()->is_in_reserved(p)) {
+ if (GenCollectedHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
_rs->write_ref_field_gc_par(p, obj);
}
@@ -823,8 +822,6 @@ public:
void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
GenCollectedHeap* gch = GenCollectedHeap::heap();
- assert(gch->kind() == CollectedHeap::GenCollectedHeap,
- "not a generational heap");
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
_state_set.reset(workers->active_workers(), _generation.promotion_failed());
@@ -899,7 +896,7 @@ void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThr
_gc_tracer.report_promotion_failed(_promotion_failed_info);
}
// Reset the PromotionFailureALot counters.
- NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+ NOT_PRODUCT(gch->reset_promotion_should_fail();)
}
void ParNewGeneration::collect(bool full,
@@ -912,8 +909,6 @@ void ParNewGeneration::collect(bool full,
_gc_timer->register_gc_start();
- assert(gch->kind() == CollectedHeap::GenCollectedHeap,
- "not a CMS generational heap");
AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need workgang for parallel work");
@@ -922,8 +917,6 @@ void ParNewGeneration::collect(bool full,
workers->active_workers(),
Threads::number_of_non_daemon_threads());
workers->set_active_workers(active_workers);
- assert(gch->n_gens() == 2,
- "Par collection currently only works with single older gen.");
_old_gen = gch->old_gen();
// If the next generation is too full to accommodate worst-case promotion
@@ -974,10 +967,10 @@ void ParNewGeneration::collect(bool full,
// in the multi-threaded case, but we special-case n=1 here to get
// repeatable measurements of the 1-thread overhead of the parallel code.
if (n_workers > 1) {
- GenCollectedHeap::StrongRootsScope srs(gch);
+ StrongRootsScope srs;
workers->run_task(&tsk);
} else {
- GenCollectedHeap::StrongRootsScope srs(gch);
+ StrongRootsScope srs;
tsk.work(0);
}
thread_state_set.reset(0 /* Bad value in debug if not reset */,
@@ -1194,7 +1187,7 @@ oop ParNewGeneration::copy_to_survivor_space(
} else {
// Is in to-space; do copying ourselves.
Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
- assert(Universe::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
+ assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
forward_ptr = old->forward_to_atomic(new_obj);
// Restore the mark word copied above.
new_obj->set_mark(m);
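The renamed PLAB keeps the same refill policy as alloc_in_to_space_slow() above: integer arithmetic decides whether a request is small enough to justify retiring the current buffer and allocating a fresh one. A minimal sketch of that check (helper name hypothetical):

#include <cstddef>

// Mirrors: word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()
bool fits_plab_policy(size_t word_sz, size_t plab_word_sz, size_t waste_pct) {
  return word_sz * 100 < waste_pct * plab_word_sz;
}

// Example: fits_plab_policy(8, 1024, 10) is 800 < 10240, so an 8-word
// request goes through a fresh PLAB; a 200-word request (20000 >= 10240)
// is instead allocated directly, so a nearly full buffer is not wasted.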
diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
index 8e81515ed69..cf6ca9a82e8 100644
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
@@ -27,7 +27,7 @@
#include "gc_implementation/parNew/parOopClosures.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/plab.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/padded.hpp"
@@ -65,7 +65,7 @@ class ParScanThreadState {
ObjToScanQueue *_work_queue;
Stack<oop, mtGC>* const _overflow_stack;
- ParGCAllocBuffer _to_space_alloc_buffer;
+ PLAB _to_space_alloc_buffer;
ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
@@ -140,7 +140,7 @@ class ParScanThreadState {
ObjToScanQueue* work_queue() { return _work_queue; }
- ParGCAllocBuffer* to_space_alloc_buffer() {
+ PLAB* to_space_alloc_buffer() {
return &_to_space_alloc_buffer;
}
diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.cpp b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.cpp
new file mode 100644
index 00000000000..4b3f3f26adb
--- /dev/null
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/specialized_oop_closures.hpp"
+#include "gc_implementation/parNew/parOopClosures.inline.hpp"
+
+// Generate ParNew specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(ALL_KLASS_OOP_OOP_ITERATE_DEFN);
diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp
index 93000ce2f84..1c5d8f6e33f 100644
--- a/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp
@@ -70,7 +70,7 @@ template <class T>
inline void ParScanClosure::do_oop_work(T* p,
bool gc_barrier,
bool root_scan) {
- assert((!Universe::heap()->is_in_reserved(p) ||
+ assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
generation()->is_in_reserved(p))
&& (generation()->level() == 0 || gc_barrier),
"The gen must be right, and we must be doing the barrier "
@@ -82,7 +82,7 @@ inline void ParScanClosure::do_oop_work(T* p,
#ifndef PRODUCT
if (_g->to()->is_in_reserved(obj)) {
tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
- GenCollectedHeap* gch = (GenCollectedHeap*)Universe::heap();
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
Space* sp = gch->space_containing(p);
oop obj = oop(sp->block_start(p));
assert((HeapWord*)obj < (HeapWord*)p, "Error");
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
index d1a169b89b3..918720df89a 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -89,7 +89,7 @@ size_t ASPSOldGen::available_for_expansion() {
assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
size_t result = gen_size_limit() - virtual_space()->committed_size();
size_t result_aligned = align_size_down(result, heap->generation_alignment());
return result_aligned;
@@ -101,7 +101,7 @@ size_t ASPSOldGen::available_for_contraction() {
return uncommitted_bytes;
}
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
const size_t gen_alignment = heap->generation_alignment();
PSAdaptiveSizePolicy* policy = heap->size_policy();
const size_t working_size =
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp
index d711104e459..4395ce6f626 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,7 @@ size_t ASPSYoungGen::available_for_expansion() {
size_t current_committed_size = virtual_space()->committed_size();
assert((gen_size_limit() >= current_committed_size),
"generation size limit is wrong");
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
size_t result = gen_size_limit() - current_committed_size;
size_t result_aligned = align_size_down(result, heap->generation_alignment());
return result_aligned;
@@ -91,7 +91,7 @@ size_t ASPSYoungGen::available_for_contraction() {
if (eden_space()->is_empty()) {
// Respect the minimum size for eden and for the young gen as a whole.
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
const size_t eden_alignment = heap->space_alignment();
const size_t gen_alignment = heap->generation_alignment();
@@ -128,7 +128,7 @@ size_t ASPSYoungGen::available_for_contraction() {
// If to_space is below from_space, to_space is not considered.
// to_space can be.
size_t ASPSYoungGen::available_to_live() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
const size_t alignment = heap->space_alignment();
// Include any space that is committed but is not in eden.
@@ -292,7 +292,7 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
assert(eden_start < from_start, "Cannot push into from_space");
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
const size_t alignment = heap->space_alignment();
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@@ -345,8 +345,6 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
// Does the optimal to-space overlap from-space?
if (to_start < (char*)from_space()->end()) {
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
// Calculate the minimum offset possible for from_end
size_t from_size =
pointer_delta(from_space()->top(), from_start, sizeof(char));
@@ -509,9 +507,7 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
assert(from_space()->top() == old_from_top, "from top changed!");
if (PrintAdaptiveSizePolicy) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
"collection: %d "
"(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
@@ -542,7 +538,7 @@ void ASPSYoungGen::reset_after_change() {
}
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
- Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
space_invariants();
}
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
index 3523ff514d8..2ad1128edba 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
@@ -76,9 +76,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {
public:
CheckForUnmarkedObjects() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_young_gen = heap->young_gen();
_card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
// No point in asserting barrier set type here. Need to make CardTableExtension
@@ -325,9 +323,7 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
void CardTableExtension::verify_all_young_refs_imprecise() {
CheckForUnmarkedObjects check;
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSOldGen* old_gen = heap->old_gen();
old_gen->object_iterate(&check);
@@ -335,9 +331,7 @@ void CardTableExtension::verify_all_young_refs_imprecise() {
// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSOldGen* old_gen = heap->old_gen();
CheckForPreciseMarks check(
@@ -351,7 +345,7 @@ void CardTableExtension::verify_all_young_refs_precise() {
void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
CardTableExtension* card_table =
- barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());
+ barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
jbyte* bot = card_table->byte_for(mr.start());
jbyte* top = card_table->byte_for(mr.end());
@@ -523,7 +517,7 @@ bool CardTableExtension::resize_commit_uncommit(int changed_region,
cur_committed = new_committed;
}
#ifdef ASSERT
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
assert(cur_committed.start() ==
(HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
os::vm_page_size()),
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
index a50328680bc..6f35768cfea 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -49,42 +49,25 @@ PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
-ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
jint ParallelScavengeHeap::initialize() {
CollectedHeap::pre_initialize();
- // Initialize collector policy
- _collector_policy = new GenerationSizer();
- _collector_policy->initialize_all();
-
const size_t heap_size = _collector_policy->max_heap_byte_size();
ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
- MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);
os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(),
heap_size, generation_alignment(),
heap_rs.base(),
heap_rs.size());
- if (!heap_rs.is_reserved()) {
- vm_shutdown_during_initialization(
- "Could not reserve enough space for object heap");
- return JNI_ENOMEM;
- }
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
barrier_set->initialize();
- _barrier_set = barrier_set;
- oopDesc::set_bs(_barrier_set);
- if (_barrier_set == NULL) {
- vm_shutdown_during_initialization(
- "Could not reserve enough space for barrier set");
- return JNI_ENOMEM;
- }
+ set_barrier_set(barrier_set);
// Make up the generations
// Calculate the maximum size that a generation can grow. This
@@ -120,7 +103,6 @@ jint ParallelScavengeHeap::initialize() {
// initialize the policy counters - 2 collectors, 3 generations
_gc_policy_counters =
new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
- _psh = this;
// Set up the GCTaskManager
_gc_task_manager = GCTaskManager::create(ParallelGCThreads);
@@ -176,27 +158,11 @@ size_t ParallelScavengeHeap::max_capacity() const {
}
bool ParallelScavengeHeap::is_in(const void* p) const {
- if (young_gen()->is_in(p)) {
- return true;
- }
-
- if (old_gen()->is_in(p)) {
- return true;
- }
-
- return false;
+ return young_gen()->is_in(p) || old_gen()->is_in(p);
}
bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
- if (young_gen()->is_in_reserved(p)) {
- return true;
- }
-
- if (old_gen()->is_in_reserved(p)) {
- return true;
- }
-
- return false;
+ return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}
bool ParallelScavengeHeap::is_scavengable(const void* addr) {
@@ -265,7 +231,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
// total_collections() value!
{
MutexLocker ml(Heap_lock);
- gc_count = Universe::heap()->total_collections();
+ gc_count = total_collections();
result = young_gen()->allocate(size);
if (result != NULL) {
@@ -315,8 +281,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
// This prevents us from looping until time out on requests that can
// not be satisfied.
if (op.prologue_succeeded()) {
- assert(Universe::heap()->is_in_or_null(op.result()),
- "result not in heap");
+ assert(is_in_or_null(op.result()), "result not in heap");
// If GC was locked out during VM operation then retry allocation
// and/or stall as necessary.
@@ -426,7 +391,7 @@ void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
- assert(!Universe::heap()->is_gc_active(), "not reentrant");
+ assert(!is_gc_active(), "not reentrant");
assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
// We assume that allocation in eden will fail unless we collect.
@@ -514,18 +479,14 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
{
MutexLocker ml(Heap_lock);
// This value is guarded by the Heap_lock
- gc_count = Universe::heap()->total_collections();
- full_gc_count = Universe::heap()->total_full_collections();
+ gc_count = total_collections();
+ full_gc_count = total_full_collections();
}
VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
VMThread::execute(&op);
}
-void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
- Unimplemented();
-}
-
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
young_gen()->object_iterate(cl);
old_gen()->object_iterate(cl);
@@ -661,9 +622,10 @@ void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_trac
}
ParallelScavengeHeap* ParallelScavengeHeap::heap() {
- assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
- assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
- return _psh;
+ CollectedHeap* heap = Universe::heap();
+ assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
+ assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
+ return (ParallelScavengeHeap*)heap;
}
// Before delegating the resize to the young generation,
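The new heap() accessor drops the per-heap static _psh pointer in favor of fetching the single Universe-owned heap and down-casting after a kind check. Simplified stand-in types showing that shape, not the HotSpot class definitions:

#include <cassert>

struct HeapBase {
  enum Kind { GenCollected, ParallelScavenge, G1 };
  virtual Kind kind() const = 0;
  virtual ~HeapBase() {}
  static HeapBase* the_heap;  // set once during VM initialization
};
HeapBase* HeapBase::the_heap = 0;

struct PSHeapSketch : HeapBase {
  Kind kind() const { return ParallelScavenge; }
  static PSHeapSketch* heap() {
    HeapBase* h = the_heap;
    assert(h != 0 && "Uninitialized access");
    assert(h->kind() == ParallelScavenge && "Not a ParallelScavengeHeap");
    return static_cast<PSHeapSketch*>(h);
  }
};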
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
index 867bdbf55ee..d8bb56ae0a1 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
@@ -32,8 +32,9 @@
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
-#include "gc_interface/collectedHeap.inline.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/collectorPolicy.hpp"
+#include "memory/strongRootsScope.hpp"
#include "utilities/ostream.hpp"
class AdjoiningGenerations;
@@ -52,8 +53,6 @@ class ParallelScavengeHeap : public CollectedHeap {
static PSAdaptiveSizePolicy* _size_policy;
static PSGCAdaptivePolicyCounters* _gc_policy_counters;
- static ParallelScavengeHeap* _psh;
-
GenerationSizer* _collector_policy;
// Collection of generations that are adjacent in the
@@ -75,7 +74,8 @@ class ParallelScavengeHeap : public CollectedHeap {
HeapWord* mem_allocate_old_gen(size_t size);
public:
- ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { }
+ ParallelScavengeHeap(GenerationSizer* policy) :
+ CollectedHeap(), _collector_policy(policy), _death_march_count(0) { }
// For use by VM operations
enum CollectionType {
@@ -131,9 +131,6 @@ class ParallelScavengeHeap : public CollectedHeap {
// the young gen.
virtual bool is_scavengable(const void* addr);
- // Does this heap support heap inspection? (+PrintClassHistogram)
- bool supports_heap_inspection() const { return true; }
-
size_t max_capacity() const;
// Whether p is in the allocated part of the heap
@@ -201,7 +198,6 @@ class ParallelScavengeHeap : public CollectedHeap {
// initializing stores to an object at this address.
virtual bool can_elide_initializing_store_barrier(oop new_obj);
- void oop_iterate(ExtendedOopClosure* cl);
void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
@@ -238,7 +234,7 @@ class ParallelScavengeHeap : public CollectedHeap {
void gen_mangle_unused_area() PRODUCT_RETURN;
// Call these in sequential code around the processing of strong roots.
- class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
+ class ParStrongRootsScope : public MarkScope {
public:
ParStrongRootsScope();
~ParStrongRootsScope();
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
index fc2cd8c6bea..b220c12aa82 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
@@ -41,13 +41,14 @@
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
+#include "utilities/stack.inline.hpp"
//
// ThreadRootsMarkingTask
//
void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
- assert(Universe::heap()->is_gc_active(), "called outside gc");
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
ResourceMark rm;
@@ -78,7 +79,7 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
- assert(Universe::heap()->is_gc_active(), "called outside gc");
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -149,7 +150,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
- assert(Universe::heap()->is_gc_active(), "called outside gc");
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(GCTraceTime tm("RefProcTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -167,7 +168,7 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
void RefProcTaskExecutor::execute(ProcessTask& task)
{
- ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
uint active_gc_threads = heap->gc_task_manager()->active_workers();
RegionTaskQueueSet* qset = ParCompactionManager::region_array();
@@ -188,7 +189,7 @@ void RefProcTaskExecutor::execute(ProcessTask& task)
void RefProcTaskExecutor::execute(EnqueueTask& task)
{
- ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
GCTaskQueue* q = GCTaskQueue::create();
for(uint i=0; i<parallel_gc_threads; i++) {
- assert(Universe::heap()->is_gc_active(), "called outside gc");
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -237,7 +238,7 @@ StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
_terminator(t) {}
void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
- assert(Universe::heap()->is_gc_active(), "called outside gc");
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
@@ -319,7 +320,7 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
}
void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
- assert(Universe::heap()->is_gc_active(), "called outside gc");
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
index 70b619f813d..d56c4f777cd 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
@@ -30,7 +30,10 @@
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
-#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
+#include "gc_implementation/parallelScavenge/psParallelCompact.inline.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
@@ -57,8 +60,7 @@ ParCompactionManager::ParCompactionManager() :
_region_stack(NULL),
_region_stack_index((uint)max_uintx) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_old_gen = heap->old_gen();
_start_array = old_gen()->start_array();
@@ -174,6 +176,142 @@ ParCompactionManager::gc_thread_compaction_manager(int index) {
return _manager_array[index];
}
+void InstanceKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+ assert(obj != NULL, "can't follow the content of NULL object");
+
+ PSParallelCompact::follow_klass(cm, this);
+ // Only mark the header and let the scan of the meta-data mark
+ // everything else.
+
+ PSParallelCompact::MarkAndPushClosure cl(cm);
+ InstanceKlass::oop_oop_iterate_oop_maps(obj, &cl);
+}
+
+void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+ InstanceKlass::oop_pc_follow_contents(obj, cm);
+
+ // Follow the klass field in the mirror.
+ Klass* klass = java_lang_Class::as_Klass(obj);
+ if (klass != NULL) {
+ // An anonymous class doesn't have its own class loader, so the call
+ // to follow_klass will mark and push its java mirror instead of the
+ // class loader. When handling the java mirror for an anonymous class
+ // we need to make sure its class loader data is claimed; this is done
+ // by calling follow_class_loader explicitly. For non-anonymous classes
+ // the call to follow_class_loader is made when the class loader itself
+ // is handled.
+ if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+ PSParallelCompact::follow_class_loader(cm, klass->class_loader_data());
+ } else {
+ PSParallelCompact::follow_klass(cm, klass);
+ }
+ } else {
+ // If klass is NULL then this is a mirror for a primitive type.
+ // We don't have to follow them, since they are handled as strong
+ // roots in Universe::oops_do.
+ assert(java_lang_Class::is_primitive(obj), "Sanity check");
+ }
+
+ PSParallelCompact::MarkAndPushClosure cl(cm);
+ oop_oop_iterate_statics(obj, &cl);
+}
+
+void InstanceClassLoaderKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+ InstanceKlass::oop_pc_follow_contents(obj, cm);
+
+ ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
+ if (loader_data != NULL) {
+ PSParallelCompact::follow_class_loader(cm, loader_data);
+ }
+}
+
+template <class T>
+static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ T heap_oop = oopDesc::load_heap_oop(referent_addr);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
+ }
+ )
+ if (!oopDesc::is_null(heap_oop)) {
+ oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
+ PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
+ // reference already enqueued, referent will be traversed later
+ klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Non NULL enqueued " PTR_FORMAT, p2i(obj));
+ }
+ )
+ return;
+ } else {
+ // treat referent as normal oop
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Non NULL normal " PTR_FORMAT, p2i(obj));
+ }
+ )
+ PSParallelCompact::mark_and_push(cm, referent_addr);
+ }
+ }
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+ // Treat discovered as normal oop, if ref is not "active",
+ // i.e. if next is non-NULL.
+ T next_oop = oopDesc::load_heap_oop(next_addr);
+ if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Process discovered as normal "
+ PTR_FORMAT, p2i(discovered_addr));
+ }
+ )
+ PSParallelCompact::mark_and_push(cm, discovered_addr);
+ }
+ } else {
+#ifdef ASSERT
+ // In the case of older JDKs which do not use the discovered
+ // field for the pending list, an inactive ref (next != NULL)
+ // must always have a NULL discovered field.
+ T next = oopDesc::load_heap_oop(next_addr);
+ oop discovered = java_lang_ref_Reference::discovered(obj);
+ assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+ err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+ p2i(obj)));
+#endif
+ }
+ PSParallelCompact::mark_and_push(cm, next_addr);
+ klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
+}
+
+
+void InstanceRefKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+ if (UseCompressedOops) {
+ oop_pc_follow_contents_specialized<narrowOop>(this, obj, cm);
+ } else {
+ oop_pc_follow_contents_specialized<oop>(this, obj, cm);
+ }
+}
+
+void ObjArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+ PSParallelCompact::follow_klass(cm, this);
+
+ if (UseCompressedOops) {
+ oop_pc_follow_contents_specialized<narrowOop>(objArrayOop(obj), 0, cm);
+ } else {
+ oop_pc_follow_contents_specialized<oop>(objArrayOop(obj), 0, cm);
+ }
+}
+
+void TypeArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+ assert(obj->is_typeArray(),"must be a type array");
+ // Performance tweak: We skip iterating over the klass pointer since we
+ // know that Universe::TypeArrayKlass never moves.
+}
+
void ParCompactionManager::follow_marking_stacks() {
do {
// Drain the overflow stack first, to allow stealing from the marking stack.
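A hedged sketch of the drain shape that comment describes; everything beyond the comment itself is an assumption, with standard containers standing in for the HotSpot task stacks. Private work is emptied before the shared stealable queue, and the loop repeats because processing a task can push new work onto either:

#include <deque>
#include <stack>

template <typename Task, typename Fn>
void follow_stacks(std::stack<Task>& overflow, std::deque<Task>& shared, Fn process) {
  bool progress;
  do {
    progress = false;
    while (!overflow.empty()) {      // private overflow work first...
      Task t = overflow.top();
      overflow.pop();
      process(t);
      progress = true;
    }
    while (!shared.empty()) {        // ...so other workers can steal from here
      Task t = shared.front();
      shared.pop_front();
      process(t);
      progress = true;
    }
  } while (progress);                // repeat until both stay empty
}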
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
index 90c22306e7d..fb700318b9c 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
@@ -26,9 +26,11 @@
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_INLINE_HPP
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
-#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
-#include "oops/objArrayKlass.inline.hpp"
-#include "oops/oop.pcgc.inline.hpp"
+#include "gc_implementation/parallelScavenge/psParallelCompact.inline.hpp"
+#include "oops/objArrayOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
void ParCompactionManager::push_objarray(oop obj, size_t index)
{
@@ -49,16 +51,42 @@ void ParCompactionManager::push_region(size_t index)
}
inline void ParCompactionManager::follow_contents(oop obj) {
- obj->follow_contents(this);
+ assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
+ obj->pc_follow_contents(this);
+}
+
+template <typename T>
+inline void oop_pc_follow_contents_specialized(objArrayOop obj, int index, ParCompactionManager* cm) {
+ const size_t len = size_t(obj->length());
+ const size_t beg_index = size_t(index);
+ assert(beg_index < len || len == 0, "index too large");
+
+ const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+ const size_t end_index = beg_index + stride;
+ T* const base = (T*)obj->base();
+ T* const beg = base + beg_index;
+ T* const end = base + end_index;
+
+ // Push the non-NULL elements of the next stride on the marking stack.
+ for (T* e = beg; e < end; e++) {
+ PSParallelCompact::mark_and_push(cm, e);
+ }
+
+ if (end_index < len) {
+ cm->push_objarray(obj, end_index); // Push the continuation.
+ }
}
inline void ParCompactionManager::follow_contents(objArrayOop obj, int index) {
- ObjArrayKlass* k = (ObjArrayKlass*)obj->klass();
- k->oop_follow_contents(this, obj, index);
+ if (UseCompressedOops) {
+ oop_pc_follow_contents_specialized<narrowOop>(obj, index, this);
+ } else {
+ oop_pc_follow_contents_specialized<oop>(obj, index, this);
+ }
}
inline void ParCompactionManager::update_contents(oop obj) {
- obj->update_contents(this);
+ obj->pc_update_contents();
}
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_INLINE_HPP
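oop_pc_follow_contents_specialized above marks object arrays in bounded strides: it pushes at most ObjArrayMarkingStride elements, then queues a continuation (obj, end_index) so the remainder can be processed, or stolen, later. A minimal sketch of that chunking under assumed names (Task, kStride, a plain queue standing in for the work-stealing stack):

#include <algorithm>
#include <cstddef>
#include <queue>
#include <vector>

static const size_t kStride = 512;  // stands in for ObjArrayMarkingStride

struct Task {
  const std::vector<void*>* array;  // the object array being marked
  size_t index;                     // first element of this chunk
};

static size_t marked = 0;
static void mark(void* elem) { (void)elem; marked++; }  // bitmap update elided

static void follow_array_chunk(Task t, std::queue<Task>& stack) {
  const size_t len = t.array->size();
  const size_t end = t.index + std::min(kStride, len - t.index);
  for (size_t i = t.index; i < end; i++) {
    if ((*t.array)[i] != nullptr) mark((*t.array)[i]);  // non-NULL elements only
  }
  if (end < len) {
    stack.push(Task{t.array, end});  // push the continuation
  }
}

int main() {
  std::vector<void*> arr(2000, reinterpret_cast<void*>(0x1));
  std::queue<Task> stack;
  stack.push(Task{&arr, 0});
  while (!stack.empty()) {
    Task t = stack.front();
    stack.pop();
    follow_array_chunk(t, stack);
  }
  return marked == arr.size() ? 0 : 1;
}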
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
index 171f4643920..d676ddec86e 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
@@ -59,7 +59,7 @@ jlong PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;
void PSMarkSweep::initialize() {
- MemRegion mr = Universe::heap()->reserved_region();
+ MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
_ref_processor = new ReferenceProcessor(mr); // a vanilla ref proc
_counters = new CollectorCounters("PSMarkSweep", 1);
}
@@ -81,9 +81,9 @@ void PSMarkSweep::initialize() {
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
- assert(!Universe::heap()->is_gc_active(), "not reentrant");
+ assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCause::Cause gc_cause = heap->gc_cause();
PSAdaptiveSizePolicy* policy = heap->size_policy();
IsGCActiveMark mark;
@@ -110,8 +110,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
return false;
}
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCause::Cause gc_cause = heap->gc_cause();
_gc_timer->register_gc_start();
@@ -487,9 +486,7 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
}
void PSMarkSweep::allocate_stacks() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
MutableSpace* to_space = young_gen->to_space();
@@ -513,10 +510,8 @@ void PSMarkSweep::deallocate_stacks() {
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace(" 1");
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
// Need to clear claim bits before the tracing starts.
ClassLoaderDataGraph::clear_claimed_marks();
@@ -574,7 +569,6 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
void PSMarkSweep::mark_sweep_phase2() {
GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("2");
// Now all live objects are marked, compute the new object addresses.
@@ -582,9 +576,7 @@ void PSMarkSweep::mark_sweep_phase2() {
// phase2, phase3 and phase4, but the ValidateMarkSweep live oops
// tracking expects us to do so. See comment under phase4.
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSOldGen* old_gen = heap->old_gen();
// Begin compacting into the old gen
@@ -604,11 +596,8 @@ static PSAlwaysTrueClosure always_true;
void PSMarkSweep::mark_sweep_phase3() {
// Adjust the pointers to reflect the new locations
GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("3");
-
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
@@ -647,13 +636,10 @@ void PSMarkSweep::mark_sweep_phase3() {
void PSMarkSweep::mark_sweep_phase4() {
EventMark m("4 compact heap");
GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("4");
// All pointers are now adjusted, move objects accordingly
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
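Most of the churn in psMarkSweep.cpp is mechanical: every `(ParallelScavengeHeap*)Universe::heap()` cast plus its trailing `kind()` sanity assert collapses into the single checked accessor `ParallelScavengeHeap::heap()`. A sketch of that accessor pattern under assumed names (CollectedHeapBase, ParallelHeap are stand-ins, not the real class hierarchy):

#include <cassert>

class CollectedHeapBase {
 public:
  enum Kind { SerialKind, ParallelKind };
  virtual Kind kind() const = 0;
  virtual ~CollectedHeapBase() {}
  static CollectedHeapBase* the_heap;  // set once during VM initialization
};
CollectedHeapBase* CollectedHeapBase::the_heap = nullptr;

class ParallelHeap : public CollectedHeapBase {
 public:
  Kind kind() const override { return ParallelKind; }

  // The downcast and its sanity check now live in exactly one place.
  static ParallelHeap* heap() {
    CollectedHeapBase* h = the_heap;
    assert(h != nullptr && h->kind() == ParallelKind);  // "not a ParallelHeap"
    return static_cast<ParallelHeap*>(h);
  }
};

int main() {
  ParallelHeap ph;
  CollectedHeapBase::the_heap = &ph;
  return ParallelHeap::heap() == &ph ? 0 : 1;
}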
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
index 2cf025452b1..e1231c2999a 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
@@ -38,15 +38,12 @@ PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
void PSMarkSweepDecorator::set_destination_decorator_tenured() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_destination_decorator = heap->old_gen()->object_mark_sweep();
}
void PSMarkSweepDecorator::advance_destination_decorator() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
assert(_destination_decorator != NULL, "Sanity");
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
index 61c6caf5364..35c2ff91848 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
@@ -107,20 +107,22 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
SpaceMangler::mangle_region(cmr);
}
- Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+ BarrierSet* bs = heap->barrier_set();
- CardTableModRefBS* _ct =
- barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
+ bs->resize_covered_region(cmr);
+
+ CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
// Verify that the start and end of this generation is the start of a card.
// If this wasn't true, a single card could span more than one generation,
// which would cause problems when we commit/uncommit memory, and when we
// clear and dirty cards.
- guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
- if (_reserved.end() != Universe::heap()->reserved_region().end()) {
+ guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
+ if (_reserved.end() != heap->reserved_region().end()) {
// Don't check at the very end of the heap as we'll assert that we're probing off
// the end if we try.
- guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
+ guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
}
//
@@ -161,8 +163,7 @@ bool PSOldGen::is_allocated() {
}
void PSOldGen::precompact() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
// Reset start array first.
start_array()->reset();
@@ -197,7 +198,7 @@ HeapWord* PSOldGen::allocate(size_t word_size) {
// Allocations in the old generation need to be reported
if (res != NULL) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
heap->size_policy()->tenured_allocation(word_size);
}
@@ -376,8 +377,7 @@ void PSOldGen::resize(size_t desired_free_space) {
}
if (PrintAdaptiveSizePolicy) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
"collection: %d "
"(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
@@ -397,7 +397,7 @@ void PSOldGen::post_resize() {
size_t new_word_size = new_memregion.word_size();
start_array()->set_covered_region(new_memregion);
- Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
+ ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);
// ALWAYS do this last!!
object_space()->initialize(new_memregion,
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
index 0dbd4732f72..9322672680d 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,11 +60,29 @@ class PSOldGen : public CHeapObj<mtGC> {
// Used when initializing the _name field.
static inline const char* select_name();
+#ifdef ASSERT
+ void assert_block_in_covered_region(MemRegion new_memregion) {
+ // Explicitly capture current covered_region in a local
+ MemRegion covered_region = this->start_array()->covered_region();
+ assert(covered_region.contains(new_memregion),
+ err_msg("new region is not in covered_region [ "PTR_FORMAT", "PTR_FORMAT" ], "
+ "new region [ "PTR_FORMAT", "PTR_FORMAT" ], "
+ "object space [ "PTR_FORMAT", "PTR_FORMAT" ]",
+ p2i(covered_region.start()),
+ p2i(covered_region.end()),
+ p2i(new_memregion.start()),
+ p2i(new_memregion.end()),
+ p2i(this->object_space()->used_region().start()),
+ p2i(this->object_space()->used_region().end())));
+ }
+#endif
+
HeapWord* allocate_noexpand(size_t word_size) {
// We assume the heap lock is held here.
assert_locked_or_safepoint(Heap_lock);
HeapWord* res = object_space()->allocate(word_size);
if (res != NULL) {
+ DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
_start_array.allocate_block(res);
}
return res;
@@ -77,6 +95,7 @@ class PSOldGen : public CHeapObj {
assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint");
HeapWord* res = object_space()->cas_allocate(word_size);
if (res != NULL) {
+ DEBUG_ONLY(assert_block_in_covered_region(MemRegion(res, word_size)));
_start_array.allocate_block(res);
}
return res;
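The new assert_block_in_covered_region check fires on every old-gen block allocation, but only in debug builds: DEBUG_ONLY strips it from product binaries. A small sketch of such conditionally compiled checking, assuming the standard NDEBUG convention rather than HotSpot's own macros:

#include <cassert>
#include <cstddef>

#ifdef NDEBUG
#define DEBUG_ONLY_STMT(stmt)        // product build: check vanishes
#else
#define DEBUG_ONLY_STMT(stmt) stmt   // debug build: check runs
#endif

struct Region {
  char* start;
  char* end;
  bool contains(char* p, size_t n) const { return start <= p && p + n <= end; }
};

static char backing[1024];
static Region covered = { backing, backing + sizeof(backing) };

static char* allocate_checked(char* p, size_t n) {
  // Mirrors DEBUG_ONLY(assert_block_in_covered_region(...)) above.
  DEBUG_ONLY_STMT(assert(covered.contains(p, n)));
  return p;
}

int main() { return allocate_checked(backing, 16) == backing ? 0 : 1; }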
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index b8f60ffdadc..7ef05d16a62 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -34,7 +34,7 @@
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
-#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
+#include "gc_implementation/parallelScavenge/psParallelCompact.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
@@ -48,7 +48,10 @@
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/methodData.hpp"
+#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
@@ -745,7 +748,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
assert(addr != NULL, "Should detect NULL oop earlier");
- assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
+ assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
// Region covering the object.
@@ -823,16 +826,8 @@ void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompa
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
-void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); }
-void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
-
void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
-void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) {
- mark_and_push(_compaction_manager, p);
-}
-void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
-
void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
klass->oops_do(_mark_and_push_closure);
}
@@ -841,9 +836,7 @@ void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
}
void PSParallelCompact::post_initialize() {
- ParallelScavengeHeap* heap = gc_heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
MemRegion mr = heap->reserved_region();
_ref_processor =
new ReferenceProcessor(mr, // span
@@ -860,8 +853,7 @@ void PSParallelCompact::post_initialize() {
}
bool PSParallelCompact::initialize() {
- ParallelScavengeHeap* heap = gc_heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
MemRegion mr = heap->reserved_region();
// Was the old gen allocated successfully?
@@ -895,7 +887,7 @@ void PSParallelCompact::initialize_space_info()
{
memset(&_space_info, 0, sizeof(_space_info));
- ParallelScavengeHeap* heap = gc_heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
_space_info[old_space_id].set_space(heap->old_gen()->object_space());
@@ -978,7 +970,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
// promotion failure does not swap spaces) because an unknown number of minor
// collections will have swapped the spaces an unknown number of times.
GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
- ParallelScavengeHeap* heap = gc_heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_space_info[from_space_id].set_space(heap->young_gen()->from_space());
_space_info[to_space_id].set_space(heap->young_gen()->to_space());
@@ -1033,7 +1025,7 @@ void PSParallelCompact::post_compact()
MutableSpace* const from_space = _space_info[from_space_id].space();
MutableSpace* const to_space = _space_info[to_space_id].space();
- ParallelScavengeHeap* heap = gc_heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
bool eden_empty = eden_space->is_empty();
if (!eden_empty) {
eden_empty = absorb_live_data_from_eden(heap->size_policy(),
@@ -1971,7 +1963,7 @@ void PSParallelCompact::invoke(bool maximum_heap_compaction) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(),
"should be in vm thread");
- ParallelScavengeHeap* heap = gc_heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCause::Cause gc_cause = heap->gc_cause();
assert(!heap->is_gc_active(), "not reentrant");
@@ -1999,7 +1991,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
return false;
}
- ParallelScavengeHeap* heap = gc_heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_gc_timer.register_gc_start();
_gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
@@ -2352,7 +2344,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
// Recursively traverse all live objects and mark them
GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
- ParallelScavengeHeap* heap = gc_heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
uint active_gc_threads = heap->gc_task_manager()->active_workers();
TaskQueueSetSuper* qset = ParCompactionManager::region_array();
@@ -2692,8 +2684,7 @@ void PSParallelCompact::compact() {
// trace("5");
GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSOldGen* old_gen = heap->old_gen();
old_gen->start_array()->reset();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2844,7 +2835,7 @@ PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
// heap, last_space_id is returned. In debug mode it expects the address to be
// in the heap and asserts such.
PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
- assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
+ assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
if (_space_info[id].space()->contains(addr)) {
@@ -3338,6 +3329,71 @@ void MoveAndUpdateClosure::copy_partial_obj()
update_state(words);
}
+void InstanceKlass::oop_pc_update_pointers(oop obj) {
+ oop_oop_iterate_oop_maps(obj, PSParallelCompact::adjust_pointer_closure());
+}
+
+void InstanceMirrorKlass::oop_pc_update_pointers(oop obj) {
+ InstanceKlass::oop_pc_update_pointers(obj);
+
+ oop_oop_iterate_statics(obj, PSParallelCompact::adjust_pointer_closure());
+}
+
+void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj) {
+ InstanceKlass::oop_pc_update_pointers(obj);
+}
+
+#ifdef ASSERT
+template <class T> static void trace_reference_gc(const char *s, oop obj,
+ T* referent_addr,
+ T* next_addr,
+ T* discovered_addr) {
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr("%s obj " PTR_FORMAT, s, p2i(obj));
+ gclog_or_tty->print_cr(" referent_addr/* " PTR_FORMAT " / "
+ PTR_FORMAT, p2i(referent_addr),
+ referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
+ gclog_or_tty->print_cr(" next_addr/* " PTR_FORMAT " / "
+ PTR_FORMAT, p2i(next_addr),
+ next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
+ gclog_or_tty->print_cr(" discovered_addr/* " PTR_FORMAT " / "
+ PTR_FORMAT, p2i(discovered_addr),
+ discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
+ }
+}
+#endif
+
+template <class T>
+static void oop_pc_update_pointers_specialized(oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ PSParallelCompact::adjust_pointer(referent_addr);
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ PSParallelCompact::adjust_pointer(next_addr);
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ PSParallelCompact::adjust_pointer(discovered_addr);
+ debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
+ referent_addr, next_addr, discovered_addr);)
+}
+
+void InstanceRefKlass::oop_pc_update_pointers(oop obj) {
+ InstanceKlass::oop_pc_update_pointers(obj);
+
+ if (UseCompressedOops) {
+ oop_pc_update_pointers_specialized<narrowOop>(obj);
+ } else {
+ oop_pc_update_pointers_specialized<oop>(obj);
+ }
+}
+
+void ObjArrayKlass::oop_pc_update_pointers(oop obj) {
+ assert(obj->is_objArray(), "obj must be obj array");
+ oop_oop_iterate_elements(objArrayOop(obj), PSParallelCompact::adjust_pointer_closure());
+}
+
+void TypeArrayKlass::oop_pc_update_pointers(oop obj) {
+ assert(obj->is_typeArray(),"must be a type array");
+}
+
ParMarkBitMapClosure::IterationStatus
MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
assert(destination() != NULL, "sanity");
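oop_pc_update_pointers_specialized above treats the referent, next and discovered fields of a java.lang.ref.Reference as ordinary slots during the adjust phase, rewriting each through the forwarding information. A sketch with a hypothetical forwarding table standing in for summary_data().calc_new_pointer():

#include <cstdint>
#include <unordered_map>

typedef uintptr_t Oop;

// Hypothetical stand-in for the compaction summary data.
static std::unordered_map<Oop, Oop> forwarding;

static void adjust_slot(Oop* p) {
  if (*p != 0) {
    *p = forwarding.at(*p);  // rewrite to the post-compaction address
  }
}

struct JavaRef { Oop referent; Oop next; Oop discovered; };

static void update_reference_pointers(JavaRef* ref) {
  // All three Reference fields are adjusted like ordinary slots.
  adjust_slot(&ref->referent);
  adjust_slot(&ref->next);
  adjust_slot(&ref->discovered);
}

int main() {
  forwarding[0x1000] = 0x500;
  forwarding[0x2000] = 0x600;
  JavaRef r = { 0x1000, 0, 0x2000 };
  update_reference_pointers(&r);
  return (r.referent == 0x500 && r.next == 0 && r.discovered == 0x600) ? 0 : 1;
}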
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index 3e1f9198c57..1daa065e92e 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -26,11 +26,12 @@
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
-#include "memory/sharedHeap.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "oops/oop.hpp"
class ParallelScavengeHeap;
@@ -951,12 +952,14 @@ class PSParallelCompact : AllStatic {
virtual void do_void();
};
- class AdjustPointerClosure: public OopClosure {
+ class AdjustPointerClosure: public ExtendedOopClosure {
public:
+ template <typename T> void do_oop_nv(T* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
- // do not walk from thread stacks to the code cache on this phase
- virtual void do_code_blob(CodeBlob* cb) const { }
+
+ // This closure provides its own oop verification code.
+ debug_only(virtual bool should_verify_oops() { return false; })
};
class AdjustKlassClosure : public KlassClosure {
@@ -1139,13 +1142,18 @@ class PSParallelCompact : AllStatic {
static void reset_millis_since_last_gc();
public:
- class MarkAndPushClosure: public OopClosure {
+ class MarkAndPushClosure: public ExtendedOopClosure {
private:
ParCompactionManager* _compaction_manager;
public:
MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+
+ template <typename T> void do_oop_nv(T* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
+
+ // This closure provides its own oop verification code.
+ debug_only(virtual bool should_verify_oops() { return false; })
};
// The one and only place to start following the classes.
@@ -1161,11 +1169,6 @@ class PSParallelCompact : AllStatic {
PSParallelCompact();
- // Convenient accessor for Universe::heap().
- static ParallelScavengeHeap* gc_heap() {
- return (ParallelScavengeHeap*)Universe::heap();
- }
-
static void invoke(bool maximum_heap_compaction);
static bool invoke_no_policy(bool maximum_heap_compaction);
@@ -1177,7 +1180,9 @@ class PSParallelCompact : AllStatic {
static bool initialize();
// Closure accessors
- static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
+ static PSParallelCompact::AdjustPointerClosure* adjust_pointer_closure() {
+ return &_adjust_pointer_closure;
+ }
static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; }
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
@@ -1332,39 +1337,6 @@ inline bool PSParallelCompact::is_marked(oop obj) {
return mark_bitmap()->is_marked(obj);
}
-template <typename T>
-inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop)) {
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
- cm->push(obj);
- }
- }
-}
-
-template <typename T>
-inline void PSParallelCompact::adjust_pointer(T* p) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop)) {
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- oop new_obj = (oop)summary_data().calc_new_pointer(obj);
- assert(new_obj != NULL, // is forwarding ptr?
- "should be forwarded");
- // Just always do the update unconditionally?
- if (new_obj != NULL) {
- assert(Universe::heap()->is_in_reserved(new_obj),
- "should be in object space");
- oopDesc::encode_store_heap_oop_not_null(p, new_obj);
- }
- }
-}
-
-inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
- oop holder = klass->klass_holder();
- PSParallelCompact::mark_and_push(cm, &holder);
-}
-
template <typename T>
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
mark_and_push(_compaction_manager, p);
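Switching AdjustPointerClosure and MarkAndPushClosure to ExtendedOopClosure with a templated do_oop_nv enables the devirtualized iteration convention: type-specialized iterators call do_oop_nv directly and can inline it, while the virtual do_oop entry points just forward. A standalone sketch of the two dispatch paths, using a toy slot type and hypothetical names:

#include <cstdio>

typedef long* Slot;  // toy stand-in for an oop slot

struct ClosureBase {
  virtual void do_oop(Slot* p) = 0;
  virtual ~ClosureBase() {}
};

struct AdjustClosure : ClosureBase {
  // The real work; inlinable wherever the closure type is known statically.
  template <typename T> void do_oop_nv(T* p) {
    std::printf("adjust slot %p\n", static_cast<void*>(p));
  }
  // The virtual entry point just forwards, as in the .inline.hpp above.
  void do_oop(Slot* p) override { do_oop_nv(p); }
};

// A type-specialized iterator: do_oop_nv resolves at compile time.
template <typename ClosureType>
void iterate_slots(Slot* slots, int n, ClosureType* cl) {
  for (int i = 0; i < n; i++) cl->do_oop_nv(&slots[i]);
}

int main() {
  long a = 1, b = 2;
  Slot slots[2] = { &a, &b };
  AdjustClosure cl;
  iterate_slots(slots, 2, &cl);  // static (devirtualized) path
  ClosureBase* base = &cl;
  base->do_oop(&slots[0]);       // generic virtual path, forwards to do_oop_nv
  return 0;
}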
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
new file mode 100644
index 00000000000..a817bd00051
--- /dev/null
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
+
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
+#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
+#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "oops/klass.hpp"
+#include "oops/oop.inline.hpp"
+
+template <typename T>
+inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
+
+ if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
+ cm->push(obj);
+ }
+ }
+}
+
+template <typename T>
+inline void PSParallelCompact::MarkAndPushClosure::do_oop_nv(T* p) {
+ mark_and_push(_compaction_manager, p);
+}
+
+inline void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { do_oop_nv(p); }
+inline void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
+
+inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
+ oop holder = klass->klass_holder();
+ mark_and_push(cm, &holder);
+}
+
+template <typename T>
+inline void PSParallelCompact::adjust_pointer(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
+
+ oop new_obj = (oop)summary_data().calc_new_pointer(obj);
+ assert(new_obj != NULL, // is forwarding ptr?
+ "should be forwarded");
+ // Just always do the update unconditionally?
+ if (new_obj != NULL) {
+ assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
+ "should be in object space");
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+ }
+ }
+}
+
+template <typename T>
+void PSParallelCompact::AdjustPointerClosure::do_oop_nv(T* p) {
+ adjust_pointer(p);
+}
+
+inline void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { do_oop_nv(p); }
+inline void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
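mark_and_push in the new inline header is the marking-phase workhorse: a slot's target is pushed onto the worker's stack only if this call is the one that marks it, so each object is traversed exactly once even with many workers racing. A sketch of that claim-then-push shape, using a set insert where HotSpot uses an atomic bitmap update; Manager and its members are hypothetical:

#include <cstdint>
#include <unordered_set>
#include <vector>

typedef uintptr_t Oop;

struct Manager {
  std::unordered_set<Oop> mark_bitmap;  // stands in for the side mark bitmap
  std::vector<Oop> marking_stack;       // per-worker stack of objects to follow
};

// Push the slot's target iff it is non-NULL and this call is the one that
// marks it, mirroring is_unmarked(obj) && mark_obj(obj) above.
static void mark_and_push(Manager* cm, Oop* p) {
  Oop obj = *p;
  if (obj != 0 && cm->mark_bitmap.insert(obj).second) {
    cm->marking_stack.push_back(obj);
  }
}

int main() {
  Manager cm;
  Oop slot = 0x1234;
  mark_and_push(&cm, &slot);
  mark_and_push(&cm, &slot);  // second visit: already marked, nothing pushed
  return cm.marking_stack.size() == 1 ? 0 : 1;
}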
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
index fc05b2e95d8..6786be2146a 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,7 +103,7 @@ void PSPromotionLAB::flush() {
}
bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
- assert(Universe::heap()->is_in(obj), "Object outside heap");
+ assert(ParallelScavengeHeap::heap()->is_in(obj), "Object outside heap");
if (contains(obj)) {
HeapWord* object_end = obj + obj_size;
@@ -137,9 +137,7 @@ void PSOldPromotionLAB::flush() {
#ifdef ASSERT
bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
MutableSpace* to_space = heap->young_gen()->to_space();
MemRegion used = to_space->used_region();
if (used.contains(lab)) {
@@ -150,10 +148,9 @@ bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
}
bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
assert(_start_array->covered_region().contains(lab), "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSOldGen* old_gen = heap->old_gen();
MemRegion used = old_gen->object_space()->used_region();
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
index 05c27a3f3da..c214a13a0dd 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_HPP
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
-#include "gc_interface/collectedHeap.inline.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
//
@@ -59,7 +59,7 @@ class PSPromotionLAB : public CHeapObj<mtGC> {
// The shared initialize code invokes this.
debug_only(virtual bool lab_is_valid(MemRegion lab) { return false; });
- PSPromotionLAB() : _top(NULL), _bottom(NULL), _end(NULL) { }
+ PSPromotionLAB() : _top(NULL), _bottom(NULL), _end(NULL), _state(zero_size) { }
public:
// Filling and flushing.
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
index be4a9ed11d7..29076516b47 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
@@ -32,6 +32,9 @@
#include "memory/allocation.inline.hpp"
#include "memory/memRegion.hpp"
#include "memory/padded.inline.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
+#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/stack.inline.hpp"
@@ -41,8 +44,7 @@ PSOldGen* PSPromotionManager::_old_gen = NULL;
MutableSpace* PSPromotionManager::_young_space = NULL;
void PSPromotionManager::initialize() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_old_gen = heap->old_gen();
_young_space = heap->young_gen()->to_space();
@@ -85,8 +87,7 @@ PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
}
void PSPromotionManager::pre_scavenge() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_young_space = heap->young_gen()->to_space();
@@ -129,7 +130,7 @@ static const char* const pm_stats_hdr[] = {
void
PSPromotionManager::print_taskqueue_stats(outputStream* const out) {
out->print_cr("== GC Tasks Stats, GC %3d",
- Universe::heap()->total_collections());
+ ParallelScavengeHeap::heap()->total_collections());
TaskQueueStats totals;
out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
@@ -157,8 +158,7 @@ PSPromotionManager::reset_stats() {
#endif // TASKQUEUE_STATS
PSPromotionManager::PSPromotionManager() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
// We set the old lab's start array.
_old_lab.set_start_array(old_gen()->start_array());
@@ -188,8 +188,7 @@ void PSPromotionManager::reset() {
// We need to get an assert in here to make sure the labs are always flushed.
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
// Do not prefill the LAB's, save heap wastage!
HeapWord* lab_base = young_space()->top();
@@ -210,8 +209,7 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
totally_drain = totally_drain || _totally_drain;
#ifdef ASSERT
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
MutableSpace* to_space = heap->young_gen()->to_space();
MutableSpace* old_space = heap->old_gen()->object_space();
#endif /* ASSERT */
@@ -308,6 +306,118 @@ void PSPromotionManager::process_array_chunk(oop old) {
}
}
+class PushContentsClosure : public ExtendedOopClosure {
+ PSPromotionManager* _pm;
+ public:
+ PushContentsClosure(PSPromotionManager* pm) : _pm(pm) {}
+
+ template <typename T> void do_oop_nv(T* p) {
+ if (PSScavenge::should_scavenge(p)) {
+ _pm->claim_or_forward_depth(p);
+ }
+ }
+
+ virtual void do_oop(oop* p) { do_oop_nv(p); }
+ virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+
+ // Don't use the oop verification code in the oop_oop_iterate framework.
+ debug_only(virtual bool should_verify_oops() { return false; })
+};
+
+void InstanceKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+ PushContentsClosure cl(pm);
+ oop_oop_iterate_oop_maps_reverse(obj, &cl);
+}
+
+void InstanceMirrorKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+ // Note that we don't have to follow the mirror -> klass pointer, since all
+ // klasses that are dirty will be scavenged when we iterate over the
+ // ClassLoaderData objects.
+
+ InstanceKlass::oop_ps_push_contents(obj, pm);
+
+ PushContentsClosure cl(pm);
+ oop_oop_iterate_statics(obj, &cl);
+}
+
+void InstanceClassLoaderKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+ InstanceKlass::oop_ps_push_contents(obj, pm);
+
+ // This is called by the young collector. It will already have taken care of
+ // all class loader data. So, we don't have to follow the class loader ->
+ // class loader data link.
+}
+
+template <class T>
+static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ if (PSScavenge::should_scavenge(referent_addr)) {
+ ReferenceProcessor* rp = PSScavenge::reference_processor();
+ if (rp->discover_reference(obj, klass->reference_type())) {
+ // reference already enqueued, referent and next will be traversed later
+ klass->InstanceKlass::oop_ps_push_contents(obj, pm);
+ return;
+ } else {
+ // treat referent as normal oop
+ pm->claim_or_forward_depth(referent_addr);
+ }
+ }
+ // Treat discovered as normal oop, if ref is not "active",
+ // i.e. if next is non-NULL.
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+ T next_oop = oopDesc::load_heap_oop(next_addr);
+ if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Process discovered as normal "
+ PTR_FORMAT, p2i(discovered_addr));
+ }
+ )
+ if (PSScavenge::should_scavenge(discovered_addr)) {
+ pm->claim_or_forward_depth(discovered_addr);
+ }
+ }
+ } else {
+#ifdef ASSERT
+ // In the case of older JDKs which do not use the discovered
+ // field for the pending list, an inactive ref (next != NULL)
+ // must always have a NULL discovered field.
+ oop next = oopDesc::load_decode_heap_oop(next_addr);
+ oop discovered = java_lang_ref_Reference::discovered(obj);
+ assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+ err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+ p2i(obj)));
+#endif
+ }
+
+ // Treat next as normal oop; next is a link in the reference queue.
+ if (PSScavenge::should_scavenge(next_addr)) {
+ pm->claim_or_forward_depth(next_addr);
+ }
+ klass->InstanceKlass::oop_ps_push_contents(obj, pm);
+}
+
+void InstanceRefKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+ if (UseCompressedOops) {
+ oop_ps_push_contents_specialized<narrowOop>(obj, this, pm);
+ } else {
+ oop_ps_push_contents_specialized<oop>(obj, this, pm);
+ }
+}
+
+void ObjArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+ assert(obj->is_objArray(), "obj must be obj array");
+ PushContentsClosure cl(pm);
+ oop_oop_iterate_elements(objArrayOop(obj), &cl);
+}
+
+void TypeArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+ assert(obj->is_typeArray(),"must be a type array");
+ ShouldNotReachHere();
+}
+
oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
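PushContentsClosure and the oop_ps_push_contents methods above all funnel through the same filter: a slot is claimed for copying only if should_scavenge says its target still lies in young space. A sketch of that filter with hypothetical range and queue types:

#include <cstdint>
#include <vector>

typedef uintptr_t Addr;

struct YoungRange {
  Addr lo, hi;
  bool contains(Addr a) const { return lo <= a && a < hi; }
};

struct PromotionManager {
  YoungRange young;
  std::vector<Addr*> depth_queue;  // slots claimed for later copying

  bool should_scavenge(Addr* p) const { return *p != 0 && young.contains(*p); }
  void claim_or_forward_depth(Addr* p) { depth_queue.push_back(p); }
};

static void push_contents_slot(PromotionManager* pm, Addr* p) {
  // Only slots still pointing into young space need work this scavenge.
  if (pm->should_scavenge(p)) {
    pm->claim_or_forward_depth(p);
  }
}

int main() {
  PromotionManager pm;
  pm.young = YoungRange{0x1000, 0x2000};
  Addr young_slot = 0x1800, old_slot = 0x3000;
  push_contents_slot(&pm, &young_slot);
  push_contents_slot(&pm, &old_slot);  // filtered out: target not in young gen
  return pm.depth_queue.size() == 1 ? 0 : 1;
}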
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
index cb6445fe13f..301136fc986 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
@@ -25,11 +25,12 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
-#include "oops/oop.psgc.inline.hpp"
+#include "oops/oop.inline.hpp"
inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
assert(_manager_array != NULL, "access of NULL manager_array");
@@ -57,9 +58,7 @@ inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
assert(should_scavenge(p, true), "revisiting object?");
- assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
- "Sanity");
- assert(Universe::heap()->is_in(p), "pointer outside heap");
+ assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");
claim_or_forward_internal_depth(p);
}
@@ -92,7 +91,7 @@ inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
}
inline void PSPromotionManager::push_contents(oop obj) {
- obj->push_contents(this);
+ obj->ps_push_contents(this);
}
//
// This method is pretty bulky. It would be nice to split it up
@@ -150,7 +149,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
// Otherwise try allocating obj tenured
if (new_obj == NULL) {
#ifndef PRODUCT
- if (Universe::heap()->promotion_should_fail()) {
+ if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
return oop_promotion_failed(o, test_mark);
}
#endif // #ifndef PRODUCT
@@ -296,7 +295,7 @@ inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
// that are outside the heap. These pointers are either from roots
// or from metadata.
if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
- Universe::heap()->is_in_reserved(p)) {
+ ParallelScavengeHeap::heap()->is_in_reserved(p)) {
if (PSScavenge::is_obj_in_young(new_obj)) {
PSScavenge::card_table()->inline_write_ref_field_gc(p, new_obj);
}
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
index 04ff184ea0c..10585b53024 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -87,8 +87,7 @@ protected:
public:
PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_to_space = heap->young_gen()->to_space();
assert(_promotion_manager != NULL, "Sanity");
@@ -218,11 +217,9 @@ void PSRefProcTaskExecutor::execute(EnqueueTask& task)
bool PSScavenge::invoke() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
- assert(!Universe::heap()->is_gc_active(), "not reentrant");
-
- ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
+ ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
PSAdaptiveSizePolicy* policy = heap->size_policy();
IsGCActiveMark mark;
@@ -273,9 +270,8 @@ bool PSScavenge::invoke_no_policy() {
return false;
}
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCause::Cause gc_cause = heap->gc_cause();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// Check for potential problems.
if (!should_attempt_scavenge()) {
@@ -713,9 +709,7 @@ bool PSScavenge::invoke_no_policy() {
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
{
@@ -742,7 +736,7 @@ void PSScavenge::clean_up_failed_promotion() {
}
// Reset the PromotionFailureALot counters.
- NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+ NOT_PRODUCT(heap->reset_promotion_should_fail();)
}
// This method is called whenever an attempt to promote an object
@@ -761,8 +755,7 @@ void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
}
bool PSScavenge::should_attempt_scavenge() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
if (UsePerfData) {
@@ -838,9 +831,7 @@ void PSScavenge::initialize() {
MaxTenuringThreshold;
}
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
index a6c40edfe7c..e995a618208 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
#include "utilities/globalDefinitions.hpp"
inline void PSScavenge::save_to_space_top_before_gc() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_to_space_top_before_gc = heap->young_gen()->to_space()->top();
}
@@ -56,7 +56,7 @@ inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
template <class T>
inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
if (check_to_space) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
return should_scavenge(p, heap->young_gen()->to_space());
}
return should_scavenge(p);
@@ -97,7 +97,6 @@ class PSScavengeFromKlassClosure: public OopClosure {
ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
assert(!psh->is_in_reserved(p), "GC barrier needed");
if (PSScavenge::should_scavenge(p)) {
- assert(!Universe::heap()->is_in_reserved(p), "Not from meta-data?");
assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
oop o = *p;
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
index 5739331ab6a..c9e7ee7171a 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
@@ -47,7 +47,7 @@
//
void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
- assert(Universe::heap()->is_gc_active(), "called outside gc");
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
PSScavengeRootsClosure roots_closure(pm);
@@ -118,7 +118,7 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
//
void ThreadRootsTask::do_it(GCTaskManager* manager, uint which) {
- assert(Universe::heap()->is_gc_active(), "called outside gc");
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
PSScavengeRootsClosure roots_closure(pm);
@@ -143,7 +143,7 @@ StealTask::StealTask(ParallelTaskTerminator* t) :
_terminator(t) {}
void StealTask::do_it(GCTaskManager* manager, uint which) {
- assert(Universe::heap()->is_gc_active(), "called outside gc");
+ assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
PSPromotionManager* pm =
PSPromotionManager::gc_thread_promotion_manager(which);
@@ -181,10 +181,8 @@ void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
{
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
-
- assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
CardTableExtension* card_table =
- barrier_set_cast<CardTableExtension>(Universe::heap()->barrier_set());
+ barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
card_table->scavenge_contents_parallel(_gen->start_array(),
_gen->object_space(),
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp
index 867422eaf21..baf8e2dd661 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,8 @@
#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
+#include "memory/virtualspace.hpp"
#include "runtime/os.hpp"
-#include "runtime/virtualspace.hpp"
// PSVirtualSpace
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp
index a8530f0a6a7..52d77402df1 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSVIRTUALSPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSVIRTUALSPACE_HPP
-#include "runtime/virtualspace.hpp"
+#include "memory/virtualspace.hpp"
// VirtualSpace for the parallel scavenge collector.
//
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
index 286fbaa88fa..08bdea7bf4c 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@ void PSYoungGen::initialize_work() {
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
- Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
@@ -103,7 +103,7 @@ void PSYoungGen::initialize_work() {
_max_gen_size, _virtual_space);
// Compute maximum space sizes for performance counters
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
size_t alignment = heap->space_alignment();
size_t size = virtual_space()->reserved_size();
@@ -153,8 +153,7 @@ void PSYoungGen::initialize_work() {
}
void PSYoungGen::compute_initial_space_boundaries() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
// Compute sizes
size_t alignment = heap->space_alignment();
@@ -208,7 +207,7 @@ void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
#ifndef PRODUCT
void PSYoungGen::space_invariants() {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
const size_t alignment = heap->space_alignment();
// Currently, our eden size cannot shrink to zero
@@ -494,7 +493,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
char* to_start = (char*)to_space()->bottom();
char* to_end = (char*)to_space()->end();
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
const size_t alignment = heap->space_alignment();
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
@@ -546,8 +545,6 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
// Does the optimal to-space overlap from-space?
if (to_start < (char*)from_space()->end()) {
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
// Calculate the minimum offset possible for from_end
size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));
@@ -708,9 +705,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
assert(from_space()->top() == old_from_top, "from top changed!");
if (PrintAdaptiveSizePolicy) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
"collection: %d "
"(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
@@ -843,7 +838,7 @@ size_t PSYoungGen::available_to_min_gen() {
// from-space.
size_t PSYoungGen::available_to_live() {
size_t delta_in_survivor = 0;
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
const size_t space_alignment = heap->space_alignment();
const size_t gen_alignment = heap->generation_alignment();
@@ -927,7 +922,7 @@ void PSYoungGen::post_resize() {
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
- Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
space_invariants();
}
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
index 9148bb4ffc0..cc5e1226344 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
@@ -26,7 +26,6 @@
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
-#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "utilities/dtrace.hpp"
@@ -41,8 +40,7 @@ VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t word_size,
void VM_ParallelGCFailedAllocation::doit() {
SvcGCMarker sgcm(SvcGCMarker::MINOR);
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCauseSetter gccs(heap, _gc_cause);
_result = heap->failed_mem_allocate(_word_size);
@@ -63,9 +61,7 @@ VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
void VM_ParallelGCSystemGC::doit() {
SvcGCMarker sgcm(SvcGCMarker::FULL);
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
- "must be a ParallelScavengeHeap");
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCauseSetter gccs(heap, _gc_cause);
if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp
index 3c1a20284ed..fa79b31a13e 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,6 @@
\
static_field(ParallelScavengeHeap, _young_gen, PSYoungGen*) \
static_field(ParallelScavengeHeap, _old_gen, PSOldGen*) \
- static_field(ParallelScavengeHeap, _psh, ParallelScavengeHeap*) \
\
#define VM_TYPES_PARALLELGC(declare_type, \
diff --git a/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp b/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp
index d69035625ad..709d3e18b27 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp
@@ -25,9 +25,9 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/sharedHeap.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/copy.hpp"
@@ -79,7 +79,7 @@ void ageTable::merge_par(ageTable* subTable) {
}
}
-uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
+uint ageTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) {
size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
uint result;
@@ -126,9 +126,6 @@ uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
age++;
}
if (UsePerfData) {
- SharedHeap* sh = SharedHeap::heap();
- CollectorPolicy* policy = sh->collector_policy();
- GCPolicyCounters* gc_counters = policy->counters();
gc_counters->tenuring_threshold()->set_value(result);
gc_counters->desired_survivor_size()->set_value(
desired_survivor_size*oopSize);
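
With SharedHeap going away, compute_tenuring_threshold can no longer fish the counters out of a global; the hunk above makes the caller pass them in. A hedged sketch of that dependency injection, using simplified stand-in types only:

#include <cstddef>

struct GCPolicyCountersModel {
  unsigned tenuring_threshold;   // the real class wraps jvmstat perf counters
};

struct AgeTableModel {
  unsigned compute_tenuring_threshold(std::size_t survivor_capacity,
                                      GCPolicyCountersModel* gc_counters) {
    (void)survivor_capacity;       // the age-based scan is elided here
    unsigned result = 15;          // placeholder threshold
    if (gc_counters != nullptr) {  // supplied by the caller, no global lookup
      gc_counters->tenuring_threshold = result;
    }
    return result;
  }
};
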
diff --git a/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp b/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp
index 44d8e0ace7f..44587d99d62 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp
@@ -29,6 +29,8 @@
#include "oops/oop.hpp"
#include "runtime/perfData.hpp"
+class GCPolicyCounters;
+
/* Copyright (c) 1992-2009 Oracle and/or its affiliates, and Stanford University.
See the LICENSE file for license information. */
@@ -69,7 +71,7 @@ class ageTable VALUE_OBJ_CLASS_SPEC {
void merge_par(ageTable* subTable);
// calculate new tenuring threshold based on age information
- uint compute_tenuring_threshold(size_t survivor_capacity);
+ uint compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters);
private:
PerfVariable* _perf_sizes[table_size];
diff --git a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp
index 4a74a3fd4a0..6e92cd50638 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/generationCounters.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,8 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GENERATIONCOUNTERS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GENERATIONCOUNTERS_HPP
+#include "memory/virtualspace.hpp"
#include "runtime/perfData.hpp"
-#include "runtime/virtualspace.hpp"
// A GenerationCounter is a holder class for performance counters
// that track a generation
diff --git a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp
index bcf4fef7184..e459341ba03 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp
@@ -28,6 +28,8 @@
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
@@ -55,16 +57,183 @@ MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
CLDToOopClosure MarkSweep::follow_cld_closure(&mark_and_push_closure);
CLDToOopClosure MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
-void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); }
-void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
+template <typename T>
+void MarkSweep::MarkAndPushClosure::do_oop_nv(T* p) { mark_and_push(p); }
+void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { do_oop_nv(p); }
+void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
MarkSweep::follow_cld_closure.do_cld(cld);
}
+void InstanceKlass::oop_ms_follow_contents(oop obj) {
+ assert(obj != NULL, "can't follow the content of NULL object");
+ MarkSweep::follow_klass(this);
+
+ oop_oop_iterate_oop_maps(obj, &MarkSweep::mark_and_push_closure);
+}
+
+void InstanceMirrorKlass::oop_ms_follow_contents(oop obj) {
+ InstanceKlass::oop_ms_follow_contents(obj);
+
+ // Follow the klass field in the mirror
+ Klass* klass = java_lang_Class::as_Klass(obj);
+ if (klass != NULL) {
+ // An anonymous class doesn't have its own class loader, so the call
+ // to follow_klass will mark and push its java mirror instead of the
+ // class loader. When handling the java mirror for an anonymous class
+ // we need to make sure its class loader data is claimed, this is done
+ // by calling follow_class_loader explicitly. For non-anonymous classes
+ // the call to follow_class_loader is made when the class loader itself
+ // is handled.
+ if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+ MarkSweep::follow_class_loader(klass->class_loader_data());
+ } else {
+ MarkSweep::follow_klass(klass);
+ }
+ } else {
+    // If klass is NULL then this is a mirror for a primitive type.
+ // We don't have to follow them, since they are handled as strong
+ // roots in Universe::oops_do.
+ assert(java_lang_Class::is_primitive(obj), "Sanity check");
+ }
+
+ oop_oop_iterate_statics(obj, &MarkSweep::mark_and_push_closure);
+}
+
+void InstanceClassLoaderKlass::oop_ms_follow_contents(oop obj) {
+ InstanceKlass::oop_ms_follow_contents(obj);
+
+ ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
+
+ // We must NULL check here, since the class loader
+ // can be found before the loader data has been set up.
+ if(loader_data != NULL) {
+ MarkSweep::follow_class_loader(loader_data);
+ }
+}
+
+template <class T>
+static void oop_ms_follow_contents_specialized(InstanceRefKlass* klass, oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ T heap_oop = oopDesc::load_heap_oop(referent_addr);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr("InstanceRefKlass::oop_ms_follow_contents_specialized " PTR_FORMAT, p2i(obj));
+ }
+ )
+ if (!oopDesc::is_null(heap_oop)) {
+ oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (!referent->is_gc_marked() &&
+ MarkSweep::ref_processor()->discover_reference(obj, klass->reference_type())) {
+ // reference was discovered, referent will be traversed later
+ klass->InstanceKlass::oop_ms_follow_contents(obj);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Non NULL enqueued " PTR_FORMAT, p2i(obj));
+ }
+ )
+ return;
+ } else {
+ // treat referent as normal oop
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Non NULL normal " PTR_FORMAT, p2i(obj));
+ }
+ )
+ MarkSweep::mark_and_push(referent_addr);
+ }
+ }
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+ // Treat discovered as normal oop, if ref is not "active",
+ // i.e. if next is non-NULL.
+ T next_oop = oopDesc::load_heap_oop(next_addr);
+ if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Process discovered as normal "
+ PTR_FORMAT, p2i(discovered_addr));
+ }
+ )
+ MarkSweep::mark_and_push(discovered_addr);
+ }
+ } else {
+#ifdef ASSERT
+ // In the case of older JDKs which do not use the discovered
+ // field for the pending list, an inactive ref (next != NULL)
+ // must always have a NULL discovered field.
+ oop next = oopDesc::load_decode_heap_oop(next_addr);
+ oop discovered = java_lang_ref_Reference::discovered(obj);
+ assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+ err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+ p2i(obj)));
+#endif
+ }
+ // treat next as normal oop. next is a link in the reference queue.
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Process next as normal " PTR_FORMAT, p2i(next_addr));
+ }
+ )
+ MarkSweep::mark_and_push(next_addr);
+ klass->InstanceKlass::oop_ms_follow_contents(obj);
+}
+
+void InstanceRefKlass::oop_ms_follow_contents(oop obj) {
+ if (UseCompressedOops) {
+    oop_ms_follow_contents_specialized<narrowOop>(this, obj);
+ } else {
+    oop_ms_follow_contents_specialized<oop>(this, obj);
+ }
+}
+
+template <class T>
+static void oop_ms_follow_contents_specialized(oop obj, int index) {
+ objArrayOop a = objArrayOop(obj);
+ const size_t len = size_t(a->length());
+ const size_t beg_index = size_t(index);
+ assert(beg_index < len || len == 0, "index too large");
+
+ const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+ const size_t end_index = beg_index + stride;
+ T* const base = (T*)a->base();
+ T* const beg = base + beg_index;
+ T* const end = base + end_index;
+
+ // Push the non-NULL elements of the next stride on the marking stack.
+ for (T* e = beg; e < end; e++) {
+ MarkSweep::mark_and_push(e);
+ }
+
+ if (end_index < len) {
+ MarkSweep::push_objarray(a, end_index); // Push the continuation.
+ }
+}
+
+void ObjArrayKlass::oop_ms_follow_contents(oop obj) {
+ assert (obj->is_array(), "obj must be array");
+ MarkSweep::follow_klass(this);
+ if (UseCompressedOops) {
+    oop_ms_follow_contents_specialized<narrowOop>(obj, 0);
+ } else {
+    oop_ms_follow_contents_specialized<oop>(obj, 0);
+ }
+}
+
+void TypeArrayKlass::oop_ms_follow_contents(oop obj) {
+ assert(obj->is_typeArray(),"must be a type array");
+ // Performance tweak: We skip iterating over the klass pointer since we
+ // know that Universe::TypeArrayKlass never moves.
+}
+
void MarkSweep::follow_array(objArrayOop array, int index) {
- ObjArrayKlass* k = (ObjArrayKlass*)array->klass();
- k->oop_follow_contents(array, index);
+ if (UseCompressedOops) {
+    oop_ms_follow_contents_specialized<narrowOop>(array, index);
+ } else {
+    oop_ms_follow_contents_specialized<oop>(array, index);
+ }
}
void MarkSweep::follow_stack() {
@@ -112,8 +281,10 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) {
MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;
-void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); }
-void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
+template <typename T>
+void MarkSweep::AdjustPointerClosure::do_oop_nv(T* p) { adjust_pointer(p); }
+void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { do_oop_nv(p); }
+void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void MarkSweep::adjust_marks() {
assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(),
@@ -167,11 +338,83 @@ void marksweep_init() {
MarkSweep::_gc_tracer = new (ResourceObj::C_HEAP, mtGC) SerialOldTracer();
}
-#ifndef PRODUCT
-
-void MarkSweep::trace(const char* msg) {
- if (TraceMarkSweep)
- gclog_or_tty->print("%s", msg);
+int InstanceKlass::oop_ms_adjust_pointers(oop obj) {
+ int size = size_helper();
+ oop_oop_iterate_oop_maps(obj, &MarkSweep::adjust_pointer_closure);
+ return size;
}
+int InstanceMirrorKlass::oop_ms_adjust_pointers(oop obj) {
+ int size = oop_size(obj);
+ InstanceKlass::oop_ms_adjust_pointers(obj);
+
+ oop_oop_iterate_statics(obj, &MarkSweep::adjust_pointer_closure);
+ return size;
+}
+
+int InstanceClassLoaderKlass::oop_ms_adjust_pointers(oop obj) {
+ return InstanceKlass::oop_ms_adjust_pointers(obj);
+}
+
+#ifdef ASSERT
+template <class T> static void trace_reference_gc(const char *s, oop obj,
+ T* referent_addr,
+ T* next_addr,
+ T* discovered_addr) {
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr("%s obj " PTR_FORMAT, s, p2i(obj));
+ gclog_or_tty->print_cr(" referent_addr/* " PTR_FORMAT " / "
+ PTR_FORMAT, p2i(referent_addr),
+ p2i(referent_addr ?
+ (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
+ gclog_or_tty->print_cr(" next_addr/* " PTR_FORMAT " / "
+ PTR_FORMAT, p2i(next_addr),
+ p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
+ gclog_or_tty->print_cr(" discovered_addr/* " PTR_FORMAT " / "
+ PTR_FORMAT, p2i(discovered_addr),
+ p2i(discovered_addr ?
+ (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
+ }
+}
#endif
+
+template <class T> void static adjust_object_specialized(oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ MarkSweep::adjust_pointer(referent_addr);
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ MarkSweep::adjust_pointer(next_addr);
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ MarkSweep::adjust_pointer(discovered_addr);
+ debug_only(trace_reference_gc("InstanceRefKlass::oop_ms_adjust_pointers", obj,
+ referent_addr, next_addr, discovered_addr);)
+}
+
+int InstanceRefKlass::oop_ms_adjust_pointers(oop obj) {
+ int size = size_helper();
+ InstanceKlass::oop_ms_adjust_pointers(obj);
+
+ if (UseCompressedOops) {
+    adjust_object_specialized<narrowOop>(obj);
+ } else {
+    adjust_object_specialized<oop>(obj);
+ }
+ return size;
+}
+
+int ObjArrayKlass::oop_ms_adjust_pointers(oop obj) {
+ assert(obj->is_objArray(), "obj must be obj array");
+ objArrayOop a = objArrayOop(obj);
+ // Get size before changing pointers.
+ // Don't call size() or oop_size() since that is a virtual call.
+ int size = a->object_size();
+ oop_oop_iterate_elements(a, &MarkSweep::adjust_pointer_closure);
+ return size;
+}
+
+int TypeArrayKlass::oop_ms_adjust_pointers(oop obj) {
+ assert(obj->is_typeArray(), "must be a type array");
+ typeArrayOop t = typeArrayOop(obj);
+ // Performance tweak: We skip iterating over the klass pointer since we
+ // know that Universe::TypeArrayKlass never moves.
+ return t->object_size();
+}
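
A recurring device in the new oop_ms_* code above is run-time selection between two instantiations of one template body, keyed on UseCompressedOops. A minimal compilable model (narrowOop/oop are plain integer stand-ins here):

#include <cstdint>

typedef uint32_t  narrowOopModel;   // compressed reference (stand-in)
typedef uintptr_t oopModel;         // full-width reference (stand-in)

static bool UseCompressedOopsModel = true;

template <class T>
static void follow_elements_specialized(T* base, int len) {
  for (int i = 0; i < len; i++) {
    (void)base[i];  // the real code calls MarkSweep::mark_and_push(&base[i])
  }
}

static void follow_elements(void* base, int len) {
  if (UseCompressedOopsModel) {
    follow_elements_specialized<narrowOopModel>(static_cast<narrowOopModel*>(base), len);
  } else {
    follow_elements_specialized<oopModel>(static_cast<oopModel*>(base), len);
  }
}
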
diff --git a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp
index 724d212940c..6ffd76af54d 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp
@@ -60,8 +60,9 @@ class MarkSweep : AllStatic {
virtual void do_oop(narrowOop* p);
};
- class MarkAndPushClosure: public OopClosure {
+ class MarkAndPushClosure: public ExtendedOopClosure {
public:
+    template <typename T> void do_oop_nv(T* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
};
@@ -73,8 +74,12 @@ class MarkSweep : AllStatic {
class AdjustPointerClosure: public OopsInGenClosure {
public:
+    template <typename T> void do_oop_nv(T* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
+
+ // This closure provides its own oop verification code.
+ debug_only(virtual bool should_verify_oops() { return false; })
};
// Used for java/lang/ref handling
@@ -126,9 +131,6 @@ class MarkSweep : AllStatic {
// Non public closures
static KeepAliveClosure keep_alive;
- // Debugging
- static void trace(const char* msg) PRODUCT_RETURN;
-
public:
// Public closures
static IsAliveClosure is_alive;
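
The do_oop_nv additions above follow HotSpot's non-virtual closure convention: the virtual do_oop overrides forward to a template member, so iteration code that knows the closure's concrete type can call do_oop_nv directly and skip a virtual dispatch per field. A sketch with stand-in types:

#include <cstdint>

typedef uint32_t  narrowOopModel;
typedef uintptr_t oopModel;

class MarkAndPushClosureModel {
 public:
  template <typename T> void do_oop_nv(T* p) {
    (void)p;  // the real body is mark_and_push(p)
  }
  virtual void do_oop(oopModel* p)       { do_oop_nv(p); }   // generic path
  virtual void do_oop(narrowOopModel* p) { do_oop_nv(p); }
  virtual ~MarkAndPushClosureModel() {}
};
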
diff --git a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
index 196dcad7a7f..39201f76440 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
@@ -28,11 +28,15 @@
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "oops/markOop.inline.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceClassLoaderKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
+#include "oops/instanceRefKlass.inline.hpp"
+#include "oops/objArrayKlass.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1StringDedup.hpp"
-#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#endif // INCLUDE_ALL_GCS
inline void MarkSweep::mark_object(oop obj) {
@@ -59,7 +63,9 @@ inline void MarkSweep::follow_klass(Klass* klass) {
}
inline void MarkSweep::follow_object(oop obj) {
- obj->follow_contents();
+ assert(obj->is_gc_marked(), "should be marked");
+
+ obj->ms_follow_contents();
}
template <class T> inline void MarkSweep::follow_root(T* p) {
@@ -95,13 +101,15 @@ void MarkSweep::push_objarray(oop obj, size_t index) {
}
inline int MarkSweep::adjust_pointers(oop obj) {
- return obj->adjust_pointers();
+ return obj->ms_adjust_pointers();
}
template <class T> inline void MarkSweep::adjust_pointer(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ assert(Universe::heap()->is_in(obj), "should be in heap");
+
oop new_obj = oop(obj->mark()->decode_pointer());
assert(new_obj != NULL || // is forwarding ptr?
obj->mark() == markOopDesc::prototype() || // not gc marked?
diff --git a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
index 1d4564160a0..b6c68fd4808 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
@@ -26,7 +26,7 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/mutableNUMASpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
-#include "memory/sharedHeap.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/thread.inline.hpp"
diff --git a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp b/hotspot/src/share/vm/gc_implementation/shared/plab.cpp
similarity index 90%
rename from hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp
rename to hotspot/src/share/vm/gc_implementation/shared/plab.cpp
index fe21e584cf7..3ecc205df31 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/plab.cpp
@@ -23,21 +23,21 @@
*/
#include "precompiled.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/plab.hpp"
#include "memory/threadLocalAllocBuffer.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
-size_t ParGCAllocBuffer::min_size() {
+size_t PLAB::min_size() {
// Make sure that we return something that is larger than AlignmentReserve
return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
}
-size_t ParGCAllocBuffer::max_size() {
+size_t PLAB::max_size() {
return ThreadLocalAllocBuffer::max_size();
}
-ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
+PLAB::PLAB(size_t desired_plab_sz_) :
_word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
_end(NULL), _hard_end(NULL), _allocated(0), _wasted(0)
{
@@ -53,9 +53,9 @@ ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
-size_t ParGCAllocBuffer::AlignmentReserve;
+size_t PLAB::AlignmentReserve;
-void ParGCAllocBuffer::flush_and_retire_stats(PLABStats* stats) {
+void PLAB::flush_and_retire_stats(PLABStats* stats) {
// Retire the last allocation buffer.
size_t unused = retire_internal();
@@ -71,11 +71,11 @@ void ParGCAllocBuffer::flush_and_retire_stats(PLABStats* stats) {
_wasted = 0;
}
-void ParGCAllocBuffer::retire() {
+void PLAB::retire() {
_wasted += retire_internal();
}
-size_t ParGCAllocBuffer::retire_internal() {
+size_t PLAB::retire_internal() {
size_t result = 0;
if (_top < _hard_end) {
CollectedHeap::fill_with_object(_top, _hard_end);
@@ -126,8 +126,8 @@ void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
}
#ifndef PRODUCT
-void ParGCAllocBuffer::print() {
- gclog_or_tty->print_cr("parGCAllocBuffer: _bottom: " PTR_FORMAT " _top: " PTR_FORMAT
+void PLAB::print() {
+ gclog_or_tty->print_cr("PLAB: _bottom: " PTR_FORMAT " _top: " PTR_FORMAT
" _end: " PTR_FORMAT " _hard_end: " PTR_FORMAT ")",
p2i(_bottom), p2i(_top), p2i(_end), p2i(_hard_end));
}
diff --git a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp b/hotspot/src/share/vm/gc_implementation/shared/plab.hpp
similarity index 93%
rename from hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp
rename to hotspot/src/share/vm/gc_implementation/shared/plab.hpp
index a8991a0816b..324adfcd8a9 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/plab.hpp
@@ -22,8 +22,8 @@
*
*/
-#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_HPP
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/allocation.hpp"
@@ -34,7 +34,7 @@
class PLABStats;
// A per-thread allocation buffer used during GC.
-class ParGCAllocBuffer: public CHeapObj<mtGC> {
+class PLAB: public CHeapObj<mtGC> {
protected:
char head[32];
size_t _word_sz; // In HeapWord units
@@ -65,8 +65,8 @@ protected:
public:
// Initializes the buffer to be empty, but with the given "word_sz".
// Must get initialized with "set_buf" for an allocation to succeed.
- ParGCAllocBuffer(size_t word_sz);
- virtual ~ParGCAllocBuffer() {}
+ PLAB(size_t word_sz);
+ virtual ~PLAB() {}
// Minimum PLAB size.
static size_t min_size();
@@ -133,7 +133,7 @@ public:
// Flush allocation statistics into the given PLABStats supporting ergonomic
// sizing of PLAB's and retire the current buffer. To be called at the end of
// GC.
- void flush_and_retire_stats(PLABStats* stats);
+ virtual void flush_and_retire_stats(PLABStats* stats);
// Fills in the unallocated portion of the buffer with a garbage object and updates
// statistics. To be called during GC.
@@ -166,11 +166,11 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
{ }
static const size_t min_size() {
- return ParGCAllocBuffer::min_size();
+ return PLAB::min_size();
}
static const size_t max_size() {
- return ParGCAllocBuffer::max_size();
+ return PLAB::max_size();
}
size_t desired_plab_sz() {
@@ -194,4 +194,4 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
}
};
-#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_HPP
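
The rename above is mechanical (ParGCAllocBuffer becomes PLAB, matching the PLABStats name already in use). As a reminder of what the class does, here is a minimal model of its bump-pointer allocation; HeapWord and the retire path are simplified stand-ins:

#include <cstddef>
#include <cstdint>

typedef uintptr_t HeapWordModel;

class PLABModel {
  HeapWordModel* _top;
  HeapWordModel* _end;
 public:
  PLABModel(HeapWordModel* bottom, HeapWordModel* end) : _top(bottom), _end(end) {}
  HeapWordModel* allocate(std::size_t word_sz) {
    if (word_sz <= static_cast<std::size_t>(_end - _top)) {
      HeapWordModel* obj = _top;   // bump the pointer inside the buffer
      _top += word_sz;
      return obj;
    }
    return nullptr;                // caller retires this PLAB and refills
  }
};
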
diff --git a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.inline.hpp b/hotspot/src/share/vm/gc_implementation/shared/plab.inline.hpp
similarity index 74%
rename from hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.inline.hpp
rename to hotspot/src/share/vm/gc_implementation/shared/plab.inline.hpp
index 352ce05a3e5..5c9fe845b1c 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.inline.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/plab.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,13 +22,13 @@
*
*/
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_INLINE_HPP
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/plab.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
-HeapWord* ParGCAllocBuffer::allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes) {
+HeapWord* PLAB::allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes) {
HeapWord* res = CollectedHeap::align_allocation_or_fail(_top, _end, alignment_in_bytes);
if (res == NULL) {
@@ -41,4 +41,4 @@ HeapWord* ParGCAllocBuffer::allocate_aligned(size_t word_sz, unsigned short alig
return allocate(word_sz);
}
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PLAB_INLINE_HPP
diff --git a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
index dbfbb08cdfc..dcfcbfb82ed 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -116,8 +116,6 @@ bool VM_GC_Operation::doit_prologue() {
_prologue_succeeded = false;
} else {
_prologue_succeeded = true;
- SharedHeap* sh = SharedHeap::heap();
- if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
}
return _prologue_succeeded;
}
@@ -126,22 +124,11 @@ bool VM_GC_Operation::doit_prologue() {
void VM_GC_Operation::doit_epilogue() {
assert(Thread::current()->is_Java_thread(), "just checking");
// Release the Heap_lock first.
- SharedHeap* sh = SharedHeap::heap();
- if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
Heap_lock->unlock();
release_and_notify_pending_list_lock();
}
-bool VM_GC_HeapInspection::doit_prologue() {
- if (Universe::heap()->supports_heap_inspection()) {
- return VM_GC_Operation::doit_prologue();
- } else {
- return false;
- }
-}
-
bool VM_GC_HeapInspection::skip_operation() const {
- assert(Universe::heap()->supports_heap_inspection(), "huh?");
return false;
}
diff --git a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
index 5885638cd43..284d2d13822 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
@@ -150,7 +150,6 @@ class VM_GC_HeapInspection: public VM_GC_Operation {
~VM_GC_HeapInspection() {}
virtual VMOp_Type type() const { return VMOp_GC_HeapInspection; }
virtual bool skip_operation() const;
- virtual bool doit_prologue();
virtual void doit();
void set_csv_format(bool value) {_csv_format = value;}
void set_print_help(bool value) {_print_help = value;}
diff --git a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
index 649bce23c1e..2698adcd9f6 100644
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
@@ -220,6 +220,11 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
}
}
+void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
+ _barrier_set = barrier_set;
+ oopDesc::set_bs(_barrier_set);
+}
+
void CollectedHeap::pre_initialize() {
// Used for ReduceInitialCardMarks (when COMPILER2 is used);
// otherwise remains unused.
diff --git a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp
index 5dec0eca8ab..ce3d07f1460 100644
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp
class GCHeapLog : public EventLogBase<GCMessage> {
//
// CollectedHeap
-// SharedHeap
-// GenCollectedHeap
-// G1CollectedHeap
+// GenCollectedHeap
+// G1CollectedHeap
// ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
@@ -205,7 +204,7 @@ class CollectedHeap : public CHeapObj {
// In many heaps, there will be a need to perform some initialization activities
// after the Universe is fully formed, but before general heap allocation is allowed.
// This is the correct place to place such initialization methods.
- virtual void post_initialize() = 0;
+ virtual void post_initialize();
// Stop any ongoing concurrent work and prepare for exit.
virtual void stop() {}
@@ -240,22 +239,11 @@ class CollectedHeap : public CHeapObj {
}
// Returns "TRUE" iff "p" points into the committed areas of the heap.
- // Since this method can be expensive in general, we restrict its
- // use to assertion checking only.
+ // This method can be expensive so avoid using it in performance critical
+ // code.
virtual bool is_in(const void* p) const = 0;
- bool is_in_or_null(const void* p) const {
- return p == NULL || is_in(p);
- }
-
- bool is_in_place(Metadata** p) {
- return !Universe::heap()->is_in(p);
- }
- bool is_in_place(oop* p) { return Universe::heap()->is_in(p); }
- bool is_in_place(narrowOop* p) {
- oop o = oopDesc::load_decode_heap_oop_not_null(p);
- return Universe::heap()->is_in((const void*)o);
- }
+ DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })
// Let's define some terms: a "closed" subset of a heap is one that
//
@@ -451,9 +439,6 @@ class CollectedHeap : public CHeapObj {
// remembered set.
virtual void flush_deferred_store_barrier(JavaThread* thread);
- // Does this heap support heap inspection (+PrintClassHistogram?)
- virtual bool supports_heap_inspection() const = 0;
-
// Perform a collection of the heap; intended for use in implementing
// "System.gc". This probably implies as full a collection as the
// "CollectedHeap" supports.
@@ -470,6 +455,7 @@ class CollectedHeap : public CHeapObj {
// Returns the barrier set for this heap
BarrierSet* barrier_set() { return _barrier_set; }
+ void set_barrier_set(BarrierSet* barrier_set);
// Returns "true" iff there is a stop-world GC in progress. (I assume
// that it should answer "false" for the concurrent part of a concurrent
@@ -497,12 +483,6 @@ class CollectedHeap : public CHeapObj {
// Return the CollectorPolicy for the heap
virtual CollectorPolicy* collector_policy() const = 0;
- void oop_iterate_no_header(OopClosure* cl);
-
- // Iterate over all the ref-containing fields of all objects, calling
- // "cl.do_oop" on each.
- virtual void oop_iterate(ExtendedOopClosure* cl) = 0;
-
// Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl) = 0;
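
The new set_barrier_set declared above pairs with the definition added in collectedHeap.cpp: the heap's field and the oopDesc::set_bs shortcut are updated together so the two views cannot drift apart. A stand-in model of that single synchronization point:

class BarrierSetModel {};

struct OopDescModel {
  static BarrierSetModel* _bs;                      // fast path consulted by oops
  static void set_bs(BarrierSetModel* bs) { _bs = bs; }
};
BarrierSetModel* OopDescModel::_bs = nullptr;

class CollectedHeapModel {
  BarrierSetModel* _barrier_set;
 public:
  CollectedHeapModel() : _barrier_set(nullptr) {}
  void set_barrier_set(BarrierSetModel* barrier_set) {
    _barrier_set = barrier_set;
    OopDescModel::set_bs(_barrier_set);             // kept in sync in one place
  }
};
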
diff --git a/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp b/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp
index 302d0c7cb3a..2b938a1d194 100644
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp
@@ -236,12 +236,6 @@ oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
return (oop)obj;
}
-inline void CollectedHeap::oop_iterate_no_header(OopClosure* cl) {
- NoHeaderExtendedOopClosure no_header_cl(cl);
- oop_iterate(&no_header_cl);
-}
-
-
inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
HeapWord* end,
unsigned short alignment_in_bytes) {
diff --git a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp
index 991aca1a357..802879268ab 100644
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -576,10 +576,10 @@ BytecodeInterpreter::run(interpreterState istate) {
/* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
-/* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
-/* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
-/* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default,
-/* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
+/* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
+/* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
+/* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default,
+/* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
@@ -1942,7 +1942,7 @@ run:
cache = cp->entry_at(index);
if (!cache->is_resolved((Bytecodes::Code)opcode)) {
- CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
+ CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
handle_exception);
cache = cp->entry_at(index);
}
@@ -2040,7 +2040,7 @@ run:
u2 index = Bytes::get_native_u2(pc+1);
ConstantPoolCacheEntry* cache = cp->entry_at(index);
if (!cache->is_resolved((Bytecodes::Code)opcode)) {
- CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
+ CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
handle_exception);
cache = cp->entry_at(index);
}
@@ -2416,7 +2416,7 @@ run:
// This kind of CP cache entry does not need to match the flags byte, because
// there is a 1-1 relation between bytecode type and CP entry type.
if (! cache->is_resolved((Bytecodes::Code) opcode)) {
- CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
+ CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
handle_exception);
cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
}
@@ -2447,7 +2447,7 @@ run:
ConstantPoolCacheEntry* cache = cp->entry_at(index);
if (! cache->is_resolved((Bytecodes::Code) opcode)) {
- CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
+ CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
handle_exception);
cache = cp->entry_at(index);
}
@@ -2480,7 +2480,7 @@ run:
ConstantPoolCacheEntry* cache = cp->entry_at(index);
if (!cache->is_resolved((Bytecodes::Code)opcode)) {
- CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
+ CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
handle_exception);
cache = cp->entry_at(index);
}
@@ -2571,7 +2571,7 @@ run:
// out so c++ compiler has a chance for constant prop to fold everything possible away.
if (!cache->is_resolved((Bytecodes::Code)opcode)) {
- CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
+ CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
handle_exception);
cache = cp->entry_at(index);
}
diff --git a/hotspot/src/share/vm/interpreter/bytecodes.cpp b/hotspot/src/share/vm/interpreter/bytecodes.cpp
index 7fcd6543984..a5b53ad3529 100644
--- a/hotspot/src/share/vm/interpreter/bytecodes.cpp
+++ b/hotspot/src/share/vm/interpreter/bytecodes.cpp
@@ -525,6 +525,12 @@ void Bytecodes::initialize() {
def(_fast_aldc , "fast_aldc" , "bj" , NULL , T_OBJECT, 1, true, _ldc );
def(_fast_aldc_w , "fast_aldc_w" , "bJJ" , NULL , T_OBJECT, 1, true, _ldc_w );
+ def(_nofast_getfield , "nofast_getfield" , "bJJ" , NULL , T_ILLEGAL, 0, true, _getfield );
+ def(_nofast_putfield , "nofast_putfield" , "bJJ" , NULL , T_ILLEGAL, -2, true , _putfield );
+
+ def(_nofast_aload_0 , "nofast_aload_0" , "b" , NULL , T_ILLEGAL, 1, true , _aload_0 );
+ def(_nofast_iload , "nofast_iload" , "bi" , NULL , T_ILLEGAL, 1, false, _iload );
+
def(_shouldnotreachhere , "_shouldnotreachhere" , "b" , NULL , T_VOID , 0, false);
// compare can_trap information for each bytecode with the
diff --git a/hotspot/src/share/vm/interpreter/bytecodes.hpp b/hotspot/src/share/vm/interpreter/bytecodes.hpp
index d66ceb46033..78f6c48f595 100644
--- a/hotspot/src/share/vm/interpreter/bytecodes.hpp
+++ b/hotspot/src/share/vm/interpreter/bytecodes.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -285,7 +285,20 @@ class Bytecodes: AllStatic {
// special handling of signature-polymorphic methods:
_invokehandle ,
- _shouldnotreachhere, // For debugging
+ // These bytecodes are rewritten at CDS dump time, so that we can prevent them from being
+ // rewritten at run time. This way, the ConstMethods can be placed in the CDS ReadOnly
+  // section, and RewriteBytecodes/RewriteFrequentPairs can rewrite non-CDS bytecodes
+ // at run time.
+ //
+ // Rewritten at CDS dump time to | Original bytecode
+  // _invokevirtual is rewritten on sparc; this rewriting is disabled if UseSharedSpaces is turned on.
+ // ------------------------------+------------------
+ _nofast_getfield , // <- _getfield
+ _nofast_putfield , // <- _putfield
+ _nofast_aload_0 , // <- _aload_0
+ _nofast_iload , // <- _iload
+
+ _shouldnotreachhere , // For debugging
number_of_codes
@@ -401,6 +414,7 @@ class Bytecodes: AllStatic {
static bool is_astore (Code code) { return (code == _astore || code == _astore_0 || code == _astore_1
|| code == _astore_2 || code == _astore_3); }
+ static bool is_store_into_local(Code code){ return (_istore <= code && code <= _astore_3); }
static bool is_const (Code code) { return (_aconst_null <= code && code <= _ldc2_w); }
static bool is_zero_const (Code code) { return (code == _aconst_null || code == _iconst_0
|| code == _fconst_0 || code == _dconst_0); }
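
To make the comment above concrete: at CDS dump time the four rewritable bytecodes are replaced by _nofast_* twins, so archived ConstMethods stay byte-for-byte read-only while non-archived methods can still be rewritten at run time. A hedged sketch of the dump-time mapping (enum values are illustrative, not the real encodings):

enum BytecodeModel {
  bc_iload, bc_aload_0, bc_getfield, bc_putfield,
  bc_nofast_iload, bc_nofast_aload_0, bc_nofast_getfield, bc_nofast_putfield
};

static BytecodeModel rewrite_for_cds_dump(BytecodeModel c) {
  switch (c) {
    case bc_getfield: return bc_nofast_getfield;
    case bc_putfield: return bc_nofast_putfield;
    case bc_aload_0:  return bc_nofast_aload_0;
    case bc_iload:    return bc_nofast_iload;
    default:          return c;   // everything else is archived unchanged
  }
}
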
diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
index e59c49ba4e6..66f28341c23 100644
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
@@ -537,11 +537,13 @@ IRT_END
// Fields
//
-IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode))
+void InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode) {
+ Thread* THREAD = thread;
// resolve field
fieldDescriptor info;
constantPoolHandle pool(thread, method(thread)->constants());
- bool is_put = (bytecode == Bytecodes::_putfield || bytecode == Bytecodes::_putstatic);
+ bool is_put = (bytecode == Bytecodes::_putfield || bytecode == Bytecodes::_nofast_putfield ||
+ bytecode == Bytecodes::_putstatic);
bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
{
@@ -551,7 +553,8 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
} // end JvmtiHideSingleStepping
// check if link resolution caused cpCache to be updated
- if (already_resolved(thread)) return;
+ ConstantPoolCacheEntry* cp_cache_entry = cache_entry(thread);
+ if (cp_cache_entry->is_resolved(bytecode)) return;
// compute auxiliary field attributes
TosState state = as_TosState(info.field_type());
@@ -579,7 +582,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
}
}
- cache_entry(thread)->set_field(
+ cp_cache_entry->set_field(
get_code,
put_code,
info.field_holder(),
@@ -590,7 +593,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
info.access_flags().is_volatile(),
pool->pool_holder()
);
-IRT_END
+}
//------------------------------------------------------------------------------------------------------------------------
@@ -685,7 +688,8 @@ IRT_ENTRY(void, InterpreterRuntime::_breakpoint(JavaThread* thread, Method* meth
JvmtiExport::post_raw_breakpoint(thread, method, bcp);
IRT_END
-IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode)) {
+void InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode) {
+ Thread* THREAD = thread;
// extract receiver from the outgoing argument list if necessary
Handle receiver(thread, NULL);
if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) {
@@ -709,7 +713,8 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
{
JvmtiHideSingleStepping jhss(thread);
LinkResolver::resolve_invoke(info, receiver, pool,
- get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
+ get_index_u2_cpcache(thread, bytecode), bytecode,
+ CHECK);
if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
int retry_count = 0;
while (info.resolved_method()->is_old()) {
@@ -720,13 +725,15 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
"Could not resolve to latest version of redefined method");
// method is redefined in the middle of resolve so re-try.
LinkResolver::resolve_invoke(info, receiver, pool,
- get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
+ get_index_u2_cpcache(thread, bytecode), bytecode,
+ CHECK);
}
}
} // end JvmtiHideSingleStepping
// check if link resolution caused cpCache to be updated
- if (already_resolved(thread)) return;
+ ConstantPoolCacheEntry* cp_cache_entry = cache_entry(thread);
+ if (cp_cache_entry->is_resolved(bytecode)) return;
if (bytecode == Bytecodes::_invokeinterface) {
if (TraceItables && Verbose) {
@@ -761,18 +768,18 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
#endif
switch (info.call_kind()) {
case CallInfo::direct_call:
- cache_entry(thread)->set_direct_call(
+ cp_cache_entry->set_direct_call(
bytecode,
info.resolved_method());
break;
case CallInfo::vtable_call:
- cache_entry(thread)->set_vtable_call(
+ cp_cache_entry->set_vtable_call(
bytecode,
info.resolved_method(),
info.vtable_index());
break;
case CallInfo::itable_call:
- cache_entry(thread)->set_itable_call(
+ cp_cache_entry->set_itable_call(
bytecode,
info.resolved_method(),
info.itable_index());
@@ -780,30 +787,30 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
default: ShouldNotReachHere();
}
}
-IRT_END
// First time execution: Resolve symbols, create a permanent MethodType object.
-IRT_ENTRY(void, InterpreterRuntime::resolve_invokehandle(JavaThread* thread)) {
+void InterpreterRuntime::resolve_invokehandle(JavaThread* thread) {
+ Thread* THREAD = thread;
const Bytecodes::Code bytecode = Bytecodes::_invokehandle;
// resolve method
CallInfo info;
constantPoolHandle pool(thread, method(thread)->constants());
-
{
JvmtiHideSingleStepping jhss(thread);
LinkResolver::resolve_invoke(info, Handle(), pool,
- get_index_u2_cpcache(thread, bytecode), bytecode, CHECK);
+ get_index_u2_cpcache(thread, bytecode), bytecode,
+ CHECK);
} // end JvmtiHideSingleStepping
- cache_entry(thread)->set_method_handle(pool, info);
+ ConstantPoolCacheEntry* cp_cache_entry = cache_entry(thread);
+ cp_cache_entry->set_method_handle(pool, info);
}
-IRT_END
-
// First time execution: Resolve symbols, create a permanent CallSite object.
-IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
+void InterpreterRuntime::resolve_invokedynamic(JavaThread* thread) {
+ Thread* THREAD = thread;
const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;
//TO DO: consider passing BCI to Java.
@@ -822,8 +829,36 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
ConstantPoolCacheEntry* cp_cache_entry = pool->invokedynamic_cp_cache_entry_at(index);
cp_cache_entry->set_dynamic_call(pool, info);
}
-IRT_END
+// This function is the interface to the assembly code. It returns the resolved
+// cpCache entry. This doesn't safepoint, but the helper routines safepoint.
+// This function will check for redefinition!
+IRT_ENTRY(void, InterpreterRuntime::resolve_from_cache(JavaThread* thread, Bytecodes::Code bytecode)) {
+ switch (bytecode) {
+ case Bytecodes::_getstatic:
+ case Bytecodes::_putstatic:
+ case Bytecodes::_getfield:
+ case Bytecodes::_putfield:
+ resolve_get_put(thread, bytecode);
+ break;
+ case Bytecodes::_invokevirtual:
+ case Bytecodes::_invokespecial:
+ case Bytecodes::_invokestatic:
+ case Bytecodes::_invokeinterface:
+ resolve_invoke(thread, bytecode);
+ break;
+ case Bytecodes::_invokehandle:
+ resolve_invokehandle(thread);
+ break;
+ case Bytecodes::_invokedynamic:
+ resolve_invokedynamic(thread);
+ break;
+ default:
+ fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode)));
+ break;
+ }
+}
+IRT_END
//------------------------------------------------------------------------------------------------------------------------
// Miscellaneous
diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
index 54cb71d333a..bd604215f89 100644
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,6 @@ class InterpreterRuntime: AllStatic {
// pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
return Bytecodes::code_at(method(thread), bcp(thread));
}
- static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
static Bytecode bytecode(JavaThread *thread) { return Bytecode(method(thread), bcp(thread)); }
static int get_index_u1(JavaThread *thread, Bytecodes::Code bc)
{ return bytecode(thread).get_index_u1(bc); }
@@ -117,9 +116,17 @@ class InterpreterRuntime: AllStatic {
static void note_no_trap(JavaThread* thread, Method *method, int trap_bci) {}
#endif // CC_INTERP
+ static void resolve_from_cache(JavaThread* thread, Bytecodes::Code bytecode);
+ private:
// Statics & fields
- static void resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode);
+ static void resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode);
+ // Calls
+ static void resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode);
+ static void resolve_invokehandle (JavaThread* thread);
+ static void resolve_invokedynamic(JavaThread* thread);
+
+ public:
// Synchronization
static void monitorenter(JavaThread* thread, BasicObjectLock* elem);
static void monitorexit (JavaThread* thread, BasicObjectLock* elem);
@@ -127,11 +134,6 @@ class InterpreterRuntime: AllStatic {
static void throw_illegal_monitor_state_exception(JavaThread* thread);
static void new_illegal_monitor_state_exception(JavaThread* thread);
- // Calls
- static void resolve_invoke (JavaThread* thread, Bytecodes::Code bytecode);
- static void resolve_invokehandle (JavaThread* thread);
- static void resolve_invokedynamic(JavaThread* thread);
-
// Breakpoints
static void _breakpoint(JavaThread* thread, Method* method, address bcp);
static Bytecodes::Code get_original_bytecode_at(JavaThread* thread, Method* method, address bcp);
diff --git a/hotspot/src/share/vm/interpreter/linkResolver.cpp b/hotspot/src/share/vm/interpreter/linkResolver.cpp
index 9a62cd62807..49b5a51d026 100644
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp
@@ -777,11 +777,11 @@ void LinkResolver::resolve_field(fieldDescriptor& fd, KlassHandle resolved_klass
TRAPS) {
assert(byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic ||
byte == Bytecodes::_getfield || byte == Bytecodes::_putfield ||
+ byte == Bytecodes::_nofast_getfield || byte == Bytecodes::_nofast_putfield ||
(byte == Bytecodes::_nop && !check_access), "bad field access bytecode");
bool is_static = (byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic);
- bool is_put = (byte == Bytecodes::_putfield || byte == Bytecodes::_putstatic);
-
+ bool is_put = (byte == Bytecodes::_putfield || byte == Bytecodes::_putstatic || byte == Bytecodes::_nofast_putfield);
// Check if there's a resolved klass containing the field
if (resolved_klass.is_null()) {
ResourceMark rm(THREAD);
@@ -1406,10 +1406,11 @@ methodHandle LinkResolver::resolve_virtual_call_or_null(
KlassHandle resolved_klass,
Symbol* name,
Symbol* signature,
- KlassHandle current_klass) {
+ KlassHandle current_klass,
+ bool check_access) {
EXCEPTION_MARK;
CallInfo info;
- resolve_virtual_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, true, false, THREAD);
+ resolve_virtual_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, check_access, false, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return methodHandle();
@@ -1422,10 +1423,11 @@ methodHandle LinkResolver::resolve_interface_call_or_null(
KlassHandle resolved_klass,
Symbol* name,
Symbol* signature,
- KlassHandle current_klass) {
+ KlassHandle current_klass,
+ bool check_access) {
EXCEPTION_MARK;
CallInfo info;
- resolve_interface_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, true, false, THREAD);
+ resolve_interface_call(info, Handle(), receiver_klass, resolved_klass, name, signature, current_klass, check_access, false, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return methodHandle();
@@ -1453,10 +1455,11 @@ methodHandle LinkResolver::resolve_static_call_or_null(
KlassHandle resolved_klass,
Symbol* name,
Symbol* signature,
- KlassHandle current_klass) {
+ KlassHandle current_klass,
+ bool check_access) {
EXCEPTION_MARK;
CallInfo info;
- resolve_static_call(info, resolved_klass, name, signature, current_klass, true, false, THREAD);
+ resolve_static_call(info, resolved_klass, name, signature, current_klass, check_access, false, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return methodHandle();
@@ -1464,11 +1467,15 @@ methodHandle LinkResolver::resolve_static_call_or_null(
return info.selected_method();
}
-methodHandle LinkResolver::resolve_special_call_or_null(KlassHandle resolved_klass, Symbol* name, Symbol* signature,
- KlassHandle current_klass) {
+methodHandle LinkResolver::resolve_special_call_or_null(
+ KlassHandle resolved_klass,
+ Symbol* name,
+ Symbol* signature,
+ KlassHandle current_klass,
+ bool check_access) {
EXCEPTION_MARK;
CallInfo info;
- resolve_special_call(info, resolved_klass, name, signature, current_klass, true, THREAD);
+ resolve_special_call(info, resolved_klass, name, signature, current_klass, check_access, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return methodHandle();
diff --git a/hotspot/src/share/vm/interpreter/linkResolver.hpp b/hotspot/src/share/vm/interpreter/linkResolver.hpp
index f598380591b..5fa536491c9 100644
--- a/hotspot/src/share/vm/interpreter/linkResolver.hpp
+++ b/hotspot/src/share/vm/interpreter/linkResolver.hpp
@@ -181,10 +181,10 @@ class LinkResolver: AllStatic {
// same as above for compile-time resolution; but returns null handle instead of throwing an exception on error
// also, does not initialize klass (i.e., no side effects)
- static methodHandle resolve_virtual_call_or_null (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
- static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
- static methodHandle resolve_static_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
- static methodHandle resolve_special_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
+ static methodHandle resolve_virtual_call_or_null (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
+ static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
+ static methodHandle resolve_static_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
+ static methodHandle resolve_special_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access = true);
static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method);
// same as above for compile-time resolution; returns vtable_index if current_klass is linked
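
Because check_access defaults to true, every existing caller of these *_or_null resolvers compiles unchanged with access checking still on; only a caller that explicitly passes false opts out. A stand-in sketch of the call-site effect:

struct MethodHandleModel { bool valid; };

static MethodHandleModel resolve_static_call_or_null_model(
    const char* method_name,
    bool check_access = true) {      // same default the patch introduces
  MethodHandleModel m;
  if (check_access) {
    // the real resolver performs accessibility checks here and
    // returns a null handle on failure
  }
  m.valid = (method_name != nullptr);
  return m;
}

// Existing call sites stay as-is:
//   resolve_static_call_or_null_model("foo");
// New privileged call sites opt out explicitly:
//   resolve_static_call_or_null_model("foo", /*check_access=*/false);
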
diff --git a/hotspot/src/share/vm/interpreter/rewriter.cpp b/hotspot/src/share/vm/interpreter/rewriter.cpp
index 2474ae758ba..2dc51f345d6 100644
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp
@@ -26,6 +26,7 @@
#include "interpreter/bytecodes.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
+#include "memory/metaspaceShared.hpp"
#include "memory/gcLocker.hpp"
#include "memory/resourceArea.hpp"
#include "oops/generateOopMap.hpp"
@@ -167,12 +168,12 @@ void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, bool
if (!reverse) {
int cp_index = Bytes::get_Java_u2(p);
if (_pool->tag_at(cp_index).is_interface_method()) {
- int cache_index = add_invokespecial_cp_cache_entry(cp_index);
- if (cache_index != (int)(jushort) cache_index) {
- *invokespecial_error = true;
- }
- Bytes::put_native_u2(p, cache_index);
- } else {
+ int cache_index = add_invokespecial_cp_cache_entry(cp_index);
+ if (cache_index != (int)(jushort) cache_index) {
+ *invokespecial_error = true;
+ }
+ Bytes::put_native_u2(p, cache_index);
+ } else {
rewrite_member_reference(bcp, offset, reverse);
}
} else {
@@ -500,12 +501,14 @@ void Rewriter::rewrite_bytecodes(TRAPS) {
}
void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
+ if (!DumpSharedSpaces) {
+ assert(!MetaspaceShared::is_in_shared_space(klass()), "archive methods must not be rewritten at run time");
+ }
ResourceMark rm(THREAD);
Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
// (That's all, folks.)
}
-
Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
: _klass(klass),
_pool(cpool),
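The truncation test in the hunk above guards invokespecial's two-byte operand: a constant-pool-cache index is only usable if it survives a round trip through jushort. A worked example, assuming a 16-bit jushort:

    int cache_index = 70000;                                   // hypothetical index > 0xFFFF
    bool overflow = cache_index != (int)(jushort)cache_index;  // (jushort)70000 == 4464, so true
    // on overflow, *invokespecial_error is set and the rewrite is abandoned

The new assert in Rewriter::rewrite complements this: methods living in the CDS shared archive were rewritten at dump time and must never be rewritten again at run time.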
diff --git a/hotspot/src/share/vm/interpreter/templateTable.cpp b/hotspot/src/share/vm/interpreter/templateTable.cpp
index 8449321d9f3..3450dbd825f 100644
--- a/hotspot/src/share/vm/interpreter/templateTable.cpp
+++ b/hotspot/src/share/vm/interpreter/templateTable.cpp
@@ -517,6 +517,12 @@ void TemplateTable::initialize() {
def(Bytecodes::_invokehandle , ubcp|disp|clvm|____, vtos, vtos, invokehandle , f1_byte );
+ def(Bytecodes::_nofast_getfield , ubcp|____|clvm|____, vtos, vtos, nofast_getfield , f1_byte );
+ def(Bytecodes::_nofast_putfield , ubcp|____|clvm|____, vtos, vtos, nofast_putfield , f2_byte );
+
+ def(Bytecodes::_nofast_aload_0 , ____|____|clvm|____, vtos, atos, nofast_aload_0 , _ );
+ def(Bytecodes::_nofast_iload , ubcp|____|clvm|____, vtos, itos, nofast_iload , _ );
+
def(Bytecodes::_shouldnotreachhere , ____|____|____|____, vtos, vtos, shouldnotreachhere , _ );
// platform specific bytecodes
pd_initialize();
diff --git a/hotspot/src/share/vm/interpreter/templateTable.hpp b/hotspot/src/share/vm/interpreter/templateTable.hpp
index 6ead5074082..bd4a76493d9 100644
--- a/hotspot/src/share/vm/interpreter/templateTable.hpp
+++ b/hotspot/src/share/vm/interpreter/templateTable.hpp
@@ -82,6 +82,7 @@ class TemplateTable: AllStatic {
enum Operation { add, sub, mul, div, rem, _and, _or, _xor, shl, shr, ushr };
enum Condition { equal, not_equal, less, less_equal, greater, greater_equal };
enum CacheByte { f1_byte = 1, f2_byte = 2 }; // byte_no codes
+ enum RewriteControl { may_rewrite, may_not_rewrite }; // control for fast code under CDS
private:
static bool _is_initialized; // true if TemplateTable has been initialized
@@ -165,6 +166,10 @@ class TemplateTable: AllStatic {
static void dload(int n);
static void aload(int n);
static void aload_0();
+ static void nofast_aload_0();
+ static void nofast_iload();
+ static void iload_internal(RewriteControl rc = may_rewrite);
+ static void aload_0_internal(RewriteControl rc = may_rewrite);
static void istore();
static void lstore();
@@ -279,10 +284,13 @@ class TemplateTable: AllStatic {
static void invokehandle(int byte_no);
static void fast_invokevfinal(int byte_no);
- static void getfield_or_static(int byte_no, bool is_static);
- static void putfield_or_static(int byte_no, bool is_static);
+ static void getfield_or_static(int byte_no, bool is_static, RewriteControl rc = may_rewrite);
+ static void putfield_or_static(int byte_no, bool is_static, RewriteControl rc = may_rewrite);
+
static void getfield(int byte_no);
static void putfield(int byte_no);
+ static void nofast_getfield(int byte_no);
+ static void nofast_putfield(int byte_no);
static void getstatic(int byte_no);
static void putstatic(int byte_no);
static void pop_and_check_object(Register obj);
@@ -343,10 +351,8 @@ class TemplateTable: AllStatic {
// Platform specifics
#if defined TEMPLATETABLE_MD_HPP
# include TEMPLATETABLE_MD_HPP
-#elif defined TARGET_ARCH_MODEL_x86_32
-# include "templateTable_x86_32.hpp"
-#elif defined TARGET_ARCH_MODEL_x86_64
-# include "templateTable_x86_64.hpp"
+#elif defined (TARGET_ARCH_MODEL_x86_32) || defined (TARGET_ARCH_MODEL_x86_64)
+# include "templateTable_x86.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "templateTable_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
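The RewriteControl argument added above lets the new _nofast templates reuse the ordinary field-access generators with bytecode rewriting suppressed. One plausible wrapper shape implied by these declarations (the actual bodies live in the platform templateTable_* files):

    void TemplateTable::nofast_getfield(int byte_no) {
      getfield_or_static(byte_no, false /* is_static */, may_not_rewrite);
    }
    void TemplateTable::nofast_putfield(int byte_no) {
      putfield_or_static(byte_no, false /* is_static */, may_not_rewrite);
    }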
diff --git a/hotspot/src/share/vm/libadt/set.cpp b/hotspot/src/share/vm/libadt/set.cpp
index d2e1a68a03b..caeed325151 100644
--- a/hotspot/src/share/vm/libadt/set.cpp
+++ b/hotspot/src/share/vm/libadt/set.cpp
@@ -57,7 +57,7 @@ const CoSet *Set::asCoSet() const { assert(0); return NULL; }
// The caller must deallocate the string.
char *Set::setstr() const
{
- if( !this ) return os::strdup("{no set}");
+ if( this == NULL ) return os::strdup("{no set}");
Set &set = clone(); // Virtually copy the basic set.
set.Sort(); // Sort elements for in-order retrieval
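Both spellings test for a null receiver, but "!this" triggers warnings in newer compilers, and because calling a member function through a null pointer is already undefined behavior, an optimizer is entitled to fold the check away entirely. The explicit comparison at least keeps current toolchains honest. Illustrative only, not a supported pattern:

    Set* s = NULL;
    char* str = s->setstr();  // formally undefined; works only while the
                              // compiler does not exploit the null-this UB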
diff --git a/hotspot/src/share/vm/memory/allocation.inline.hpp b/hotspot/src/share/vm/memory/allocation.inline.hpp
index 7d012f328bb..961f510b14e 100644
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp
@@ -62,11 +62,18 @@ inline char* AllocateHeap(size_t size, MEMFLAGS flags,
}
return p;
}
+
+#ifdef __GNUC__
+__attribute__((always_inline))
+#endif
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
}
+#ifdef __GNUC__
+__attribute__((always_inline))
+#endif
inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
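The GCC always_inline attribute forces these thin wrappers into their callers even when the optimizer would not inline them, presumably so the CURRENT_PC call-site capture used by native memory tracking records the real allocation site instead of the wrapper frame. The same idiom outside HotSpot, as a hedged sketch:

    #include <cstdlib>

    #ifdef __GNUC__
    __attribute__((always_inline))
    #endif
    inline void* traced_alloc(size_t size) {  // hypothetical helper
      // because this wrapper is always inlined, a return-address capture here
      // would identify the caller's allocation site, not traced_alloc itself
      return std::malloc(size);
    }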
diff --git a/hotspot/src/share/vm/memory/blockOffsetTable.hpp b/hotspot/src/share/vm/memory/blockOffsetTable.hpp
index 53343e4ee09..8f0372941d9 100644
--- a/hotspot/src/share/vm/memory/blockOffsetTable.hpp
+++ b/hotspot/src/share/vm/memory/blockOffsetTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#define SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP
#include "memory/memRegion.hpp"
-#include "runtime/virtualspace.hpp"
+#include "memory/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"
// The CollectedHeap type requires subtypes to implement a method
diff --git a/hotspot/src/share/vm/memory/cardGeneration.cpp b/hotspot/src/share/vm/memory/cardGeneration.cpp
index 21e8d9e51a7..25f317c49bc 100644
--- a/hotspot/src/share/vm/memory/cardGeneration.cpp
+++ b/hotspot/src/share/vm/memory/cardGeneration.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@ CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
// which would cause problems when we commit/uncommit memory, and when we
// clear and dirty cards.
guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
- if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
+ if (reserved_mr.end() != GenCollectedHeap::heap()->reserved_region().end()) {
// Don't check at the very end of the heap as we'll assert that we're probing off
// the end if we try.
guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
@@ -78,7 +78,7 @@ bool CardGeneration::grow_by(size_t bytes) {
heap_word_size(_virtual_space.committed_size());
MemRegion mr(space()->bottom(), new_word_size);
// Expand card table
- Universe::heap()->barrier_set()->resize_covered_region(mr);
+ GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
// Expand shared block offset array
_bts->resize(new_word_size);
@@ -170,7 +170,7 @@ void CardGeneration::shrink(size_t bytes) {
_bts->resize(new_word_size);
MemRegion mr(space()->bottom(), new_word_size);
// Shrink the card table
- Universe::heap()->barrier_set()->resize_covered_region(mr);
+ GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
if (Verbose && PrintGC) {
size_t new_mem_size = _virtual_space.committed_size();
diff --git a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp
index ac8c7484f98..903b0a0eee3 100644
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp
@@ -23,16 +23,17 @@
*/
#include "precompiled.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.inline.hpp"
#include "memory/cardTableRS.hpp"
-#include "memory/sharedHeap.hpp"
+#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
+#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
@@ -450,21 +451,20 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
// This is an example of where n_par_threads() is used instead
// of workers()->active_workers(). n_par_threads can be set to 0 to
// turn off parallelism. For example when this code is called as
- // part of verification and SharedHeap::process_roots() is being
- // used, then n_par_threads() may have been set to 0. active_workers
- // is not overloaded with the meaning that it is a switch to disable
- // parallelism and so keeps the meaning of the number of
- // active gc workers. If parallelism has not been shut off by
- // setting n_par_threads to 0, then n_par_threads should be
- // equal to active_workers. When a different mechanism for shutting
- // off parallelism is used, then active_workers can be used in
+ // part of verification during root processing then n_par_threads()
+ // may have been set to 0. active_workers is not overloaded with
+ // the meaning that it is a switch to disable parallelism and so keeps
+ // the meaning of the number of active gc workers. If parallelism has
+ // not been shut off by setting n_par_threads to 0, then n_par_threads
+ // should be equal to active_workers. When a different mechanism for
+ // shutting off parallelism is used, then active_workers can be used in
// place of n_par_threads.
- int n_threads = SharedHeap::heap()->n_par_threads();
+ int n_threads = GenCollectedHeap::heap()->n_par_threads();
bool is_par = n_threads > 0;
if (is_par) {
#if INCLUDE_ALL_GCS
- assert(SharedHeap::heap()->n_par_threads() ==
- SharedHeap::heap()->workers()->active_workers(), "Mismatch");
+ assert(GenCollectedHeap::heap()->n_par_threads() ==
+ GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else // INCLUDE_ALL_GCS
fatal("Parallel gc not supported here.");
diff --git a/hotspot/src/share/vm/memory/cardTableRS.cpp b/hotspot/src/share/vm/memory/cardTableRS.cpp
index be98f7a70a7..bd3604916d8 100644
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,16 +38,18 @@ CardTableRS::CardTableRS(MemRegion whole_heap) :
GenRemSet(),
_cur_youngergen_card_val(youngergenP1_card)
{
- guarantee(Universe::heap()->kind() == CollectedHeap::GenCollectedHeap, "sanity");
_ct_bs = new CardTableModRefBSForCTRS(whole_heap);
_ct_bs->initialize();
set_bs(_ct_bs);
- _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
+ // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
+ // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
+ uint max_gens = 2;
+ _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_last_cur_val_in_gen == NULL) {
vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
}
- for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) {
+ for (uint i = 0; i < max_gens + 1; i++) {
_last_cur_val_in_gen[i] = clean_card_val();
}
_ct_bs->set_CTRS(this);
@@ -167,16 +169,20 @@ ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
// Cannot yet substitute active_workers for n_par_threads
// in the case where parallelism is being turned off by
// setting n_par_threads to 0.
- _is_par = (SharedHeap::heap()->n_par_threads() > 0);
+ _is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
assert(!_is_par ||
- (SharedHeap::heap()->n_par_threads() ==
- SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+ (GenCollectedHeap::heap()->n_par_threads() ==
+ GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
}
bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
}
+// The regions are visited in *decreasing* address order.
+// This order aids with imprecise card marking, where a dirty
+// card may cause scanning, and summarization marking, of objects
+// that extend onto subsequent cards.
void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
assert(mr.word_size() > 0, "Error");
assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
@@ -591,10 +597,6 @@ void CardTableRS::verify() {
// At present, we only know how to verify the card table RS for
// generational heaps.
VerifyCTGenClosure blk(this);
- CollectedHeap* ch = Universe::heap();
-
- if (ch->kind() == CollectedHeap::GenCollectedHeap) {
- GenCollectedHeap::heap()->generation_iterate(&blk, false);
- _ct_bs->verify();
- }
- }
+ GenCollectedHeap::heap()->generation_iterate(&blk, false);
+ _ct_bs->verify();
+}
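A worked example of why the decreasing address order matters under imprecise card marking (hypothetical 512-byte cards): let object O start near the end of card C1 and extend onto card C2. A store into the tail of O may dirty only C1, the card of O's header, yet scanning dirty C1 walks memory that lies on C2. Visiting regions from high to low addresses guarantees C2 has already been processed before C1's scan extends into it.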
diff --git a/hotspot/src/share/vm/memory/collectorPolicy.cpp b/hotspot/src/share/vm/memory/collectorPolicy.cpp
index 094e3c3e28e..08464c513eb 100644
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp
@@ -669,7 +669,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
}
// Read the gc count while the heap lock is held.
- gc_count_before = Universe::heap()->total_collections();
+ gc_count_before = gch->total_collections();
}
VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
diff --git a/hotspot/src/share/vm/memory/defNewGeneration.cpp b/hotspot/src/share/vm/memory/defNewGeneration.cpp
index e306632374c..b414621db29 100644
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp
@@ -48,6 +48,9 @@
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/parNew/parOopClosures.hpp"
+#endif
//
// DefNewGeneration functions.
@@ -190,7 +193,9 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
{
MemRegion cmr((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
- Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+ gch->barrier_set()->resize_covered_region(cmr);
_eden_space = new ContiguousSpace();
_from_space = new ContiguousSpace();
@@ -202,13 +207,13 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
// Compute the maximum eden and survivor space sizes. These sizes
// are computed assuming the entire reserved space is committed.
// These values are exported as performance counters.
- uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
+ uintx alignment = gch->collector_policy()->space_alignment();
uintx size = _virtual_space.reserved_size();
_max_survivor_size = compute_survivor_size(size, alignment);
_max_eden_size = size - (2*_max_survivor_size);
// allocate the performance counters
- GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
+ GenCollectorPolicy* gcp = (GenCollectorPolicy*)gch->collector_policy();
// Generation counters -- generation 0, 3 subspaces
_gen_counters = new GenerationCounters("new", 0, 3,
@@ -378,8 +383,7 @@ void DefNewGeneration::compute_new_size() {
int next_level = level() + 1;
GenCollectedHeap* gch = GenCollectedHeap::heap();
- assert(next_level < gch->n_gens(),
- "DefNewGeneration cannot be an oldest gen");
+ assert(next_level == 1, "DefNewGeneration must be a young gen");
Generation* old_gen = gch->old_gen();
size_t old_size = old_gen->capacity();
@@ -431,7 +435,7 @@ void DefNewGeneration::compute_new_size() {
SpaceDecorator::DontMangle);
MemRegion cmr((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
- Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ gch->barrier_set()->resize_covered_region(cmr);
if (Verbose && PrintGC) {
size_t new_size_after = _virtual_space.committed_size();
size_t eden_size_after = eden()->capacity();
@@ -550,8 +554,9 @@ HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
void DefNewGeneration::adjust_desired_tenuring_threshold() {
// Set the desired survivor size to half the real survivor space
+ GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->collector_policy()->counters();
_tenuring_threshold =
- age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
+ age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
}
void DefNewGeneration::collect(bool full,
@@ -688,7 +693,7 @@ void DefNewGeneration::collect(bool full,
gc_tracer.report_promotion_failed(_promotion_failed_info);
// Reset the PromotionFailureALot counters.
- NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+ NOT_PRODUCT(gch->reset_promotion_should_fail();)
}
if (PrintGC && !PrintGCDetails) {
gch->print_heap_change(gch_prev_used);
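A worked sizing example for the survivor/eden split computed above, assuming the usual compute_survivor_size of roughly gen_size / (SurvivorRatio + 2), aligned down:

    reserved young gen size = 64 MB, SurvivorRatio = 8
    _max_survivor_size ~ 64 MB / 10         = 6.4 MB per survivor space
    _max_eden_size     = 64 MB - 2 * 6.4 MB = 51.2 MB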
diff --git a/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp b/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp
index 111db332bab..b10d6d9e783 100644
--- a/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp
+++ b/hotspot/src/share/vm/memory/defNewGeneration.inline.hpp
@@ -25,9 +25,9 @@
#ifndef SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_HPP
#define SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_HPP
-#include "gc_interface/collectedHeap.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/defNewGeneration.hpp"
+#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/space.hpp"
@@ -60,7 +60,7 @@ inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
// We could check that p is also in an older generation, but
// dirty cards in the youngest gen are never scanned, so the
// extra check probably isn't worthwhile.
- if (Universe::heap()->is_in_reserved(p)) {
+ if (GenCollectedHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
_rs->inline_write_ref_field_gc(p, obj);
}
@@ -84,7 +84,7 @@ inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) {
// we set a younger_gen card if we have an older->youngest
// generation pointer.
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
- if (((HeapWord*)obj < _boundary) && Universe::heap()->is_in_reserved(p)) {
+ if (((HeapWord*)obj < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
_rs->inline_write_ref_field_gc(p, obj);
}
}
diff --git a/hotspot/src/share/vm/memory/freeList.cpp b/hotspot/src/share/vm/memory/freeList.cpp
index 2eb74992b89..6cddc6aed41 100644
--- a/hotspot/src/share/vm/memory/freeList.cpp
+++ b/hotspot/src/share/vm/memory/freeList.cpp
@@ -23,10 +23,10 @@
*/
#include "precompiled.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
-#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/vmThread.hpp"
diff --git a/hotspot/src/share/vm/memory/gcLocker.cpp b/hotspot/src/share/vm/memory/gcLocker.cpp
index fae40920a87..6c82777628c 100644
--- a/hotspot/src/share/vm/memory/gcLocker.cpp
+++ b/hotspot/src/share/vm/memory/gcLocker.cpp
@@ -23,9 +23,9 @@
*/
#include "precompiled.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/sharedHeap.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/thread.inline.hpp"
diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.cpp b/hotspot/src/share/vm/memory/genCollectedHeap.cpp
index 8b8114cb498..85dacb7cd81 100644
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp
@@ -39,7 +39,7 @@
#include "memory/genOopClosures.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/sharedHeap.hpp"
+#include "memory/strongRootsScope.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
@@ -50,15 +50,15 @@
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
-#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS
-GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
// The set of potentially parallel tasks in root scanning.
@@ -78,21 +78,27 @@ enum GCH_strong_roots_tasks {
};
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
- SharedHeap(),
+ CollectedHeap(),
_rem_set(NULL),
_gen_policy(policy),
_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
_full_collections_completed(0)
{
assert(policy != NULL, "Sanity check");
+ if (UseConcMarkSweepGC) {
+ _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
+ /* are_GC_task_threads */true,
+ /* are_ConcurrentGC_threads */false);
+ _workers->initialize_workers();
+ } else {
+ // Serial GC does not use workers.
+ _workers = NULL;
+ }
}
jint GenCollectedHeap::initialize() {
CollectedHeap::pre_initialize();
- _n_gens = gen_policy()->number_of_generations();
- assert(_n_gens == 2, "There is no support for more than two generations");
-
// While there are no constraints in the GC code that HeapWordSize
// be any particular value, there are multiple other areas in the
// system which believe this to be true (e.g. oop->object_size in some
@@ -120,8 +126,6 @@ jint GenCollectedHeap::initialize() {
_rem_set = collector_policy()->create_rem_set(reserved_region());
set_barrier_set(rem_set()->bs());
- _gch = this;
-
ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
_young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
@@ -166,7 +170,8 @@ char* GenCollectedHeap::allocate(size_t alignment,
}
void GenCollectedHeap::post_initialize() {
- SharedHeap::post_initialize();
+ CollectedHeap::post_initialize();
+ ref_processing_init();
GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
guarantee(policy->is_generation_policy(), "Illegal policy type");
assert((_young_gen->kind() == Generation::DefNew) ||
@@ -185,7 +190,6 @@ void GenCollectedHeap::post_initialize() {
}
void GenCollectedHeap::ref_processing_init() {
- SharedHeap::ref_processing_init();
_young_gen->ref_processor_init();
_old_gen->ref_processor_init();
}
@@ -200,8 +204,7 @@ size_t GenCollectedHeap::used() const {
// Save the "used_region" for generations level and lower.
void GenCollectedHeap::save_used_regions(int level) {
- assert(level >= 0, "Illegal level parameter");
- assert(level < _n_gens, "Illegal level parameter");
+ assert(level == 0 || level == 1, "Illegal level parameter");
if (level == 1) {
_old_gen->save_used_region();
}
@@ -417,7 +420,6 @@ void GenCollectedHeap::do_collection(bool full,
assert(Heap_lock->is_locked(),
"the requesting thread should have the Heap_lock");
guarantee(!is_gc_active(), "collection is not reentrant");
- assert(max_level < n_gens(), "sanity check");
if (GC_locker::check_active_before_gc()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
@@ -435,7 +437,7 @@ void GenCollectedHeap::do_collection(bool full,
{
FlagSetting fl(_is_gc_active, true);
- bool complete = full && (max_level == (n_gens()-1));
+ bool complete = full && (max_level == 1 /* old */);
const char* gc_cause_prefix = complete ? "Full GC" : "GC";
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
@@ -507,7 +509,7 @@ void GenCollectedHeap::do_collection(bool full,
// Update "complete" boolean wrt what actually transpired --
// for instance, a promotion failure could have led to
// a whole heap collection.
- complete = complete || (max_level_collected == n_gens() - 1);
+ complete = complete || (max_level_collected == 1 /* old */);
if (complete) { // We did a "major" collection
// FIXME: See comment at pre_full_gc_dump call
@@ -524,7 +526,7 @@ void GenCollectedHeap::do_collection(bool full,
}
// Adjust generation sizes.
- if (max_level_collected == 1) {
+ if (max_level_collected == 1 /* old */) {
_old_gen->compute_new_size();
}
_young_gen->compute_new_size();
@@ -560,7 +562,8 @@ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab)
}
void GenCollectedHeap::set_par_threads(uint t) {
- SharedHeap::set_par_threads(t);
+ assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
+ CollectedHeap::set_par_threads(t);
set_n_termination(t);
}
@@ -586,7 +589,7 @@ void GenCollectedHeap::process_roots(bool activate_scope,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobClosure* code_roots) {
- StrongRootsScope srs(this, activate_scope);
+ StrongRootsScope srs(activate_scope);
// General roots.
assert(Threads::thread_claim_parity() != 0, "must have called prologue code");
@@ -606,7 +609,8 @@ void GenCollectedHeap::process_roots(bool activate_scope,
// Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
- Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
+ bool is_par = n_par_threads() > 0;
+ Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p);
if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
Universe::oops_do(strong_roots);
@@ -771,19 +775,19 @@ void GenCollectedHeap::collect(GCCause::Cause cause) {
#endif // INCLUDE_ALL_GCS
} else if (cause == GCCause::_wb_young_gc) {
// minor collection for WhiteBox API
- collect(cause, 0);
+ collect(cause, 0 /* young */);
} else {
#ifdef ASSERT
if (cause == GCCause::_scavenge_alot) {
// minor collection only
- collect(cause, 0);
+ collect(cause, 0 /* young */);
} else {
// Stop-the-world full collection
- collect(cause, n_gens() - 1);
+ collect(cause, 1 /* old */);
}
#else
// Stop-the-world full collection
- collect(cause, n_gens() - 1);
+ collect(cause, 1 /* old */);
#endif
}
}
@@ -798,7 +802,7 @@ void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
// The caller has the Heap_lock
assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
- collect_locked(cause, n_gens() - 1);
+ collect_locked(cause, 1 /* old */);
}
// this is the private collection interface
@@ -854,7 +858,7 @@ void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
#endif // INCLUDE_ALL_GCS
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
- do_full_collection(clear_all_soft_refs, _n_gens - 1);
+ do_full_collection(clear_all_soft_refs, 1 /* old */);
}
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
@@ -886,7 +890,7 @@ void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
clear_all_soft_refs /* clear_all_soft_refs */,
0 /* size */,
false /* is_tlab */,
- n_gens() - 1 /* max_level */);
+ 1 /* old */ /* max_level */);
}
}
@@ -899,17 +903,6 @@ bool GenCollectedHeap::is_in_young(oop p) {
// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
- #ifndef ASSERT
- guarantee(VerifyBeforeGC ||
- VerifyDuringGC ||
- VerifyBeforeExit ||
- VerifyDuringStartup ||
- PrintAssembly ||
- tty->count() != 0 || // already printing
- VerifyAfterGC ||
- VMError::fatal_error_in_progress(), "too expensive");
-
- #endif
return _young_gen->is_in(p) || _old_gen->is_in(p);
}
@@ -923,6 +916,11 @@ bool GenCollectedHeap::is_in_partial_collection(const void* p) {
}
#endif
+void GenCollectedHeap::oop_iterate_no_header(OopClosure* cl) {
+ NoHeaderExtendedOopClosure no_header_cl(cl);
+ oop_iterate(&no_header_cl);
+}
+
void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
_young_gen->oop_iterate(cl);
_old_gen->oop_iterate(cl);
@@ -1092,11 +1090,6 @@ void GenCollectedHeap::generation_iterate(GenClosure* cl,
}
}
-void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
- _young_gen->space_iterate(cl, true);
- _old_gen->space_iterate(cl, true);
-}
-
bool GenCollectedHeap::is_maximal_no_gc() const {
return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}
@@ -1107,14 +1100,13 @@ void GenCollectedHeap::save_marks() {
}
GenCollectedHeap* GenCollectedHeap::heap() {
- assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
- assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
- return _gch;
+ CollectedHeap* heap = Universe::heap();
+ assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
+ assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
+ return (GenCollectedHeap*)heap;
}
-
void GenCollectedHeap::prepare_for_compaction() {
- guarantee(_n_gens = 2, "Wrong number of generations");
// Start by compacting into same gen.
CompactPoint cp(_old_gen);
_old_gen->prepare_for_compaction(&cp);
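With the static _gch field removed, heap() becomes a checked downcast of the Universe singleton. The same shape generalizes to other heap kinds; a hypothetical helper, not part of the patch:

    template <class HeapType, CollectedHeap::Name Kind>
    HeapType* checked_heap_cast() {
      CollectedHeap* heap = Universe::heap();
      assert(heap != NULL, "heap not yet created");
      assert(heap->kind() == Kind, "wrong heap kind");
      return (HeapType*)heap;
    }
    // e.g. GenCollectedHeap* gch =
    //        checked_heap_cast<GenCollectedHeap, CollectedHeap::GenCollectedHeap>();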
diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.hpp b/hotspot/src/share/vm/memory/genCollectedHeap.hpp
index a91432748a0..80ba8c18827 100644
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp
@@ -26,15 +26,16 @@
#define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
+#include "gc_interface/collectedHeap.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/generation.hpp"
-#include "memory/sharedHeap.hpp"
class SubTasksDone;
+class FlexibleWorkGang;
-// A "GenCollectedHeap" is a SharedHeap that uses generational
+// A "GenCollectedHeap" is a CollectedHeap that uses generational
// collection. It has two generations, young and old.
-class GenCollectedHeap : public SharedHeap {
+class GenCollectedHeap : public CollectedHeap {
friend class GenCollectorPolicy;
friend class Generation;
friend class DefNewGeneration;
@@ -51,19 +52,9 @@ class GenCollectedHeap : public SharedHeap {
friend class GCCauseSetter;
friend class VMStructs;
public:
- enum SomeConstants {
- max_gens = 10
- };
-
friend class VM_PopulateDumpSharedSpace;
- protected:
- // Fields:
- static GenCollectedHeap* _gch;
-
- private:
- int _n_gens;
-
+private:
Generation* _young_gen;
Generation* _old_gen;
@@ -93,6 +84,8 @@ public:
// In block contents verification, the number of header words to skip
NOT_PRODUCT(static size_t _skip_header_HeapWords;)
+ FlexibleWorkGang* _workers;
+
protected:
// Helper functions for allocation
HeapWord* attempt_allocation(size_t size,
@@ -125,6 +118,8 @@ protected:
public:
GenCollectedHeap(GenCollectorPolicy *policy);
+ FlexibleWorkGang* workers() const { return _workers; }
+
GCStats* gc_stats(int level) const;
// Returns JNI_OK on success
@@ -178,9 +173,6 @@ public:
HeapWord** top_addr() const;
HeapWord** end_addr() const;
- // Does this heap support heap inspection? (+PrintClassHistogram)
- virtual bool supports_heap_inspection() const { return true; }
-
// Perform a full collection of the heap; intended for use in implementing
// "System.gc". This implies as full a collection as the CollectedHeap
// supports. Caller does not hold the Heap_lock on entry.
@@ -223,6 +215,7 @@ public:
}
// Iteration functions.
+ void oop_iterate_no_header(OopClosure* cl);
void oop_iterate(ExtendedOopClosure* cl);
void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl);
@@ -280,11 +273,6 @@ public:
// only and may need to be re-examined in case other
// kinds of collectors are implemented in the future.
virtual bool can_elide_initializing_store_barrier(oop new_obj) {
- // We wanted to assert that:-
- // assert(UseSerialGC || UseConcMarkSweepGC,
- // "Check can_elide_initializing_store_barrier() for this collector");
- // but unfortunately the flag UseSerialGC need not necessarily always
- // be set when DefNew+Tenured are being used.
return is_in_young(new_obj);
}
@@ -331,7 +319,6 @@ public:
_old_gen->update_gc_stats(current_level, full);
}
- // Override.
bool no_gc_in_progress() { return !is_gc_active(); }
// Override.
@@ -363,18 +350,11 @@ public:
// If "old_to_young" determines the order.
void generation_iterate(GenClosure* cl, bool old_to_young);
- void space_iterate(SpaceClosure* cl);
-
// Return "true" if all generations have reached the
// maximal committed limit that they can reach, without a garbage
// collection.
virtual bool is_maximal_no_gc() const;
- int n_gens() const {
- assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
- return _n_gens;
- }
-
// This function returns the "GenRemSet" object that allows us to scan
// generations in a fully generational heap.
GenRemSet* rem_set() { return _rem_set; }
@@ -531,8 +511,8 @@ private:
void record_gen_tops_before_GC() PRODUCT_RETURN;
protected:
- virtual void gc_prologue(bool full);
- virtual void gc_epilogue(bool full);
+ void gc_prologue(bool full);
+ void gc_epilogue(bool full);
};
#endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
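The oop_iterate_no_header entry point declared above (and implemented in the .cpp by wrapping the argument in a NoHeaderExtendedOopClosure) applies a plain OopClosure to every reference field in the heap. Hypothetical usage:

    class CountOopsClosure : public OopClosure {  // hypothetical closure
     public:
      size_t _count;
      CountOopsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { _count++; }
      virtual void do_oop(narrowOop* p) { _count++; }
    };

    CountOopsClosure cl;
    GenCollectedHeap::heap()->oop_iterate_no_header(&cl);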
diff --git a/hotspot/src/share/vm/memory/genMarkSweep.cpp b/hotspot/src/share/vm/memory/genMarkSweep.cpp
index ef2707999f2..94100be74d4 100644
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp
@@ -187,7 +187,6 @@ void GenMarkSweep::mark_sweep_phase1(int level,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace(" 1");
GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -258,7 +257,6 @@ void GenMarkSweep::mark_sweep_phase2() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("2");
gch->prepare_for_compaction();
}
@@ -275,7 +273,6 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
// Adjust the pointers to reflect the new locations
GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("3");
// Need new claim bits for the pointer adjustment tracing.
ClassLoaderDataGraph::clear_claimed_marks();
@@ -325,7 +322,6 @@ void GenMarkSweep::mark_sweep_phase4() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
- trace("4");
GenCompactClosure blk;
gch->generation_iterate(&blk, true);
diff --git a/hotspot/src/share/vm/memory/genOopClosures.cpp b/hotspot/src/share/vm/memory/genOopClosures.cpp
new file mode 100644
index 00000000000..d076f5ef90f
--- /dev/null
+++ b/hotspot/src/share/vm/memory/genOopClosures.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/genOopClosures.inline.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/specialized_oop_closures.hpp"
+
+// Generate Serial GC specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
diff --git a/hotspot/src/share/vm/memory/genOopClosures.inline.hpp b/hotspot/src/share/vm/memory/genOopClosures.inline.hpp
index 3b2b83dfa0c..6842d88cb7f 100644
--- a/hotspot/src/share/vm/memory/genOopClosures.inline.hpp
+++ b/hotspot/src/share/vm/memory/genOopClosures.inline.hpp
@@ -31,7 +31,6 @@
#include "memory/genOopClosures.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generation.hpp"
-#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
diff --git a/hotspot/src/share/vm/memory/generation.cpp b/hotspot/src/share/vm/memory/generation.cpp
index e27cb603291..2518f2c53a6 100644
--- a/hotspot/src/share/vm/memory/generation.cpp
+++ b/hotspot/src/share/vm/memory/generation.cpp
@@ -187,7 +187,7 @@ oop Generation::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
#ifndef PRODUCT
- if (Universe::heap()->promotion_should_fail()) {
+ if (GenCollectedHeap::heap()->promotion_should_fail()) {
return NULL;
}
#endif // #ifndef PRODUCT
diff --git a/hotspot/src/share/vm/memory/generation.hpp b/hotspot/src/share/vm/memory/generation.hpp
index 650f54b72f8..00a7efaca23 100644
--- a/hotspot/src/share/vm/memory/generation.hpp
+++ b/hotspot/src/share/vm/memory/generation.hpp
@@ -30,10 +30,10 @@
#include "memory/memRegion.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/universe.hpp"
+#include "memory/virtualspace.hpp"
#include "memory/watermark.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"
-#include "runtime/virtualspace.hpp"
// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
diff --git a/hotspot/src/share/vm/memory/heap.hpp b/hotspot/src/share/vm/memory/heap.hpp
index 45b43ce06c1..f623e45fa32 100644
--- a/hotspot/src/share/vm/memory/heap.hpp
+++ b/hotspot/src/share/vm/memory/heap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
-#include "runtime/virtualspace.hpp"
+#include "memory/virtualspace.hpp"
// Blocks
diff --git a/hotspot/src/share/vm/memory/iterator.cpp b/hotspot/src/share/vm/memory/iterator.cpp
index 1022ece4617..83288217c62 100644
--- a/hotspot/src/share/vm/memory/iterator.cpp
+++ b/hotspot/src/share/vm/memory/iterator.cpp
@@ -23,8 +23,11 @@
*/
#include "precompiled.hpp"
-#include "memory/iterator.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
void KlassToOopClosure::do_klass(Klass* k) {
assert(_oop_closure != NULL, "Not initialized?");
@@ -61,19 +64,18 @@ void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
}
}
-MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
- : _active(activate)
-{
- if (_active) nmethod::oops_do_marking_prologue();
-}
-
-MarkingCodeBlobClosure::MarkScope::~MarkScope() {
- if (_active) nmethod::oops_do_marking_epilogue();
-}
-
void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL && !nm->test_set_oops_do_mark()) {
do_nmethod(nm);
}
}
+
+// Generate the *Klass::oop_oop_iterate functions for the base class
+// of the oop closures. These versions use the virtual do_oop calls,
+// instead of the devirtualized do_oop_nv version.
+ALL_KLASS_OOP_OOP_ITERATE_DEFN(ExtendedOopClosure, _v)
+
+// Generate the *Klass::oop_oop_iterate functions
+// for the NoHeaderExtendedOopClosure helper class.
+ALL_KLASS_OOP_OOP_ITERATE_DEFN(NoHeaderExtendedOopClosure, _nv)
diff --git a/hotspot/src/share/vm/memory/iterator.hpp b/hotspot/src/share/vm/memory/iterator.hpp
index 12684527023..53335b8d5f1 100644
--- a/hotspot/src/share/vm/memory/iterator.hpp
+++ b/hotspot/src/share/vm/memory/iterator.hpp
@@ -44,9 +44,7 @@ class Closure : public StackObj { };
class OopClosure : public Closure {
public:
virtual void do_oop(oop* o) = 0;
- virtual void do_oop_v(oop* o) { do_oop(o); }
virtual void do_oop(narrowOop* o) = 0;
- virtual void do_oop_v(narrowOop* o) { do_oop(o); }
};
// ExtendedOopClosure adds extra code to be run during oop iterations.
@@ -74,11 +72,9 @@ class ExtendedOopClosure : public OopClosure {
// Currently, only CMS and G1 need these.
virtual bool do_metadata() { return do_metadata_nv(); }
- bool do_metadata_v() { return do_metadata(); }
bool do_metadata_nv() { return false; }
virtual void do_klass(Klass* k) { do_klass_nv(k); }
- void do_klass_v(Klass* k) { do_klass(k); }
void do_klass_nv(Klass* k) { ShouldNotReachHere(); }
virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
@@ -87,6 +83,14 @@ class ExtendedOopClosure : public OopClosure {
// location without an intervening "major reset" (like the end of a GC).
virtual bool idempotent() { return false; }
virtual bool apply_to_weak_ref_discovered_field() { return false; }
+
+#ifdef ASSERT
+ // Default verification of each visited oop field.
+  template <typename T> void verify(T* p);
+
+ // Can be used by subclasses to turn off the default verification of oop fields.
+ virtual bool should_verify_oops() { return true; }
+#endif
};
// Wrapper closure only used to implement oop_iterate_no_header().
@@ -147,7 +151,6 @@ class CLDToOopClosure : public CLDClosure {
};
class CLDToKlassAndOopClosure : public CLDClosure {
- friend class SharedHeap;
friend class G1CollectedHeap;
protected:
OopClosure* _oop_closure;
@@ -284,16 +287,6 @@ class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
// Called for each code blob, but at most once per unique blob.
virtual void do_code_blob(CodeBlob* cb);
-
- class MarkScope : public StackObj {
- protected:
- bool _active;
- public:
- MarkScope(bool activate = true);
- // = { if (active) nmethod::oops_do_marking_prologue(); }
- ~MarkScope();
- // = { if (active) nmethod::oops_do_marking_epilogue(); }
- };
};
// MonitorClosure is used for iterating over monitors in the monitors cache
@@ -364,16 +357,33 @@ class SymbolClosure : public StackObj {
}
};
+// The two class template specializations are used to dispatch calls
+// to the ExtendedOopClosure functions. If use_non_virtual_call is true,
+// the non-virtual versions are called (E.g. do_oop_nv), otherwise the
+// virtual versions are called (E.g. do_oop).
-// Helper defines for ExtendOopClosure
+template <bool use_non_virtual_call>
+class Devirtualizer {};
-#define if_do_metadata_checked(closure, nv_suffix) \
- /* Make sure the non-virtual and the virtual versions match. */ \
- assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
- "Inconsistency in do_metadata"); \
- if (closure->do_metadata##nv_suffix())
+// Dispatches to the non-virtual functions.
+template <> class Devirtualizer<true> {
+ public:
+  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
+  template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
+};
-#define assert_should_ignore_metadata(closure, nv_suffix) \
- assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented")
+// Dispatches to the virtual functions.
+template <> class Devirtualizer<false> {
+ public:
+  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
+  template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
+};
+
+// Helper to convert the oop iterate macro suffixes into bool values that can be used by template functions.
+#define nvs_nv_to_bool true
+#define nvs_v_to_bool false
+#define nvs_to_bool(nv_suffix) nvs##nv_suffix##_to_bool
#endif // SHARE_VM_MEMORY_ITERATOR_HPP
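The Devirtualizer specializations replace the old if_do_metadata_checked macro: each generated oop_oop_iterate body selects its dispatch at compile time through the nv_suffix-to-bool helpers. A sketched expansion:

    // in a generated body: Devirtualizer<nvs_to_bool(nv_suffix)>::do_oop(closure, p);
    Devirtualizer<true>::do_oop(closure, p);   // nv_suffix == _nv, calls closure->do_oop_nv(p)
    Devirtualizer<false>::do_oop(closure, p);  // nv_suffix == _v,  calls closure->do_oop(p)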
diff --git a/hotspot/src/share/vm/memory/iterator.inline.hpp b/hotspot/src/share/vm/memory/iterator.inline.hpp
index cef12666012..e1d30becc7e 100644
--- a/hotspot/src/share/vm/memory/iterator.inline.hpp
+++ b/hotspot/src/share/vm/memory/iterator.inline.hpp
@@ -28,6 +28,12 @@
#include "classfile/classLoaderData.hpp"
#include "memory/iterator.hpp"
#include "oops/klass.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
+#include "oops/instanceClassLoaderKlass.inline.hpp"
+#include "oops/instanceRefKlass.inline.hpp"
+#include "oops/objArrayKlass.inline.hpp"
+#include "oops/typeArrayKlass.inline.hpp"
#include "utilities/debug.hpp"
inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) {
@@ -44,4 +50,63 @@ inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {
inline void MetadataAwareOopClosure::do_klass(Klass* k) { do_klass_nv(k); }
+#ifdef ASSERT
+// This verification is applied to all visited oops.
+// The closures can turn it off by overriding should_verify_oops().
+template <typename T>
+void ExtendedOopClosure::verify(T* p) {
+ if (should_verify_oops()) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+ assert(Universe::heap()->is_in_closed_subset(o),
+ err_msg("should be in closed *p " PTR_FORMAT " " PTR_FORMAT, p2i(p), p2i(o)));
+ }
+ }
+}
+#endif
+
+// Implementation of the non-virtual do_oop dispatch.
+
+template <class OopClosureType, typename T>
+inline void Devirtualizer<true>::do_oop(OopClosureType* closure, T* p) {
+ debug_only(closure->verify(p));
+ closure->do_oop_nv(p);
+}
+template <class OopClosureType>
+inline void Devirtualizer<true>::do_klass(OopClosureType* closure, Klass* k) {
+ closure->do_klass_nv(k);
+}
+template <class OopClosureType>
+inline bool Devirtualizer<true>::do_metadata(OopClosureType* closure) {
+ // Make sure the non-virtual and the virtual versions match.
+ assert(closure->do_metadata_nv() == closure->do_metadata(), "Inconsistency in do_metadata");
+ return closure->do_metadata_nv();
+}
+
+// Implementation of the virtual do_oop dispatch.
+
+template <class OopClosureType, typename T>
+void Devirtualizer<false>::do_oop(OopClosureType* closure, T* p) {
+ debug_only(closure->verify(p));
+ closure->do_oop(p);
+}
+template <class OopClosureType>
+void Devirtualizer<false>::do_klass(OopClosureType* closure, Klass* k) {
+ closure->do_klass(k);
+}
+template <class OopClosureType>
+bool Devirtualizer<false>::do_metadata(OopClosureType* closure) {
+ return closure->do_metadata();
+}
+
+// The list of all "specializable" oop_oop_iterate function definitions.
+#define ALL_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+ ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
+ ALL_INSTANCE_REF_KLASS_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
+ ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
+ ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+ ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
+ ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix)
+
#endif // SHARE_VM_MEMORY_ITERATOR_INLINE_HPP
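A hedged sketch of a closure written for the non-virtual path: it supplies do_oop_nv bodies and routes the virtual do_oop through them, so Devirtualizer<true> binds the calls statically and the compiler can inline them:

    class MarkSketchClosure : public ExtendedOopClosure {  // hypothetical closure
     public:
      template <typename T> void do_oop_work(T* p) { /* mark the referent of *p */ }
      void do_oop_nv(oop* p)            { do_oop_work(p); }
      void do_oop_nv(narrowOop* p)      { do_oop_work(p); }
      virtual void do_oop(oop* p)       { do_oop_nv(p); }
      virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
    };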
diff --git a/hotspot/src/share/vm/memory/metaspace.hpp b/hotspot/src/share/vm/memory/metaspace.hpp
index f85b6c44dd8..ef88f8d0ae3 100644
--- a/hotspot/src/share/vm/memory/metaspace.hpp
+++ b/hotspot/src/share/vm/memory/metaspace.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/metaspaceChunkFreeListSummary.hpp"
-#include "runtime/virtualspace.hpp"
+#include "memory/virtualspace.hpp"
#include "utilities/exceptions.hpp"
// Metaspace
diff --git a/hotspot/src/share/vm/memory/metaspaceShared.cpp b/hotspot/src/share/vm/memory/metaspaceShared.cpp
index ecfa27fa3f4..613907d8420 100644
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp
@@ -30,6 +30,8 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
+#include "interpreter/bytecodes.hpp"
+#include "interpreter/bytecodeStream.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metaspace.hpp"
@@ -104,15 +106,33 @@ static void remove_unshareable_in_classes() {
}
}
-// Walk all methods in the class list and assign a fingerprint.
-// so that this part of the ConstMethod* is read only.
-static void calculate_fingerprints() {
+static void rewrite_nofast_bytecode(Method* method) {
+ RawBytecodeStream bcs(method);
+ while (!bcs.is_last_bytecode()) {
+ Bytecodes::Code opcode = bcs.raw_next();
+ switch (opcode) {
+ case Bytecodes::_getfield: *bcs.bcp() = Bytecodes::_nofast_getfield; break;
+ case Bytecodes::_putfield: *bcs.bcp() = Bytecodes::_nofast_putfield; break;
+ case Bytecodes::_aload_0: *bcs.bcp() = Bytecodes::_nofast_aload_0; break;
+ case Bytecodes::_iload: *bcs.bcp() = Bytecodes::_nofast_iload; break;
+ default: break;
+ }
+ }
+}
+
+// Walk all methods in the class list to ensure that they won't be modified at
+// run time. This includes:
+// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
+// at run time by RewriteBytecodes/RewriteFrequentPairs
+// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
+static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
for (int i = 0; i < _global_klass_objects->length(); i++) {
Klass* k = _global_klass_objects->at(i);
if (k->oop_is_instance()) {
InstanceKlass* ik = InstanceKlass::cast(k);
for (int i = 0; i < ik->methods()->length(); i++) {
Method* m = ik->methods()->at(i);
+ rewrite_nofast_bytecode(m);
Fingerprinter fp(m);
// The side effect of this call sets method's fingerprint field.
fp.fingerprint();
@@ -476,9 +496,10 @@ void VM_PopulateDumpSharedSpace::doit() {
tty->print_cr(" type array classes = %5d", num_type_array);
}
- // Update all the fingerprints in the shared methods.
- tty->print("Calculating fingerprints ... ");
- calculate_fingerprints();
+
+ // Ensure the ConstMethods won't be modified at run-time
+ tty->print("Updating ConstMethods ... ");
+ rewrite_nofast_bytecodes_and_calculate_fingerprints();
tty->print_cr("done. ");
// Remove all references outside the metadata
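The effect of rewrite_nofast_bytecode on a trivial getter, sketched with bytecode names only (the constant-pool index is hypothetical):

    before dumping:  aload_0;        getfield #2;        areturn
    in the archive:  nofast_aload_0; nofast_getfield #2; areturn

Because the interpreter executes the _nofast forms directly, the archived ConstMethod bytes are never rewritten or patched at run time, which keeps the shared pages read-only and shareable across processes.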
diff --git a/hotspot/src/share/vm/memory/metaspaceShared.hpp b/hotspot/src/share/vm/memory/metaspaceShared.hpp
index 85bf0e4a303..97094252ced 100644
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "classfile/compactHashtable.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
-#include "runtime/virtualspace.hpp"
+#include "memory/virtualspace.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
diff --git a/hotspot/src/share/vm/memory/sharedHeap.cpp b/hotspot/src/share/vm/memory/sharedHeap.cpp
deleted file mode 100644
index 34424a66c33..00000000000
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/stringTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "gc_interface/collectedHeap.inline.hpp"
-#include "memory/sharedHeap.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/atomic.inline.hpp"
-#include "runtime/fprofiler.hpp"
-#include "runtime/java.hpp"
-#include "utilities/copy.hpp"
-#include "utilities/workgroup.hpp"
-
-SharedHeap* SharedHeap::_sh;
-
-SharedHeap::SharedHeap() :
- CollectedHeap(),
- _workers(NULL)
-{
- _sh = this; // ch is static, should be set only once.
- if (UseConcMarkSweepGC || UseG1GC) {
- _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
- /* are_GC_task_threads */true,
- /* are_ConcurrentGC_threads */false);
- if (_workers == NULL) {
- vm_exit_during_initialization("Failed necessary allocation.");
- } else {
- _workers->initialize_workers();
- }
- }
-}
-
-bool SharedHeap::heap_lock_held_for_gc() {
- Thread* t = Thread::current();
- return Heap_lock->owned_by_self()
- || ( (t->is_GC_task_thread() || t->is_VM_thread())
- && _thread_holds_heap_lock_for_gc);
-}
-
-void SharedHeap::set_par_threads(uint t) {
- assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
- _n_par_threads = t;
-}
-
-SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
- : MarkScope(activate), _sh(heap)
-{
- if (_active) {
- Threads::change_thread_claim_parity();
- // Zero the claimed high water mark in the StringTable
- StringTable::clear_parallel_claimed_index();
- }
-}
-
-SharedHeap::StrongRootsScope::~StrongRootsScope() {
- Threads::assert_all_threads_claimed();
-}
-
-void SharedHeap::set_barrier_set(BarrierSet* bs) {
- _barrier_set = bs;
- // Cached barrier set for fast access in oops
- oopDesc::set_bs(bs);
-}
-
-void SharedHeap::post_initialize() {
- CollectedHeap::post_initialize();
- ref_processing_init();
-}
-
-void SharedHeap::ref_processing_init() {}
diff --git a/hotspot/src/share/vm/memory/sharedHeap.hpp b/hotspot/src/share/vm/memory/sharedHeap.hpp
deleted file mode 100644
index e65ca85fcbf..00000000000
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP
-#define SHARE_VM_MEMORY_SHAREDHEAP_HPP
-
-#include "gc_interface/collectedHeap.hpp"
-#include "memory/generation.hpp"
-
-// A "SharedHeap" is an implementation of a java heap for HotSpot. This
-// is an abstract class: there may be many different kinds of heaps. This
-// class defines the functions that a heap must implement, and contains
-// infrastructure common to all heaps.
-
-class Generation;
-class BarrierSet;
-class GenRemSet;
-class Space;
-class SpaceClosure;
-class OopClosure;
-class OopsInGenClosure;
-class ObjectClosure;
-class SubTasksDone;
-class WorkGang;
-class FlexibleWorkGang;
-class CollectorPolicy;
-class KlassClosure;
-
-// Note on use of FlexibleWorkGang's for GC.
-// There are three places where task completion is determined.
-// In
-// 1) ParallelTaskTerminator::offer_termination() where _n_threads
-// must be set to the correct value so that count of workers that
-// have offered termination will exactly match the number
-// working on the task. Tasks such as those derived from GCTask
-// use ParallelTaskTerminator's. Tasks that want load balancing
-// by work stealing use this method to gauge completion.
-// 2) SubTasksDone has a variable _n_threads that is used in
-// all_tasks_completed() to determine completion. all_tasks_completed()
-// counts the number of tasks that have been done and then resets
-// the SubTasksDone so that it can be used again. When the number of
-// tasks is set to the number of GC workers, then _n_threads must
-// be set to the number of active GC workers. G1RootProcessor and
-// GenCollectedHeap have SubTasksDone.
-// 3) SequentialSubTasksDone has an _n_threads that is used in
-// a way similar to SubTasksDone and has the same dependency on the
-// number of active GC workers. CompactibleFreeListSpace and Space
-// have SequentialSubTasksDone's.
-//
-// Examples of using SubTasksDone and SequentialSubTasksDone:
-// G1RootProcessor and GenCollectedHeap::process_roots() use
-// SubTasksDone* _process_strong_tasks to claim tasks for workers
-//
-// GenCollectedHeap::gen_process_roots() calls
-// rem_set()->younger_refs_iterate()
-// to scan the card table and which eventually calls down into
-// CardTableModRefBS::par_non_clean_card_iterate_work(). This method
-// uses SequentialSubTasksDone* _pst to claim tasks.
-// Both SubTasksDone and SequentialSubTasksDone call their method
-// all_tasks_completed() to count the number of GC workers that have
-// finished their work. That logic is "when all the workers are
-// finished the tasks are finished".
-//
-// The pattern that appears in the code is to set _n_threads
-// to a value > 1 before a task that you would like executed in parallel
-// and then to set it to 0 after that task has completed. A value of
-// 0 is a "special" value in set_n_threads() which translates to
-// setting _n_threads to 1.
-//
-// Some code uses _n_termination to decide if work should be done in
-// parallel. The notorious possibly_parallel_oops_do() in thread.cpp
-// is an example of such code. Look for variable "is_par" for other
-// examples.
-//
-// The active_workers is not reset to 0 after a parallel phase. Its
-// value may be used in later phases and in one instance at least
-// (the parallel remark) it has to be used (the parallel remark depends
-// on the partitioning done in the previous parallel scavenge).
-
-class SharedHeap : public CollectedHeap {
- friend class VMStructs;
-
- friend class VM_GC_Operation;
- friend class VM_CGC_Operation;
-
-protected:
- // There should be only a single instance of "SharedHeap" in a program.
- // This is enforced with the protected constructor below, which will also
- // set the static pointer "_sh" to that instance.
- static SharedHeap* _sh;
-
- // If we're doing parallel GC, use this gang of threads.
- FlexibleWorkGang* _workers;
-
- // Full initialization is done in a concrete subtype's "initialize"
- // function.
- SharedHeap();
-
- // Returns true if the calling thread holds the heap lock,
- // or the calling thread is a par gc thread and the heap_lock is held
- // by the vm thread doing a gc operation.
- bool heap_lock_held_for_gc();
-  // True if the heap_lock is held by a non-gc thread invoking a gc
- // operation.
- bool _thread_holds_heap_lock_for_gc;
-
-public:
- static SharedHeap* heap() { return _sh; }
-
- void set_barrier_set(BarrierSet* bs);
-
- // Does operations required after initialization has been done.
- virtual void post_initialize();
-
- // Initialization of ("weak") reference processing support
- virtual void ref_processing_init();
-
- // Iteration functions.
- void oop_iterate(ExtendedOopClosure* cl) = 0;
-
- // Iterate over all spaces in use in the heap, in an undefined order.
- virtual void space_iterate(SpaceClosure* cl) = 0;
-
- // A SharedHeap will contain some number of spaces. This finds the
- // space whose reserved area contains the given address, or else returns
- // NULL.
- virtual Space* space_containing(const void* addr) const = 0;
-
- bool no_gc_in_progress() { return !is_gc_active(); }
-
- // Note, the below comment needs to be updated to reflect the changes
- // introduced by JDK-8076225. This should be done as part of JDK-8076289.
- //
-  // Some collectors will perform "process_strong_roots" in parallel.
- // Such a call will involve claiming some fine-grained tasks, such as
- // scanning of threads. To make this process simpler, we provide the
- // "strong_roots_parity()" method. Collectors that start parallel tasks
- // whose threads invoke "process_strong_roots" must
- // call "change_strong_roots_parity" in sequential code starting such a
- // task. (This also means that a parallel thread may only call
- // process_strong_roots once.)
- //
- // For calls to process_roots by sequential code, the parity is
- // updated automatically.
- //
- // The idea is that objects representing fine-grained tasks, such as
-  // threads, will contain a "parity" field. A task is claimed in the
- // current "process_roots" call only if its parity field is the
- // same as the "strong_roots_parity"; task claiming is accomplished by
- // updating the parity field to the strong_roots_parity with a CAS.
- //
-  // If the client meets this spec, then strong_roots_parity() will have
- // the following properties:
- // a) to return a different value than was returned before the last
- // call to change_strong_roots_parity, and
-  // b) to never return a distinguished value (zero) with which such
- // task-claiming variables may be initialized, to indicate "never
- // claimed".
- public:
-
- // Call these in sequential code around process_roots.
- // strong_roots_prologue calls change_strong_roots_parity, if
- // parallel tasks are enabled.
- class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
- SharedHeap* _sh;
-
- public:
- StrongRootsScope(SharedHeap* heap, bool activate = true);
- ~StrongRootsScope();
- };
-
- private:
-
- public:
- FlexibleWorkGang* workers() const { return _workers; }
-
- // The functions below are helper functions that a subclass of
- // "SharedHeap" can use in the implementation of its virtual
- // functions.
-
-public:
-
- // Do anything common to GC's.
- virtual void gc_prologue(bool full) = 0;
- virtual void gc_epilogue(bool full) = 0;
-
- // Sets the number of parallel threads that will be doing tasks
- // (such as process roots) subsequently.
- virtual void set_par_threads(uint t);
-};
-
-#endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP
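
The deleted header comment above describes the SubTasksDone protocol: workers
claim fine-grained subtasks, each worker calls all_tasks_completed() exactly
once, and the last of the _n_threads callers resets the object for reuse. A
minimal standalone sketch of that protocol, using std::atomic and illustrative
names (SubTasksDoneSketch, try_claim) rather than HotSpot's actual API:

  #include <atomic>

  class SubTasksDoneSketch {
    enum { MAX_TASKS = 16 };
    std::atomic<unsigned> _claimed[MAX_TASKS]; // one claim flag per subtask
    std::atomic<unsigned> _threads_completed;  // workers finished so far
    unsigned _n_tasks;
    unsigned _n_threads; // must match the number of active GC workers

   public:
    SubTasksDoneSketch(unsigned n_tasks, unsigned n_threads)
        : _threads_completed(0), _n_tasks(n_tasks), _n_threads(n_threads) {
      for (unsigned i = 0; i < _n_tasks; i++) _claimed[i].store(0);
    }

    // Exactly one worker wins each subtask.
    bool try_claim(unsigned t) {
      return _claimed[t].exchange(1) == 0;
    }

    // Called once per worker when it runs out of work; the last of the
    // _n_threads callers resets the state so the object can be reused.
    void all_tasks_completed() {
      if (_threads_completed.fetch_add(1) + 1 == _n_threads) {
        for (unsigned i = 0; i < _n_tasks; i++) _claimed[i].store(0);
        _threads_completed.store(0);
      }
    }
  };

This is why _n_threads must be set to the number of active workers before the
phase begins: with a stale value the reset either fires early or never.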
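The strong_roots_parity claiming scheme described in the same comment can be
sketched as follows. The names (TaskSketch, try_claim) and the use of
std::atomic are assumptions for illustration only; in HotSpot the parity is
kept by Threads and the claim field lives in each Thread:

  #include <atomic>

  // Flipped in sequential code before each parallel phase; never the
  // distinguished value 0, which means "never claimed".
  static int _strong_roots_parity = 1;

  static void change_strong_roots_parity() {
    _strong_roots_parity = (_strong_roots_parity == 1) ? 2 : 1;
  }

  struct TaskSketch {
    std::atomic<int> _claimed{0};

    // Exactly one worker per phase succeeds: the one whose CAS moves the
    // field from the previous parity (or 0) to the current parity.
    bool try_claim() {
      int parity = _strong_roots_parity;
      int prev   = _claimed.load();
      return prev != parity && _claimed.compare_exchange_strong(prev, parity);
    }
  };

Both properties from the comment hold: the parity alternates between 1 and 2,
so it always differs from the previous phase and is never the initial value 0.
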
diff --git a/hotspot/src/share/vm/memory/space.cpp b/hotspot/src/share/vm/memory/space.cpp
index d0fdd3c615e..681c8e84201 100644
--- a/hotspot/src/share/vm/memory/space.cpp
+++ b/hotspot/src/share/vm/memory/space.cpp
@@ -31,6 +31,7 @@
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
+#include "memory/genOopClosures.inline.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
diff --git a/hotspot/src/share/vm/memory/strongRootsScope.cpp b/hotspot/src/share/vm/memory/strongRootsScope.cpp
new file mode 100644
index 00000000000..9b28475d485
--- /dev/null
+++ b/hotspot/src/share/vm/memory/strongRootsScope.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/stringTable.hpp"
+#include "code/nmethod.hpp"
+#include "memory/strongRootsScope.hpp"
+#include "runtime/thread.hpp"
+
+MarkScope::MarkScope(bool activate) : _active(activate) {
+ if (_active) {
+ nmethod::oops_do_marking_prologue();
+ }
+}
+
+MarkScope::~MarkScope() {
+ if (_active) {
+ nmethod::oops_do_marking_epilogue();
+ }
+}
+
+StrongRootsScope::StrongRootsScope(bool activate) : MarkScope(activate) {
+ if (_active) {
+ Threads::change_thread_claim_parity();
+ // Zero the claimed high water mark in the StringTable
+ StringTable::clear_parallel_claimed_index();
+ }
+}
+
+StrongRootsScope::~StrongRootsScope() {
+ Threads::assert_all_threads_claimed();
+}
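
The two new scopes are RAII objects: construction is the prologue of a
parallel root-processing phase and destruction the epilogue. A hedged usage
sketch (example_parallel_root_phase is illustrative, not part of this patch):

  #include "memory/strongRootsScope.hpp"

  void example_parallel_root_phase() {
    StrongRootsScope srs; // MarkScope part: nmethod::oops_do_marking_prologue();
                          // then flip the thread claim parity and zero the
                          // StringTable claimed index
    // ... run the parallel root-processing task on the work gang; workers
    // claim individual threads via their parity field ...
  }                       // ~StrongRootsScope: Threads::assert_all_threads_claimed(),
                          // then nmethod::oops_do_marking_epilogue()

Compared with the deleted SharedHeap::StrongRootsScope, the new class no
longer takes a heap pointer, so callers do not need to reach for SharedHeap.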
diff --git a/hotspot/src/share/vm/oops/oop.psgc.inline.hpp b/hotspot/src/share/vm/memory/strongRootsScope.hpp
similarity index 56%
rename from hotspot/src/share/vm/oops/oop.psgc.inline.hpp
rename to hotspot/src/share/vm/memory/strongRootsScope.hpp
index 63081244495..3f8c7138dbe 100644
--- a/hotspot/src/share/vm/oops/oop.psgc.inline.hpp
+++ b/hotspot/src/share/vm/memory/strongRootsScope.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,24 +22,25 @@
*
*/
-#ifndef SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP
-#define SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP
+#ifndef SHARE_VM_MEMORY_STRONGROOTSSCOPE_HPP
+#define SHARE_VM_MEMORY_STRONGROOTSSCOPE_HPP
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
-#include "gc_implementation/parallelScavenge/psScavenge.hpp"
-#endif // INCLUDE_ALL_GCS
+#include "memory/allocation.hpp"
-// ParallelScavengeHeap methods
+class MarkScope : public StackObj {
+ protected:
+ bool _active;
+ public:
+ MarkScope(bool activate = true);
+ ~MarkScope();
+};
-inline void oopDesc::push_contents(PSPromotionManager* pm) {
- Klass* k = klass();
- if (!k->oop_is_typeArray()) {
- // It might contain oops beyond the header, so take the virtual call.
- k->oop_push_contents(pm, this);
- }
- // Else skip it. The TypeArrayKlass in the header never needs scavenging.
-}
+// Sets up and tears down the required state for parallel root processing.
-#endif // SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP
+class StrongRootsScope : public MarkScope {
+ public:
+ StrongRootsScope(bool activate = true);
+ ~StrongRootsScope();
+};
+
+#endif // SHARE_VM_MEMORY_STRONGROOTSSCOPE_HPP
diff --git a/hotspot/src/share/vm/memory/tenuredGeneration.cpp b/hotspot/src/share/vm/memory/tenuredGeneration.cpp
index e300ee43eea..63026205c1a 100644
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp
@@ -36,6 +36,9 @@
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/parNew/parOopClosures.hpp"
+#endif
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size, int level,
diff --git a/hotspot/src/share/vm/memory/universe.cpp b/hotspot/src/share/vm/memory/universe.cpp
index 75f4f96e23a..1cd4d909b0a 100644
--- a/hotspot/src/share/vm/memory/universe.cpp
+++ b/hotspot/src/share/vm/memory/universe.cpp
@@ -687,6 +687,15 @@ jint universe_init() {
return JNI_OK;
}
+template <class Heap, class Policy>
+jint Universe::create_heap() {
+ assert(_collectedHeap == NULL, "Heap already created");
+ Policy* policy = new Policy();
+ policy->initialize_all();
+ _collectedHeap = new Heap(policy);
+ return _collectedHeap->initialize();
+}
+
// Choose the heap base address and oop encoding mode
// when compressed oops are used:
// Unscaled - Use 32-bits oops without encoding when
@@ -696,50 +705,35 @@ jint universe_init() {
// HeapBased - Use compressed oops with heap base + encoding.
jint Universe::initialize_heap() {
+ jint status = JNI_ERR;
+#if !INCLUDE_ALL_GCS
if (UseParallelGC) {
-#if INCLUDE_ALL_GCS
- Universe::_collectedHeap = new ParallelScavengeHeap();
-#else // INCLUDE_ALL_GCS
fatal("UseParallelGC not supported in this VM.");
-#endif // INCLUDE_ALL_GCS
-
} else if (UseG1GC) {
-#if INCLUDE_ALL_GCS
- G1CollectorPolicyExt* g1p = new G1CollectorPolicyExt();
- g1p->initialize_all();
- G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
- Universe::_collectedHeap = g1h;
-#else // INCLUDE_ALL_GCS
- fatal("UseG1GC not supported in java kernel vm.");
-#endif // INCLUDE_ALL_GCS
-
+ fatal("UseG1GC not supported in this VM.");
+ } else if (UseConcMarkSweepGC) {
+ fatal("UseConcMarkSweepGC not supported in this VM.");
+#else
+ if (UseParallelGC) {
+    status = Universe::create_heap<ParallelScavengeHeap, GenerationSizer>();
+ } else if (UseG1GC) {
+    status = Universe::create_heap<G1CollectedHeap, G1CollectorPolicyExt>();
+ } else if (UseConcMarkSweepGC) {
+    status = Universe::create_heap<GenCollectedHeap, ConcurrentMarkSweepPolicy>();
+#endif
+ } else if (UseSerialGC) {
+    status = Universe::create_heap<GenCollectedHeap, MarkSweepPolicy>();