commit 592a889772 by Lana Steuck, 2014-07-10 14:06:04 -07:00
1656 changed files with 47111 additions and 30111 deletions

View File

@ -263,3 +263,4 @@ efe7dbc6088691757404e0c8745f894e3ca9c022 jdk9-b09
c5495e25c7258ab5f96a1ae14610887d76d2be63 jdk9-b18
2dcf544eb7ed5ac6a3f7813a32e33acea7442405 jdk9-b19
89731ae72a761afdf4262e8b9513f302f6563f89 jdk9-b20
28dd0c7beb3cad9cf95f17b4b5ad87eb447a4084 jdk9-b21

View File

@ -263,3 +263,4 @@ cf22a728521f91a4692b433d39d730a0a1b23155 jdk9-b16
65abab59f783fcf02ff8e133431c252f9e5f07d5 jdk9-b18
75a08df650eb3126bab0c4d15241f5886162393c jdk9-b19
ee4fd72b2ec3d92497f37163352f294aa695c6fb jdk9-b20
9052803f4d01feda28b3d65f2b64dd457d21c7b6 jdk9-b21

View File

@ -512,7 +512,7 @@ AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
)
AC_ARG_WITH(sysroot, [AS_HELP_STRING([--with-sysroot],
[use this directory as sysroot)])],
[use this directory as sysroot])],
[SYSROOT=$with_sysroot]
)
@ -531,6 +531,75 @@ AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
[BASIC_PREPEND_TO_PATH([EXTRA_PATH],$with_extra_path)]
)
if test "x$OPENJDK_BUILD_OS" = "xmacosx"; then
# detect if Xcode is installed by running xcodebuild -version
# if no Xcode installed, xcodebuild exits with 1
# if Xcode is installed, even if xcode-select is misconfigured, then it exits with 0
if /usr/bin/xcodebuild -version >/dev/null 2>&1; then
# We need to use xcodebuild in the toolchain dir provided by the user, this will
# fall back on the stub binary in /usr/bin/xcodebuild
AC_PATH_PROG([XCODEBUILD], [xcodebuild], [/usr/bin/xcodebuild], [$TOOLCHAIN_PATH])
else
# this should result in SYSROOT being empty, unless --with-sysroot is provided
# when only the command line tools are installed there are no SDKs, so headers
# are copied into the system frameworks
XCODEBUILD=
AC_SUBST(XCODEBUILD)
fi
AC_MSG_CHECKING([for sdk name])
AC_ARG_WITH([sdk-name], [AS_HELP_STRING([--with-sdk-name],
[use the platform SDK of the given name. @<:@macosx@:>@])],
[SDKNAME=$with_sdk_name]
)
AC_MSG_RESULT([$SDKNAME])
# if toolchain path is specified then don't rely on system headers, they may not compile
HAVE_SYSTEM_FRAMEWORK_HEADERS=0
test -z "$TOOLCHAIN_PATH" && \
HAVE_SYSTEM_FRAMEWORK_HEADERS=`test ! -f /System/Library/Frameworks/Foundation.framework/Headers/Foundation.h; echo $?`
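# (Reading aid, illustrative only.) "test ! -f <header>; echo $?" prints 0 when the header is
# absent and 1 when it is present, so HAVE_SYSTEM_FRAMEWORK_HEADERS=1 means the system
# framework headers exist and =0 means they do not (or were deliberately ignored because a
# toolchain path was given).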
if test -z "$SYSROOT"; then
if test -n "$XCODEBUILD"; then
# if we don't have system headers, use default SDK name (last resort)
if test -z "$SDKNAME" -a $HAVE_SYSTEM_FRAMEWORK_HEADERS -eq 0; then
SDKNAME=${SDKNAME:-macosx}
fi
if test -n "$SDKNAME"; then
# Call xcodebuild to determine SYSROOT
SYSROOT=`"$XCODEBUILD" -sdk $SDKNAME -version | grep '^Path: ' | sed 's/Path: //'`
fi
else
if test $HAVE_SYSTEM_FRAMEWORK_HEADERS -eq 0; then
AC_MSG_ERROR([No xcodebuild tool and no system framework headers found, use --with-sysroot or --with-sdk-name to provide a path to a valid SDK])
fi
fi
else
# warn user if --with-sdk-name was also set
if test -n "$with_sdk_name"; then
AC_MSG_WARN([Both SYSROOT and --with-sdk-name are set, only SYSROOT will be used])
fi
fi
if test $HAVE_SYSTEM_FRAMEWORK_HEADERS -eq 0 -a -z "$SYSROOT"; then
# If no system framework headers, then SYSROOT must be set, or we won't build
AC_MSG_ERROR([Unable to determine SYSROOT and no headers found in /System/Library/Frameworks. Check Xcode configuration, --with-sysroot or --with-sdk-name arguments.])
fi
# Perform a basic sanity test
if test ! -f "$SYSROOT/System/Library/Frameworks/Foundation.framework/Headers/Foundation.h"; then
if test -z "$SYSROOT"; then
AC_MSG_ERROR([Unable to find required framework headers, provide a path to an SDK via --with-sysroot or --with-sdk-name and be sure Xcode is installed properly])
else
AC_MSG_ERROR([Invalid SDK or SYSROOT path, dependent framework headers not found])
fi
fi
# set SDKROOT too, Xcode tools will pick it up
AC_SUBST(SDKROOT,$SYSROOT)
fi
# Prepend the extra path to the global path
BASIC_PREPEND_TO_PATH([PATH],$EXTRA_PATH)
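A minimal sketch of what the new fragment does on a machine with Xcode installed; the SDK name
and the printed path are illustrative, only the commands are taken from the change above:

  # resolve the SDK path the same way the block above does
  SDKNAME=${SDKNAME:-macosx}
  SYSROOT=`/usr/bin/xcodebuild -sdk $SDKNAME -version | grep '^Path: ' | sed 's/Path: //'`
  echo "$SYSROOT"
  # e.g. /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk
  # configure can also be pointed at a specific SDK explicitly, for example:
  #   bash ./configure --with-sdk-name=macosx10.9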

View File

@ -131,6 +131,10 @@ AC_DEFUN_ONCE([FLAGS_SETUP_INIT_FLAGS],
-L$SYSROOT/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$SYSROOT/usr/ccs/lib$OPENJDK_TARGET_CPU_ISADIR"
fi
elif test "x$OPENJDK_TARGET_OS" = xmacosx; then
# Apple only wants -isysroot <path>, but we also need -iframework<path>/System/Library/Frameworks
SYSROOT_CFLAGS="-isysroot \"$SYSROOT\" -iframework\"$SYSROOT/System/Library/Frameworks\""
SYSROOT_LDFLAGS=$SYSROOT_CFLAGS
elif test "x$TOOLCHAIN_TYPE" = xgcc; then
SYSROOT_CFLAGS="--sysroot=\"$SYSROOT\""
SYSROOT_LDFLAGS="--sysroot=\"$SYSROOT\""
@ -143,6 +147,14 @@ AC_DEFUN_ONCE([FLAGS_SETUP_INIT_FLAGS],
LEGACY_EXTRA_CXXFLAGS="$LEGACY_EXTRA_CXXFLAGS $SYSROOT_CFLAGS"
LEGACY_EXTRA_LDFLAGS="$LEGACY_EXTRA_LDFLAGS $SYSROOT_LDFLAGS"
fi
# These always need to be set, or we can't find the frameworks embedded in JavaVM.framework
# set this here so it doesn't have to be peppered throughout the forest
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
SYSROOT_CFLAGS="$SYSROOT_CFLAGS -F\"$SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks\""
SYSROOT_LDFLAGS="$SYSROOT_LDFLAGS -F\"$SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks\""
fi
AC_SUBST(SYSROOT_CFLAGS)
AC_SUBST(SYSROOT_LDFLAGS)
])
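Taken together, the two macosx branches above compose the sysroot flags roughly as follows
(a sketch with an illustrative SDK path; quoting as in the change):

  # SYSROOT=/Applications/Xcode.app/.../SDKs/MacOSX10.9.sdk
  # SYSROOT_CFLAGS and SYSROOT_LDFLAGS both end up as:
  #   -isysroot "$SYSROOT" -iframework"$SYSROOT/System/Library/Frameworks" \
  #   -F"$SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks"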
@ -644,23 +656,18 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
# Additional macosx handling
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
if test "x$TOOLCHAIN_TYPE" = xgcc; then
# FIXME: This needs to be exported in spec.gmk due to closed legacy code.
# FIXME: clean this up, and/or move it elsewhere.
# Setting these parameters makes it an error to link to macosx APIs that are
# newer than the given OS version and makes the linked binaries compatible
# even if built on a newer version of the OS.
# The expected format is X.Y.Z
MACOSX_VERSION_MIN=10.7.0
AC_SUBST(MACOSX_VERSION_MIN)
# Setting these parameters makes it an error to link to macosx APIs that are
# newer than the given OS version and makes the linked binaries compatible
# even if built on a newer version of the OS.
# The expected format is X.Y.Z
MACOSX_VERSION_MIN=10.7.0
AC_SUBST(MACOSX_VERSION_MIN)
# The macro takes the version with no dots, ex: 1070
# Let the flags variables get resolved in make for easier override on make
# command line.
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
LDFLAGS_JDK="$LDFLAGS_JDK -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
fi
# The macro takes the version with no dots, ex: 1070
# Let the flags variables get resolved in make for easier override on make
# command line.
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
LDFLAGS_JDK="$LDFLAGS_JDK -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
fi
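A shell equivalent of the make-level substitution used above (illustrative; the real flags are
resolved by make at build time):

  MACOSX_VERSION_MIN=10.7.0
  # $(subst .,,$(MACOSX_VERSION_MIN)) strips the dots, i.e. 10.7.0 -> 1070
  echo "-DMAC_OS_X_VERSION_MAX_ALLOWED=`echo $MACOSX_VERSION_MIN | tr -d .` -mmacosx-version-min=$MACOSX_VERSION_MIN"
  # prints: -DMAC_OS_X_VERSION_MAX_ALLOWED=1070 -mmacosx-version-min=10.7.0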
# Setup some hard coded includes

View File

@ -874,6 +874,8 @@ PKGHANDLER
OUTPUT_ROOT
CONF_NAME
SPEC
SDKROOT
XCODEBUILD
BUILD_VARIANT_RELEASE
DEBUG_CLASSFILES
FASTDEBUG
@ -1040,6 +1042,7 @@ with_sysroot
with_tools_dir
with_toolchain_path
with_extra_path
with_sdk_name
with_conf_name
with_builddeps_conf
with_builddeps_server
@ -1074,7 +1077,6 @@ with_extra_cxxflags
with_extra_ldflags
enable_debug_symbols
enable_zip_debug_info
enable_macosx_runtime_support
with_x
with_cups
with_cups_include
@ -1085,6 +1087,7 @@ enable_freetype_bundling
with_alsa
with_alsa_include
with_alsa_lib
with_libjpeg
with_giflib
with_lcms
with_libpng
@ -1841,9 +1844,6 @@ Optional Features:
--disable-debug-symbols disable generation of debug symbols [enabled]
--disable-zip-debug-info
disable zipping of debug-info files [enabled]
--enable-macosx-runtime-support
Deprecated. Option is kept for backwards
compatibility and is ignored
--disable-freetype-bundling
disable bundling of the freetype library with the
build result [enabled on Windows or when using
@ -1872,12 +1872,13 @@ Optional Packages:
optimized (HotSpot build only)) [release]
--with-devkit use this devkit for compilers, tools and resources
--with-sys-root alias for --with-sysroot for backwards compatability
--with-sysroot use this directory as sysroot)
--with-sysroot use this directory as sysroot
--with-tools-dir alias for --with-toolchain-path for backwards
compatibility
--with-toolchain-path prepend these directories when searching for
toolchain binaries (compilers etc)
--with-extra-path prepend these directories to the default path
--with-sdk-name use the platform SDK of the given name. [macosx]
--with-conf-name use this as the name of the configuration [generated
from important configuration options]
--with-builddeps-conf use this configuration file for the builddeps
@ -1942,6 +1943,8 @@ Optional Packages:
headers under PATH/include)
--with-alsa-include specify directory for the alsa include files
--with-alsa-lib specify directory for the alsa library
--with-libjpeg use libjpeg from build system or OpenJDK source
(system, bundled) [bundled]
--with-giflib use giflib from build system or OpenJDK source
(system, bundled) [bundled]
--with-lcms use lcms2 from build system or OpenJDK source
@ -4308,7 +4311,7 @@ TOOLCHAIN_DESCRIPTION_xlc="IBM XL C/C++"
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1402614845
DATE_WHEN_GENERATED=1403557683
###############################################################################
#
@ -13607,7 +13610,7 @@ test -n "$target_alias" &&
VAR_CPU_BITS=32
VAR_CPU_ENDIAN=big
;;
sparcv9)
sparcv9|sparc64)
VAR_CPU=sparcv9
VAR_CPU_ARCH=sparc
VAR_CPU_BITS=64
@ -13738,7 +13741,7 @@ $as_echo "$OPENJDK_BUILD_OS-$OPENJDK_BUILD_CPU" >&6; }
VAR_CPU_BITS=32
VAR_CPU_ENDIAN=big
;;
sparcv9)
sparcv9|sparc64)
VAR_CPU=sparcv9
VAR_CPU_ARCH=sparc
VAR_CPU_BITS=64
@ -14968,6 +14971,122 @@ if test "${with_extra_path+set}" = set; then :
fi
if test "x$OPENJDK_BUILD_OS" = "xmacosx"; then
# detect if Xcode is installed by running xcodebuild -version
# if no Xcode installed, xcodebuild exits with 1
# if Xcode is installed, even if xcode-select is misconfigured, then it exits with 0
if /usr/bin/xcodebuild -version >/dev/null 2>&1; then
# We need to use xcodebuild in the toolchain dir provided by the user, this will
# fall back on the stub binary in /usr/bin/xcodebuild
# Extract the first word of "xcodebuild", so it can be a program name with args.
set dummy xcodebuild; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_path_XCODEBUILD+:} false; then :
$as_echo_n "(cached) " >&6
else
case $XCODEBUILD in
[\\/]* | ?:[\\/]*)
ac_cv_path_XCODEBUILD="$XCODEBUILD" # Let the user override the test with a path.
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $TOOLCHAIN_PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_XCODEBUILD="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS
test -z "$ac_cv_path_XCODEBUILD" && ac_cv_path_XCODEBUILD="/usr/bin/xcodebuild"
;;
esac
fi
XCODEBUILD=$ac_cv_path_XCODEBUILD
if test -n "$XCODEBUILD"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $XCODEBUILD" >&5
$as_echo "$XCODEBUILD" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi
else
# this should result in SYSROOT being empty, unless --with-sysroot is provided
# when only the command line tools are installed there are no SDKs, so headers
# are copied into the system frameworks
XCODEBUILD=
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sdk name" >&5
$as_echo_n "checking for sdk name... " >&6; }
# Check whether --with-sdk-name was given.
if test "${with_sdk_name+set}" = set; then :
withval=$with_sdk_name; SDKNAME=$with_sdk_name
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $SDKNAME" >&5
$as_echo "$SDKNAME" >&6; }
# if toolchain path is specified then don't rely on system headers, they may not compile
HAVE_SYSTEM_FRAMEWORK_HEADERS=0
test -z "$TOOLCHAIN_PATH" && \
HAVE_SYSTEM_FRAMEWORK_HEADERS=`test ! -f /System/Library/Frameworks/Foundation.framework/Headers/Foundation.h; echo $?`
if test -z "$SYSROOT"; then
if test -n "$XCODEBUILD"; then
# if we don't have system headers, use default SDK name (last resort)
if test -z "$SDKNAME" -a $HAVE_SYSTEM_FRAMEWORK_HEADERS -eq 0; then
SDKNAME=${SDKNAME:-macosx}
fi
if test -n "$SDKNAME"; then
# Call xcodebuild to determine SYSROOT
SYSROOT=`"$XCODEBUILD" -sdk $SDKNAME -version | grep '^Path: ' | sed 's/Path: //'`
fi
else
if test $HAVE_SYSTEM_FRAMEWORK_HEADERS -eq 0; then
as_fn_error $? "No xcodebuild tool and no system framework headers found, use --with-sysroot or --with-sdk-name to provide a path to a valid SDK" "$LINENO" 5
fi
fi
else
# warn user if --with-sdk-name was also set
if test -n "$with_sdk_name"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Both SYSROOT and --with-sdk-name are set, only SYSROOT will be used" >&5
$as_echo "$as_me: WARNING: Both SYSROOT and --with-sdk-name are set, only SYSROOT will be used" >&2;}
fi
fi
if test $HAVE_SYSTEM_FRAMEWORK_HEADERS -eq 0 -a -z "$SYSROOT"; then
# If no system framework headers, then SYSROOT must be set, or we won't build
as_fn_error $? "Unable to determine SYSROOT and no headers found in /System/Library/Frameworks. Check Xcode configuration, --with-sysroot or --with-sdk-name arguments." "$LINENO" 5
fi
# Perform a basic sanity test
if test ! -f "$SYSROOT/System/Library/Frameworks/Foundation.framework/Headers/Foundation.h"; then
if test -z "$SYSROOT"; then
as_fn_error $? "Unable to find required framework headers, provide a path to an SDK via --with-sysroot or --with-sdk-name and be sure Xcode is installed properly" "$LINENO" 5
else
as_fn_error $? "Invalid SDK or SYSROOT path, dependent framework headers not found" "$LINENO" 5
fi
fi
# set SDKROOT too, Xcode tools will pick it up
SDKROOT=$SYSROOT
fi
# Prepend the extra path to the global path
if test "x$EXTRA_PATH" != x; then
@ -26596,21 +26715,28 @@ fi
VALID_TOOLCHAINS=${!toolchain_var_name}
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# On Mac OS X, default toolchain to clang after Xcode 5
XCODE_VERSION_OUTPUT=`xcodebuild -version 2>&1 | $HEAD -n 1`
$ECHO "$XCODE_VERSION_OUTPUT" | $GREP "Xcode " > /dev/null
if test $? -ne 0; then
as_fn_error $? "Failed to determine Xcode version." "$LINENO" 5
fi
XCODE_MAJOR_VERSION=`$ECHO $XCODE_VERSION_OUTPUT | \
$SED -e 's/^Xcode \([1-9][0-9.]*\)/\1/' | \
$CUT -f 1 -d .`
{ $as_echo "$as_me:${as_lineno-$LINENO}: Xcode major version: $XCODE_MAJOR_VERSION" >&5
if test -n "$XCODEBUILD"; then
# On Mac OS X, default toolchain to clang after Xcode 5
XCODE_VERSION_OUTPUT=`"$XCODEBUILD" -version 2>&1 | $HEAD -n 1`
$ECHO "$XCODE_VERSION_OUTPUT" | $GREP "Xcode " > /dev/null
if test $? -ne 0; then
as_fn_error $? "Failed to determine Xcode version." "$LINENO" 5
fi
XCODE_MAJOR_VERSION=`$ECHO $XCODE_VERSION_OUTPUT | \
$SED -e 's/^Xcode \([1-9][0-9.]*\)/\1/' | \
$CUT -f 1 -d .`
{ $as_echo "$as_me:${as_lineno-$LINENO}: Xcode major version: $XCODE_MAJOR_VERSION" >&5
$as_echo "$as_me: Xcode major version: $XCODE_MAJOR_VERSION" >&6;}
if test $XCODE_MAJOR_VERSION -ge 5; then
DEFAULT_TOOLCHAIN="clang"
if test $XCODE_MAJOR_VERSION -ge 5; then
DEFAULT_TOOLCHAIN="clang"
else
DEFAULT_TOOLCHAIN="gcc"
fi
else
DEFAULT_TOOLCHAIN="gcc"
# If Xcode is not installed, but the command line tools are
# then we can't run xcodebuild. On these systems we should
# default to clang
DEFAULT_TOOLCHAIN="clang"
fi
else
# First toolchain type in the list is the default
@ -41076,6 +41202,10 @@ $as_echo "$tool_specified" >&6; }
-L$SYSROOT/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$SYSROOT/usr/ccs/lib$OPENJDK_TARGET_CPU_ISADIR"
fi
elif test "x$OPENJDK_TARGET_OS" = xmacosx; then
# Apple only wants -isysroot <path>, but we also need -iframework<path>/System/Library/Frameworks
SYSROOT_CFLAGS="-isysroot \"$SYSROOT\" -iframework\"$SYSROOT/System/Library/Frameworks\""
SYSROOT_LDFLAGS=$SYSROOT_CFLAGS
elif test "x$TOOLCHAIN_TYPE" = xgcc; then
SYSROOT_CFLAGS="--sysroot=\"$SYSROOT\""
SYSROOT_LDFLAGS="--sysroot=\"$SYSROOT\""
@ -41089,6 +41219,14 @@ $as_echo "$tool_specified" >&6; }
LEGACY_EXTRA_LDFLAGS="$LEGACY_EXTRA_LDFLAGS $SYSROOT_LDFLAGS"
fi
# These always need to be set, or we can't find the frameworks embedded in JavaVM.framework
# set this here so it doesn't have to be peppered throughout the forest
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
SYSROOT_CFLAGS="$SYSROOT_CFLAGS -F\"$SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks\""
SYSROOT_LDFLAGS="$SYSROOT_LDFLAGS -F\"$SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks\""
fi
@ -42135,23 +42273,18 @@ fi
# Additional macosx handling
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
if test "x$TOOLCHAIN_TYPE" = xgcc; then
# FIXME: This needs to be exported in spec.gmk due to closed legacy code.
# FIXME: clean this up, and/or move it elsewhere.
# Setting these parameters makes it an error to link to macosx APIs that are
# newer than the given OS version and makes the linked binaries compatible
# even if built on a newer version of the OS.
# The expected format is X.Y.Z
MACOSX_VERSION_MIN=10.7.0
# Setting these parameters makes it an error to link to macosx APIs that are
# newer than the given OS version and makes the linked binaries compatible
# even if built on a newer version of the OS.
# The expected format is X.Y.Z
MACOSX_VERSION_MIN=10.7.0
# The macro takes the version with no dots, ex: 1070
# Let the flags variables get resolved in make for easier override on make
# command line.
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
LDFLAGS_JDK="$LDFLAGS_JDK -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
fi
# The macro takes the version with no dots, ex: 1070
# Let the flags variables get resolved in make for easier override on make
# command line.
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
LDFLAGS_JDK="$LDFLAGS_JDK -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
fi
# Setup some hard coded includes
@ -42654,8 +42787,6 @@ $as_echo_n "checking what is not needed on MacOSX?... " >&6; }
ALSA_NOT_NEEDED=yes
PULSE_NOT_NEEDED=yes
X11_NOT_NEEDED=yes
# If the java runtime framework is disabled, then we need X11.
# This will be adjusted below.
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: alsa pulse x11" >&5
$as_echo "alsa pulse x11" >&6; }
fi
@ -42676,33 +42807,6 @@ $as_echo "alsa" >&6; }
X11_NOT_NEEDED=yes
fi
###############################################################################
#
# Check for MacOSX support for OpenJDK.
#
# Check whether --enable-macosx-runtime-support was given.
if test "${enable_macosx_runtime_support+set}" = set; then :
enableval=$enable_macosx_runtime_support;
fi
if test "x$enable_macosx_runtime_support" != x; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Option --enable-macosx-runtime-support is deprecated and will be ignored." >&5
$as_echo "$as_me: WARNING: Option --enable-macosx-runtime-support is deprecated and will be ignored." >&2;}
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Mac OS X Java Framework" >&5
$as_echo_n "checking for Mac OS X Java Framework... " >&6; }
if test -f /System/Library/Frameworks/JavaVM.framework/Frameworks/JavaRuntimeSupport.framework/Headers/JavaRuntimeSupport.h; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: /System/Library/Frameworks/JavaVM.framework" >&5
$as_echo "/System/Library/Frameworks/JavaVM.framework" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi
###############################################################################
@ -47573,10 +47677,43 @@ done
# Check for the jpeg library
#
USE_EXTERNAL_LIBJPEG=true
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -ljpeg" >&5
$as_echo_n "checking for main in -ljpeg... " >&6; }
if ${ac_cv_lib_jpeg_main+:} false; then :
# Check whether --with-libjpeg was given.
if test "${with_libjpeg+set}" = set; then :
withval=$with_libjpeg;
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for which libjpeg to use" >&5
$as_echo_n "checking for which libjpeg to use... " >&6; }
# default is bundled
DEFAULT_LIBJPEG=bundled
#
# if user didn't specify, use DEFAULT_LIBJPEG
#
if test "x${with_libjpeg}" = "x"; then
with_libjpeg=${DEFAULT_LIBJPEG}
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libjpeg}" >&5
$as_echo "${with_libjpeg}" >&6; }
if test "x${with_libjpeg}" = "xbundled"; then
USE_EXTERNAL_LIBJPEG=false
elif test "x${with_libjpeg}" = "xsystem"; then
ac_fn_cxx_check_header_mongrel "$LINENO" "jpeglib.h" "ac_cv_header_jpeglib_h" "$ac_includes_default"
if test "x$ac_cv_header_jpeglib_h" = xyes; then :
else
as_fn_error $? "--with-libjpeg=system specified, but jpeglib.h not found!" "$LINENO" 5
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for jpeg_CreateDecompress in -ljpeg" >&5
$as_echo_n "checking for jpeg_CreateDecompress in -ljpeg... " >&6; }
if ${ac_cv_lib_jpeg_jpeg_CreateDecompress+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@ -47584,27 +47721,33 @@ LIBS="-ljpeg $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
/* Override any GCC internal prototype to avoid an error.
Use char because int might match the return type of a GCC
builtin and then its argument prototype would still apply. */
#ifdef __cplusplus
extern "C"
#endif
char jpeg_CreateDecompress ();
int
main ()
{
return main ();
return jpeg_CreateDecompress ();
;
return 0;
}
_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
ac_cv_lib_jpeg_main=yes
ac_cv_lib_jpeg_jpeg_CreateDecompress=yes
else
ac_cv_lib_jpeg_main=no
ac_cv_lib_jpeg_jpeg_CreateDecompress=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_jpeg_main" >&5
$as_echo "$ac_cv_lib_jpeg_main" >&6; }
if test "x$ac_cv_lib_jpeg_main" = xyes; then :
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_jpeg_jpeg_CreateDecompress" >&5
$as_echo "$ac_cv_lib_jpeg_jpeg_CreateDecompress" >&6; }
if test "x$ac_cv_lib_jpeg_jpeg_CreateDecompress" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_LIBJPEG 1
_ACEOF
@ -47612,13 +47755,16 @@ _ACEOF
LIBS="-ljpeg $LIBS"
else
USE_EXTERNAL_LIBJPEG=false
{ $as_echo "$as_me:${as_lineno-$LINENO}: Will use jpeg decoder bundled with the OpenJDK source" >&5
$as_echo "$as_me: Will use jpeg decoder bundled with the OpenJDK source" >&6;}
as_fn_error $? "--with-libjpeg=system specified, but no libjpeg found" "$LINENO" 5
fi
USE_EXTERNAL_LIBJPEG=true
else
as_fn_error $? "Invalid use of --with-libjpeg: ${with_libjpeg}, use 'system' or 'bundled'" "$LINENO" 5
fi
###############################################################################
#

View File

@ -65,8 +65,6 @@ AC_DEFUN_ONCE([LIB_SETUP_INIT],
ALSA_NOT_NEEDED=yes
PULSE_NOT_NEEDED=yes
X11_NOT_NEEDED=yes
# If the java runtime framework is disabled, then we need X11.
# This will be adjusted below.
AC_MSG_RESULT([alsa pulse x11])
fi
@ -83,20 +81,6 @@ AC_DEFUN_ONCE([LIB_SETUP_INIT],
if test "x$SUPPORT_HEADFUL" = xno; then
X11_NOT_NEEDED=yes
fi
###############################################################################
#
# Check for MacOSX support for OpenJDK.
#
BASIC_DEPRECATED_ARG_ENABLE(macosx-runtime-support, macosx_runtime_support)
AC_MSG_CHECKING([for Mac OS X Java Framework])
if test -f /System/Library/Frameworks/JavaVM.framework/Frameworks/JavaRuntimeSupport.framework/Headers/JavaRuntimeSupport.h; then
AC_MSG_RESULT([/System/Library/Frameworks/JavaVM.framework])
else
AC_MSG_RESULT([no])
fi
])
AC_DEFUN_ONCE([LIB_SETUP_X11],
@ -620,11 +604,36 @@ AC_DEFUN_ONCE([LIB_SETUP_MISC_LIBS],
# Check for the jpeg library
#
USE_EXTERNAL_LIBJPEG=true
AC_CHECK_LIB(jpeg, main, [],
[ USE_EXTERNAL_LIBJPEG=false
AC_MSG_NOTICE([Will use jpeg decoder bundled with the OpenJDK source])
])
AC_ARG_WITH(libjpeg, [AS_HELP_STRING([--with-libjpeg],
[use libjpeg from build system or OpenJDK source (system, bundled) @<:@bundled@:>@])])
AC_MSG_CHECKING([for which libjpeg to use])
# default is bundled
DEFAULT_LIBJPEG=bundled
#
# if user didn't specify, use DEFAULT_LIBJPEG
#
if test "x${with_libjpeg}" = "x"; then
with_libjpeg=${DEFAULT_LIBJPEG}
fi
AC_MSG_RESULT(${with_libjpeg})
if test "x${with_libjpeg}" = "xbundled"; then
USE_EXTERNAL_LIBJPEG=false
elif test "x${with_libjpeg}" = "xsystem"; then
AC_CHECK_HEADER(jpeglib.h, [],
[ AC_MSG_ERROR([--with-libjpeg=system specified, but jpeglib.h not found!])])
AC_CHECK_LIB(jpeg, jpeg_CreateDecompress, [],
[ AC_MSG_ERROR([--with-libjpeg=system specified, but no libjpeg found])])
USE_EXTERNAL_LIBJPEG=true
else
AC_MSG_ERROR([Invalid use of --with-libjpeg: ${with_libjpeg}, use 'system' or 'bundled'])
fi
AC_SUBST(USE_EXTERNAL_LIBJPEG)
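  # (Usage illustration, not part of the change.) The new switch is driven from configure:
  #   bash ./configure --with-libjpeg=system    # require jpeglib.h and -ljpeg from the build system
  #   bash ./configure --with-libjpeg=bundled   # default: use the copy in the OpenJDK source
  # Any other value fails with "Invalid use of --with-libjpeg".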
###############################################################################

View File

@ -84,7 +84,7 @@ AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_CPU],
VAR_CPU_BITS=32
VAR_CPU_ENDIAN=big
;;
sparcv9)
sparcv9|sparc64)
VAR_CPU=sparcv9
VAR_CPU_ARCH=sparc
VAR_CPU_BITS=64

View File

@ -347,6 +347,9 @@ CPP:=@FIXPATH@ @CPP@
# The linker can be gcc or ld on posix systems, or link.exe on windows systems.
LD:=@FIXPATH@ @LD@
# Xcode SDK path
SDKROOT:=@SDKROOT@
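# (Note, illustrative.) configure fills SDKROOT in from SYSROOT ("set SDKROOT too, Xcode tools
# will pick it up" above), and makefiles that run outside the normal flag setup, such as the
# SA agent makefile further down, read $(SDKROOT) to locate JavaNativeFoundation.framework when
# SYSROOT_CFLAGS is empty.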
# The linker on older SuSE distros (e.g. on SLES 10) complains with:
# "Invalid version tag `SUNWprivate_1.1'. Only anonymous version tag is allowed in executable."
# if feeded with a version script which contains named tags.
@ -544,7 +547,7 @@ SETFILE:=@SETFILE@
XATTR:=@XATTR@
JT_HOME:=@JT_HOME@
JTREGEXE:=@JTREGEXE@
XCODEBUILD=@XCODEBUILD@
FIXPATH:=@FIXPATH@
# Where the build output is stored for your convenience.

View File

@ -98,20 +98,27 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETERMINE_TOOLCHAIN_TYPE],
VALID_TOOLCHAINS=${!toolchain_var_name}
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# On Mac OS X, default toolchain to clang after Xcode 5
XCODE_VERSION_OUTPUT=`xcodebuild -version 2>&1 | $HEAD -n 1`
$ECHO "$XCODE_VERSION_OUTPUT" | $GREP "Xcode " > /dev/null
if test $? -ne 0; then
AC_MSG_ERROR([Failed to determine Xcode version.])
fi
XCODE_MAJOR_VERSION=`$ECHO $XCODE_VERSION_OUTPUT | \
$SED -e 's/^Xcode \(@<:@1-9@:>@@<:@0-9.@:>@*\)/\1/' | \
$CUT -f 1 -d .`
AC_MSG_NOTICE([Xcode major version: $XCODE_MAJOR_VERSION])
if test $XCODE_MAJOR_VERSION -ge 5; then
DEFAULT_TOOLCHAIN="clang"
if test -n "$XCODEBUILD"; then
# On Mac OS X, default toolchain to clang after Xcode 5
XCODE_VERSION_OUTPUT=`"$XCODEBUILD" -version 2>&1 | $HEAD -n 1`
$ECHO "$XCODE_VERSION_OUTPUT" | $GREP "Xcode " > /dev/null
if test $? -ne 0; then
AC_MSG_ERROR([Failed to determine Xcode version.])
fi
XCODE_MAJOR_VERSION=`$ECHO $XCODE_VERSION_OUTPUT | \
$SED -e 's/^Xcode \(@<:@1-9@:>@@<:@0-9.@:>@*\)/\1/' | \
$CUT -f 1 -d .`
AC_MSG_NOTICE([Xcode major version: $XCODE_MAJOR_VERSION])
if test $XCODE_MAJOR_VERSION -ge 5; then
DEFAULT_TOOLCHAIN="clang"
else
DEFAULT_TOOLCHAIN="gcc"
fi
else
DEFAULT_TOOLCHAIN="gcc"
# If Xcode is not installed, but the command line tools are
# then we can't run xcodebuild. On these systems we should
# default to clang
DEFAULT_TOOLCHAIN="clang"
fi
else
# First toolchain type in the list is the default
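A standalone sketch of the Xcode version probe above (the reported version string is illustrative):

  XCODE_VERSION_OUTPUT=`xcodebuild -version 2>&1 | head -n 1`   # e.g. "Xcode 5.1.1"
  XCODE_MAJOR_VERSION=`echo $XCODE_VERSION_OUTPUT | sed -e 's/^Xcode \([1-9][0-9.]*\)/\1/' | cut -f 1 -d .`
  echo $XCODE_MAJOR_VERSION                                     # 5, so DEFAULT_TOOLCHAIN=clang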

View File

@ -263,3 +263,4 @@ e54022d0dd92106fff7f7fe670010cd7e6517ee3 jdk9-b15
77565aaaa2bb814e94817e92d680168052a25395 jdk9-b18
eecc1b6adc7e193d00a0641eb0963add5a4c06e8 jdk9-b19
87f36eecb1665012d01c5cf102494e591c943ea6 jdk9-b20
3615a4e7f0542ca7552ad6454b742c73ee211d8e jdk9-b21

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,8 @@ import java.io.IOException;
import java.io.OutputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectOutput;
import java.util.Hashtable;
import java.util.Map;
import java.util.HashMap;
import org.omg.CORBA.INTERNAL;
@ -49,7 +50,7 @@ public abstract class OutputStreamHook extends ObjectOutputStream
*/
private class HookPutFields extends ObjectOutputStream.PutField
{
private Hashtable fields = new Hashtable();
private Map<String,Object> fields = new HashMap<>();
/**
* Put the value of the named boolean field into the persistent field.
@ -140,7 +141,6 @@ public abstract class OutputStreamHook extends ObjectOutputStream
public OutputStreamHook()
throws java.io.IOException {
super();
}
public void defaultWriteObject() throws IOException {

View File

@ -1,7 +1,7 @@
#!/bin/sh
#
# Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -25,9 +25,48 @@
# questions.
#
# Get clones of all nested repositories
sh ./common/bin/hgforest.sh clone "$@" || exit 1
# Version check
# required
reqdmajor=1
reqdminor=5
reqdrev=0
# requested
rqstmajor=2
rqstminor=6
rqstrev=3
# installed
hgwhere="`which hg 2> /dev/null | grep -v '^no hg in '`"
if [ "x$hgwhere" = "x" ]; then
echo "ERROR: Could not locate Mercurial command" >&2
exit 126
fi
hgversion="`hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \(.*\))\$@\1@p'`"
if [ "x${hgversion}" = "x" ] ; then
echo "ERROR: Could not determine Mercurial version" >&2
exit 126
fi
hgmajor="`echo $hgversion | cut -f 1 -d .`"
hgminor="`echo $hgversion | cut -f 2 -d .`"
hgrev="`echo $hgversion.0 | cut -f 3 -d .`" # rev is omitted for minor and major releases
# Require
if [ $hgmajor -lt $reqdmajor -o \( $hgmajor -eq $reqdmajor -a $hgminor -lt $reqdminor \) -o \( $hgmajor -eq $reqdmajor -a $hgminor -eq $reqdminor -a $hgrev -lt $reqdrev \) ] ; then
echo "ERROR: Mercurial version $reqdmajor.$reqdminor.$reqdrev or later is required. $hgwhere is version $hgversion" >&2
exit 126
fi
# Request
if [ $hgmajor -lt $rqstmajor -o \( $hgmajor -eq $rqstmajor -a $hgminor -lt $rqstminor \) -o \( $hgmajor -eq $rqstmajor -a $hgminor -eq $rqstminor -a $hgrev -lt $rqstrev \) ] ; then
echo "WARNING: Mercurial version $rqstmajor.$rqstminor.$rqstrev or later is recommended. $hgwhere is version $hgversion" >&2
fi
# Get clones of all absent nested repositories (harmless if already exist)
sh ./common/bin/hgforest.sh clone "$@" || exit $?
# Update all existing repositories to the latest sources
sh ./common/bin/hgforest.sh pull -u
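A standalone sketch of how the Mercurial version string is split above (the version shown is
illustrative):

  hgversion=2.6.3
  hgmajor="`echo $hgversion | cut -f 1 -d .`"     # 2
  hgminor="`echo $hgversion | cut -f 2 -d .`"     # 6
  hgrev="`echo $hgversion.0 | cut -f 3 -d .`"     # 3; the appended ".0" yields 0 for releases like "2.6"
  # the two compound tests then compare (major, minor, rev) field by field against the
  # required 1.5.0 and the requested 2.6.3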

View File

@ -423,3 +423,4 @@ b14e7c0b7d3ec04127f565cda1d84122e205680c jdk9-b16
871fd128548480095e0dc3fc34c422666baeec75 jdk9-b18
d4cffb3ae6213c66c7522ebffe0349360a45f0ef jdk9-b19
c1af79d122ec9f715fa29312b5e91763f3a4dfc4 jdk9-b20
17b4a5e831b398738feedb0afe75245744510153 jdk9-b21

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#import <Foundation/Foundation.h>
#import <JavaNativeFoundation/JavaNativeFoundation.h>
#include <JavaVM/jni.h>
#include <jni.h>
#import <mach/mach.h>
#import <mach/mach_types.h>

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -50,9 +50,9 @@ SOURCES = symtab.c \
ps_core.c
OBJS = $(SOURCES:.c=.o)
OBJSPLUS = MacosxDebuggerLocal.o sadis.o $(OBJS)
EXTINCLUDE = -I/System/Library/Frameworks/JavaVM.framework/Headers -I.
EXTINCLUDE = -I.
EXTCFLAGS = -m64 -D__APPLE__ -framework JavaNativeFoundation
FOUNDATIONFLAGS = -framework Foundation -F/System/Library/Frameworks/JavaVM.framework/Frameworks -framework JavaNativeFoundation -framework Security -framework CoreFoundation
FOUNDATIONFLAGS = -framework Foundation -framework JavaNativeFoundation -framework Security -framework CoreFoundation
LIBSA = $(ARCH)/libsaproc.dylib
endif # Darwin

View File

@ -34,7 +34,7 @@
#include "libproc_md.h"
#endif
#include <linux/ptrace.h>
#include <sys/ptrace.h>
/************************************************************************************

View File

@ -263,7 +263,7 @@ static bool add_new_thread(struct ps_prochandle* ph, pthread_t pthread_id, lwpid
static bool read_lib_info(struct ps_prochandle* ph) {
char fname[32];
char buf[256];
char buf[PATH_MAX];
FILE *fp = NULL;
sprintf(fname, "/proc/%d/maps", ph->pid);
@ -273,10 +273,41 @@ static bool read_lib_info(struct ps_prochandle* ph) {
return false;
}
while(fgets_no_cr(buf, 256, fp)){
char * word[6];
int nwords = split_n_str(buf, 6, word, ' ', '\0');
if (nwords > 5 && find_lib(ph, word[5]) == false) {
while(fgets_no_cr(buf, PATH_MAX, fp)){
char * word[7];
int nwords = split_n_str(buf, 7, word, ' ', '\0');
if (nwords < 6) {
// not a shared library entry. ignore.
continue;
}
// SA does not handle the lines with patterns:
// "[stack]", "[heap]", "[vdso]", "[vsyscall]", etc.
if (word[5][0] == '[') {
// not a shared library entry. ignore.
continue;
}
if (nwords > 6) {
// prelink altered mapfile when the program is running.
// Entries like one below have to be skipped
// /lib64/libc-2.15.so (deleted)
// SO name in entries like one below have to be stripped.
// /lib64/libpthread-2.15.so.#prelink#.EECVts
char *s = strstr(word[5],".#prelink#");
if (s == NULL) {
// No prelink keyword. skip deleted library
print_debug("skip shared object %s deleted by prelink\n", word[5]);
continue;
}
// Fall through
print_debug("rectifying shared object name %s changed by prelink\n", word[5]);
*s = 0;
}
if (find_lib(ph, word[5]) == false) {
intptr_t base;
lib_info* lib;
#ifdef _LP64

View File

@ -64,9 +64,23 @@ ifeq ($(OS_VENDOR), FreeBSD)
else
ifeq ($(OS_VENDOR), Darwin)
SASRCFILES = $(DARWIN_NON_STUB_SASRCFILES)
SALIBS = -g -framework Foundation -F/System/Library/Frameworks/JavaVM.framework/Frameworks -framework JavaNativeFoundation -framework Security -framework CoreFoundation
SALIBS = -g \
-framework Foundation \
-framework JavaNativeFoundation \
-framework Security \
-framework CoreFoundation
#objc compiler blows up on -march=i586, perhaps it should not be included in the macosx intel 32-bit C++ compiles?
SAARCH = $(subst -march=i586,,$(ARCHFLAG))
# This is needed to locate JavaNativeFoundation.framework
ifeq ($(SYSROOT_CFLAGS),)
# this will happen when building without spec.gmk, set SDKROOT to a valid SDK
# path if your system does not have headers installed in the system frameworks
SA_SYSROOT_FLAGS = -F"$(SDKROOT)/System/Library/Frameworks/JavaVM.framework/Frameworks"
else
# Just use SYSROOT_CFLAGS
SA_SYSROOT_FLAGS=$(SYSROOT_CFLAGS)
endif
else
SASRCFILES = $(SASRCDIR)/StubDebuggerLocal.c
SALIBS =
@ -100,14 +114,8 @@ SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE))
endif
SA_LFLAGS += $(LDFLAGS_HASH_STYLE)
ifeq ($(OS_VENDOR), Darwin)
BOOT_JAVA_INCLUDES = -I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(shell uname -s | tr "[:upper:]" "[:lower:]") \
-I/System/Library/Frameworks/JavaVM.framework/Headers
else
BOOT_JAVA_INCLUDES = -I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(shell uname -s | tr "[:upper:]" "[:lower:]")
endif
BOOT_JAVA_INCLUDES = -I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(shell uname -s | tr "[:upper:]" "[:lower:]")
$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
@ -116,6 +124,7 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
fi
@echo Making SA debugger back-end...
$(QUIETLY) $(CC) -D$(BUILDARCH) -D_GNU_SOURCE \
$(SA_SYSROOT_FLAGS) \
$(SYMFLAG) $(SAARCH) $(SHARED_FLAG) $(PICFLAG) \
-I$(SASRCDIR) \
-I$(GENERATED) \

View File

@ -295,6 +295,7 @@ endif
$(PRECOMPILED_HEADER):
$(QUIETLY) echo Generating precompiled header $@
$(QUIETLY) mkdir -p $(PRECOMPILED_HEADER_DIR)
$(QUIETLY) rm -f $@
$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
# making the library:

View File

@ -290,6 +290,7 @@ LINK_VM = $(LINK_LIB.CC)
$(PRECOMPILED_HEADER):
$(QUIETLY) echo Generating precompiled header $@
$(QUIETLY) mkdir -p $(PRECOMPILED_HEADER_DIR)
$(QUIETLY) rm -f $@
$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
# making the library:

View File

@ -1,6 +1,6 @@
@echo off
REM
REM Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
REM Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
REM
REM This code is free software; you can redistribute it and/or modify it
@ -81,33 +81,8 @@ REM figure out MSC version
for /F %%i in ('sh %HotSpotWorkSpace%/make/windows/get_msc_ver.sh') do set %%i
echo **************************************************************
set ProjectFile=%HotSpotBuildSpace%\jvm.vcproj
echo MSC_VER = "%MSC_VER%"
if "%MSC_VER%" == "1200" (
set ProjectFile=%HotSpotBuildSpace%\jvm.dsp
echo Will generate VC6 project {unsupported}
) else (
if "%MSC_VER%" == "1400" (
echo Will generate VC8 {Visual Studio 2005}
) else (
if "%MSC_VER%" == "1500" (
echo Will generate VC9 {Visual Studio 2008}
) else (
if "%MSC_VER%" == "1600" (
echo Will generate VC10 {Visual Studio 2010}
set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj
) else (
if "%MSC_VER%" == "1700" (
echo Will generate VC10 {compatible with Visual Studio 2012}
echo After opening in VS 2012, click "Update" when prompted.
set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj
) else (
echo Will generate VC7 project {Visual Studio 2003 .NET}
)
)
)
)
)
echo %ProjectFile%
echo **************************************************************

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -63,28 +63,20 @@ CXX_FLAGS=$(CXX_FLAGS) /Zi
# Based on BUILDARCH we add some flags and select the default compiler name
!if "$(BUILDARCH)" == "ia64"
MACHINE=IA64
DEFAULT_COMPILER_NAME=VS2003
CXX_FLAGS=$(CXX_FLAGS) /D "CC_INTERP" /D "_LP64" /D "IA64"
!endif
!if "$(BUILDARCH)" == "amd64"
MACHINE=AMD64
DEFAULT_COMPILER_NAME=VS2005
CXX_FLAGS=$(CXX_FLAGS) /D "_LP64" /D "AMD64"
LP64=1
!endif
!if "$(BUILDARCH)" == "i486"
MACHINE=I386
DEFAULT_COMPILER_NAME=VS2003
CXX_FLAGS=$(CXX_FLAGS) /D "IA32"
!endif
# Sanity check, this is the default if not amd64, ia64, or i486
!ifndef DEFAULT_COMPILER_NAME
CXX=ARCH_ERROR
!endif
CXX_FLAGS=$(CXX_FLAGS) /D "WIN32" /D "_WINDOWS"
# Must specify this for sharedRuntimeTrig.cpp
CXX_FLAGS=$(CXX_FLAGS) /D "VM_LITTLE_ENDIAN"
@ -112,6 +104,7 @@ CXX_FLAGS=$(CXX_FLAGS) /D TARGET_COMPILER_visCPP
# 1500 is for VS2008
# 1600 is for VS2010
# 1700 is for VS2012
# 1800 is for VS2013
# Do not confuse this MSC_VER with the predefined macro _MSC_VER that the
# compiler provides, when MSC_VER==1399, _MSC_VER will be 1400.
# Normally they are the same, but a pre-release of the VS2005 compilers
@ -119,35 +112,6 @@ CXX_FLAGS=$(CXX_FLAGS) /D TARGET_COMPILER_visCPP
# closer to VS2003 in terms of option spellings, so we use 1399 for that
# 1400 version that really isn't 1400.
# See the file get_msc_ver.sh for more info.
!if "x$(MSC_VER)" == "x"
COMPILER_NAME=$(DEFAULT_COMPILER_NAME)
!else
!if "$(MSC_VER)" == "1200"
COMPILER_NAME=VC6
!endif
!if "$(MSC_VER)" == "1300"
COMPILER_NAME=VS2003
!endif
!if "$(MSC_VER)" == "1310"
COMPILER_NAME=VS2003
!endif
!if "$(MSC_VER)" == "1399"
# Compiler might say 1400, but if it's 14.00.30701, it isn't really VS2005
COMPILER_NAME=VS2003
!endif
!if "$(MSC_VER)" == "1400"
COMPILER_NAME=VS2005
!endif
!if "$(MSC_VER)" == "1500"
COMPILER_NAME=VS2008
!endif
!if "$(MSC_VER)" == "1600"
COMPILER_NAME=VS2010
!endif
!if "$(MSC_VER)" == "1700"
COMPILER_NAME=VS2012
!endif
!endif
# By default, we do not want to use the debug version of the msvcrt.dll file
# but if MFC_DEBUG is defined in the environment it will be used.
@ -165,60 +129,6 @@ MS_RUNTIME_OPTION = $(MS_RUNTIME_OPTION) $(STATIC_CPPLIB_OPTION)
!endif
CXX_FLAGS=$(CXX_FLAGS) $(MS_RUNTIME_OPTION)
# How /GX option is spelled
GX_OPTION = /GX
# Optimization settings for various versions of the compilers and types of
# builds. Three basic sets of settings: product, fastdebug, and debug.
# These get added into CXX_FLAGS as needed by other makefiles.
!if "$(COMPILER_NAME)" == "VC6"
PRODUCT_OPT_OPTION = /Ox /Os /Gy /GF
FASTDEBUG_OPT_OPTION = /Ox /Os /Gy /GF
DEBUG_OPT_OPTION = /Od
!endif
!if "$(COMPILER_NAME)" == "VS2003"
PRODUCT_OPT_OPTION = /O2 /Oy-
FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
SAFESEH_FLAG = /SAFESEH
!endif
!if "$(COMPILER_NAME)" == "VS2005"
PRODUCT_OPT_OPTION = /O2 /Oy-
FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
GX_OPTION = /EHsc
# This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib
# on the link command line, otherwise we get missing __security_check_cookie
# externals at link time. Even with /GS-, you need bufferoverflowU.lib.
# NOTE: Currently we decided to not use /GS-
BUFFEROVERFLOWLIB = bufferoverflowU.lib
LD_FLAGS = /manifest $(LD_FLAGS) $(BUFFEROVERFLOWLIB)
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
!if "x$(MT)" == "x"
MT=mt.exe
!endif
SAFESEH_FLAG = /SAFESEH
!endif
!if "$(COMPILER_NAME)" == "VS2008"
PRODUCT_OPT_OPTION = /O2 /Oy-
FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
GX_OPTION = /EHsc
LD_FLAGS = /manifest $(LD_FLAGS)
MP_FLAG = /MP
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
!if "x$(MT)" == "x"
MT=mt.exe
!endif
SAFESEH_FLAG = /SAFESEH
!endif
!if "$(COMPILER_NAME)" == "VS2010"
PRODUCT_OPT_OPTION = /O2 /Oy-
FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
@ -233,26 +143,6 @@ MT=mt.exe
!if "$(BUILDARCH)" == "i486"
LD_FLAGS = /SAFESEH $(LD_FLAGS)
!endif
!endif
!if "$(COMPILER_NAME)" == "VS2012"
PRODUCT_OPT_OPTION = /O2 /Oy-
FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
GX_OPTION = /EHsc
LD_FLAGS = /manifest $(LD_FLAGS)
MP_FLAG = /MP
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
!if "x$(MT)" == "x"
MT=mt.exe
!endif
SAFESEH_FLAG = /SAFESEH
!endif
!if "$(BUILDARCH)" == "i486"
LD_FLAGS = $(SAFESEH_FLAG) $(LD_FLAGS)
!endif
CXX_FLAGS = $(CXX_FLAGS) $(MP_FLAG)

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -49,35 +49,8 @@ BOOT_TARGET_CLASS_VERSION=6
JAVAC_FLAGS=-g -encoding ascii
BOOTSTRAP_JAVAC_FLAGS=$(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
ProjectFile=jvm.vcproj
!if "$(MSC_VER)" == "1200"
VcVersion=VC6
ProjectFile=jvm.dsp
!elseif "$(MSC_VER)" == "1400"
VcVersion=VC8
!elseif "$(MSC_VER)" == "1500"
VcVersion=VC9
!elseif "$(MSC_VER)" == "1600"
VcVersion=VC10
ProjectFile=jvm.vcxproj
!elseif "$(MSC_VER)" == "1700"
# This is VS2012, but it loads VS10 projects just fine (and will
# VS2012 and VS2013 loads VS10 projects just fine (and will
# upgrade them automatically to VS2012 format).
VcVersion=VC10
ProjectFile=jvm.vcxproj
!else
VcVersion=VC7
!endif

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -85,14 +85,9 @@ checkAndBuildSA:: $(SAWINDBG)
# will be useful to have the assertion checks in place
!if "$(BUILDARCH)" == "ia64"
SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 $(GX_OPTION) -Od -D "WIN32" -D "WIN64" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -YX -FD -c
SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 $(GX_OPTION) -Od -D "WIN32" -D "WIN64" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -FD -c
!elseif "$(BUILDARCH)" == "amd64"
SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 $(GX_OPTION) -Od -D "WIN32" -D "WIN64" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -YX -FD -c
!if "$(COMPILER_NAME)" == "VS2005"
# On amd64, VS2005 compiler requires bufferoverflowU.lib on the link command line,
# otherwise we get missing __security_check_cookie externals at link time.
SA_LD_FLAGS = bufferoverflowU.lib
!endif
SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 $(GX_OPTION) -Od -D "WIN32" -D "WIN64" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -FD -c
!else
SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 $(GX_OPTION) -Od -D "WIN32" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -FD -RTC1 -c
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -27,9 +27,9 @@
all: checkCL checkLink
checkCL:
@ if "$(MSC_VER)" NEQ "1310" if "$(MSC_VER)" NEQ "1399" if "$(MSC_VER)" NEQ "1400" if "$(MSC_VER)" NEQ "1500" if "$(MSC_VER)" NEQ "1600" if "$(MSC_VER)" NEQ "1700" \
echo *** WARNING *** unrecognized cl.exe version $(MSC_VER) ($(RAW_MSC_VER)). Use FORCE_MSC_VER to override automatic detection.
@ if "$(MSC_VER)" NEQ "1600" if "$(MSC_VER)" NEQ "1700" if "$(MSC_VER)" NEQ "1800" \
echo *** WARNING *** Unsupported cl.exe version detected: $(MSC_VER) ($(RAW_MSC_VER)), only 1600/1700/1800 (Visual Studio 2010/2012/2013) are supported.
checkLink:
@ if "$(LD_VER)" NEQ "710" if "$(LD_VER)" NEQ "800" if "$(LD_VER)" NEQ "900" if "$(LD_VER)" NEQ "1000" if "$(LD_VER)" NEQ "1100" \
echo *** WARNING *** unrecognized link.exe version $(LD_VER) ($(RAW_LD_VER)). Use FORCE_LD_VER to override automatic detection.
@ if "$(LD_VER)" NEQ "1000" if "$(LD_VER)" NEQ "1100" if "$(LD_VER)" NEQ "1200" \
echo *** WARNING *** Unsupported link.exe version detected: $(LD_VER) ($(RAW_LD_VER)), only 1000/1100/1200 (Visual Studio 2010/2012/2013) are supported.

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -132,7 +132,7 @@ CXX_DONT_USE_PCH=/D DONT_USE_PRECOMPILED_HEADER
!if "$(USE_PRECOMPILED_HEADER)" != "0"
CXX_USE_PCH=/Fp"vm.pch" /Yu"precompiled.hpp"
!if "$(COMPILER_NAME)" == "VS2012"
!if "$(MSC_VER)" > "1600"
# VS2012 requires this object file to be listed:
LD_FLAGS=$(LD_FLAGS) _build_pch_file.obj
!endif

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -120,7 +120,6 @@ ReleaseOptions = -define HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) -def
ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) $(ReleaseOptions)
$(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
@if "$(MSC_VER)"=="1500" echo Make sure you have VS2008 SP1 or later, or you may see 'expanded command line too long'
@$(RUN_JAVA) -Djava.class.path="$(HOTSPOTBUILDSPACE)/classes" ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)
clean:

View File

@ -625,6 +625,7 @@ int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
__ lea(rscratch1, polling_page);
offset = __ offset();
add_debug_info_for_branch(info);
__ relocate(relocInfo::poll_type);
__ testl(rax, Address(rscratch1, 0));
} else {
add_debug_info_for_branch(info);

View File

@ -786,7 +786,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
case os::java_thread:
// Java threads use ThreadStackSize which default value can be
// changed with the flag -Xss
assert (JavaThread::stack_size_at_create() > 0, "this should be set");
assert(JavaThread::stack_size_at_create() > 0, "this should be set");
stack_size = JavaThread::stack_size_at_create();
break;
case os::compiler_thread:
@ -1303,7 +1303,7 @@ bool os::dll_build_name(char* buffer, size_t buflen,
if (pelements == NULL) {
return false;
}
for (int i = 0 ; i < n ; i++) {
for (int i = 0; i < n; i++) {
// Really shouldn't be NULL, but check can't hurt
if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
continue; // skip the empty path values
@ -1316,7 +1316,7 @@ bool os::dll_build_name(char* buffer, size_t buflen,
}
}
// release the storage
for (int i = 0 ; i < n ; i++) {
for (int i = 0; i < n; i++) {
if (pelements[i] != NULL) {
FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
}
@ -1467,7 +1467,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
bool failed_to_read_elf_head=
(sizeof(elf_head)!=
(::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
(::read(file_descriptor, &elf_head,sizeof(elf_head))));
::close(file_descriptor);
if (failed_to_read_elf_head) {
@ -1565,7 +1565,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
int running_arch_index=-1;
for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
if (running_arch_code == arch_array[i].code) {
running_arch_index = i;
}
@ -1596,7 +1596,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
#endif // !S390
if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
if ( lib_arch.name!=NULL ) {
if (lib_arch.name!=NULL) {
::snprintf(diag_msg_buf, diag_msg_max_length-1,
" (Possible cause: can't load %s-bit .so on a %s-bit platform)",
lib_arch.name, arch_array[running_arch_index].name);
@ -2598,7 +2598,7 @@ void os::yield() {
sched_yield();
}
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
void os::yield_all() {
// Yields to all threads, including threads with lower priorities
@ -2686,7 +2686,7 @@ static int prio_init() {
}
OSReturn os::set_native_priority(Thread* thread, int newpri) {
if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) return OS_OK;
if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;
#ifdef __OpenBSD__
// OpenBSD pthread_setprio starves low priority threads
@ -2713,7 +2713,7 @@ OSReturn os::set_native_priority(Thread* thread, int newpri) {
}
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) {
if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
*priority_ptr = java_to_os_priority[NormPriority];
return OS_OK;
}
@ -3079,7 +3079,7 @@ bool os::Bsd::chained_handler(int sig, siginfo_t* siginfo, void* context) {
}
struct sigaction* os::Bsd::get_preinstalled_handler(int sig) {
if ((( (unsigned int)1 << sig ) & sigs) != 0) {
if ((((unsigned int)1 << sig) & sigs) != 0) {
return &sigact[sig];
}
return NULL;
@ -3300,7 +3300,7 @@ static void print_signal_handler(outputStream* st, int sig,
address rh = VMError::get_resetted_sighandler(sig);
// May be, handler was resetted by VMError?
if(rh != NULL) {
if (rh != NULL) {
handler = rh;
sa.sa_flags = VMError::get_resetted_sigflags(sig) & SIGNIFICANT_SIGNAL_MASK;
}
@ -3309,11 +3309,11 @@ static void print_signal_handler(outputStream* st, int sig,
os::Posix::print_sa_flags(st, sa.sa_flags);
// Check: is it our handler?
if(handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
// It is our signal handler
// check for flags, reset system-used one!
if((int)sa.sa_flags != os::Bsd::get_our_sigflags(sig)) {
if ((int)sa.sa_flags != os::Bsd::get_our_sigflags(sig)) {
st->print(
", flags was changed from " PTR32_FORMAT ", consider using jsig library",
os::Bsd::get_our_sigflags(sig));
@ -3382,10 +3382,10 @@ void os::Bsd::check_signal_handler(int sig) {
address thisHandler = (act.sa_flags & SA_SIGINFO)
? CAST_FROM_FN_PTR(address, act.sa_sigaction)
: CAST_FROM_FN_PTR(address, act.sa_handler) ;
: CAST_FROM_FN_PTR(address, act.sa_handler);
switch(sig) {
switch (sig) {
case SIGSEGV:
case SIGBUS:
case SIGFPE:
@ -3515,22 +3515,22 @@ jint os::init_2(void)
{
// Allocate a single page and mark it as readable for safepoint polling
address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" );
guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");
os::set_polling_page( polling_page );
os::set_polling_page(polling_page);
#ifndef PRODUCT
if(Verbose && PrintMiscellaneous)
if (Verbose && PrintMiscellaneous)
tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif
if (!UseMembar) {
address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
guarantee(mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page(mem_serialize_page);
#ifndef PRODUCT
if(Verbose && PrintMiscellaneous)
if (Verbose && PrintMiscellaneous)
tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
}
@ -3631,13 +3631,13 @@ void os::init_3(void) { }
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if( !guard_memory((char*)_polling_page, Bsd::page_size()) )
if (!guard_memory((char*)_polling_page, Bsd::page_size()))
fatal("Could not disable polling page");
};
// Mark the polling page as readable
void os::make_polling_page_readable(void) {
if( !bsd_mprotect((char *)_polling_page, Bsd::page_size(), PROT_READ)) {
if (!bsd_mprotect((char *)_polling_page, Bsd::page_size(), PROT_READ)) {
fatal("Could not enable polling page");
}
};
@ -4229,9 +4229,9 @@ static struct timespec* compute_abstime(struct timespec* abstime, jlong millis)
int os::PlatformEvent::TryPark() {
for (;;) {
const int v = _Event ;
guarantee ((v == 0) || (v == 1), "invariant") ;
if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
const int v = _Event;
guarantee((v == 0) || (v == 1), "invariant");
if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
}
}
@ -4239,18 +4239,18 @@ void os::PlatformEvent::park() { // AKA "down()"
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
// TODO: assert that _Assoc != NULL or _Assoc == Self
int v ;
int v;
for (;;) {
v = _Event ;
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
}
guarantee (v >= 0, "invariant") ;
guarantee(v >= 0, "invariant");
if (v == 0) {
// Do this the hard way by blocking ...
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
guarantee (_nParked == 0, "invariant") ;
++ _nParked ;
guarantee(_nParked == 0, "invariant");
++_nParked;
while (_Event < 0) {
status = pthread_cond_wait(_cond, _mutex);
// for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
@ -4258,28 +4258,28 @@ void os::PlatformEvent::park() { // AKA "down()"
if (status == ETIMEDOUT) { status = EINTR; }
assert_status(status == 0 || status == EINTR, status, "cond_wait");
}
-- _nParked ;
--_nParked;
_Event = 0 ;
_Event = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
}
guarantee (_Event >= 0, "invariant") ;
guarantee(_Event >= 0, "invariant");
}
int os::PlatformEvent::park(jlong millis) {
guarantee (_nParked == 0, "invariant") ;
guarantee(_nParked == 0, "invariant");
int v ;
int v;
for (;;) {
v = _Event ;
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
}
guarantee (v >= 0, "invariant") ;
if (v != 0) return OS_OK ;
guarantee(v >= 0, "invariant");
if (v != 0) return OS_OK;
// We do this the hard way, by blocking the thread.
// Consider enforcing a minimum timeout value.
@ -4289,8 +4289,8 @@ int os::PlatformEvent::park(jlong millis) {
int ret = OS_TIMEOUT;
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
guarantee (_nParked == 0, "invariant") ;
++_nParked ;
guarantee(_nParked == 0, "invariant");
++_nParked;
// Object.wait(timo) will return because of
// (a) notification
@ -4308,24 +4308,24 @@ int os::PlatformEvent::park(jlong millis) {
while (_Event < 0) {
status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &abst);
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy (_cond);
pthread_cond_init (_cond, NULL) ;
pthread_cond_destroy(_cond);
pthread_cond_init(_cond, NULL);
}
assert_status(status == 0 || status == EINTR ||
status == ETIMEDOUT,
status, "cond_timedwait");
if (!FilterSpuriousWakeups) break ; // previous semantics
if (status == ETIMEDOUT) break ;
if (!FilterSpuriousWakeups) break; // previous semantics
if (status == ETIMEDOUT) break;
// We consume and ignore EINTR and spurious wakeups.
}
--_nParked ;
--_nParked;
if (_Event >= 0) {
ret = OS_OK;
}
_Event = 0 ;
_Event = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
assert (_nParked == 0, "invariant") ;
assert(_nParked == 0, "invariant");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
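
The hunks in this region are pure whitespace, but the protocol they touch is worth spelling out: _Event is 1 when a permit is banked, 0 when neutral, and -1 while the owner is blocked in park(); park() decrements with a CAS and only blocks when it saw 0, while unpark() publishes a 1 and wakes a blocked owner. A simplified C++11 sketch of that shape (illustration only, not the HotSpot class; it assumes a single parking thread, as the invariant comment in park() above requires):

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <cstdio>

class Event {
  std::atomic<int> event_{0};      // 1 = permit banked, 0 = neutral, -1 = owner parked
  std::mutex mutex_;
  std::condition_variable cond_;
 public:
  void park() {                    // AKA "down()"
    int v = event_.load();
    while (!event_.compare_exchange_weak(v, v - 1)) {
      // v was refreshed by the failed CAS; retry with the new value
    }
    if (v == 0) {                  // no permit banked: take the blocking slow path
      std::unique_lock<std::mutex> lock(mutex_);
      cond_.wait(lock, [&] { return event_.load() >= 0; });
      event_.store(0);             // consume the permit
    }                              // v == 1: permit was already banked, fast path
  }
  void unpark() {                  // AKA "up()"
    if (event_.exchange(1) < 0) {  // the owner is (about to be) blocked: wake it
      std::lock_guard<std::mutex> lock(mutex_);
      cond_.notify_one();
    }
  }
};

int main() {                       // compile with -pthread
  Event e;
  std::thread t([&] { e.park(); std::puts("parker woke up"); });
  e.unpark();
  t.join();
  return 0;
}
```
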
@ -4409,7 +4409,7 @@ void os::PlatformEvent::unpark() {
*/
static void unpackTime(struct timespec* absTime, bool isAbsolute, jlong time) {
assert (time > 0, "convertTime");
assert(time > 0, "convertTime");
struct timeval now;
int status = gettimeofday(&now, NULL);
@ -4470,7 +4470,7 @@ void Parker::park(bool isAbsolute, jlong time) {
// Next, demultiplex/decode time arguments
struct timespec absTime;
if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
return;
}
if (time > 0) {
@ -4492,11 +4492,11 @@ void Parker::park(bool isAbsolute, jlong time) {
return;
}
int status ;
int status;
if (_counter > 0) { // no wait needed
_counter = 0;
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
assert(status == 0, "invariant");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
@ -4516,12 +4516,12 @@ void Parker::park(bool isAbsolute, jlong time) {
// cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
if (time == 0) {
status = pthread_cond_wait (_cond, _mutex) ;
status = pthread_cond_wait(_cond, _mutex);
} else {
status = os::Bsd::safe_cond_timedwait (_cond, _mutex, &absTime) ;
status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &absTime);
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy (_cond) ;
pthread_cond_init (_cond, NULL);
pthread_cond_destroy(_cond);
pthread_cond_init(_cond, NULL);
}
}
assert_status(status == 0 || status == EINTR ||
@ -4532,9 +4532,9 @@ void Parker::park(bool isAbsolute, jlong time) {
pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif
_counter = 0 ;
status = pthread_mutex_unlock(_mutex) ;
assert_status(status == 0, status, "invariant") ;
_counter = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "invariant");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
@ -4546,26 +4546,26 @@ void Parker::park(bool isAbsolute, jlong time) {
}
void Parker::unpark() {
int s, status ;
int s, status;
status = pthread_mutex_lock(_mutex);
assert (status == 0, "invariant") ;
assert(status == 0, "invariant");
s = _counter;
_counter = 1;
if (s < 1) {
if (WorkAroundNPTLTimedWaitHang) {
status = pthread_cond_signal (_cond) ;
assert (status == 0, "invariant") ;
status = pthread_cond_signal(_cond);
assert(status == 0, "invariant");
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
assert(status == 0, "invariant");
} else {
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
status = pthread_cond_signal (_cond) ;
assert (status == 0, "invariant") ;
assert(status == 0, "invariant");
status = pthread_cond_signal(_cond);
assert(status == 0, "invariant");
}
} else {
pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
assert(status == 0, "invariant");
}
}
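
compute_abstime() and unpackTime() above both exist to turn a Java-side timeout into the absolute deadline that pthread_cond_timedwait() expects. A minimal stand-alone version of that conversion (gettimeofday-based, matching the default CLOCK_REALTIME condvar; the helper name is made up for the example):

```cpp
#include <pthread.h>
#include <sys/time.h>
#include <time.h>
#include <cstdio>

// Build "now + millis" as an absolute CLOCK_REALTIME deadline.
static void compute_abstime_sketch(struct timespec* abstime, long millis) {
  struct timeval now;
  gettimeofday(&now, NULL);
  long seconds = millis / 1000;
  long usecs   = (millis % 1000) * 1000 + now.tv_usec;
  abstime->tv_sec  = now.tv_sec + seconds + usecs / 1000000;
  abstime->tv_nsec = (usecs % 1000000) * 1000;
}

int main() {                                      // compile with -pthread
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
  struct timespec abst;
  compute_abstime_sketch(&abst, 50);              // deadline ~50 ms from now

  pthread_mutex_lock(&mutex);
  int status = pthread_cond_timedwait(&cond, &mutex, &abst);  // nobody signals
  pthread_mutex_unlock(&mutex);
  printf("cond_timedwait returned %d (ETIMEDOUT expected)\n", status);
  return 0;
}
```
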

View File

@ -191,16 +191,16 @@ public:
class PlatformEvent : public CHeapObj<mtInternal> {
private:
double CachePad [4] ; // increase odds that _mutex is sole occupant of cache line
volatile int _Event ;
volatile int _nParked ;
pthread_mutex_t _mutex [1] ;
pthread_cond_t _cond [1] ;
double PostPad [2] ;
Thread * _Assoc ;
double CachePad[4]; // increase odds that _mutex is sole occupant of cache line
volatile int _Event;
volatile int _nParked;
pthread_mutex_t _mutex[1];
pthread_cond_t _cond[1];
double PostPad[2];
Thread * _Assoc;
public: // TODO-FIXME: make dtor private
~PlatformEvent() { guarantee (0, "invariant") ; }
~PlatformEvent() { guarantee(0, "invariant"); }
public:
PlatformEvent() {
@ -209,28 +209,28 @@ class PlatformEvent : public CHeapObj<mtInternal> {
assert_status(status == 0, status, "cond_init");
status = pthread_mutex_init (_mutex, NULL);
assert_status(status == 0, status, "mutex_init");
_Event = 0 ;
_nParked = 0 ;
_Assoc = NULL ;
_Event = 0;
_nParked = 0;
_Assoc = NULL;
}
// Use caution with reset() and fired() -- they may require MEMBARs
void reset() { _Event = 0 ; }
void reset() { _Event = 0; }
int fired() { return _Event; }
void park () ;
void unpark () ;
int TryPark () ;
int park (jlong millis) ;
void SetAssociation (Thread * a) { _Assoc = a ; }
void park();
void unpark();
int TryPark();
int park(jlong millis);
void SetAssociation(Thread * a) { _Assoc = a; }
};
class PlatformParker : public CHeapObj<mtInternal> {
protected:
pthread_mutex_t _mutex [1] ;
pthread_cond_t _cond [1] ;
pthread_mutex_t _mutex[1];
pthread_cond_t _cond[1];
public: // TODO-FIXME: make dtor private
~PlatformParker() { guarantee (0, "invariant") ; }
~PlatformParker() { guarantee(0, "invariant"); }
public:
PlatformParker() {

View File

@ -862,7 +862,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
case os::java_thread:
// Java threads use ThreadStackSize which default value can be
// changed with the flag -Xss
assert (JavaThread::stack_size_at_create() > 0, "this should be set");
assert(JavaThread::stack_size_at_create() > 0, "this should be set");
stack_size = JavaThread::stack_size_at_create();
break;
case os::compiler_thread:
@ -1097,7 +1097,7 @@ static bool find_vma(address addr, address* vma_low, address* vma_high) {
if (low <= addr && addr < high) {
if (vma_low) *vma_low = low;
if (vma_high) *vma_high = high;
fclose (fp);
fclose(fp);
return true;
}
}
@ -1420,7 +1420,7 @@ void os::Linux::fast_thread_clock_init() {
// must return at least tp.tv_sec == 0 which means a resolution
// better than 1 sec. This is extra check for reliability.
if(pthread_getcpuclockid_func &&
if (pthread_getcpuclockid_func &&
pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
@ -1630,7 +1630,7 @@ bool os::dll_build_name(char* buffer, size_t buflen,
if (pelements == NULL) {
return false;
}
for (int i = 0 ; i < n ; i++) {
for (int i = 0; i < n; i++) {
// Really shouldn't be NULL, but check can't hurt
if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
continue; // skip the empty path values
@ -1642,7 +1642,7 @@ bool os::dll_build_name(char* buffer, size_t buflen,
}
}
// release the storage
for (int i = 0 ; i < n ; i++) {
for (int i = 0; i < n; i++) {
if (pelements[i] != NULL) {
FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
}
@ -1906,7 +1906,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
bool failed_to_read_elf_head=
(sizeof(elf_head)!=
(::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
(::read(file_descriptor, &elf_head,sizeof(elf_head))));
::close(file_descriptor);
if (failed_to_read_elf_head) {
@ -1988,7 +1988,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
int running_arch_index=-1;
for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
if (running_arch_code == arch_array[i].code) {
running_arch_index = i;
}
@ -2019,7 +2019,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
#endif // !S390
if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
if ( lib_arch.name!=NULL ) {
if (lib_arch.name!=NULL) {
::snprintf(diag_msg_buf, diag_msg_max_length-1,
" (Possible cause: can't load %s-bit .so on a %s-bit platform)",
lib_arch.name, arch_array[running_arch_index].name);
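
The dll_load() hunks above reformat the code that peeks at a shared object's ELF header so a "wrong architecture" library can be diagnosed before blaming dlopen(). A hedged stand-alone sketch of that check (Linux, 64-bit ELF assumed; the fallback libc path is only an example and differs between distributions):

```cpp
#include <elf.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main(int argc, char** argv) {
  // Path is just an example; pass any shared object on the command line.
  const char* path = argc > 1 ? argv[1] : "/usr/lib/libc.so.6";
  int fd = open(path, O_RDONLY);
  if (fd < 0) { perror("open"); return 1; }

  Elf64_Ehdr elf_head;
  bool failed_to_read_elf_head =
      (ssize_t)sizeof(elf_head) != read(fd, &elf_head, sizeof(elf_head));
  close(fd);
  if (failed_to_read_elf_head) { fprintf(stderr, "short read of %s\n", path); return 1; }

  if (memcmp(elf_head.e_ident, ELFMAG, SELFMAG) != 0) {
    fprintf(stderr, "%s is not an ELF file\n", path);
    return 1;
  }
  // These three fields are what the arch_array comparison above keys on.
  printf("%s: class=%d data=%d machine=%d\n", path,
         elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], (int)elf_head.e_machine);
  return 0;
}
```
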
@ -3793,7 +3793,7 @@ void os::yield() {
sched_yield();
}
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
void os::yield_all() {
// Yields to all threads, including threads with lower priorities
@ -3858,14 +3858,14 @@ static int prio_init() {
}
OSReturn os::set_native_priority(Thread* thread, int newpri) {
if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) return OS_OK;
if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;
int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);
return (ret == 0) ? OS_OK : OS_ERR;
}
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) {
if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
*priority_ptr = java_to_os_priority[NormPriority];
return OS_OK;
}
@ -4219,7 +4219,7 @@ bool os::Linux::chained_handler(int sig, siginfo_t* siginfo, void* context) {
}
struct sigaction* os::Linux::get_preinstalled_handler(int sig) {
if ((( (unsigned int)1 << sig ) & sigs) != 0) {
if ((((unsigned int)1 << sig) & sigs) != 0) {
return &sigact[sig];
}
return NULL;
@ -4423,7 +4423,7 @@ static void print_signal_handler(outputStream* st, int sig,
address rh = VMError::get_resetted_sighandler(sig);
// May be, handler was resetted by VMError?
if(rh != NULL) {
if (rh != NULL) {
handler = rh;
sa.sa_flags = VMError::get_resetted_sigflags(sig) & SIGNIFICANT_SIGNAL_MASK;
}
@ -4432,11 +4432,11 @@ static void print_signal_handler(outputStream* st, int sig,
os::Posix::print_sa_flags(st, sa.sa_flags);
// Check: is it our handler?
if(handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
// It is our signal handler
// check for flags, reset system-used one!
if((int)sa.sa_flags != os::Linux::get_our_sigflags(sig)) {
if ((int)sa.sa_flags != os::Linux::get_our_sigflags(sig)) {
st->print(
", flags was changed from " PTR32_FORMAT ", consider using jsig library",
os::Linux::get_our_sigflags(sig));
@ -4507,10 +4507,10 @@ void os::Linux::check_signal_handler(int sig) {
address thisHandler = (act.sa_flags & SA_SIGINFO)
? CAST_FROM_FN_PTR(address, act.sa_sigaction)
: CAST_FROM_FN_PTR(address, act.sa_handler) ;
: CAST_FROM_FN_PTR(address, act.sa_handler);
switch(sig) {
switch (sig) {
case SIGSEGV:
case SIGBUS:
case SIGFPE:
@ -4662,22 +4662,22 @@ jint os::init_2(void)
// Allocate a single page and mark it as readable for safepoint polling
address polling_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" );
guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");
os::set_polling_page( polling_page );
os::set_polling_page(polling_page);
#ifndef PRODUCT
if(Verbose && PrintMiscellaneous)
if (Verbose && PrintMiscellaneous)
tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif
if (!UseMembar) {
address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
guarantee(mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page(mem_serialize_page);
#ifndef PRODUCT
if(Verbose && PrintMiscellaneous)
if (Verbose && PrintMiscellaneous)
tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
}
@ -4819,13 +4819,13 @@ void os::init_3(void) {
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if( !guard_memory((char*)_polling_page, Linux::page_size()) )
if (!guard_memory((char*)_polling_page, Linux::page_size()))
fatal("Could not disable polling page");
};
// Mark the polling page as readable
void os::make_polling_page_readable(void) {
if( !linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
if (!linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
fatal("Could not enable polling page");
}
};
@ -5288,7 +5288,7 @@ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
fp = fopen(proc_name, "r");
if ( fp == NULL ) return -1;
if (fp == NULL) return -1;
statlen = fread(stat, 1, 2047, fp);
stat[statlen] = '\0';
fclose(fp);
@ -5300,7 +5300,7 @@ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
// We don't really need to know the command string, just find the last
// occurrence of ")" and then start parsing from there. See bug 4726580.
s = strrchr(stat, ')');
if (s == NULL ) return -1;
if (s == NULL) return -1;
// Skip blank chars
do s++; while (isspace(*s));
@ -5309,7 +5309,7 @@ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
&cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
&ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
&user_time, &sys_time);
if ( count != 13 ) return -1;
if (count != 13) return -1;
if (user_sys_cpu_time) {
return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
} else {
@ -5468,9 +5468,9 @@ static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
int os::PlatformEvent::TryPark() {
for (;;) {
const int v = _Event ;
guarantee ((v == 0) || (v == 1), "invariant") ;
if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
const int v = _Event;
guarantee((v == 0) || (v == 1), "invariant");
if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
}
}
@ -5478,18 +5478,18 @@ void os::PlatformEvent::park() { // AKA "down()"
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
// TODO: assert that _Assoc != NULL or _Assoc == Self
int v ;
int v;
for (;;) {
v = _Event ;
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
}
guarantee (v >= 0, "invariant") ;
guarantee(v >= 0, "invariant");
if (v == 0) {
// Do this the hard way by blocking ...
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
guarantee (_nParked == 0, "invariant") ;
++ _nParked ;
guarantee(_nParked == 0, "invariant");
++_nParked;
while (_Event < 0) {
status = pthread_cond_wait(_cond, _mutex);
// for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
@ -5497,28 +5497,28 @@ void os::PlatformEvent::park() { // AKA "down()"
if (status == ETIME) { status = EINTR; }
assert_status(status == 0 || status == EINTR, status, "cond_wait");
}
-- _nParked ;
--_nParked;
_Event = 0 ;
_Event = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
}
guarantee (_Event >= 0, "invariant") ;
guarantee(_Event >= 0, "invariant");
}
int os::PlatformEvent::park(jlong millis) {
guarantee (_nParked == 0, "invariant") ;
guarantee(_nParked == 0, "invariant");
int v ;
int v;
for (;;) {
v = _Event ;
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
}
guarantee (v >= 0, "invariant") ;
if (v != 0) return OS_OK ;
guarantee(v >= 0, "invariant");
if (v != 0) return OS_OK;
// We do this the hard way, by blocking the thread.
// Consider enforcing a minimum timeout value.
@ -5528,8 +5528,8 @@ int os::PlatformEvent::park(jlong millis) {
int ret = OS_TIMEOUT;
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
guarantee (_nParked == 0, "invariant") ;
++_nParked ;
guarantee(_nParked == 0, "invariant");
++_nParked;
// Object.wait(timo) will return because of
// (a) notification
@ -5547,24 +5547,24 @@ int os::PlatformEvent::park(jlong millis) {
while (_Event < 0) {
status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy (_cond);
pthread_cond_init (_cond, os::Linux::condAttr()) ;
pthread_cond_destroy(_cond);
pthread_cond_init(_cond, os::Linux::condAttr());
}
assert_status(status == 0 || status == EINTR ||
status == ETIME || status == ETIMEDOUT,
status, "cond_timedwait");
if (!FilterSpuriousWakeups) break ; // previous semantics
if (status == ETIME || status == ETIMEDOUT) break ;
if (!FilterSpuriousWakeups) break; // previous semantics
if (status == ETIME || status == ETIMEDOUT) break;
// We consume and ignore EINTR and spurious wakeups.
}
--_nParked ;
--_nParked;
if (_Event >= 0) {
ret = OS_OK;
}
_Event = 0 ;
_Event = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
assert (_nParked == 0, "invariant") ;
assert(_nParked == 0, "invariant");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
@ -5647,7 +5647,7 @@ void os::PlatformEvent::unpark() {
*/
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
assert (time > 0, "convertTime");
assert(time > 0, "convertTime");
time_t max_secs = 0;
if (!os::supports_monotonic_clock() || isAbsolute) {
@ -5726,7 +5726,7 @@ void Parker::park(bool isAbsolute, jlong time) {
// Next, demultiplex/decode time arguments
timespec absTime;
if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
return;
}
if (time > 0) {
@ -5748,11 +5748,11 @@ void Parker::park(bool isAbsolute, jlong time) {
return;
}
int status ;
int status;
if (_counter > 0) { // no wait needed
_counter = 0;
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
assert(status == 0, "invariant");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
@ -5774,13 +5774,13 @@ void Parker::park(bool isAbsolute, jlong time) {
assert(_cur_index == -1, "invariant");
if (time == 0) {
_cur_index = REL_INDEX; // arbitrary choice when not timed
status = pthread_cond_wait (&_cond[_cur_index], _mutex) ;
status = pthread_cond_wait(&_cond[_cur_index], _mutex);
} else {
_cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ;
status = os::Linux::safe_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy (&_cond[_cur_index]) ;
pthread_cond_init (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
pthread_cond_destroy(&_cond[_cur_index]);
pthread_cond_init(&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
}
}
_cur_index = -1;
@ -5792,9 +5792,9 @@ void Parker::park(bool isAbsolute, jlong time) {
pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif
_counter = 0 ;
status = pthread_mutex_unlock(_mutex) ;
assert_status(status == 0, status, "invariant") ;
_counter = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "invariant");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
@ -5806,9 +5806,9 @@ void Parker::park(bool isAbsolute, jlong time) {
}
void Parker::unpark() {
int s, status ;
int s, status;
status = pthread_mutex_lock(_mutex);
assert (status == 0, "invariant") ;
assert(status == 0, "invariant");
s = _counter;
_counter = 1;
if (s < 1) {
@ -5817,22 +5817,22 @@ void Parker::unpark() {
// thread is definitely parked
if (WorkAroundNPTLTimedWaitHang) {
status = pthread_cond_signal (&_cond[_cur_index]);
assert (status == 0, "invariant");
assert(status == 0, "invariant");
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant");
assert(status == 0, "invariant");
} else {
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant");
assert(status == 0, "invariant");
status = pthread_cond_signal (&_cond[_cur_index]);
assert (status == 0, "invariant");
assert(status == 0, "invariant");
}
} else {
pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
assert(status == 0, "invariant");
}
} else {
pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
assert(status == 0, "invariant");
}
}
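
Unlike the BSD Parker earlier in this change, the Linux Parker above keeps two condition variables: _cond[REL_INDEX] is created with os::Linux::condAttr() (a CLOCK_MONOTONIC attribute) so relative waits do not jump when the wall clock is reset, while _cond[ABS_INDEX] stays on the default CLOCK_REALTIME for absolute deadlines. A stand-alone sketch of the relative half, assuming a platform that supports pthread_condattr_setclock():

```cpp
#include <pthread.h>
#include <time.h>
#include <cstdio>

int main() {                                           // compile with -pthread
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);
  pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);   // relative waits: immune to wall-clock steps

  pthread_cond_t cond;
  pthread_cond_init(&cond, &attr);
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

  // Deadline = "now on the monotonic clock" + 100 ms.
  struct timespec abst;
  clock_gettime(CLOCK_MONOTONIC, &abst);
  abst.tv_nsec += 100 * 1000000L;
  if (abst.tv_nsec >= 1000000000L) { abst.tv_sec += 1; abst.tv_nsec -= 1000000000L; }

  pthread_mutex_lock(&mutex);
  int status = pthread_cond_timedwait(&cond, &mutex, &abst);  // nobody signals: times out
  pthread_mutex_unlock(&mutex);
  printf("timedwait on the monotonic condvar returned %d\n", status);
  return 0;
}
```
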

View File

@ -287,16 +287,16 @@ public:
class PlatformEvent : public CHeapObj<mtInternal> {
private:
double CachePad [4] ; // increase odds that _mutex is sole occupant of cache line
volatile int _Event ;
volatile int _nParked ;
pthread_mutex_t _mutex [1] ;
pthread_cond_t _cond [1] ;
double PostPad [2] ;
Thread * _Assoc ;
double CachePad[4]; // increase odds that _mutex is sole occupant of cache line
volatile int _Event;
volatile int _nParked;
pthread_mutex_t _mutex[1];
pthread_cond_t _cond[1];
double PostPad[2];
Thread * _Assoc;
public: // TODO-FIXME: make dtor private
~PlatformEvent() { guarantee (0, "invariant") ; }
~PlatformEvent() { guarantee(0, "invariant"); }
public:
PlatformEvent() {
@ -305,20 +305,20 @@ class PlatformEvent : public CHeapObj<mtInternal> {
assert_status(status == 0, status, "cond_init");
status = pthread_mutex_init (_mutex, NULL);
assert_status(status == 0, status, "mutex_init");
_Event = 0 ;
_nParked = 0 ;
_Assoc = NULL ;
_Event = 0;
_nParked = 0;
_Assoc = NULL;
}
// Use caution with reset() and fired() -- they may require MEMBARs
void reset() { _Event = 0 ; }
void reset() { _Event = 0; }
int fired() { return _Event; }
void park () ;
void unpark () ;
int TryPark () ;
int park (jlong millis) ; // relative timed-wait only
void SetAssociation (Thread * a) { _Assoc = a ; }
} ;
void park();
void unpark();
int TryPark();
int park(jlong millis); // relative timed-wait only
void SetAssociation(Thread * a) { _Assoc = a; }
};
class PlatformParker : public CHeapObj<mtInternal> {
protected:
@ -327,11 +327,11 @@ class PlatformParker : public CHeapObj<mtInternal> {
ABS_INDEX = 1
};
int _cur_index; // which cond is in use: -1, 0, 1
pthread_mutex_t _mutex [1] ;
pthread_cond_t _cond [2] ; // one for relative times and one for abs.
pthread_mutex_t _mutex[1];
pthread_cond_t _cond[2]; // one for relative times and one for abs.
public: // TODO-FIXME: make dtor private
~PlatformParker() { guarantee (0, "invariant") ; }
~PlatformParker() { guarantee(0, "invariant"); }
public:
PlatformParker() {

View File

@ -212,13 +212,13 @@ Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
"sp must be inside of selected thread stack");
thread->set_self_raw_id(raw_id); // mark for quick retrieval
_get_thread_cache[ index ] = thread;
_get_thread_cache[index] = thread;
}
return thread;
}
static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
static const double all_zero[sizeof(Thread) / sizeof(double) + 1] = {0};
#define NO_CACHED_THREAD ((Thread*)all_zero)
void ThreadLocalStorage::pd_set_thread(Thread* thread) {
@ -270,8 +270,8 @@ static inline stack_t get_stack_info() {
}
address os::current_stack_base() {
int r = thr_main() ;
guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
int r = thr_main();
guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
bool is_primordial_thread = r;
// Workaround 4352906, avoid calls to thr_stksegment by
@ -293,9 +293,9 @@ address os::current_stack_base() {
size_t os::current_stack_size() {
size_t size;
int r = thr_main() ;
guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
if(!r) {
int r = thr_main();
guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
if (!r) {
size = get_stack_info().ss_size;
} else {
struct rlimit limits;
@ -409,7 +409,7 @@ static bool find_processors_in_pset(psetid_t pset,
static bool find_processors_online(processorid_t** id_array,
uint* id_length) {
const processorid_t MAX_PROCESSOR_ID = 100000 ;
const processorid_t MAX_PROCESSOR_ID = 100000;
// Find the number of processors online.
*id_length = sysconf(_SC_NPROCESSORS_ONLN);
// Make up an array to hold their ids.
@ -436,7 +436,7 @@ static bool find_processors_online(processorid_t** id_array,
// we've got. Note that in the worst case find_processors_online() could
// return an empty set. (As a fall-back in the case of the empty set we
// could just return the ID of the current processor).
*id_length = found ;
*id_length = found;
}
return true;
@ -552,13 +552,13 @@ bool os::bind_to_processor(uint processor_id) {
}
bool os::getenv(const char* name, char* buffer, int len) {
char* val = ::getenv( name );
if ( val == NULL
char* val = ::getenv(name);
if (val == NULL
|| strlen(val) + 1 > len ) {
if (len > 0) buffer[0] = 0; // return a null string
return false;
}
strcpy( buffer, val );
strcpy(buffer, val);
return true;
}
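
os::getenv() above is just a bounds-checked copy of ::getenv()'s result: fail (and hand back an empty string) rather than overflow the caller's buffer. The same logic as a free function, shown only as an illustration:

```cpp
#include <cstdlib>
#include <cstring>
#include <cstdio>

static bool getenv_bounded(const char* name, char* buffer, size_t len) {
  const char* val = ::getenv(name);
  if (val == NULL || strlen(val) + 1 > len) {
    if (len > 0) buffer[0] = '\0';   // return a null string on failure
    return false;
  }
  strcpy(buffer, val);               // safe: strlen(val) + 1 <= len
  return true;
}

int main() {
  char buf[16];
  printf("PATH fits in 16 bytes? %s\n",
         getenv_bounded("PATH", buf, sizeof(buf)) ? "yes" : "no");
  return 0;
}
```
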
@ -672,7 +672,7 @@ void os::init_system_properties_values() {
// Determine search path count and required buffer size.
if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
}
@ -683,7 +683,7 @@ void os::init_system_properties_values() {
// Obtain search path information.
if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, info, mtInternal);
vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
}
@ -794,7 +794,7 @@ bool os::obsolete_option(const JavaVMOption *option)
bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
address stackStart = (address)thread->stack_base();
address stackEnd = (address)(stackStart - (address)thread->stack_size());
if (sp < stackStart && sp >= stackEnd ) return true;
if (sp < stackStart && sp >= stackEnd) return true;
return false;
}
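
valid_stack_address() above is a half-open range test on a downward-growing stack: sp belongs to the thread iff stackEnd <= sp < stackStart. Restated with plain integers (the numbers below are hypothetical, purely to show the test):

```cpp
#include <cstdint>
#include <cstdio>

static bool valid_stack_address(uintptr_t stack_start, size_t stack_size, uintptr_t sp) {
  uintptr_t stack_end = stack_start - stack_size;   // lowest address still on this stack
  return sp < stack_start && sp >= stack_end;
}

int main() {
  uintptr_t base = 0x7fff0000u;   // hypothetical stack base
  size_t    size = 0x10000u;      // hypothetical stack size
  printf("inside=%d outside=%d\n",
         (int)valid_stack_address(base, size, base - 8),
         (int)valid_stack_address(base, size, base + 8));
  return 0;
}
```
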
@ -819,8 +819,8 @@ extern "C" void* java_start(void* thread_addr) {
Thread* thread = (Thread*)thread_addr;
OSThread* osthr = thread->osthread();
osthr->set_lwp_id( _lwp_self() ); // Store lwp in case we are bound
thread->_schedctl = (void *) schedctl_init () ;
osthr->set_lwp_id(_lwp_self()); // Store lwp in case we are bound
thread->_schedctl = (void *) schedctl_init();
if (UseNUMA) {
int lgrp_id = os::numa_get_group_id();
@ -839,8 +839,8 @@ extern "C" void* java_start(void* thread_addr) {
// in java_to_os_priority. So we save the native priority
// in the osThread and recall it here.
if ( osthr->thread_id() != -1 ) {
if ( UseThreadPriorities ) {
if (osthr->thread_id() != -1) {
if (UseThreadPriorities) {
int prio = osthr->native_priority();
if (ThreadPriorityVerbose) {
tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
@ -882,7 +882,7 @@ static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
// Store info on the Solaris thread into the OSThread
osthread->set_thread_id(thread_id);
osthread->set_lwp_id(_lwp_self());
thread->_schedctl = (void *) schedctl_init () ;
thread->_schedctl = (void *) schedctl_init();
if (UseNUMA) {
int lgrp_id = os::numa_get_group_id();
@ -891,9 +891,9 @@ static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
}
}
if ( ThreadPriorityVerbose ) {
if (ThreadPriorityVerbose) {
tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
osthread->thread_id(), osthread->lwp_id() );
osthread->thread_id(), osthread->lwp_id());
}
// Initial thread state is INITIALIZED, not SUSPENDED
@ -974,9 +974,9 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
return false;
}
if ( ThreadPriorityVerbose ) {
if (ThreadPriorityVerbose) {
char *thrtyp;
switch ( thr_type ) {
switch (thr_type) {
case vm_thread:
thrtyp = (char *)"vm";
break;
@ -1207,11 +1207,11 @@ void _handle_uncaught_cxx_exception() {
// First crack at OS-specific initialization, from inside the new thread.
void os::initialize_thread(Thread* thr) {
int r = thr_main() ;
guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
int r = thr_main();
guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
if (r) {
JavaThread* jt = (JavaThread *)thr;
assert(jt != NULL,"Sanity check");
assert(jt != NULL, "Sanity check");
size_t stack_size;
address base = jt->stack_base();
if (Arguments::created_by_java_launcher()) {
@ -1322,7 +1322,7 @@ int os::allocate_thread_local_storage() {
// JavaThread in Java code, and have stubs simply
// treat %g2 as a caller-save register, preserving it in a %lN.
thread_key_t tk;
if (thr_keycreate( &tk, NULL ) )
if (thr_keycreate( &tk, NULL))
fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
"(%s)", strerror(errno)));
return int(tk);
@ -1347,7 +1347,7 @@ void os::thread_local_storage_at_put(int index, void* value) {
"(%s)", strerror(errno)));
}
} else {
ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
ThreadLocalStorage::set_thread_in_slot((Thread *) value);
}
}
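
The hunk above touches os::thread_local_storage_at_put(), which stores the current Thread* in a slot allocated with Solaris thr_keycreate() (plus a direct-slot fast path). The portable POSIX shape of the same mechanism, shown only as an illustration:

```cpp
#include <pthread.h>
#include <cstdio>

static pthread_key_t tls_key;

static void* worker(void*) {
  int my_value = 42;
  pthread_setspecific(tls_key, &my_value);          // store this thread's slot
  int* back = (int*)pthread_getspecific(tls_key);   // read it back
  printf("worker sees %d\n", *back);
  return NULL;
}

int main() {                                        // compile with -pthread
  if (pthread_key_create(&tls_key, NULL) != 0) {    // NULL: no destructor
    perror("pthread_key_create");
    return 1;
  }
  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  pthread_join(t, NULL);
  pthread_key_delete(tls_key);
  return 0;
}
```
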
@ -1579,7 +1579,7 @@ bool os::dll_build_name(char* buffer, size_t buflen,
if (pelements == NULL) {
return false;
}
for (int i = 0 ; i < n ; i++) {
for (int i = 0; i < n; i++) {
// really shouldn't be NULL but what the heck, check can't hurt
if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
continue; // skip the empty path values
@ -1591,7 +1591,7 @@ bool os::dll_build_name(char* buffer, size_t buflen,
}
}
// release the storage
for (int i = 0 ; i < n ; i++) {
for (int i = 0; i < n; i++) {
if (pelements[i] != NULL) {
FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
}
@ -1795,7 +1795,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
bool failed_to_read_elf_head=
(sizeof(elf_head)!=
(::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
(::read(file_descriptor, &elf_head,sizeof(elf_head))));
::close(file_descriptor);
if (failed_to_read_elf_head) {
@ -1851,7 +1851,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
int running_arch_index=-1;
for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
if (running_arch_code == arch_array[i].code) {
running_arch_index = i;
}
@ -1880,7 +1880,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
}
if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
if ( lib_arch.name!=NULL ) {
if (lib_arch.name!=NULL) {
::snprintf(diag_msg_buf, diag_msg_max_length-1,
" (Possible cause: can't load %s-bit .so on a %s-bit platform)",
lib_arch.name, arch_array[running_arch_index].name);
@ -1969,7 +1969,7 @@ static bool check_addr0(outputStream* st) {
int fd = ::open("/proc/self/map",O_RDONLY);
if (fd >= 0) {
prmap_t p;
while(::read(fd, &p, sizeof(p)) > 0) {
while (::read(fd, &p, sizeof(p)) > 0) {
if (p.pr_vaddr == 0x0) {
st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
@ -2079,7 +2079,7 @@ static void print_signal_handler(outputStream* st, int sig,
address rh = VMError::get_resetted_sighandler(sig);
// May be, handler was resetted by VMError?
if(rh != NULL) {
if (rh != NULL) {
handler = rh;
sa.sa_flags = VMError::get_resetted_sigflags(sig);
}
@ -2088,11 +2088,11 @@ static void print_signal_handler(outputStream* st, int sig,
os::Posix::print_sa_flags(st, sa.sa_flags);
// Check: is it our handler?
if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
// It is our signal handler
// check for flags
if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
st->print(
", flags was changed from " PTR32_FORMAT ", consider using jsig library",
os::Solaris::get_our_sigflags(sig));
@ -2403,7 +2403,7 @@ static int check_pending_signals(bool wait_for_signal) {
do {
thread->set_suspend_equivalent();
// cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
while((ret = ::sema_wait(&sig_sem)) == EINTR)
while ((ret = ::sema_wait(&sig_sem)) == EINTR)
;
assert(ret == 0, "sema_wait() failed");
@ -2635,7 +2635,7 @@ size_t os::numa_get_leaf_groups(int *ids, size_t size) {
}
if (!r) {
// That's a leaf node.
assert (bottom <= cur, "Sanity check");
assert(bottom <= cur, "Sanity check");
// Check if the node has memory
if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
NULL, 0, LGRP_RSRC_MEM) > 0) {
@ -3051,7 +3051,7 @@ bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
const size_t size_limit =
FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
int beg;
for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
const int end = MIN2((int)usable_count, n) - 1;
for (int cur = 0; cur < end; ++cur, ++beg) {
_page_sizes[cur] = _page_sizes[beg];
@ -3264,7 +3264,7 @@ static int java_MaxPriority_to_os_priority = 0; // Saved mapping
//
// Return errno or 0 if OK.
//
static int lwp_priocntl_init () {
static int lwp_priocntl_init() {
int rslt;
pcinfo_t ClassInfo;
pcparms_t ParmInfo;
@ -3274,7 +3274,7 @@ static int lwp_priocntl_init () {
// If ThreadPriorityPolicy is 1, switch tables
if (ThreadPriorityPolicy == 1) {
for (i = 0 ; i < CriticalPriority+1; i++)
for (i = 0; i < CriticalPriority+1; i++)
os::java_to_os_priority[i] = prio_policy1[i];
}
if (UseCriticalJavaThreadPriority) {
@ -3373,12 +3373,12 @@ static int lwp_priocntl_init () {
} else {
// No clue - punt
if (ThreadPriorityVerbose)
tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
tty->print_cr("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
return EINVAL; // no clue, punt
}
if (ThreadPriorityVerbose) {
tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
}
priocntl_enable = true; // Enable changing priorities
@ -3424,7 +3424,7 @@ int set_lwp_class_and_priority(int ThreadID, int lwpid,
// TODO: elide set-to-same-value
// If something went wrong on init, don't change priorities.
if ( !priocntl_enable ) {
if (!priocntl_enable) {
if (ThreadPriorityVerbose)
tty->print_cr("Trying to set priority but init failed, ignoring");
return EINVAL;
@ -3432,9 +3432,9 @@ int set_lwp_class_and_priority(int ThreadID, int lwpid,
// If lwp hasn't started yet, just return
// the _start routine will call us again.
if ( lwpid <= 0 ) {
if (lwpid <= 0) {
if (ThreadPriorityVerbose) {
tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
tty->print_cr("deferring the set_lwp_class_and_priority of thread "
INTPTR_FORMAT " to %d, lwpid not set",
ThreadID, newPrio);
}
@ -3653,7 +3653,7 @@ OSReturn os::set_native_priority(Thread* thread, int newpri) {
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
int p;
if ( !UseThreadPriorities ) {
if (!UseThreadPriorities) {
*priority_ptr = NormalPriority;
return OS_OK;
}
@ -4099,7 +4099,7 @@ void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain
void os::run_periodic_checks() {
// A big source of grief is hijacking virt. addr 0x0 on Solaris,
// thereby preventing a NULL checks.
if(!check_addr0_done) check_addr0_done = check_addr0(tty);
if (!check_addr0_done) check_addr0_done = check_addr0(tty);
if (check_signals == false) return;
@ -4148,10 +4148,10 @@ void os::Solaris::check_signal_handler(int sig) {
address thisHandler = (act.sa_flags & SA_SIGINFO)
? CAST_FROM_FN_PTR(address, act.sa_sigaction)
: CAST_FROM_FN_PTR(address, act.sa_handler) ;
: CAST_FROM_FN_PTR(address, act.sa_handler);
switch(sig) {
switch (sig) {
case SIGSEGV:
case SIGBUS:
case SIGFPE:
@ -4332,7 +4332,7 @@ os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
static address resolve_symbol_lazy(const char* name) {
address addr = (address) dlsym(RTLD_DEFAULT, name);
if(addr == NULL) {
if (addr == NULL) {
// RTLD_DEFAULT was not defined on some early versions of 2.5.1
addr = (address) dlsym(RTLD_NEXT, name);
}
@ -4341,7 +4341,7 @@ static address resolve_symbol_lazy(const char* name) {
static address resolve_symbol(const char* name) {
address addr = resolve_symbol_lazy(name);
if(addr == NULL) {
if (addr == NULL) {
fatal(dlerror());
}
return addr;
@ -4353,7 +4353,7 @@ void os::Solaris::libthread_init() {
lwp_priocntl_init();
// RTLD_DEFAULT was not defined on some early versions of 5.5.1
if(func == NULL) {
if (func == NULL) {
func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
// Guarantee that this VM is running on an new enough OS (5.6 or
// later) that it will have a new enough libthread.so.
@ -4384,7 +4384,7 @@ int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;
void os::Solaris::synchronization_init() {
if(UseLWPSynchronization) {
if (UseLWPSynchronization) {
os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
@ -4404,7 +4404,7 @@ void os::Solaris::synchronization_init() {
os::Solaris::set_mutex_scope(USYNC_THREAD);
os::Solaris::set_cond_scope(USYNC_THREAD);
if(UsePthreads) {
if (UsePthreads) {
os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
@ -4576,17 +4576,17 @@ jint os::init_2(void) {
os::set_polling_page(polling_page);
#ifndef PRODUCT
if( Verbose && PrintMiscellaneous )
if (Verbose && PrintMiscellaneous)
tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif
if (!UseMembar) {
address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
os::set_memory_serialize_page(mem_serialize_page);
#ifndef PRODUCT
if(Verbose && PrintMiscellaneous)
if (Verbose && PrintMiscellaneous)
tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
}
@ -4725,13 +4725,13 @@ void os::init_3(void) {
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0)
fatal("Could not disable polling page");
};
// Mark the polling page as readable
void os::make_polling_page_readable(void) {
if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0)
fatal("Could not enable polling page");
};
@ -5221,7 +5221,7 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
getpid(),
thread->osthread()->lwp_id());
fd = ::open(proc_name, O_RDONLY);
if ( fd == -1 ) return -1;
if (fd == -1) return -1;
do {
count = ::pread(fd,
@ -5230,7 +5230,7 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
thr_time_off);
} while (count < 0 && errno == EINTR);
::close(fd);
if ( count < 0 ) return -1;
if (count < 0) return -1;
if (user_sys_cpu_time) {
// user + system CPU time
@ -5244,7 +5244,7 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
(jlong)prusage.pr_utime.tv_nsec;
}
return(lwp_time);
return (lwp_time);
}
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
@ -5448,43 +5448,43 @@ static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
int os::PlatformEvent::TryPark() {
for (;;) {
const int v = _Event ;
guarantee ((v == 0) || (v == 1), "invariant") ;
if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
const int v = _Event;
guarantee((v == 0) || (v == 1), "invariant");
if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
}
}
void os::PlatformEvent::park() { // AKA: down()
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
int v ;
int v;
for (;;) {
v = _Event ;
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
}
guarantee (v >= 0, "invariant") ;
guarantee(v >= 0, "invariant");
if (v == 0) {
// Do this the hard way by blocking ...
// See http://monaco.sfbay/detail.jsf?cr=5094058.
// TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
// Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
if (ClearFPUAtPark) { _mark_fpu_nosave(); }
#endif
int status = os::Solaris::mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
guarantee (_nParked == 0, "invariant") ;
++ _nParked ;
assert_status(status == 0, status, "mutex_lock");
guarantee(_nParked == 0, "invariant");
++_nParked;
while (_Event < 0) {
// for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
// Treat this the same as if the wait was interrupted
// With usr/lib/lwp going to kernel, always handle ETIME
status = os::Solaris::cond_wait(_cond, _mutex);
if (status == ETIME) status = EINTR ;
if (status == ETIME) status = EINTR;
assert_status(status == 0 || status == EINTR, status, "cond_wait");
}
-- _nParked ;
_Event = 0 ;
--_nParked;
_Event = 0;
status = os::Solaris::mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
@ -5494,41 +5494,41 @@ void os::PlatformEvent::park() { // AKA: down()
}
int os::PlatformEvent::park(jlong millis) {
guarantee (_nParked == 0, "invariant") ;
int v ;
guarantee(_nParked == 0, "invariant");
int v;
for (;;) {
v = _Event ;
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
}
guarantee (v >= 0, "invariant") ;
if (v != 0) return OS_OK ;
guarantee(v >= 0, "invariant");
if (v != 0) return OS_OK;
int ret = OS_TIMEOUT;
timestruc_t abst;
compute_abstime (&abst, millis);
compute_abstime(&abst, millis);
// See http://monaco.sfbay/detail.jsf?cr=5094058.
// For Solaris SPARC set fprs.FEF=0 prior to parking.
// Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
if (ClearFPUAtPark) { _mark_fpu_nosave(); }
#endif
int status = os::Solaris::mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
guarantee (_nParked == 0, "invariant") ;
++ _nParked ;
guarantee(_nParked == 0, "invariant");
++_nParked;
while (_Event < 0) {
int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
assert_status(status == 0 || status == EINTR ||
status == ETIME || status == ETIMEDOUT,
status, "cond_timedwait");
if (!FilterSpuriousWakeups) break ; // previous semantics
if (status == ETIME || status == ETIMEDOUT) break ;
if (!FilterSpuriousWakeups) break; // previous semantics
if (status == ETIME || status == ETIMEDOUT) break;
// We consume and ignore EINTR and spurious wakeups.
}
-- _nParked ;
if (_Event >= 0) ret = OS_OK ;
_Event = 0 ;
--_nParked;
if (_Event >= 0) ret = OS_OK;
_Event = 0;
status = os::Solaris::mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
@ -5605,7 +5605,7 @@ void os::PlatformEvent::unpark() {
* years from "now".
*/
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
assert (time > 0, "convertTime");
assert(time > 0, "convertTime");
struct timeval now;
int status = gettimeofday(&now, NULL);
@ -5664,7 +5664,7 @@ void Parker::park(bool isAbsolute, jlong time) {
// First, demultiplex/decode time arguments
timespec absTime;
if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
return;
}
if (time > 0) {
@ -5688,12 +5688,12 @@ void Parker::park(bool isAbsolute, jlong time) {
return;
}
int status ;
int status;
if (_counter > 0) { // no wait needed
_counter = 0;
status = os::Solaris::mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
assert(status == 0, "invariant");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
@ -5717,11 +5717,11 @@ void Parker::park(bool isAbsolute, jlong time) {
// TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
// Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
if (ClearFPUAtPark) { _mark_fpu_nosave(); }
#endif
if (time == 0) {
status = os::Solaris::cond_wait (_cond, _mutex) ;
status = os::Solaris::cond_wait(_cond, _mutex);
} else {
status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
}
@ -5734,9 +5734,9 @@ void Parker::park(bool isAbsolute, jlong time) {
#ifdef ASSERT
thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
_counter = 0 ;
_counter = 0;
status = os::Solaris::mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock") ;
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
@ -5748,17 +5748,17 @@ void Parker::park(bool isAbsolute, jlong time) {
}
void Parker::unpark() {
int s, status ;
status = os::Solaris::mutex_lock (_mutex) ;
assert (status == 0, "invariant") ;
int s, status;
status = os::Solaris::mutex_lock(_mutex);
assert(status == 0, "invariant");
s = _counter;
_counter = 1;
status = os::Solaris::mutex_unlock (_mutex) ;
assert (status == 0, "invariant") ;
status = os::Solaris::mutex_unlock(_mutex);
assert(status == 0, "invariant");
if (s < 1) {
status = os::Solaris::cond_signal (_cond) ;
assert (status == 0, "invariant") ;
status = os::Solaris::cond_signal(_cond);
assert(status == 0, "invariant");
}
}
@ -5925,14 +5925,14 @@ int os::timeout(int fd, long timeout) {
gettimeofday(&t, &aNull);
prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
for(;;) {
for (;;) {
res = ::poll(&pfd, 1, timeout);
if(res == OS_ERR && errno == EINTR) {
if(timeout != -1) {
if (res == OS_ERR && errno == EINTR) {
if (timeout != -1) {
gettimeofday(&t, &aNull);
newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000;
timeout -= newtime - prevtime;
if(timeout <= 0)
if (timeout <= 0)
return OS_OK;
prevtime = newtime;
}
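
The os::timeout() hunk above cleans up a classic poll()-with-EINTR loop: if a signal interrupts the wait, charge the elapsed time against the timeout and retry. A minimal free-standing version (return values simplified; the real function maps them onto OS_OK/OS_ERR):

```cpp
#include <poll.h>
#include <sys/time.h>
#include <cerrno>
#include <cstdio>

static long now_millis() {
  struct timeval t;
  gettimeofday(&t, NULL);
  return t.tv_sec * 1000L + t.tv_usec / 1000;
}

// Wait for fd to become readable for at most timeout_ms (-1 = forever).
// Returns poll()'s result: >0 ready, 0 timed out, <0 error.
static int wait_readable(int fd, long timeout_ms) {
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;
  pfd.revents = 0;
  long prev = now_millis();
  for (;;) {
    int res = poll(&pfd, 1, (int)timeout_ms);
    if (res >= 0 || errno != EINTR) return res;   // data, timeout, or a real error
    if (timeout_ms != -1) {
      long now = now_millis();
      timeout_ms -= now - prev;                   // charge the interrupted wait
      if (timeout_ms <= 0) return 0;              // nothing left: report a timeout
      prev = now;
    }
  }
}

int main() {
  printf("poll(stdin, 100ms) -> %d\n", wait_readable(0, 100));
  return 0;
}
```
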

View File

@ -301,48 +301,48 @@ class Solaris {
class PlatformEvent : public CHeapObj<mtInternal> {
private:
double CachePad [4] ; // increase odds that _mutex is sole occupant of cache line
volatile int _Event ;
int _nParked ;
int _pipev [2] ;
mutex_t _mutex [1] ;
cond_t _cond [1] ;
double PostPad [2] ;
double CachePad[4]; // increase odds that _mutex is sole occupant of cache line
volatile int _Event;
int _nParked;
int _pipev[2];
mutex_t _mutex[1];
cond_t _cond[1];
double PostPad[2];
protected:
// Defining a protected ctor effectively gives us an abstract base class.
// That is, a PlatformEvent can never be instantiated "naked" but only
// as a part of a ParkEvent (recall that ParkEvent extends PlatformEvent).
// TODO-FIXME: make dtor private
~PlatformEvent() { guarantee (0, "invariant") ; }
~PlatformEvent() { guarantee(0, "invariant"); }
PlatformEvent() {
int status;
status = os::Solaris::cond_init(_cond);
assert_status(status == 0, status, "cond_init");
status = os::Solaris::mutex_init(_mutex);
assert_status(status == 0, status, "mutex_init");
_Event = 0 ;
_nParked = 0 ;
_pipev[0] = _pipev[1] = -1 ;
_Event = 0;
_nParked = 0;
_pipev[0] = _pipev[1] = -1;
}
public:
// Exercise caution using reset() and fired() -- they may require MEMBARs
void reset() { _Event = 0 ; }
void reset() { _Event = 0; }
int fired() { return _Event; }
void park () ;
int park (jlong millis) ;
int TryPark () ;
void unpark () ;
} ;
void park();
int park(jlong millis);
int TryPark();
void unpark();
};
class PlatformParker : public CHeapObj<mtInternal> {
protected:
mutex_t _mutex [1] ;
cond_t _cond [1] ;
mutex_t _mutex[1];
cond_t _cond[1];
public: // TODO-FIXME: make dtor private
~PlatformParker() { guarantee (0, "invariant") ; }
~PlatformParker() { guarantee(0, "invariant"); }
public:
PlatformParker() {
@ -352,6 +352,6 @@ class PlatformParker : public CHeapObj<mtInternal> {
status = os::Solaris::mutex_init(_mutex);
assert_status(status == 0, status, "mutex_init");
}
} ;
};
#endif // OS_SOLARIS_VM_OS_SOLARIS_HPP
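
As on BSD and Linux, the Solaris PlatformEvent above pads its hot fields with CachePad/PostPad doubles so _mutex and _Event are unlikely to share a cache line with unrelated data. A sketch of the same intent using C++11 alignas (an illustration of the idea, not a proposed replacement; 64 bytes is assumed as a typical cache-line size):

```cpp
#include <pthread.h>
#include <cstdio>

struct alignas(64) PaddedEvent {
  volatile int    event;           // hot state, kept off neighbouring lines
  volatile int    n_parked;
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
  char            pad[64];         // trailing pad so the next object starts on a fresh line
};

int main() {
  printf("sizeof(PaddedEvent) = %zu, alignof = %zu\n",
         sizeof(PaddedEvent), alignof(PaddedEvent));
  return 0;
}
```
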

View File

@ -125,11 +125,11 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
switch (reason) {
case DLL_PROCESS_ATTACH:
vm_lib_handle = hinst;
if(ForceTimeHighResolution)
if (ForceTimeHighResolution)
timeBeginPeriod(1L);
break;
case DLL_PROCESS_DETACH:
if(ForceTimeHighResolution)
if (ForceTimeHighResolution)
timeEndPeriod(1L);
// Workaround for issue when a custom launcher doesn't call
@ -318,7 +318,7 @@ extern "C" void breakpoint() {
*/
address os::get_caller_pc(int n) {
#ifdef _NMT_NOINLINE_
n ++;
n++;
#endif
address pc;
if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
@ -345,10 +345,10 @@ address os::current_stack_base() {
// Add up the sizes of all the regions with the same
// AllocationBase.
while( 1 )
while (1)
{
VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
if ( stack_bottom == (address)minfo.AllocationBase )
if (stack_bottom == (address)minfo.AllocationBase)
stack_size += minfo.RegionSize;
else
break;
@ -644,7 +644,7 @@ static jlong performance_frequency;
jlong as_long(LARGE_INTEGER x) {
jlong result = 0; // initialization to avoid warning
set_high(&result, x.HighPart);
set_low(&result, x.LowPart);
set_low(&result, x.LowPart);
return result;
}
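
as_long() above stitches a 64-bit value back together from LARGE_INTEGER.HighPart/LowPart; the union's QuadPart member yields the same value directly, as in this Windows-only sketch:

```cpp
#include <windows.h>
#include <cstdio>

int main() {
  LARGE_INTEGER freq, count;
  QueryPerformanceFrequency(&freq);
  QueryPerformanceCounter(&count);
  long long ticks = count.QuadPart;   // same value as_long() assembles from High/LowPart
  printf("qpc: %lld ticks at %lld Hz\n", ticks, (long long)freq.QuadPart);
  return 0;
}
```
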
@ -999,7 +999,7 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
#endif
cwd = get_current_directory(NULL, 0);
jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id());
jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp", cwd, current_process_id());
dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
if (dumpFile == INVALID_HANDLE_VALUE) {
@ -1217,7 +1217,7 @@ bool os::dll_build_name(char *buffer, size_t buflen,
if (pelements == NULL) {
return false;
}
for (int i = 0 ; i < n ; i++) {
for (int i = 0; i < n; i++) {
char* path = pelements[i];
// Really shouldn't be NULL, but check can't hurt
size_t plen = (path == NULL) ? 0 : strlen(path);
@ -1236,7 +1236,7 @@ bool os::dll_build_name(char *buffer, size_t buflen,
}
}
// release the storage
for (int i = 0 ; i < n ; i++) {
for (int i = 0; i < n; i++) {
if (pelements[i] != NULL) {
FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
}
@ -1271,12 +1271,12 @@ static bool _addr_in_ntdll( address addr )
MODULEINFO minfo;
hmod = GetModuleHandle("NTDLL.DLL");
if ( hmod == NULL ) return false;
if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
if (hmod == NULL) return false;
if (!os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
&minfo, sizeof(MODULEINFO)) )
return false;
if ( (addr >= minfo.lpBaseOfDll) &&
if ((addr >= minfo.lpBaseOfDll) &&
(addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
return true;
else
@ -1304,11 +1304,11 @@ typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);
// enumerate_modules for Windows NT, using PSAPI
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
HANDLE hProcess ;
HANDLE hProcess;
# define MAX_NUM_MODULES 128
HMODULE modules[MAX_NUM_MODULES];
static char filename[ MAX_PATH ];
static char filename[MAX_PATH];
int result = 0;
if (!os::PSApiDll::PSApiAvailable()) {
@ -1316,13 +1316,13 @@ static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void
}
hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
FALSE, pid ) ;
FALSE, pid);
if (hProcess == NULL) return 0;
DWORD size_needed;
if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
sizeof(modules), &size_needed)) {
CloseHandle( hProcess );
CloseHandle(hProcess);
return 0;
}
@ -1331,7 +1331,7 @@ static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void
for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
// Get Full pathname:
if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
filename, sizeof(filename))) {
filename[0] = '\0';
}
@ -1349,7 +1349,7 @@ static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void
if (result) break;
}
CloseHandle( hProcess ) ;
CloseHandle(hProcess);
return result;
}
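
_enumerate_modules_winnt() above drives the module walk through the os::PSApiDll wrappers; calling the underlying PSAPI functions directly looks roughly like this (Windows-only sketch, link against psapi):

```cpp
#include <windows.h>
#include <psapi.h>
#include <cstdio>

int main() {
  HANDLE process = GetCurrentProcess();
  HMODULE modules[128];
  DWORD size_needed = 0;

  if (!EnumProcessModules(process, modules, (DWORD)sizeof(modules), &size_needed)) {
    return 1;
  }
  int count = (int)(size_needed / sizeof(HMODULE));
  if (count > 128) count = 128;                  // we only provided 128 slots

  char filename[MAX_PATH];
  for (int i = 0; i < count; i++) {
    if (!GetModuleFileNameExA(process, modules[i], filename, (DWORD)sizeof(filename))) {
      filename[0] = '\0';                        // keep going even if the name is unavailable
    }
    MODULEINFO minfo;
    if (GetModuleInformation(process, modules[i], &minfo, sizeof(minfo))) {
      printf("%p %8lu %s\n", minfo.lpBaseOfDll,
             (unsigned long)minfo.SizeOfImage, filename);
    }
  }
  return 0;
}
```
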
@ -1357,8 +1357,8 @@ static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void
// enumerate_modules for Windows 95/98/ME, using TOOLHELP
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
HANDLE hSnapShot ;
static MODULEENTRY32 modentry ;
HANDLE hSnapShot;
static MODULEENTRY32 modentry;
int result = 0;
if (!os::Kernel32Dll::HelpToolsAvailable()) {
@ -1366,22 +1366,22 @@ static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, vo
}
// Get a handle to a Toolhelp snapshot of the system
hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
if( hSnapShot == INVALID_HANDLE_VALUE ) {
return FALSE ;
hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid);
if (hSnapShot == INVALID_HANDLE_VALUE) {
return FALSE;
}
// iterate through all modules
modentry.dwSize = sizeof(MODULEENTRY32) ;
modentry.dwSize = sizeof(MODULEENTRY32);
bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;
while( not_done ) {
while (not_done) {
// invoke the callback
result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
modentry.modBaseSize, param);
if (result) break;
modentry.dwSize = sizeof(MODULEENTRY32) ;
modentry.dwSize = sizeof(MODULEENTRY32);
not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
}
@ -1941,7 +1941,7 @@ void os::signal_raise(int signal_number) {
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
switch(event) {
switch (event) {
case CTRL_C_EVENT:
if (is_error_reported()) {
// Ctrl-C is pressed during error reporting, likely because the error
@ -1965,7 +1965,7 @@ static BOOL WINAPI consoleHandler(DWORD event) {
HANDLE handle = GetProcessWindowStation();
if (handle != NULL &&
GetUserObjectInformation(handle, UOI_FLAGS, &flags,
sizeof( USEROBJECTFLAGS), NULL)) {
sizeof(USEROBJECTFLAGS), NULL)) {
// If it is a non-interactive session, let next handler to deal
// with it.
if ((flags.dwFlags & WSF_VISIBLE) == 0) {
@ -1991,7 +1991,7 @@ static BOOL WINAPI consoleHandler(DWORD event) {
// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd(){
int os::sigexitnum_pd() {
return NSIG;
}
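
consoleHandler() above (registered elsewhere via SetConsoleCtrlHandler) maps console control events onto the VM's signal machinery. A minimal hedged sketch of that pattern (Windows-only; the SIGINT/SIGTERM mapping mirrors what the surrounding code does, but the details here are illustrative):

```cpp
#include <windows.h>
#include <csignal>
#include <cstdio>

static BOOL WINAPI console_handler(DWORD event) {
  switch (event) {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
      raise(SIGINT);                 // hand off to the normal signal machinery
      return TRUE;                   // we consumed the event
    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      raise(SIGTERM);
      return TRUE;
    default:
      return FALSE;                  // let the next handler deal with it
  }
}

int main() {
  if (!SetConsoleCtrlHandler(console_handler, TRUE)) {
    fprintf(stderr, "SetConsoleCtrlHandler failed\n");
    return 1;
  }
  printf("press Ctrl+C within 5 seconds...\n");
  Sleep(5000);
  return 0;
}
```
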
@ -2422,11 +2422,11 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// process of write protecting the memory serialization page.
// It write enables the page immediately after protecting it
// so just return.
if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
JavaThread* thread = (JavaThread*) t;
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if ( os::is_memory_serialize_page(thread, addr) ) {
if (os::is_memory_serialize_page(thread, addr)) {
// Block current thread until the memory serialize page permission restored.
os::block_on_serialize_page_trap();
return EXCEPTION_CONTINUE_EXECUTION;
@ -2543,7 +2543,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
//
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) {
addr = (address)((uintptr_t)addr &
(~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
os::commit_memory((char *)addr, thread->stack_base() - addr,
@ -2623,7 +2623,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// Compiled method patched to be non entrant? Following conditions must apply:
// 1. must be first instruction in bundle
// 2. must be a break instruction with appropriate code
if((((uint64_t) pc & 0x0F) == 0) &&
if ((((uint64_t) pc & 0x0F) == 0) &&
(((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
return Handle_Exception(exceptionInfo,
(address)SharedRuntime::get_handle_wrong_method_stub());
@ -2794,7 +2794,7 @@ public:
return (_numa_used_node_count > 1);
}
int get_count() {return _numa_used_node_count;}
int get_count() { return _numa_used_node_count; }
int get_node_list_entry(int n) {
// for indexes out of range, returns -1
return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
@ -3112,14 +3112,14 @@ char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
} else {
elapsedTimer reserveTimer;
if( Verbose && PrintMiscellaneous ) reserveTimer.start();
if (Verbose && PrintMiscellaneous) reserveTimer.start();
// in numa interleaving, we have to allocate pages individually
// (well really chunks of NUMAInterleaveGranularity size)
res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
if (res == NULL) {
warning("NUMA page allocation failed");
}
if( Verbose && PrintMiscellaneous ) {
if (Verbose && PrintMiscellaneous) {
reserveTimer.stop();
tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
reserveTimer.milliseconds(), reserveTimer.ticks());
@ -3450,14 +3450,14 @@ public:
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
jlong limit = (jlong) MAXDWORD;
while(ms > limit) {
while (ms > limit) {
int res;
if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
return res;
ms -= limit;
}
assert(thread == Thread::current(), "thread consistency check");
assert(thread == Thread::current(), "thread consistency check");
OSThread* osthread = thread->osthread();
OSThreadWaitState osts(osthread, false /* not Object.wait() */);
int result;
@ -3473,8 +3473,8 @@ int os::sleep(Thread* thread, jlong ms, bool interruptable) {
HANDLE events[1];
events[0] = osthread->interrupt_event();
HighResolutionInterval *phri=NULL;
if(!ForceTimeHighResolution)
phri = new HighResolutionInterval( ms );
if (!ForceTimeHighResolution)
phri = new HighResolutionInterval(ms);
if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
result = OS_TIMEOUT;
} else {
@ -3511,17 +3511,17 @@ void os::infinite_sleep() {
}
}
typedef BOOL (WINAPI * STTSignature)(void) ;
typedef BOOL (WINAPI * STTSignature)(void);
os::YieldResult os::NakedYield() {
// Use either SwitchToThread() or Sleep(0)
// Consider passing back the return value from SwitchToThread().
if (os::Kernel32Dll::SwitchToThreadAvailable()) {
return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY;
} else {
Sleep(0);
}
return os::YIELD_UNKNOWN ;
return os::YIELD_UNKNOWN;
}
void os::yield() { os::NakedYield(); }
@ -3574,7 +3574,7 @@ static int prio_init() {
}
}
if (UseCriticalJavaThreadPriority) {
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
}
return 0;
}
@ -3586,7 +3586,7 @@ OSReturn os::set_native_priority(Thread* thread, int priority) {
}
OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
if ( !UseThreadPriorities ) {
if (!UseThreadPriorities) {
*priority_ptr = java_to_os_priority[NormPriority];
return OS_OK;
}
@ -3620,8 +3620,8 @@ void os::interrupt(Thread* thread) {
if (thread->is_Java_thread())
((JavaThread*)thread)->parker()->unpark();
ParkEvent * ev = thread->_ParkEvent ;
if (ev != NULL) ev->unpark() ;
ParkEvent * ev = thread->_ParkEvent;
if (ev != NULL) ev->unpark();
}
@ -3715,7 +3715,7 @@ void os::win32::initialize_system_info() {
OSVERSIONINFOEX oi;
oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
GetVersionEx((OSVERSIONINFO*)&oi);
switch(oi.dwPlatformId) {
switch (oi.dwPlatformId) {
case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
case VER_PLATFORM_WIN32_NT:
_is_nt = true;
@ -3898,29 +3898,29 @@ static jint initSock();
jint os::init_2(void) {
// Allocate a single page and mark it as readable for safepoint polling
address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
guarantee( polling_page != NULL, "Reserve Failed for polling page");
guarantee(polling_page != NULL, "Reserve Failed for polling page");
address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
guarantee( return_page != NULL, "Commit Failed for polling page");
guarantee(return_page != NULL, "Commit Failed for polling page");
os::set_polling_page( polling_page );
os::set_polling_page(polling_page);
#ifndef PRODUCT
if( Verbose && PrintMiscellaneous )
if (Verbose && PrintMiscellaneous)
tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif
if (!UseMembar) {
address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
guarantee( return_page != NULL, "Commit Failed for memory serialize page");
guarantee(return_page != NULL, "Commit Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
os::set_memory_serialize_page(mem_serialize_page);
#ifndef PRODUCT
if(Verbose && PrintMiscellaneous)
if (Verbose && PrintMiscellaneous)
tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
}
@ -4036,14 +4036,14 @@ void os::init_3(void) {
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
DWORD old_status;
if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status))
fatal("Could not disable polling page");
};
// Mark the polling page as readable
void os::make_polling_page_readable(void) {
DWORD old_status;
if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status))
fatal("Could not enable polling page");
};
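
The polling page above is reserved and committed read-only, then flipped between PAGE_NOACCESS and PAGE_READONLY so that safepoint polls either fault or succeed. A minimal standalone sketch of that reserve/commit/protect sequence follows; the helper names are hypothetical and no HotSpot types are used.

#include <windows.h>

// Illustrative only: query the system page size.
static SIZE_T system_page_size() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}

// Reserve and commit one read-only page.
static void* create_polling_page(SIZE_T page_size) {
  void* page = VirtualAlloc(NULL, page_size, MEM_RESERVE, PAGE_READONLY);
  if (page == NULL) return NULL;
  // Committing the reserved range is what makes it actually accessible.
  return VirtualAlloc(page, page_size, MEM_COMMIT, PAGE_READONLY);
}

static BOOL make_page_unreadable(void* page, SIZE_T page_size) {
  DWORD old_protect;
  return VirtualProtect(page, page_size, PAGE_NOACCESS, &old_protect);
}

static BOOL make_page_readable(void* page, SIZE_T page_size) {
  DWORD old_protect;
  return VirtualProtect(page, page_size, PAGE_READONLY, &old_protect);
}
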
@ -4121,7 +4121,7 @@ jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
FILETIME KernelTime;
FILETIME UserTime;
if ( GetThreadTimes(thread->osthread()->thread_handle(),
if (GetThreadTimes(thread->osthread()->thread_handle(),
&CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
return -1;
else
@ -4157,7 +4157,7 @@ bool os::is_thread_cpu_time_supported() {
FILETIME KernelTime;
FILETIME UserTime;
if ( GetThreadTimes(GetCurrentThread(),
if (GetThreadTimes(GetCurrentThread(),
&CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
return false;
else
@ -4391,7 +4391,7 @@ int os::ftruncate(int fd, jlong length) {
int os::fsync(int fd) {
HANDLE handle = (HANDLE)::_get_osfhandle(fd);
if ( (!::FlushFileBuffers(handle)) &&
if ((!::FlushFileBuffers(handle)) &&
(GetLastError() != ERROR_ACCESS_DENIED) ) {
/* from winerror.h */
return -1;
@ -4512,7 +4512,7 @@ static int stdinAvailable(int fd, long *pbytes) {
}
/* Examine input records for the number of bytes available */
for(i=0; i<numEvents; i++) {
for (i=0; i<numEvents; i++) {
if (lpBuffer[i].EventType == KEY_EVENT) {
KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
@ -4527,7 +4527,7 @@ static int stdinAvailable(int fd, long *pbytes) {
}
}
if(lpBuffer != NULL) {
if (lpBuffer != NULL) {
os::free(lpBuffer, mtInternal);
}
@ -4790,19 +4790,19 @@ bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
// with explicit "PARKED" and "SIGNALED" bits.
int os::PlatformEvent::park (jlong Millis) {
guarantee (_ParkHandle != NULL , "Invariant") ;
guarantee (Millis > 0 , "Invariant") ;
int v ;
guarantee(_ParkHandle != NULL , "Invariant");
guarantee(Millis > 0 , "Invariant");
int v;
// CONSIDER: defer assigning a CreateEvent() handle to the Event until
// the initial park() operation.
for (;;) {
v = _Event ;
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
}
guarantee ((v == 0) || (v == 1), "invariant") ;
if (v != 0) return OS_OK ;
guarantee((v == 0) || (v == 1), "invariant");
if (v != 0) return OS_OK;
// Do this the hard way by blocking ...
// TODO: consider a brief spin here, gated on the success of recent
@ -4820,59 +4820,59 @@ int os::PlatformEvent::park (jlong Millis) {
// In the future, however, we might want to track the accumulated wait time and
// adjust Millis accordingly if we encounter a spurious wakeup.
const int MAXTIMEOUT = 0x10000000 ;
DWORD rv = WAIT_TIMEOUT ;
const int MAXTIMEOUT = 0x10000000;
DWORD rv = WAIT_TIMEOUT;
while (_Event < 0 && Millis > 0) {
DWORD prd = Millis ; // set prd = MAX (Millis, MAXTIMEOUT)
DWORD prd = Millis; // set prd = MIN(Millis, MAXTIMEOUT)
if (Millis > MAXTIMEOUT) {
prd = MAXTIMEOUT ;
prd = MAXTIMEOUT;
}
rv = ::WaitForSingleObject (_ParkHandle, prd) ;
assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
rv = ::WaitForSingleObject(_ParkHandle, prd);
assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
if (rv == WAIT_TIMEOUT) {
Millis -= prd ;
Millis -= prd;
}
}
v = _Event ;
_Event = 0 ;
v = _Event;
_Event = 0;
// see comment at end of os::PlatformEvent::park() below:
OrderAccess::fence() ;
OrderAccess::fence();
// If we encounter a nearly simultaneous timeout expiry and unpark()
// we return OS_OK indicating we awoke via unpark().
// Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
return (v >= 0) ? OS_OK : OS_TIMEOUT ;
return (v >= 0) ? OS_OK : OS_TIMEOUT;
}
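
The timed park above bounds each wait to MAXTIMEOUT and loops, so a jlong timeout never overflows the DWORD argument of WaitForSingleObject. A minimal standalone sketch of that chunking, assuming a plain Win32 event handle; the helper name wait_with_long_timeout is hypothetical and spurious-wakeup tracking is left out, as in the code above.

#include <windows.h>

// Illustrative only: wait on an event for a 64-bit millisecond timeout by
// issuing bounded WaitForSingleObject calls. Returns true if the event was
// signaled, false on overall timeout or error.
static bool wait_with_long_timeout(HANDLE event, long long millis) {
  const DWORD kMaxChunk = 0x10000000;               // well below MAXDWORD
  while (millis > 0) {
    DWORD chunk = (millis > (long long)kMaxChunk) ? kMaxChunk : (DWORD)millis;
    DWORD rv = WaitForSingleObject(event, chunk);
    if (rv == WAIT_OBJECT_0) return true;           // signaled
    if (rv != WAIT_TIMEOUT) return false;           // WAIT_FAILED / WAIT_ABANDONED
    millis -= chunk;                                // this chunk expired; keep waiting
  }
  return false;                                     // overall timeout
}
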
void os::PlatformEvent::park () {
guarantee (_ParkHandle != NULL, "Invariant") ;
void os::PlatformEvent::park() {
guarantee(_ParkHandle != NULL, "Invariant");
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
int v ;
int v;
for (;;) {
v = _Event ;
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
}
guarantee ((v == 0) || (v == 1), "invariant") ;
if (v != 0) return ;
guarantee((v == 0) || (v == 1), "invariant");
if (v != 0) return;
// Do this the hard way by blocking ...
// TODO: consider a brief spin here, gated on the success of recent
// spin attempts by this thread.
while (_Event < 0) {
DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
}
// Usually we'll find _Event == 0 at this point, but as
// an optional optimization we clear it, just in case
// multiple unpark() operations drove _Event up to 1.
_Event = 0 ;
OrderAccess::fence() ;
guarantee (_Event >= 0, "invariant") ;
_Event = 0;
OrderAccess::fence();
guarantee(_Event >= 0, "invariant");
}
void os::PlatformEvent::unpark() {
guarantee (_ParkHandle != NULL, "Invariant") ;
guarantee(_ParkHandle != NULL, "Invariant");
// Transitions for _Event:
// 0 :=> 1
@ -4907,7 +4907,7 @@ void os::PlatformEvent::unpark() {
void Parker::park(bool isAbsolute, jlong time) {
guarantee (_ParkEvent != NULL, "invariant") ;
guarantee(_ParkEvent != NULL, "invariant");
// First, demultiplex/decode time arguments
if (time < 0) { // don't wait
return;
@ -4941,7 +4941,7 @@ void Parker::park(bool isAbsolute, jlong time) {
OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
jt->set_suspend_equivalent();
WaitForSingleObject(_ParkEvent, time);
WaitForSingleObject(_ParkEvent, time);
ResetEvent(_ParkEvent);
// If externally suspended while waiting, re-suspend
@ -4952,7 +4952,7 @@ void Parker::park(bool isAbsolute, jlong time) {
}
void Parker::unpark() {
guarantee (_ParkEvent != NULL, "invariant") ;
guarantee(_ParkEvent != NULL, "invariant");
SetEvent(_ParkEvent);
}
@ -5040,7 +5040,7 @@ bool os::find(address addr, outputStream* st) {
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
DWORD exception_code = e->ExceptionRecord->ExceptionCode;
if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];

View File

@ -55,7 +55,7 @@ int VM_Version::platform_features(int features) {
if (detect_niagara()) {
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on Niagara");)
features = niagara1_m;
features = niagara1_m | T_family_m;
}
return features;

View File

@ -2984,9 +2984,12 @@ void ClassFileParser::parse_classfile_attributes(ClassFileParser::ClassAnnotatio
} else if (tag == vmSymbols::tag_enclosing_method()) {
if (parsed_enclosingmethod_attribute) {
classfile_parse_error("Multiple EnclosingMethod attributes in class file %s", CHECK);
} else {
} else {
parsed_enclosingmethod_attribute = true;
}
guarantee_property(attribute_length == 4,
"Wrong EnclosingMethod attribute length %u in class file %s",
attribute_length, CHECK);
cfs->guarantee_more(4, CHECK); // class_index, method_index
enclosing_method_class_index = cfs->get_u2_fast();
enclosing_method_method_index = cfs->get_u2_fast();
@ -4067,6 +4070,11 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
this_klass->set_major_version(major_version);
this_klass->set_has_default_methods(has_default_methods);
if (!host_klass.is_null()) {
assert (this_klass->is_anonymous(), "should be the same");
this_klass->set_host_klass(host_klass());
}
// Set up Method*::intrinsic_id as soon as we know the names of methods.
// (We used to do this lazily, but now we query it in Rewriter,
// which is eagerly done for every method, so we might as well do it now,
@ -4664,9 +4672,7 @@ bool ClassFileParser::has_illegal_visibility(jint flags) {
}
bool ClassFileParser::is_supported_version(u2 major, u2 minor) {
u2 max_version =
JDK_Version::is_gte_jdk17x_version() ? JAVA_MAX_SUPPORTED_VERSION :
(JDK_Version::is_gte_jdk16x_version() ? JAVA_6_VERSION : JAVA_1_5_VERSION);
u2 max_version = JAVA_MAX_SUPPORTED_VERSION;
return (major >= JAVA_MIN_SUPPORTED_VERSION) &&
(major <= max_version) &&
((major != max_version) ||

View File

@ -1126,7 +1126,7 @@ void ClassLoader::verify() {
// JDK 1.3 version
typedef struct real_jzentry13 { /* Zip file entry */
typedef struct real_jzentry { /* Zip file entry */
char *name; /* entry name */
jint time; /* modification time */
jint size; /* size of uncompressed data */
@ -1135,9 +1135,9 @@ typedef struct real_jzentry13 { /* Zip file entry */
char *comment; /* optional zip file comment */
jbyte *extra; /* optional extra data */
jint pos; /* position of LOC header (if negative) or data */
} real_jzentry13;
} real_jzentry;
typedef struct real_jzfile13 { /* Zip file */
typedef struct real_jzfile { /* Zip file */
char *name; /* zip file name */
jint refs; /* number of active references */
jint fd; /* open file descriptor */
@ -1148,42 +1148,14 @@ typedef struct real_jzfile13 { /* Zip file */
jint total; /* total number of entries */
unsigned short *table; /* Hash chain heads: indexes into entries */
jint tablelen; /* number of hash heads */
real_jzfile13 *next; /* next zip file in search list */
real_jzfile *next; /* next zip file in search list */
jzentry *cache; /* we cache the most recently freed jzentry */
/* Information on metadata names in META-INF directory */
char **metanames; /* array of meta names (may have null names) */
jint metacount; /* number of slots in metanames array */
/* If there are any per-entry comments, they are in the comments array */
char **comments;
} real_jzfile13;
// JDK 1.2 version
typedef struct real_jzentry12 { /* Zip file entry */
char *name; /* entry name */
jint time; /* modification time */
jint size; /* size of uncompressed data */
jint csize; /* size of compressed data (zero if uncompressed) */
jint crc; /* crc of uncompressed data */
char *comment; /* optional zip file comment */
jbyte *extra; /* optional extra data */
jint pos; /* position of LOC header (if negative) or data */
struct real_jzentry12 *next; /* next entry in hash table */
} real_jzentry12;
typedef struct real_jzfile12 { /* Zip file */
char *name; /* zip file name */
jint refs; /* number of active references */
jint fd; /* open file descriptor */
void *lock; /* read lock */
char *comment; /* zip file comment */
char *msg; /* zip error message */
real_jzentry12 *entries; /* array of zip entries */
jint total; /* total number of entries */
real_jzentry12 **table; /* hash table of entries */
jint tablelen; /* number of buckets */
jzfile *next; /* next zip file in search list */
} real_jzfile12;
} real_jzfile;
void ClassPathDirEntry::compile_the_world(Handle loader, TRAPS) {
// For now we only compile all methods in all classes in zip/jar files
@ -1197,10 +1169,14 @@ bool ClassPathDirEntry::is_rt_jar() {
}
void ClassPathZipEntry::compile_the_world(Handle loader, TRAPS) {
if (JDK_Version::is_jdk12x_version()) {
compile_the_world12(loader, THREAD);
} else {
compile_the_world13(loader, THREAD);
real_jzfile* zip = (real_jzfile*) _zip;
tty->print_cr("CompileTheWorld : Compiling all classes in %s", zip->name);
tty->cr();
// Iterate over all entries in zip file
for (int n = 0; ; n++) {
real_jzentry * ze = (real_jzentry *)((*GetNextEntry)(_zip, n));
if (ze == NULL) break;
ClassLoader::compile_the_world_in(ze->name, loader, CHECK);
}
if (HAS_PENDING_EXCEPTION) {
if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
@ -1213,54 +1189,8 @@ void ClassPathZipEntry::compile_the_world(Handle loader, TRAPS) {
}
}
// Version that works for JDK 1.3.x
void ClassPathZipEntry::compile_the_world13(Handle loader, TRAPS) {
real_jzfile13* zip = (real_jzfile13*) _zip;
tty->print_cr("CompileTheWorld : Compiling all classes in %s", zip->name);
tty->cr();
// Iterate over all entries in zip file
for (int n = 0; ; n++) {
real_jzentry13 * ze = (real_jzentry13 *)((*GetNextEntry)(_zip, n));
if (ze == NULL) break;
ClassLoader::compile_the_world_in(ze->name, loader, CHECK);
}
}
// Version that works for JDK 1.2.x
void ClassPathZipEntry::compile_the_world12(Handle loader, TRAPS) {
real_jzfile12* zip = (real_jzfile12*) _zip;
tty->print_cr("CompileTheWorld : Compiling all classes in %s", zip->name);
tty->cr();
// Iterate over all entries in zip file
for (int n = 0; ; n++) {
real_jzentry12 * ze = (real_jzentry12 *)((*GetNextEntry)(_zip, n));
if (ze == NULL) break;
ClassLoader::compile_the_world_in(ze->name, loader, CHECK);
}
}
bool ClassPathZipEntry::is_rt_jar() {
if (JDK_Version::is_jdk12x_version()) {
return is_rt_jar12();
} else {
return is_rt_jar13();
}
}
// JDK 1.3 version
bool ClassPathZipEntry::is_rt_jar13() {
real_jzfile13* zip = (real_jzfile13*) _zip;
int len = (int)strlen(zip->name);
// Check whether zip name ends in "rt.jar"
// This will match other archives named rt.jar as well, but this is
// only used for debugging.
return (len >= 6) && (strcasecmp(zip->name + len - 6, "rt.jar") == 0);
}
// JDK 1.2 version
bool ClassPathZipEntry::is_rt_jar12() {
real_jzfile12* zip = (real_jzfile12*) _zip;
real_jzfile* zip = (real_jzfile*) _zip;
int len = (int)strlen(zip->name);
// Check whether zip name ends in "rt.jar"
// This will match other archives named rt.jar as well, but this is

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -111,11 +111,7 @@ class ClassPathZipEntry: public ClassPathEntry {
void contents_do(void f(const char* name, void* context), void* context);
// Debugging
NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
NOT_PRODUCT(void compile_the_world12(Handle loader, TRAPS);) // JDK 1.2 version
NOT_PRODUCT(void compile_the_world13(Handle loader, TRAPS);) // JDK 1.3 version
NOT_PRODUCT(bool is_rt_jar();)
NOT_PRODUCT(bool is_rt_jar12();)
NOT_PRODUCT(bool is_rt_jar13();)
};

View File

@ -624,6 +624,12 @@ void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* kl
}
}
void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
for (ClassLoaderData* cld = _head; cl != NULL && cld != NULL; cld = cld->next()) {
cl->do_cld(cld);
}
}
void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->classes_do(klass_closure);

View File

@ -77,6 +77,7 @@ class ClassLoaderDataGraph : public AllStatic {
static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void cld_do(CLDClosure* cl);
static void classes_do(KlassClosure* klass_closure);
static void classes_do(void f(Klass* const));
static void methods_do(void f(Method*));

View File

@ -0,0 +1,167 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/classLoaderStats.hpp"
#include "utilities/globalDefinitions.hpp"
class ClassStatsClosure : public KlassClosure {
public:
int _num_classes;
ClassStatsClosure() :
_num_classes(0) {
}
virtual void do_klass(Klass* k) {
_num_classes++;
}
};
void ClassLoaderStatsClosure::do_cld(ClassLoaderData* cld) {
oop cl = cld->class_loader();
ClassLoaderStats* cls;
// The hashtable key is the ClassLoader oop since we want to account
// for "real" classes and anonymous classes together
ClassLoaderStats** cls_ptr = _stats->get(cl);
if (cls_ptr == NULL) {
cls = new ClassLoaderStats();
_stats->put(cl, cls);
_total_loaders++;
} else {
cls = *cls_ptr;
}
if (!cld->is_anonymous()) {
cls->_cld = cld;
}
cls->_class_loader = cl;
if (cl != NULL) {
cls->_parent = java_lang_ClassLoader::parent(cl);
addEmptyParents(cls->_parent);
}
ClassStatsClosure csc;
cld->classes_do(&csc);
if(cld->is_anonymous()) {
cls->_anon_classes_count += csc._num_classes;
} else {
cls->_classes_count = csc._num_classes;
}
_total_classes += csc._num_classes;
Metaspace* ms = cld->metaspace_or_null();
if (ms != NULL) {
if(cld->is_anonymous()) {
cls->_anon_chunk_sz += ms->allocated_chunks_bytes();
cls->_anon_block_sz += ms->allocated_blocks_bytes();
} else {
cls->_chunk_sz = ms->allocated_chunks_bytes();
cls->_block_sz = ms->allocated_blocks_bytes();
}
_total_chunk_sz += ms->allocated_chunks_bytes();
_total_block_sz += ms->allocated_blocks_bytes();
}
}
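
do_cld above folds each ClassLoaderData into one record per class loader, with anonymous CLDs accumulated separately from the loader's own classes. A minimal standalone sketch of that aggregation shape, not the HotSpot code: the type LoaderStats and helper record are hypothetical, and std::map stands in for ResourceHashtable.

#include <cstddef>
#include <map>

// Illustrative only: one record per class loader, with anonymous class
// loader data accumulated separately from the loader's own classes.
struct LoaderStats {
  size_t classes = 0;
  size_t anon_classes = 0;
  size_t chunk_bytes = 0;
  size_t anon_chunk_bytes = 0;
};

static void record(std::map<const void*, LoaderStats>& stats,
                   const void* loader, bool is_anonymous,
                   size_t class_count, size_t chunk_bytes) {
  LoaderStats& s = stats[loader];       // created on first use, like the hashtable above
  if (is_anonymous) {
    s.anon_classes += class_count;      // anonymous CLDs accumulate onto the owner
    s.anon_chunk_bytes += chunk_bytes;
  } else {
    s.classes = class_count;            // the loader's own CLD is counted once
    s.chunk_bytes = chunk_bytes;
  }
}
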
// Handles the difference in pointer width on 32 and 64 bit platforms
#ifdef _LP64
#define SPACE "%8s"
#else
#define SPACE "%s"
#endif
bool ClassLoaderStatsClosure::do_entry(oop const& key, ClassLoaderStats* const& cls) {
Klass* class_loader_klass = (cls->_class_loader == NULL ? NULL : cls->_class_loader->klass());
Klass* parent_klass = (cls->_parent == NULL ? NULL : cls->_parent->klass());
_out->print(INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " UINTX_FORMAT_W(6) " " SIZE_FORMAT_W(8) " " SIZE_FORMAT_W(8) " ",
p2i(class_loader_klass), p2i(parent_klass), p2i(cls->_cld),
cls->_classes_count,
cls->_chunk_sz, cls->_block_sz);
if (class_loader_klass != NULL) {
_out->print("%s", class_loader_klass->external_name());
} else {
_out->print("<boot class loader>");
}
_out->cr();
if (cls->_anon_classes_count > 0) {
_out->print_cr(SPACE SPACE SPACE " " UINTX_FORMAT_W(6) " " SIZE_FORMAT_W(8) " " SIZE_FORMAT_W(8) " + unsafe anonymous classes",
"", "", "",
cls->_anon_classes_count,
cls->_anon_chunk_sz, cls->_anon_block_sz);
}
return true;
}
void ClassLoaderStatsClosure::print() {
_out->print_cr("ClassLoader" SPACE " Parent" SPACE " CLD*" SPACE " Classes ChunkSz BlockSz Type", "", "", "");
_stats->iterate(this);
_out->print("Total = " UINTX_FORMAT_W(-6), _total_loaders);
_out->print(SPACE SPACE SPACE " ", "", "", "");
_out->print_cr(UINTX_FORMAT_W(6) " " SIZE_FORMAT_W(8) " " SIZE_FORMAT_W(8) " ",
_total_classes,
_total_chunk_sz,
_total_block_sz);
_out->print_cr("ChunkSz: Total size of all allocated metaspace chunks");
_out->print_cr("BlockSz: Total size of all allocated metaspace blocks (each chunk has several blocks)");
}
void ClassLoaderStatsClosure::addEmptyParents(oop cl) {
while (cl != NULL && java_lang_ClassLoader::loader_data(cl) == NULL) {
// This classloader has not loaded any classes
ClassLoaderStats** cls_ptr = _stats->get(cl);
if (cls_ptr == NULL) {
// It does not exist in our table - add it
ClassLoaderStats* cls = new ClassLoaderStats();
cls->_class_loader = cl;
cls->_parent = java_lang_ClassLoader::parent(cl);
_stats->put(cl, cls);
_total_loaders++;
}
cl = java_lang_ClassLoader::parent(cl);
}
}
void ClassLoaderStatsVMOperation::doit() {
ClassLoaderStatsClosure clsc (_out);
ClassLoaderDataGraph::cld_do(&clsc);
clsc.print();
}
void ClassLoaderStatsDCmd::execute(DCmdSource source, TRAPS) {
ClassLoaderStatsVMOperation op(output());
VMThread::execute(&op);
}

View File

@ -0,0 +1,152 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_CLASSLOADERSTATS_HPP
#define SHARE_VM_CLASSFILE_CLASSLOADERSTATS_HPP
#include "classfile/classLoaderData.hpp"
#include "oops/klass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/vm_operations.hpp"
#include "services/diagnosticCommand.hpp"
#include "utilities/resourceHash.hpp"
class ClassLoaderStatsDCmd : public DCmd {
public:
ClassLoaderStatsDCmd(outputStream* output, bool heap) :
DCmd(output, heap) {
}
static const char* name() {
return "VM.classloader_stats";
}
static const char* description() {
return "Print statistics about all ClassLoaders.";
}
static const char* impact() {
return "Low";
}
virtual void execute(DCmdSource source, TRAPS);
static int num_arguments() {
return 0;
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission",
"monitor", NULL};
return p;
}
};
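
Assuming the command is registered with the DCmd framework under the name returned by name() above, it should be reachable at runtime through the jcmd tool as "jcmd <pid> VM.classloader_stats", producing the table printed by ClassLoaderStatsClosure::print().
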
class ClassLoaderStats : public ResourceObj {
public:
ClassLoaderData* _cld;
oop _class_loader;
oop _parent;
size_t _chunk_sz;
size_t _block_sz;
uintx _classes_count;
size_t _anon_chunk_sz;
size_t _anon_block_sz;
uintx _anon_classes_count;
ClassLoaderStats() :
_cld(0),
_class_loader(0),
_parent(0),
_chunk_sz(0),
_block_sz(0),
_classes_count(0),
_anon_block_sz(0),
_anon_chunk_sz(0),
_anon_classes_count(0) {
}
};
class ClassLoaderStatsClosure : public CLDClosure {
protected:
static bool oop_equals(oop const& s1, oop const& s2) {
return s1 == s2;
}
static unsigned oop_hash(oop const& s1) {
unsigned hash = (unsigned)((uintptr_t)&s1);
return hash ^ (hash >> LogMinObjAlignment);
}
typedef ResourceHashtable<oop, ClassLoaderStats*,
ClassLoaderStatsClosure::oop_hash, ClassLoaderStatsClosure::oop_equals> StatsTable;
outputStream* _out;
StatsTable* _stats;
uintx _total_loaders;
uintx _total_classes;
size_t _total_chunk_sz;
size_t _total_block_sz;
public:
ClassLoaderStatsClosure(outputStream* out) :
_out(out),
_total_loaders(0),
_total_block_sz(0),
_total_chunk_sz(0),
_total_classes(0),
_stats(new StatsTable()) {
}
virtual void do_cld(ClassLoaderData* cld);
virtual bool do_entry(oop const& key, ClassLoaderStats* const& cls);
void print();
private:
void addEmptyParents(oop cl);
};
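
The oop_hash above folds the upper bits of a pointer-sized value down over the lower bits; for addresses aligned to at least 2^LogMinObjAlignment the low bits carry no information, so the fold keeps hash buckets from clustering. A minimal standalone illustration of the same mixing step, with the hypothetical helper name aligned_ptr_hash.

#include <cstdint>

// Illustrative only: fold the high bits of an aligned address down over the
// low bits, which are constant for 2^log_alignment-aligned values.
static unsigned aligned_ptr_hash(const void* p, unsigned log_alignment) {
  unsigned h = (unsigned)(uintptr_t)p;
  return h ^ (h >> log_alignment);
}
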
class ClassLoaderStatsVMOperation : public VM_Operation {
outputStream* _out;
public:
ClassLoaderStatsVMOperation(outputStream* out) :
_out(out) {
}
VMOp_Type type() const {
return VMOp_ClassLoaderStatsOperation;
}
void doit();
};
#endif // SHARE_VM_CLASSFILE_CLASSLOADERSTATS_HPP

View File

@ -857,9 +857,7 @@ void java_lang_Class::compute_offsets() {
}
int java_lang_Class::classRedefinedCount(oop the_class_mirror) {
if (!JDK_Version::is_gte_jdk15x_version()
|| classRedefinedCount_offset == -1) {
// The classRedefinedCount field is only present starting in 1.5.
if (classRedefinedCount_offset == -1) {
// If we don't have an offset for it then just return -1 as a marker.
return -1;
}
@ -868,9 +866,7 @@ int java_lang_Class::classRedefinedCount(oop the_class_mirror) {
}
void java_lang_Class::set_classRedefinedCount(oop the_class_mirror, int value) {
if (!JDK_Version::is_gte_jdk15x_version()
|| classRedefinedCount_offset == -1) {
// The classRedefinedCount field is only present starting in 1.5.
if (classRedefinedCount_offset == -1) {
// If we don't have an offset for it then nothing to set.
return;
}
@ -1000,9 +996,7 @@ oop java_lang_Thread::inherited_access_control_context(oop java_thread) {
jlong java_lang_Thread::stackSize(oop java_thread) {
// The stackSize field is only present starting in 1.4
if (_stackSize_offset > 0) {
assert(JDK_Version::is_gte_jdk14x_version(), "sanity check");
return java_thread->long_field(_stackSize_offset);
} else {
return 0;
@ -1078,7 +1072,7 @@ bool java_lang_Thread::set_park_event(oop java_thread, jlong ptr) {
const char* java_lang_Thread::thread_status_name(oop java_thread) {
assert(JDK_Version::is_gte_jdk15x_version() && _thread_status_offset != 0, "Must have thread status");
assert(_thread_status_offset != 0, "Must have thread status");
ThreadStatus status = (java_lang_Thread::ThreadStatus)java_thread->int_field(_thread_status_offset);
switch (status) {
case NEW : return "NEW";
@ -1217,7 +1211,6 @@ void java_lang_Throwable::set_stacktrace(oop throwable, oop st_element_array) {
}
void java_lang_Throwable::clear_stacktrace(oop throwable) {
assert(JDK_Version::is_gte_jdk14x_version(), "should only be called in >= 1.4");
set_stacktrace(throwable, NULL);
}
@ -1548,12 +1541,9 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle met
// Start out by clearing the backtrace for this object, in case the VM
// runs out of memory while allocating the stack trace
set_backtrace(throwable(), NULL);
if (JDK_Version::is_gte_jdk14x_version()) {
// New since 1.4, clear lazily constructed Java level stacktrace if
// refilling occurs
// This is unnecessary in 1.7+ but harmless
clear_stacktrace(throwable());
}
// Clear lazily constructed Java level stacktrace if refilling occurs
// This is unnecessary in 1.7+ but harmless
clear_stacktrace(throwable());
int max_depth = MaxJavaStackTraceDepth;
JavaThread* thread = (JavaThread*)THREAD;
@ -1739,13 +1729,9 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t
if (chunk_count >= max_chunks) break;
}
// For Java 7+ we support the Throwable immutability protocol defined for Java 7. This support
// was missing in 7u0 so in 7u0 there is a workaround in the Throwable class. That workaround
// can be removed in a JDK using this JVM version
if (JDK_Version::is_gte_jdk17x_version()) {
java_lang_Throwable::set_stacktrace(throwable(), java_lang_Throwable::unassigned_stacktrace());
assert(java_lang_Throwable::unassigned_stacktrace() != NULL, "not initialized");
}
// We support the Throwable immutability protocol defined for Java 7.
java_lang_Throwable::set_stacktrace(throwable(), java_lang_Throwable::unassigned_stacktrace());
assert(java_lang_Throwable::unassigned_stacktrace() != NULL, "not initialized");
}
@ -3022,8 +3008,7 @@ bool java_lang_ClassLoader::isAncestor(oop loader, oop cl) {
// based on non-null field
// Written to by java.lang.ClassLoader, vm only reads this field, doesn't set it
bool java_lang_ClassLoader::parallelCapable(oop class_loader) {
if (!JDK_Version::is_gte_jdk17x_version()
|| parallelCapable_offset == -1) {
if (parallelCapable_offset == -1) {
// Default for backward compatibility is false
return false;
}
@ -3219,7 +3204,6 @@ void java_nio_Buffer::compute_offsets() {
void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
if (_owner_offset != 0) return;
assert(JDK_Version::is_gte_jdk16x_version(), "Must be JDK 1.6 or later");
SystemDictionary::load_abstract_ownable_synchronizer_klass(CHECK);
Klass* k = SystemDictionary::abstract_ownable_synchronizer_klass();
compute_offset(_owner_offset, k,
@ -3309,15 +3293,10 @@ void JavaClasses::compute_offsets() {
java_lang_reflect_Method::compute_offsets();
java_lang_reflect_Constructor::compute_offsets();
java_lang_reflect_Field::compute_offsets();
if (JDK_Version::is_gte_jdk14x_version()) {
java_nio_Buffer::compute_offsets();
}
if (JDK_Version::is_gte_jdk15x_version()) {
sun_reflect_ConstantPool::compute_offsets();
sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
}
if (JDK_Version::is_gte_jdk18x_version())
java_lang_reflect_Parameter::compute_offsets();
java_nio_Buffer::compute_offsets();
sun_reflect_ConstantPool::compute_offsets();
sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
java_lang_reflect_Parameter::compute_offsets();
// generated interpreter code wants to know about the offsets we just computed:
AbstractAssembler::update_delayed_values();
@ -3502,7 +3481,7 @@ void JavaClasses::check_offsets() {
// into merlin "for some time." Without it, the vm will fail with early
// merlin builds.
if (CheckAssertionStatusDirectives && JDK_Version::is_gte_jdk14x_version()) {
if (CheckAssertionStatusDirectives) {
const char* nm = "java/lang/AssertionStatusDirectives";
const char* sig = "[Ljava/lang/String;";
CHECK_OFFSET(nm, java_lang_AssertionStatusDirectives, classes, sig);

View File

@ -997,7 +997,6 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
if (host_klass.not_null() && k.not_null()) {
k->set_host_klass(host_klass());
// If it's anonymous, initialize it now, since nobody else will.
{
@ -1754,8 +1753,6 @@ void SystemDictionary::methods_do(void f(Method*)) {
// Lazily load klasses
void SystemDictionary::load_abstract_ownable_synchronizer_klass(TRAPS) {
assert(JDK_Version::is_gte_jdk16x_version(), "Must be JDK 1.6 or later");
// if multiple threads calling this function, only one thread will load
// the class. The other threads will find the loaded version once the
// class is loaded.

View File

@ -139,14 +139,13 @@ class Ticks;
do_klass(reflect_Constructor_klass, java_lang_reflect_Constructor, Pre ) \
\
/* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
do_klass(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt ) \
do_klass(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
do_klass(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
do_klass(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Pre ) \
do_klass(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Pre ) \
do_klass(reflect_DelegatingClassLoader_klass, sun_reflect_DelegatingClassLoader, Opt ) \
do_klass(reflect_ConstantPool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15 ) \
do_klass(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15 ) \
do_klass(reflect_ConstantPool_klass, sun_reflect_ConstantPool, Opt ) \
do_klass(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt ) \
do_klass(reflect_CallerSensitive_klass, sun_reflect_CallerSensitive, Opt ) \
\
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
@ -169,7 +168,6 @@ class Ticks;
\
/* It's NULL in non-1.4 JDKs. */ \
do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
do_klass(nio_Buffer_klass, java_nio_Buffer, Opt ) \
\
@ -209,10 +207,8 @@ class SystemDictionary : AllStatic {
// Options after this point will use resolve_or_null instead.
Opt, // preload tried; NULL if not present
Opt_Only_JDK14NewRef, // preload tried; use only with NewReflection
Opt_Only_JDK15, // preload tried; use only with JDK1.5+
OPTION_LIMIT,
CEIL_LG_OPTION_LIMIT = 4 // OPTION_LIMIT <= (1<<CEIL_LG_OPTION_LIMIT)
CEIL_LG_OPTION_LIMIT = 2 // OPTION_LIMIT <= (1<<CEIL_LG_OPTION_LIMIT)
};
@ -385,15 +381,6 @@ public:
static Klass* check_klass_Pre( Klass* k) { return check_klass(k); }
static Klass* check_klass_Opt( Klass* k) { return k; }
static Klass* check_klass_Opt_Only_JDK15(Klass* k) {
assert(JDK_Version::is_gte_jdk15x_version(), "JDK 1.5 only");
return k;
}
static Klass* check_klass_Opt_Only_JDK14NewRef(Klass* k) {
assert(JDK_Version::is_gte_jdk14x_version(), "JDK 1.4 only");
// despite the optional loading, if you use this it must be present:
return check_klass(k);
}
static bool initialize_wk_klass(WKID id, int init_opt, TRAPS);
static void initialize_wk_klasses_until(WKID limit_id, WKID &start_id, TRAPS);

View File

@ -320,7 +320,6 @@
template(reference_discovered_name, "discovered") \
template(run_finalization_name, "runFinalization") \
template(run_finalizers_on_exit_name, "runFinalizersOnExit") \
template(uncaughtException_name, "uncaughtException") \
template(dispatchUncaughtException_name, "dispatchUncaughtException") \
template(initializeSystemClass_name, "initializeSystemClass") \
template(loadClass_name, "loadClass") \

View File

@ -475,7 +475,7 @@ MethodLivenessResult MethodLiveness::get_liveness_at(int entry_bci) {
bci = 0;
}
MethodLivenessResult answer((uintptr_t*)NULL,0);
MethodLivenessResult answer((BitMap::bm_word_t*)NULL,0);
if (_block_count > 0) {
if (TimeLivenessAnalysis) _time_total.start();
@ -1000,7 +1000,7 @@ bool MethodLiveness::BasicBlock::merge_exception(BitMap other) {
}
MethodLivenessResult MethodLiveness::BasicBlock::get_liveness_at(ciMethod* method, int bci) {
MethodLivenessResult answer(NEW_RESOURCE_ARRAY(uintptr_t, _analyzer->bit_map_size_words()),
MethodLivenessResult answer(NEW_RESOURCE_ARRAY(BitMap::bm_word_t, _analyzer->bit_map_size_words()),
_analyzer->bit_map_size_bits());
answer.set_is_valid();

View File

@ -127,7 +127,7 @@ bool CMBitMap::allocate(ReservedSpace heap_rs) {
}
assert(_virtual_space.committed_size() == brs.size(),
"didn't reserve backing store for all of concurrent marking bit map?");
_bm.set_map((uintptr_t*)_virtual_space.low());
_bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
_bmWordSize, "inconsistency in bit map sizing");
_bm.set_size(_bmWordSize >> _shifter);

View File

@ -433,14 +433,6 @@ HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
return hr;
}
void G1CollectedHeap::stop_conc_gc_threads() {
_cg1r->stop();
_cmThread->stop();
if (G1StringDedup::is_enabled()) {
G1StringDedup::stop();
}
}
#ifdef ASSERT
// A region is added to the collection set as it is retired
// so an address p can point to a region which will be in the
@ -2174,20 +2166,14 @@ jint G1CollectedHeap::initialize() {
}
void G1CollectedHeap::stop() {
#if 0
// Stopping concurrent worker threads is currently disabled until
// some bugs in concurrent mark has been resolve. Without fixing
// those bugs first we risk haning during VM exit when trying to
// stop these threads.
// Abort any ongoing concurrent root region scanning and stop all
// concurrent threads. We do this to make sure these threads do
// not continue to execute and access resources (e.g. gclog_or_tty)
// Stop all concurrent threads. We do this to make sure these threads
// do not continue to execute and access resources (e.g. gclog_or_tty)
// that are destroyed during shutdown.
_cm->root_regions()->abort();
_cm->root_regions()->wait_until_scan_finished();
stop_conc_gc_threads();
#endif
_cg1r->stop();
_cmThread->stop();
if (G1StringDedup::is_enabled()) {
G1StringDedup::stop();
}
}
size_t G1CollectedHeap::conservative_max_heap_alignment() {

View File

@ -1684,8 +1684,6 @@ public:
void print_all_rsets() PRODUCT_RETURN;
public:
void stop_conc_gc_threads();
size_t pending_card_num();
size_t cards_scanned();

View File

@ -71,7 +71,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
if (_virtual_space != NULL && _virtual_space->expand_by(_reserved_byte_size)) {
_region_start = covered_region.start();
_region_size = covered_region.word_size();
idx_t* map = (idx_t*)_virtual_space->reserved_low_addr();
BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr();
_beg_bits.set_map(map);
_beg_bits.set_size(bits / 2);
_end_bits.set_map(map + words / 2);

View File

@ -945,12 +945,8 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
Klass *klass_to_check = !InstanceKlass::cast(current_klass())->is_anonymous() ?
current_klass() :
InstanceKlass::cast(current_klass())->host_klass();
// As of the fix for 4486457 we disable verification for all of the
// dynamically-generated bytecodes associated with the 1.4
// reflection implementation, not just those associated with
// sun/reflect/SerializationConstructorAccessor.
bool is_reflect = JDK_Version::is_gte_jdk14x_version() &&
klass_to_check->is_subclass_of(
// Disable verification for the dynamically-generated reflection bytecodes.
bool is_reflect = klass_to_check->is_subclass_of(
SystemDictionary::reflect_MagicAccessorImpl_klass());
if (!is_reflect &&

View File

@ -0,0 +1,161 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/guardedMemory.hpp"
#include "runtime/os.hpp"
void* GuardedMemory::wrap_copy(const void* ptr, const size_t len, const void* tag) {
size_t total_sz = GuardedMemory::get_total_size(len);
void* outerp = os::malloc(total_sz, mtInternal);
if (outerp != NULL) {
GuardedMemory guarded(outerp, len, tag);
void* innerp = guarded.get_user_ptr();
memcpy(innerp, ptr, len);
return innerp;
}
return NULL; // OOM
}
bool GuardedMemory::free_copy(void* p) {
if (p == NULL) {
return true;
}
GuardedMemory guarded((u_char*)p);
bool verify_ok = guarded.verify_guards();
/* always attempt to free, pass problem on to any nested memchecker */
os::free(guarded.release_for_freeing());
return verify_ok;
}
void GuardedMemory::print_on(outputStream* st) const {
if (_base_addr == NULL) {
st->print_cr("GuardedMemory(" PTR_FORMAT ") not associated to any memory", p2i(this));
return;
}
st->print_cr("GuardedMemory(" PTR_FORMAT ") base_addr=" PTR_FORMAT
" tag=" PTR_FORMAT " user_size=" SIZE_FORMAT " user_data=" PTR_FORMAT,
p2i(this), p2i(_base_addr), p2i(get_tag()), get_user_size(), p2i(get_user_ptr()));
Guard* guard = get_head_guard();
st->print_cr(" Header guard @" PTR_FORMAT " is %s", p2i(guard), (guard->verify() ? "OK" : "BROKEN"));
guard = get_tail_guard();
st->print_cr(" Trailer guard @" PTR_FORMAT " is %s", p2i(guard), (guard->verify() ? "OK" : "BROKEN"));
u_char udata = *get_user_ptr();
switch (udata) {
case uninitBlockPad:
st->print_cr(" User data appears unused");
break;
case freeBlockPad:
st->print_cr(" User data appears to have been freed");
break;
default:
st->print_cr(" User data appears to be in use");
break;
}
}
// test code...
#ifndef PRODUCT
static void guarded_memory_test_check(void* p, size_t sz, void* tag) {
assert(p != NULL, "NULL pointer given to check");
u_char* c = (u_char*) p;
GuardedMemory guarded(c);
assert(guarded.get_tag() == tag, "Tag is not the same as supplied");
assert(guarded.get_user_ptr() == c, "User pointer is not the same as supplied");
assert(guarded.get_user_size() == sz, "User size is not the same as supplied");
assert(guarded.verify_guards(), "Guard broken");
}
void GuardedMemory::test_guarded_memory() {
// Test the basic characteristics...
size_t total_sz = GuardedMemory::get_total_size(1);
assert(total_sz > 1 && total_sz >= (sizeof(GuardHeader) + 1 + sizeof(Guard)), "Unexpected size");
u_char* basep = (u_char*) os::malloc(total_sz, mtInternal);
GuardedMemory guarded(basep, 1, (void*)0xf000f000);
assert(*basep == badResourceValue, "Expected guard in the form of badResourceValue");
u_char* userp = guarded.get_user_ptr();
assert(*userp == uninitBlockPad, "Expected uninitialized data in the form of uninitBlockPad");
guarded_memory_test_check(userp, 1, (void*)0xf000f000);
void* freep = guarded.release_for_freeing();
assert((u_char*)freep == basep, "Expected the same pointer guard was ");
assert(*userp == freeBlockPad, "Expected user data to be free block padded");
assert(!guarded.verify_guards(), "Expected failed");
os::free(freep);
// Test a number of odd sizes...
size_t sz = 0;
do {
void* p = os::malloc(GuardedMemory::get_total_size(sz), mtInternal);
void* up = guarded.wrap_with_guards(p, sz, (void*)1);
memset(up, 0, sz);
guarded_memory_test_check(up, sz, (void*)1);
os::free(guarded.release_for_freeing());
sz = (sz << 4) + 1;
} while (sz < (256 * 1024));
// Test buffer overrun into head...
basep = (u_char*) os::malloc(GuardedMemory::get_total_size(1), mtInternal);
guarded.wrap_with_guards(basep, 1);
*basep = 0;
assert(!guarded.verify_guards(), "Expected failure");
os::free(basep);
// Test buffer overrun into tail with a number of odd sizes...
sz = 1;
do {
void* p = os::malloc(GuardedMemory::get_total_size(sz), mtInternal);
void* up = guarded.wrap_with_guards(p, sz, (void*)1);
memset(up, 0, sz + 1); // Buffer-overwrite (within guard)
assert(!guarded.verify_guards(), "Guard was not broken as expected");
os::free(guarded.release_for_freeing());
sz = (sz << 4) + 1;
} while (sz < (256 * 1024));
// Test wrap_copy/wrap_free...
assert(GuardedMemory::free_copy(NULL), "Expected free NULL to be OK");
const char* str = "Check my bounds out";
size_t str_sz = strlen(str) + 1;
char* str_copy = (char*) GuardedMemory::wrap_copy(str, str_sz);
guarded_memory_test_check(str_copy, str_sz, NULL);
assert(strcmp(str, str_copy) == 0, "Not identical copy");
assert(GuardedMemory::free_copy(str_copy), "Free copy failed to verify");
void* no_data = NULL;
void* no_data_copy = GuardedMemory::wrap_copy(no_data, 0);
assert(GuardedMemory::free_copy(no_data_copy), "Expected valid guards even for no data copy");
}
#endif // !PRODUCT

View File

@ -0,0 +1,326 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
#define SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
/**
* Guarded memory for detecting buffer overrun.
*
* Allows allocations to be wrapped with padded bytes of a known byte pattern,
* that is a "guard". Guard patterns may be verified to detect buffer overruns.
*
* Primarily used by "debug malloc" and "checked JNI".
*
* Memory layout:
*
* |Offset             | Content             | Description    |
* |------------------------------------------------------------
* |base_addr          | 0xABABABABABABABAB  | Head guard     |
* |+16                | <size_t:user_size>  | User data size |
* |+sizeof(uintptr_t) | <tag>               | Tag word       |
* |+sizeof(void*)     | 0xF1 <user_data> (  | User data      |
* |+user_size         | 0xABABABABABABABAB  | Tail guard     |
* -------------------------------------------------------------
*
* Where:
* - guard padding uses "badResourceValue" (0xAB)
* - tag word is general purpose
* - user data
* -- initially padded with "uninitBlockPad" (0xF1),
* -- to "freeBlockPad" (0xBA), when freed
*
* Usage:
*
* * Allocations: one may wrap allocations with guard memory:
* <code>
* Thing* alloc_thing() {
* void* mem = user_alloc_fn(GuardedMemory::get_total_size(sizeof(thing)));
* GuardedMemory guarded(mem, sizeof(thing));
* return (Thing*) guarded.get_user_ptr();
* }
* </code>
* * Verify: memory guards are still intact
* <code>
* bool verify_thing(Thing* thing) {
* GuardedMemory guarded((void*)thing);
* return guarded.verify_guards();
* }
* </code>
* * Free: one may mark bytes as freed (further debugging support)
* <code>
* void free_thing(Thing* thing) {
* GuardedMemory guarded((void*)thing);
* assert(guarded.verify_guards(), "Corrupt thing");
* user_free_fn(guarded.release_for_freeing());
* }
* </code>
*/
class GuardedMemory : StackObj { // Wrapper on stack
// Private inner classes for memory layout...
protected:
/**
* Guard class for header and trailer known pattern to test for overwrites.
*/
class Guard { // Class for raw memory (no vtbl allowed)
friend class GuardedMemory;
protected:
enum {
GUARD_SIZE = 16
};
u_char _guard[GUARD_SIZE];
public:
void build() {
u_char* c = _guard; // Possibly unaligned if tail guard
u_char* end = c + GUARD_SIZE;
while (c < end) {
*c = badResourceValue;
c++;
}
}
bool verify() const {
u_char* c = (u_char*) _guard;
u_char* end = c + GUARD_SIZE;
while (c < end) {
if (*c != badResourceValue) {
return false;
}
c++;
}
return true;
}
}; // GuardedMemory::Guard
/**
* Header guard and size
*/
class GuardHeader : Guard {
friend class GuardedMemory;
protected:
// Take care in modifying fields here, will affect alignment
// e.g. x86 ABI 16 byte stack alignment
union {
uintptr_t __unused_full_word1;
size_t _user_size;
};
void* _tag;
public:
void set_user_size(const size_t usz) { _user_size = usz; }
size_t get_user_size() const { return _user_size; }
void set_tag(const void* tag) { _tag = (void*) tag; }
void* get_tag() const { return _tag; }
}; // GuardedMemory::GuardHeader
// Guarded Memory...
protected:
u_char* _base_addr;
public:
/**
* Create new guarded memory.
*
* Wraps, starting at the given "base_ptr" with guards. Use "get_user_ptr()"
* to return a pointer suitable for user data.
*
* @param base_ptr allocation wishing to be wrapped, must be at least "GuardedMemory::get_total_size()" bytes.
* @param user_size the size of the user data to be wrapped.
* @param tag optional general purpose tag.
*/
GuardedMemory(void* base_ptr, const size_t user_size, const void* tag = NULL) {
wrap_with_guards(base_ptr, user_size, tag);
}
/**
* Wrap existing guarded memory.
*
* To use this constructor, one must have created guarded memory with
* "GuardedMemory(void*, size_t, void*)" (or indirectly via helper, e.g. "wrap_copy()").
*
* @param user_p existing wrapped memory.
*/
GuardedMemory(void* userp) {
u_char* user_ptr = (u_char*) userp;
assert((uintptr_t)user_ptr > (sizeof(GuardHeader) + 0x1000), "Invalid pointer");
_base_addr = (user_ptr - sizeof(GuardHeader));
}
/**
* Create new guarded memory.
*
* Wraps, starting at the given "base_ptr" with guards. Allows reuse of stack allocated helper.
*
* @param base_ptr allocation wishing to be wrapped, must be at least "GuardedMemory::get_total_size()" bytes.
* @param user_size the size of the user data to be wrapped.
* @param tag optional general purpose tag.
*
* @return user data pointer (inner pointer to supplied "base_ptr").
*/
void* wrap_with_guards(void* base_ptr, size_t user_size, const void* tag = NULL) {
assert(base_ptr != NULL, "Attempt to wrap NULL with memory guard");
_base_addr = (u_char*)base_ptr;
get_head_guard()->build();
get_head_guard()->set_user_size(user_size);
get_tail_guard()->build();
set_tag(tag);
set_user_bytes(uninitBlockPad);
assert(verify_guards(), "Expected valid memory guards");
return get_user_ptr();
}
/**
* Verify head and tail guards.
*
* @return true if guards are intact, false would indicate a buffer overrun.
*/
bool verify_guards() const {
if (_base_addr != NULL) {
return (get_head_guard()->verify() && get_tail_guard()->verify());
}
return false;
}
/**
* Set the general purpose tag.
*
* @param tag general purpose tag.
*/
void set_tag(const void* tag) { get_head_guard()->set_tag(tag); }
/**
* Return the general purpose tag.
*
* @return the general purpose tag, defaults to NULL.
*/
void* get_tag() const { return get_head_guard()->get_tag(); }
/**
* Return the size of the user data.
*
* @return the size of the user data.
*/
size_t get_user_size() const {
assert(_base_addr != NULL, "Not wrapping any memory");
return get_head_guard()->get_user_size();
}
/**
* Return the user data pointer.
*
* @return the user data pointer.
*/
u_char* get_user_ptr() const {
assert(_base_addr != NULL, "Not wrapping any memory");
return _base_addr + sizeof(GuardHeader);
}
/**
* Release the wrapped pointer for resource freeing.
*
* Pads the user data with "freeBlockPad" and disassociates the helper.
*
* @return the original base pointer used to wrap the data.
*/
void* release_for_freeing() {
set_user_bytes(freeBlockPad);
return release();
}
/**
* Disassociate the helper from the original base address.
*
* @return the original base pointer used to wrap the data.
*/
void* release() {
void* p = (void*) _base_addr;
_base_addr = NULL;
return p;
}
virtual void print_on(outputStream* st) const;
protected:
GuardHeader* get_head_guard() const { return (GuardHeader*) _base_addr; }
Guard* get_tail_guard() const { return (Guard*) (get_user_ptr() + get_user_size()); };
void set_user_bytes(u_char ch) {
memset(get_user_ptr(), ch, get_user_size());
}
public:
/**
* Return the total size required for wrapping the given user size.
*
* @return the total size required for wrapping the given user size.
*/
static size_t get_total_size(size_t user_size) {
size_t total_size = sizeof(GuardHeader) + user_size + sizeof(Guard);
assert(total_size > user_size, "Unexpected wrap-around");
return total_size;
}
// Helper functions...
/**
* Wrap a copy of size "len" of "ptr".
*
* @param p the memory to be copied
* @param len the length of the copy
* @param tag optional general purpose tag (see GuardedMemory::get_tag())
*
* @return guarded wrapped memory pointer to the user area, or NULL if OOM.
*/
static void* wrap_copy(const void* p, const size_t len, const void* tag = NULL);
/**
* Free wrapped copy.
*
* Frees memory copied with "wrap_copy()".
*
* @param p memory returned by "wrap_copy()".
*
* @return true if guards were verified as intact; false indicates a buffer overrun.
*/
static bool free_copy(void* p);
// Testing...
#ifndef PRODUCT
static void test_guarded_memory(void);
#endif
}; // GuardedMemory
#endif // SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
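For orientation, a minimal usage sketch of the wrapper declared above. This is illustrative only: it assumes the HotSpot build environment for GuardedMemory and assert, and a plain C allocator stands in for whatever allocation path the caller actually uses.
#include <stdlib.h>
static void* example_guarded_malloc(size_t user_size) {
  void* base = ::malloc(GuardedMemory::get_total_size(user_size));
  if (base == NULL) return NULL;
  GuardedMemory guarded(base, user_size);   // builds the head and tail guards
  return guarded.get_user_ptr();            // inner pointer handed to the caller
}
static void example_guarded_free(void* user_ptr) {
  GuardedMemory guarded(user_ptr);          // re-wrap existing guarded memory
  assert(guarded.verify_guards(), "Corrupt block");
  ::free(guarded.release_for_freeing());    // returns the original base pointer
}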

View File

@ -697,6 +697,7 @@ class SpaceManager : public CHeapObj<mtClass> {
size_t allocated_blocks_words() const { return _allocated_blocks_words; }
size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
size_t allocated_chunks_words() const { return _allocated_chunks_words; }
size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
size_t allocated_chunks_count() const { return _allocated_chunks_count; }
bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
@ -3365,6 +3366,16 @@ size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
return capacity_words_slow(mdtype) * BytesPerWord;
}
size_t Metaspace::allocated_blocks_bytes() const {
return vsm()->allocated_blocks_bytes() +
(using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
}
size_t Metaspace::allocated_chunks_bytes() const {
return vsm()->allocated_chunks_bytes() +
(using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
}
void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
assert(!SafepointSynchronize::is_at_safepoint()
|| Thread::current()->is_VM_thread(), "should be the VM thread");

View File

@ -226,6 +226,9 @@ class Metaspace : public CHeapObj<mtClass> {
size_t used_bytes_slow(MetadataType mdtype) const;
size_t capacity_bytes_slow(MetadataType mdtype) const;
size_t allocated_blocks_bytes() const;
size_t allocated_chunks_bytes() const;
static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
bool read_only, MetaspaceObj::Type type, TRAPS);
void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);

View File

@ -1000,9 +1000,6 @@ void universe2_init() {
}
// This function is defined in JVM.cpp
extern void initialize_converter_functions();
bool universe_post_init() {
assert(!is_init_completed(), "Error: initialization not yet completed!");
Universe::_fully_initialized = true;
@ -1144,11 +1141,6 @@ bool universe_post_init() {
SystemDictionary::ProtectionDomain_klass(), m);;
}
// The following is initializing converter functions for serialization in
// JVM.cpp. If we clean up the StrictMath code above we may want to find
// a better solution for this as well.
initialize_converter_functions();
// This needs to be done before the first scavenge/gc, since
// it's an input to soft ref clearing policy.
{

View File

@ -1023,8 +1023,7 @@ bool Method::is_ignored_by_security_stack_walk() const {
// This is Method.invoke() -- ignore it
return true;
}
if (JDK_Version::is_gte_jdk14x_version() &&
method_holder()->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
if (method_holder()->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
// This is an auxiliary frame -- ignore it
return true;
}

View File

@ -3858,6 +3858,7 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#endif
#include "memory/guardedMemory.hpp"
#include "utilities/quickSort.hpp"
#include "utilities/ostream.hpp"
#if INCLUDE_VM_STRUCTS
@ -3901,6 +3902,7 @@ void execute_internal_vm_tests() {
run_unit_test(arrayOopDesc::test_max_array_length());
run_unit_test(CollectedHeap::test_is_in());
run_unit_test(QuickSort::test_quick_sort());
run_unit_test(GuardedMemory::test_guarded_memory());
run_unit_test(AltHashing::test_alt_hash());
run_unit_test(test_loggc_filename());
run_unit_test(TestNewSize_test());
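As a rough idea of what the newly registered guarded-memory self-test can check — hypothetical sketch only, the real GuardedMemory::test_guarded_memory() in guardedMemory.cpp may differ in detail:
static void guarded_memory_smoke_test() {
  u_char backing[256];                      // comfortably larger than get_total_size(32)
  GuardedMemory gm(backing, 32);            // wrap 32 user bytes
  assert(gm.verify_guards(), "fresh guards should verify");
  gm.get_user_ptr()[32] = 0;                // clobber the first tail-guard byte
  assert(!gm.verify_guards(), "overrun should be detected");
}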

File diff suppressed because it is too large

View File

@ -3923,50 +3923,6 @@ JNIEXPORT void JNICALL JVM_RawMonitorExit(void *mon) {
}
// Support for Serialization
typedef jfloat (JNICALL *IntBitsToFloatFn )(JNIEnv* env, jclass cb, jint value);
typedef jdouble (JNICALL *LongBitsToDoubleFn)(JNIEnv* env, jclass cb, jlong value);
typedef jint (JNICALL *FloatToIntBitsFn )(JNIEnv* env, jclass cb, jfloat value);
typedef jlong (JNICALL *DoubleToLongBitsFn)(JNIEnv* env, jclass cb, jdouble value);
static IntBitsToFloatFn int_bits_to_float_fn = NULL;
static LongBitsToDoubleFn long_bits_to_double_fn = NULL;
static FloatToIntBitsFn float_to_int_bits_fn = NULL;
static DoubleToLongBitsFn double_to_long_bits_fn = NULL;
void initialize_converter_functions() {
if (JDK_Version::is_gte_jdk14x_version()) {
// These functions only exist for compatibility with 1.3.1 and earlier
return;
}
// called from universe_post_init()
assert(
int_bits_to_float_fn == NULL &&
long_bits_to_double_fn == NULL &&
float_to_int_bits_fn == NULL &&
double_to_long_bits_fn == NULL ,
"initialization done twice"
);
// initialize
int_bits_to_float_fn = CAST_TO_FN_PTR(IntBitsToFloatFn , NativeLookup::base_library_lookup("java/lang/Float" , "intBitsToFloat" , "(I)F"));
long_bits_to_double_fn = CAST_TO_FN_PTR(LongBitsToDoubleFn, NativeLookup::base_library_lookup("java/lang/Double", "longBitsToDouble", "(J)D"));
float_to_int_bits_fn = CAST_TO_FN_PTR(FloatToIntBitsFn , NativeLookup::base_library_lookup("java/lang/Float" , "floatToIntBits" , "(F)I"));
double_to_long_bits_fn = CAST_TO_FN_PTR(DoubleToLongBitsFn, NativeLookup::base_library_lookup("java/lang/Double", "doubleToLongBits", "(D)J"));
// verify
assert(
int_bits_to_float_fn != NULL &&
long_bits_to_double_fn != NULL &&
float_to_int_bits_fn != NULL &&
double_to_long_bits_fn != NULL ,
"initialization failed"
);
}
// Shared JNI/JVM entry points //////////////////////////////////////////////////////////////
jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init, Handle loader, Handle protection_domain, jboolean throwError, TRAPS) {

View File

@ -185,64 +185,7 @@ jint Unsafe_invocation_key_to_method_slot(jint key) {
// Get/SetObject must be special-cased, since it works with handles.
// The xxx140 variants for backward compatibility do not allow a full-width offset.
UNSAFE_ENTRY(jobject, Unsafe_GetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset))
UnsafeWrapper("Unsafe_GetObject");
if (obj == NULL) THROW_0(vmSymbols::java_lang_NullPointerException());
GET_OOP_FIELD(obj, offset, v)
jobject ret = JNIHandles::make_local(env, v);
#if INCLUDE_ALL_GCS
// We could be accessing the referent field in a reference
// object. If G1 is enabled then we need to register a non-null
// referent with the SATB barrier.
if (UseG1GC) {
bool needs_barrier = false;
if (ret != NULL) {
if (offset == java_lang_ref_Reference::referent_offset) {
oop o = JNIHandles::resolve_non_null(obj);
Klass* k = o->klass();
if (InstanceKlass::cast(k)->reference_type() != REF_NONE) {
assert(InstanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
needs_barrier = true;
}
}
}
if (needs_barrier) {
oop referent = JNIHandles::resolve(ret);
G1SATBCardTableModRefBS::enqueue(referent);
}
}
#endif // INCLUDE_ALL_GCS
return ret;
UNSAFE_END
UNSAFE_ENTRY(void, Unsafe_SetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset, jobject x_h))
UnsafeWrapper("Unsafe_SetObject");
if (obj == NULL) THROW(vmSymbols::java_lang_NullPointerException());
oop x = JNIHandles::resolve(x_h);
//SET_FIELD(obj, offset, oop, x);
oop p = JNIHandles::resolve(obj);
if (UseCompressedOops) {
if (x != NULL) {
// If there is a heap base pointer, we are obliged to emit a store barrier.
oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
} else {
narrowOop n = oopDesc::encode_heap_oop_not_null(x);
*(narrowOop*)index_oop_from_field_offset_long(p, offset) = n;
}
} else {
if (x != NULL) {
// If there is a heap base pointer, we are obliged to emit a store barrier.
oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
} else {
*(oop*)index_oop_from_field_offset_long(p, offset) = x;
}
}
UNSAFE_END
// The normal variants allow a null base pointer with an arbitrary address.
// These functions allow a null base pointer with an arbitrary address.
// But if the base pointer is non-null, the offset should make some sense.
// That is, it should be in the range [0, MAX_OBJECT_SIZE].
UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
@ -1350,9 +1293,6 @@ UNSAFE_END
// These are the methods for 1.4.0
static JNINativeMethod methods_140[] = {
{CC"getObject", CC"("OBJ"I)"OBJ"", FN_PTR(Unsafe_GetObject140)},
{CC"putObject", CC"("OBJ"I"OBJ")V", FN_PTR(Unsafe_SetObject140)},
DECLARE_GETSETOOP_140(Boolean, Z),
DECLARE_GETSETOOP_140(Byte, B),
DECLARE_GETSETOOP_140(Short, S),

View File

@ -222,10 +222,8 @@ void Arguments::init_version_specific_system_properties() {
const char* spec_vendor = "Sun Microsystems Inc.";
uint32_t spec_version = 0;
if (JDK_Version::is_gte_jdk17x_version()) {
spec_vendor = "Oracle Corporation";
spec_version = JDK_Version::current().major_version();
}
spec_vendor = "Oracle Corporation";
spec_version = JDK_Version::current().major_version();
jio_snprintf(buffer, bufsz, "1." UINT32_FORMAT, spec_version);
PropertyList_add(&_system_properties,
@ -2455,6 +2453,8 @@ bool Arguments::check_vm_args_consistency() {
warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
}
status &= check_vm_args_consistency_ext();
return status;
}
@ -3699,14 +3699,6 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
PrintGC = true;
}
if (!JDK_Version::is_gte_jdk18x_version()) {
// To avoid changing the log format for 7 updates this flag is only
// true by default in JDK8 and above.
if (FLAG_IS_DEFAULT(PrintGCCause)) {
FLAG_SET_DEFAULT(PrintGCCause, false);
}
}
// Set object alignment values.
set_object_alignment();

View File

@ -462,6 +462,7 @@ class Arguments : AllStatic {
static void check_deprecated_gc_flags();
// Check consistency or otherwise of VM argument settings
static bool check_vm_args_consistency();
static bool check_vm_args_consistency_ext();
// Check stack pages settings
static bool check_stack_pages();
// Used by os_solaris

View File

@ -1,12 +1,10 @@
/*
* Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@ -21,16 +19,12 @@
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.java.browser.net;
/**
*
* @author Zhengyu Gu
*/
public interface ProxyInfo {
public String getHost();
public int getPort();
public boolean isSocks();
#include "precompiled.hpp"
#include "runtime/arguments.hpp"
bool Arguments::check_vm_args_consistency_ext() {
return true;
}

View File

@ -116,7 +116,7 @@ class Atomic : AllStatic {
atomic_decl
#else
#define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
atomic_decl ; \
atomic_decl; \
non_atomic_decl
#endif

View File

@ -501,9 +501,6 @@ void before_exit(JavaThread * thread) {
os::infinite_sleep();
}
// Stop any ongoing concurrent GC work
Universe::heap()->stop();
// Terminate watcher thread - must before disenrolling any periodic task
if (PeriodicTask::num_tasks() > 0)
WatcherThread::stop();
@ -518,10 +515,8 @@ void before_exit(JavaThread * thread) {
StatSampler::disengage();
StatSampler::destroy();
// We do not need to explicitly stop concurrent GC threads because the
// JVM will be taken down at a safepoint when such threads are inactive --
// except for some concurrent G1 threads, see (comment in)
// Threads::destroy_vm().
// Stop concurrent GC threads
Universe::heap()->stop();
// Print GC/heap related information.
if (PrintGCDetails) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -197,58 +197,6 @@ class JDK_Version VALUE_OBJ_CLASS_SPEC {
_runtime_version = version;
}
// Convenience methods for queries on the current major/minor version
static bool is_jdk12x_version() {
return current().compare_major(2) == 0;
}
static bool is_jdk13x_version() {
return current().compare_major(3) == 0;
}
static bool is_jdk14x_version() {
return current().compare_major(4) == 0;
}
static bool is_jdk15x_version() {
return current().compare_major(5) == 0;
}
static bool is_jdk16x_version() {
return current().compare_major(6) == 0;
}
static bool is_jdk17x_version() {
return current().compare_major(7) == 0;
}
static bool is_jdk18x_version() {
return current().compare_major(8) == 0;
}
static bool is_gte_jdk13x_version() {
return current().compare_major(3) >= 0;
}
static bool is_gte_jdk14x_version() {
return current().compare_major(4) >= 0;
}
static bool is_gte_jdk15x_version() {
return current().compare_major(5) >= 0;
}
static bool is_gte_jdk16x_version() {
return current().compare_major(6) >= 0;
}
static bool is_gte_jdk17x_version() {
return current().compare_major(7) >= 0;
}
static bool is_gte_jdk18x_version() {
return current().compare_major(8) >= 0;
}
};
#endif // SHARE_VM_RUNTIME_JAVA_HPP

View File

@ -298,6 +298,7 @@ JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
block->_top = 0;
block->_next = NULL;
block->_pop_frame_link = NULL;
block->_planned_capacity = block_size_in_oops;
// _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
debug_only(block->_last = NULL);
debug_only(block->_free_list = NULL);
@ -531,6 +532,12 @@ int JNIHandleBlock::length() const {
return result;
}
const size_t JNIHandleBlock::get_number_of_live_handles() {
CountHandleClosure counter;
oops_do(&counter);
return counter.count();
}
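The CountHandleClosure used above is defined elsewhere in jniHandles.cpp; a minimal equivalent of the counting-closure idiom might look like the following (sketch only, assuming HotSpot's OopClosure interface, and possibly differing from the real class):
class CountHandleClosureSketch : public OopClosure {
 private:
  int _count;
 public:
  CountHandleClosureSketch() : _count(0) {}
  virtual void do_oop(oop* unused)       { _count++; }       // count each handle slot visited
  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
  int count() const { return _count; }
};
// usage: CountHandleClosureSketch c; block->oops_do(&c); then read c.count()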
// This method is not thread-safe, i.e., must be called while holding a lock on the
// structure.
long JNIHandleBlock::memory_usage() const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,6 +112,9 @@ class JNIHandleBlock : public CHeapObj<mtInternal> {
oop* _free_list; // Handle free list
int _allocate_before_rebuild; // Number of blocks to allocate before rebuilding free list
// Check JNI, "planned capacity" for current frame (or push/ensure)
size_t _planned_capacity;
#ifndef PRODUCT
JNIHandleBlock* _block_list_link; // Link for list below
static JNIHandleBlock* _block_list; // List of all allocated blocks (for debugging only)
@ -152,6 +155,11 @@ class JNIHandleBlock : public CHeapObj<mtInternal> {
// Traversal of weak handles. Unreachable oops are cleared.
void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
// Checked JNI support
void set_planned_capacity(size_t planned_capacity) { _planned_capacity = planned_capacity; }
const size_t get_planned_capacity() { return _planned_capacity; }
const size_t get_number_of_live_handles();
// Debugging
bool chain_contains(jobject handle) const; // Does this block or following blocks contain handle
bool contains(jobject handle) const; // Does this block contain handle

View File

@ -269,62 +269,62 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
#define TRACE(m) { static volatile int ctr = 0; int x = ++ctr; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.
static inline jint MarsagliaXORV (jint x) {
if (x == 0) x = 1|os::random() ;
if (x == 0) x = 1|os::random();
x ^= x << 6;
x ^= ((unsigned)x) >> 21;
x ^= x << 7 ;
return x & 0x7FFFFFFF ;
x ^= x << 7;
return x & 0x7FFFFFFF;
}
static int Stall (int its) {
static volatile jint rv = 1 ;
volatile int OnFrame = 0 ;
jint v = rv ^ UNS(OnFrame) ;
static volatile jint rv = 1;
volatile int OnFrame = 0;
jint v = rv ^ UNS(OnFrame);
while (--its >= 0) {
v = MarsagliaXORV (v) ;
v = MarsagliaXORV(v);
}
// Make this impossible for the compiler to optimize away,
// but (mostly) avoid W coherency sharing on MP systems.
if (v == 0x12345) rv = v ;
return v ;
if (v == 0x12345) rv = v;
return v;
}
int Monitor::TryLock () {
intptr_t v = _LockWord.FullWord ;
int Monitor::TryLock() {
intptr_t v = _LockWord.FullWord;
for (;;) {
if ((v & _LBIT) != 0) return 0 ;
const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
if (v == u) return 1 ;
v = u ;
if ((v & _LBIT) != 0) return 0;
const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
if (v == u) return 1;
v = u;
}
}
int Monitor::TryFast () {
int Monitor::TryFast() {
// Optimistic fast-path form ...
// Fast-path attempt for the common uncontended case.
// Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ; // agro ...
if (v == 0) return 1 ;
intptr_t v = CASPTR(&_LockWord, 0, _LBIT); // agro ...
if (v == 0) return 1;
for (;;) {
if ((v & _LBIT) != 0) return 0 ;
const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
if (v == u) return 1 ;
v = u ;
if ((v & _LBIT) != 0) return 0;
const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
if (v == u) return 1;
v = u;
}
}
int Monitor::ILocked () {
const intptr_t w = _LockWord.FullWord & 0xFF ;
assert (w == 0 || w == _LBIT, "invariant") ;
return w == _LBIT ;
int Monitor::ILocked() {
const intptr_t w = _LockWord.FullWord & 0xFF;
assert(w == 0 || w == _LBIT, "invariant");
return w == _LBIT;
}
// Polite TATAS spinlock with exponential backoff - bounded spin.
@ -342,38 +342,38 @@ int Monitor::ILocked () {
// See synchronizer.cpp for details and rationale.
int Monitor::TrySpin (Thread * const Self) {
if (TryLock()) return 1 ;
if (!os::is_MP()) return 0 ;
if (TryLock()) return 1;
if (!os::is_MP()) return 0;
int Probes = 0 ;
int Delay = 0 ;
int Steps = 0 ;
int SpinMax = NativeMonitorSpinLimit ;
int flgs = NativeMonitorFlags ;
int Probes = 0;
int Delay = 0;
int Steps = 0;
int SpinMax = NativeMonitorSpinLimit;
int flgs = NativeMonitorFlags;
for (;;) {
intptr_t v = _LockWord.FullWord;
if ((v & _LBIT) == 0) {
if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
return 1 ;
return 1;
}
continue ;
continue;
}
if ((flgs & 8) == 0) {
SpinPause () ;
SpinPause();
}
// Periodically increase Delay -- variable Delay form
// conceptually: delay *= 1 + 1/Exponent
++ Probes;
if (Probes > SpinMax) return 0 ;
++Probes;
if (Probes > SpinMax) return 0;
if ((Probes & 0x7) == 0) {
Delay = ((Delay << 1)|1) & 0x7FF ;
Delay = ((Delay << 1)|1) & 0x7FF;
// CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
}
if (flgs & 2) continue ;
if (flgs & 2) continue;
// Consider checking _owner's schedctl state, if OFFPROC abort spin.
// If the owner is OFFPROC then it's unlikely that the lock will be dropped
@ -389,48 +389,48 @@ int Monitor::TrySpin (Thread * const Self) {
// spin loop. N1 and brethren write-around the L1$ over the xbar into the L2$.
// Furthermore, they don't have a W$ like traditional SPARC processors.
// We currently use a Marsaglia Shift-Xor RNG loop.
Steps += Delay ;
Steps += Delay;
if (Self != NULL) {
jint rv = Self->rng[0] ;
for (int k = Delay ; --k >= 0; ) {
rv = MarsagliaXORV (rv) ;
if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
jint rv = Self->rng[0];
for (int k = Delay; --k >= 0;) {
rv = MarsagliaXORV(rv);
if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0;
}
Self->rng[0] = rv ;
Self->rng[0] = rv;
} else {
Stall (Delay) ;
Stall(Delay);
}
}
}
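For readers unfamiliar with the idiom, here is a conceptual stand-alone sketch of the test-and-test-and-set spin with bounded exponential backoff that TryLock()/TrySpin() implement, written against std::atomic instead of CASPTR/_LockWord. Illustrative only, not HotSpot code.
#include <atomic>
#include <thread>
class TatasSpinLock {
  std::atomic<int> _locked{0};
 public:
  bool try_lock() {
    // Test first so a contended lock does not trigger the cache-line upgrade of a failing CAS.
    if (_locked.load(std::memory_order_relaxed) != 0) return false;
    int expected = 0;
    return _locked.compare_exchange_strong(expected, 1, std::memory_order_acquire);
  }
  bool spin_lock(int spin_max) {
    int delay = 0;
    for (int probes = 0; probes < spin_max; probes++) {
      if (try_lock()) return true;
      if ((probes & 0x7) == 0) delay = ((delay << 1) | 1) & 0x7FF;  // grow the backoff, bounded
      for (int k = delay; --k >= 0; ) std::this_thread::yield();    // crude stand-in for SpinPause()
    }
    return false;  // caller falls back to enqueueing and parking, as ILock() does
  }
  void unlock() { _locked.store(0, std::memory_order_release); }
};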
static int ParkCommon (ParkEvent * ev, jlong timo) {
// Diagnostic support - periodically unwedge blocked threads
intx nmt = NativeMonitorTimeout ;
intx nmt = NativeMonitorTimeout;
if (nmt > 0 && (nmt < timo || timo <= 0)) {
timo = nmt ;
timo = nmt;
}
int err = OS_OK ;
int err = OS_OK;
if (0 == timo) {
ev->park() ;
ev->park();
} else {
err = ev->park(timo) ;
err = ev->park(timo);
}
return err ;
return err;
}
inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
intptr_t v = _LockWord.FullWord ;
intptr_t v = _LockWord.FullWord;
for (;;) {
if ((v & _LBIT) == 0) {
const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
if (u == v) return 1 ; // indicate acquired
v = u ;
const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
if (u == v) return 1; // indicate acquired
v = u;
} else {
// Anticipate success ...
ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
if (u == v) return 0 ; // indicate pushed onto cxq
v = u ;
ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT);
if (u == v) return 0; // indicate pushed onto cxq
v = u;
}
// Interference - LockWord change - just retry
}
@ -444,33 +444,33 @@ inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
// _owner is a higher-level logical concept.
void Monitor::ILock (Thread * Self) {
assert (_OnDeck != Self->_MutexEvent, "invariant") ;
assert(_OnDeck != Self->_MutexEvent, "invariant");
if (TryFast()) {
Exeunt:
assert (ILocked(), "invariant") ;
return ;
assert(ILocked(), "invariant");
return;
}
ParkEvent * const ESelf = Self->_MutexEvent ;
assert (_OnDeck != ESelf, "invariant") ;
ParkEvent * const ESelf = Self->_MutexEvent;
assert(_OnDeck != ESelf, "invariant");
// As an optimization, spinners could conditionally try to set ONDECK to _LBIT
// Synchronizer.cpp uses a similar optimization.
if (TrySpin (Self)) goto Exeunt ;
if (TrySpin(Self)) goto Exeunt;
// Slow-path - the lock is contended.
// Either Enqueue Self on cxq or acquire the outer lock.
// LockWord encoding = (cxq,LOCKBYTE)
ESelf->reset() ;
OrderAccess::fence() ;
ESelf->reset();
OrderAccess::fence();
// Optional optimization ... try barging on the inner lock
if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
goto OnDeck_LOOP ;
goto OnDeck_LOOP;
}
if (AcquireOrPush (ESelf)) goto Exeunt ;
if (AcquireOrPush(ESelf)) goto Exeunt;
// At any given time there is at most one ondeck thread.
// ondeck implies not resident on cxq and not resident on EntryList
@ -478,26 +478,26 @@ void Monitor::ILock (Thread * Self) {
// CONSIDER: use Self->OnDeck instead of m->OnDeck.
// Deschedule Self so that others may run.
while (_OnDeck != ESelf) {
ParkCommon (ESelf, 0) ;
ParkCommon(ESelf, 0);
}
// Self is now in the ONDECK position and will remain so until it
// manages to acquire the lock.
OnDeck_LOOP:
for (;;) {
assert (_OnDeck == ESelf, "invariant") ;
if (TrySpin (Self)) break ;
assert(_OnDeck == ESelf, "invariant");
if (TrySpin(Self)) break;
// CONSIDER: if ESelf->TryPark() && TryLock() break ...
// It's probably wise to spin only if we *actually* blocked
// CONSIDER: check the lockbyte, if it remains set then
// preemptively drain the cxq into the EntryList.
// The best place and time to perform queue operations -- lock metadata --
// is _before having acquired the outer lock, while waiting for the lock to drop.
ParkCommon (ESelf, 0) ;
ParkCommon(ESelf, 0);
}
assert (_OnDeck == ESelf, "invariant") ;
_OnDeck = NULL ;
assert(_OnDeck == ESelf, "invariant");
_OnDeck = NULL;
// Note that we currently drop the inner lock (clear OnDeck) in the slow-path
// epilogue immediately after having acquired the outer lock.
@ -512,11 +512,11 @@ void Monitor::ILock (Thread * Self) {
// effective length of the critical section.
// Note that (A) and (B) are tantamount to succession by direct handoff for
// the inner lock.
goto Exeunt ;
goto Exeunt;
}
void Monitor::IUnlock (bool RelaxAssert) {
assert (ILocked(), "invariant") ;
assert(ILocked(), "invariant");
// Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
// before the store that releases the lock. Crucially, all the stores and loads in the
// critical section must be globally visible before the store of 0 into the lock-word
@ -532,9 +532,9 @@ void Monitor::IUnlock (bool RelaxAssert) {
// safety or lock release consistency.
OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock
OrderAccess::storeload ();
ParkEvent * const w = _OnDeck ;
assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
OrderAccess::storeload();
ParkEvent * const w = _OnDeck;
assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
if (w != NULL) {
// Either we have a valid ondeck thread or ondeck is transiently "locked"
// by some exiting thread as it arranges for succession. The LSBit of
@ -549,19 +549,19 @@ void Monitor::IUnlock (bool RelaxAssert) {
// then progress is known to have occurred as that means the thread associated
// with "w" acquired the lock. In that case this thread need take no further
// action to guarantee progress.
if ((UNS(w) & _LBIT) == 0) w->unpark() ;
return ;
if ((UNS(w) & _LBIT) == 0) w->unpark();
return;
}
intptr_t cxq = _LockWord.FullWord ;
intptr_t cxq = _LockWord.FullWord;
if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
return ; // normal fast-path exit - cxq and EntryList both empty
return; // normal fast-path exit - cxq and EntryList both empty
}
if (cxq & _LBIT) {
// Optional optimization ...
// Some other thread acquired the lock in the window since this
// thread released it. Succession is now that thread's responsibility.
return ;
return;
}
Succession:
@ -575,22 +575,22 @@ void Monitor::IUnlock (bool RelaxAssert) {
// picks a successor and marks that thread as OnDeck. That successor
// thread will then clear OnDeck once it eventually acquires the outer lock.
if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
return ;
return;
}
ParkEvent * List = _EntryList ;
ParkEvent * List = _EntryList;
if (List != NULL) {
// Transfer the head of the EntryList to the OnDeck position.
// Once OnDeck, a thread stays OnDeck until it acquires the lock.
// For a given lock there is at most OnDeck thread at any one instant.
WakeOne:
assert (List == _EntryList, "invariant") ;
ParkEvent * const w = List ;
assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
_EntryList = w->ListNext ;
assert(List == _EntryList, "invariant");
ParkEvent * const w = List;
assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
_EntryList = w->ListNext;
// as a diagnostic measure consider setting w->_ListNext = BAD
assert (UNS(_OnDeck) == _LBIT, "invariant") ;
_OnDeck = w ; // pass OnDeck to w.
assert(UNS(_OnDeck) == _LBIT, "invariant");
_OnDeck = w; // pass OnDeck to w.
// w will clear OnDeck once it acquires the outer lock
// Another optional optimization ...
@ -599,25 +599,25 @@ void Monitor::IUnlock (bool RelaxAssert) {
// Try to defer the unpark() operation - Delegate the responsibility
// for unpark()ing the OnDeck thread to the current or subsequent owners
// That is, the new owner is responsible for unparking the OnDeck thread.
OrderAccess::storeload() ;
cxq = _LockWord.FullWord ;
if (cxq & _LBIT) return ;
OrderAccess::storeload();
cxq = _LockWord.FullWord;
if (cxq & _LBIT) return;
w->unpark() ;
return ;
w->unpark();
return;
}
cxq = _LockWord.FullWord ;
cxq = _LockWord.FullWord;
if ((cxq & ~_LBIT) != 0) {
// The EntryList is empty but the cxq is populated.
// drain RATs from cxq into EntryList
// Detach RATs segment with CAS and then merge into EntryList
for (;;) {
// optional optimization - if locked, the owner is responsible for succession
if (cxq & _LBIT) goto Punt ;
const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
if (vfy == cxq) break ;
cxq = vfy ;
if (cxq & _LBIT) goto Punt;
const intptr_t vfy = CASPTR(&_LockWord, cxq, cxq & _LBIT);
if (vfy == cxq) break;
cxq = vfy;
// Interference - LockWord changed - Just retry
// We can see concurrent interference from contending threads
// pushing themselves onto the cxq or from lock-unlock operations.
@ -639,10 +639,10 @@ void Monitor::IUnlock (bool RelaxAssert) {
// the EntryList, but it might make sense to reverse the order
// or perhaps sort by thread priority. See the comments in
// synchronizer.cpp objectMonitor::exit().
assert (_EntryList == NULL, "invariant") ;
_EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
assert (List != NULL, "invariant") ;
goto WakeOne ;
assert(_EntryList == NULL, "invariant");
_EntryList = List = (ParkEvent *)(cxq & ~_LBIT);
assert(List != NULL, "invariant");
goto WakeOne;
}
// cxq|EntryList is empty.
@ -651,8 +651,8 @@ void Monitor::IUnlock (bool RelaxAssert) {
// A thread could have added itself to cxq since this thread previously checked.
// Detect and recover by refetching cxq.
Punt:
assert (UNS(_OnDeck) == _LBIT, "invariant") ;
_OnDeck = NULL ; // Release inner lock.
assert(UNS(_OnDeck) == _LBIT, "invariant");
_OnDeck = NULL; // Release inner lock.
OrderAccess::storeload(); // Dekker duality - pivot point
// Resample LockWord/cxq to recover from possible race.
@ -665,32 +665,32 @@ void Monitor::IUnlock (bool RelaxAssert) {
// Note that we don't need to recheck EntryList, just cxq.
// If threads moved onto EntryList since we dropped OnDeck
// that implies some other thread forced succession.
cxq = _LockWord.FullWord ;
cxq = _LockWord.FullWord;
if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
goto Succession ; // potential race -- re-run succession
goto Succession; // potential race -- re-run succession
}
return ;
return;
}
bool Monitor::notify() {
assert (_owner == Thread::current(), "invariant") ;
assert (ILocked(), "invariant") ;
if (_WaitSet == NULL) return true ;
NotifyCount ++ ;
assert(_owner == Thread::current(), "invariant");
assert(ILocked(), "invariant");
if (_WaitSet == NULL) return true;
NotifyCount++;
// Transfer one thread from the WaitSet to the EntryList or cxq.
// Currently we just unlink the head of the WaitSet and prepend to the cxq.
// And of course we could just unlink it and unpark it, too, but
// in that case it'd likely impale itself on the reentry.
Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
ParkEvent * nfy = _WaitSet ;
Thread::muxAcquire(_WaitLock, "notify:WaitLock");
ParkEvent * nfy = _WaitSet;
if (nfy != NULL) { // DCL idiom
_WaitSet = nfy->ListNext ;
assert (nfy->Notified == 0, "invariant") ;
_WaitSet = nfy->ListNext;
assert(nfy->Notified == 0, "invariant");
// push nfy onto the cxq
for (;;) {
const intptr_t v = _LockWord.FullWord ;
assert ((v & 0xFF) == _LBIT, "invariant") ;
const intptr_t v = _LockWord.FullWord;
assert((v & 0xFF) == _LBIT, "invariant");
nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
// interference - _LockWord changed -- just retry
@ -698,17 +698,17 @@ bool Monitor::notify() {
// Note that setting Notified before pushing nfy onto the cxq is
// also legal and safe, but the safety properties are much more
// subtle, so for the sake of code stewardship ...
OrderAccess::fence() ;
OrderAccess::fence();
nfy->Notified = 1;
}
Thread::muxRelease (_WaitLock) ;
Thread::muxRelease(_WaitLock);
if (nfy != NULL && (NativeMonitorFlags & 16)) {
// Experimental code ... light up the wakee in the hope that this thread (the owner)
// will drop the lock just about the time the wakee comes ONPROC.
nfy->unpark() ;
nfy->unpark();
}
assert (ILocked(), "invariant") ;
return true ;
assert(ILocked(), "invariant");
return true;
}
// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
@ -719,14 +719,14 @@ bool Monitor::notify() {
// will be empty and the cxq will be "DCBAXYZ". This is benign, of course.
bool Monitor::notify_all() {
assert (_owner == Thread::current(), "invariant") ;
assert (ILocked(), "invariant") ;
while (_WaitSet != NULL) notify() ;
return true ;
assert(_owner == Thread::current(), "invariant");
assert(ILocked(), "invariant");
while (_WaitSet != NULL) notify();
return true;
}
int Monitor::IWait (Thread * Self, jlong timo) {
assert (ILocked(), "invariant") ;
assert(ILocked(), "invariant");
// Phases:
// 1. Enqueue Self on WaitSet - currently prepend
@ -734,10 +734,10 @@ int Monitor::IWait (Thread * Self, jlong timo) {
// 3. wait for either notification or timeout
// 4. lock - reentry - reacquire the outer lock
ParkEvent * const ESelf = Self->_MutexEvent ;
ESelf->Notified = 0 ;
ESelf->reset() ;
OrderAccess::fence() ;
ParkEvent * const ESelf = Self->_MutexEvent;
ESelf->Notified = 0;
ESelf->reset();
OrderAccess::fence();
// Add Self to WaitSet
// Ideally only the holder of the outer lock would manipulate the WaitSet -
@ -766,10 +766,10 @@ int Monitor::IWait (Thread * Self, jlong timo) {
// In that case we could have one ListElement on the WaitSet and another
// on the EntryList, with both referring to the same pure Event.
Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
ESelf->ListNext = _WaitSet ;
_WaitSet = ESelf ;
Thread::muxRelease (_WaitLock) ;
Thread::muxAcquire(_WaitLock, "wait:WaitLock:Add");
ESelf->ListNext = _WaitSet;
_WaitSet = ESelf;
Thread::muxRelease(_WaitLock);
// Release the outer lock
// We call IUnlock (RelaxAssert=true) as a thread T1 might
@ -781,16 +781,16 @@ int Monitor::IWait (Thread * Self, jlong timo) {
// IUnlock() call a thread should _never find itself on the EntryList
// or cxq, but in the case of wait() it's possible.
// See synchronizer.cpp objectMonitor::wait().
IUnlock (true) ;
IUnlock(true);
// Wait for either notification or timeout
// Beware that in some circumstances we might propagate
// spurious wakeups back to the caller.
for (;;) {
if (ESelf->Notified) break ;
int err = ParkCommon (ESelf, timo) ;
if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
if (ESelf->Notified) break;
int err = ParkCommon(ESelf, timo);
if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break;
}
// Prepare for reentry - if necessary, remove ESelf from WaitSet
@ -799,55 +799,55 @@ int Monitor::IWait (Thread * Self, jlong timo) {
// 2. On the cxq or EntryList
// 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.
OrderAccess::fence() ;
int WasOnWaitSet = 0 ;
OrderAccess::fence();
int WasOnWaitSet = 0;
if (ESelf->Notified == 0) {
Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
Thread::muxAcquire(_WaitLock, "wait:WaitLock:remove");
if (ESelf->Notified == 0) { // DCL idiom
assert (_OnDeck != ESelf, "invariant") ; // can't be both OnDeck and on WaitSet
assert(_OnDeck != ESelf, "invariant"); // can't be both OnDeck and on WaitSet
// ESelf is resident on the WaitSet -- unlink it.
// A doubly-linked list would be better here so we can unlink in constant-time.
// We have to unlink before we potentially recontend as ESelf might otherwise
// end up on the cxq|EntryList -- it can't be on two lists at once.
ParkEvent * p = _WaitSet ;
ParkEvent * q = NULL ; // classic q chases p
ParkEvent * p = _WaitSet;
ParkEvent * q = NULL; // classic q chases p
while (p != NULL && p != ESelf) {
q = p ;
p = p->ListNext ;
q = p;
p = p->ListNext;
}
assert (p == ESelf, "invariant") ;
assert(p == ESelf, "invariant");
if (p == _WaitSet) { // found at head
assert (q == NULL, "invariant") ;
_WaitSet = p->ListNext ;
assert(q == NULL, "invariant");
_WaitSet = p->ListNext;
} else { // found in interior
assert (q->ListNext == p, "invariant") ;
q->ListNext = p->ListNext ;
assert(q->ListNext == p, "invariant");
q->ListNext = p->ListNext;
}
WasOnWaitSet = 1 ; // We were *not* notified but instead encountered timeout
WasOnWaitSet = 1; // We were *not* notified but instead encountered timeout
}
Thread::muxRelease (_WaitLock) ;
Thread::muxRelease(_WaitLock);
}
// Reentry phase - reacquire the lock
if (WasOnWaitSet) {
// ESelf was previously on the WaitSet but we just unlinked it above
// because of a timeout. ESelf is not resident on any list and is not OnDeck
assert (_OnDeck != ESelf, "invariant") ;
ILock (Self) ;
assert(_OnDeck != ESelf, "invariant");
ILock(Self);
} else {
// A prior notify() operation moved ESelf from the WaitSet to the cxq.
// ESelf is now on the cxq, EntryList or at the OnDeck position.
// The following fragment is extracted from Monitor::ILock()
for (;;) {
if (_OnDeck == ESelf && TrySpin(Self)) break ;
ParkCommon (ESelf, 0) ;
if (_OnDeck == ESelf && TrySpin(Self)) break;
ParkCommon(ESelf, 0);
}
assert (_OnDeck == ESelf, "invariant") ;
_OnDeck = NULL ;
assert(_OnDeck == ESelf, "invariant");
_OnDeck = NULL;
}
assert (ILocked(), "invariant") ;
return WasOnWaitSet != 0 ; // return true IFF timeout
assert(ILocked(), "invariant");
return WasOnWaitSet != 0; // return true IFF timeout
}
@ -896,15 +896,15 @@ void Monitor::lock (Thread * Self) {
#endif // CHECK_UNHANDLED_OOPS
debug_only(check_prelock_state(Self));
assert (_owner != Self , "invariant") ;
assert (_OnDeck != Self->_MutexEvent, "invariant") ;
assert(_owner != Self , "invariant");
assert(_OnDeck != Self->_MutexEvent, "invariant");
if (TryFast()) {
Exeunt:
assert (ILocked(), "invariant") ;
assert (owner() == NULL, "invariant");
set_owner (Self);
return ;
assert(ILocked(), "invariant");
assert(owner() == NULL, "invariant");
set_owner(Self);
return;
}
// The lock is contended ...
@ -916,23 +916,23 @@ void Monitor::lock (Thread * Self) {
// and go on. we note this with _snuck so we can also
// pretend to unlock when the time comes.
_snuck = true;
goto Exeunt ;
goto Exeunt;
}
// Try a brief spin to avoid passing thru thread state transition ...
if (TrySpin (Self)) goto Exeunt ;
if (TrySpin(Self)) goto Exeunt;
check_block_state(Self);
if (Self->is_Java_thread()) {
// Horrible dictu - we suffer through a state transition
assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
ThreadBlockInVM tbivm ((JavaThread *) Self) ;
ILock (Self) ;
ThreadBlockInVM tbivm((JavaThread *) Self);
ILock(Self);
} else {
// Mirabile dictu
ILock (Self) ;
ILock(Self);
}
goto Exeunt ;
goto Exeunt;
}
void Monitor::lock() {
@ -945,14 +945,14 @@ void Monitor::lock() {
// thread state set to be in VM, the safepoint synchronization code will deadlock!
void Monitor::lock_without_safepoint_check (Thread * Self) {
assert (_owner != Self, "invariant") ;
ILock (Self) ;
assert (_owner == NULL, "invariant");
set_owner (Self);
assert(_owner != Self, "invariant");
ILock(Self);
assert(_owner == NULL, "invariant");
set_owner(Self);
}
void Monitor::lock_without_safepoint_check () {
lock_without_safepoint_check (Thread::current()) ;
void Monitor::lock_without_safepoint_check() {
lock_without_safepoint_check(Thread::current());
}
@ -976,23 +976,23 @@ bool Monitor::try_lock() {
if (TryLock()) {
// We got the lock
assert (_owner == NULL, "invariant");
set_owner (Self);
assert(_owner == NULL, "invariant");
set_owner(Self);
return true;
}
return false;
}
void Monitor::unlock() {
assert (_owner == Thread::current(), "invariant") ;
assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
set_owner (NULL) ;
assert(_owner == Thread::current(), "invariant");
assert(_OnDeck != Thread::current()->_MutexEvent , "invariant");
set_owner(NULL);
if (_snuck) {
assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
_snuck = false;
return ;
return;
}
IUnlock (false) ;
IUnlock(false);
}
// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
@ -1020,29 +1020,29 @@ void Monitor::jvm_raw_lock() {
if (TryLock()) {
Exeunt:
assert (ILocked(), "invariant") ;
assert (_owner == NULL, "invariant");
assert(ILocked(), "invariant");
assert(_owner == NULL, "invariant");
// This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
// might return NULL. Don't call set_owner since it will break on a NULL owner
// Consider installing a non-null "ANON" distinguished value instead of just NULL.
_owner = ThreadLocalStorage::thread();
return ;
return;
}
if (TrySpin(NULL)) goto Exeunt ;
if (TrySpin(NULL)) goto Exeunt;
// slow-path - apparent contention
// Allocate a ParkEvent for transient use.
// The ParkEvent remains associated with this thread until
// the time the thread manages to acquire the lock.
ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
ESelf->reset() ;
OrderAccess::storeload() ;
ParkEvent * const ESelf = ParkEvent::Allocate(NULL);
ESelf->reset();
OrderAccess::storeload();
// Either Enqueue Self on cxq or acquire the outer lock.
if (AcquireOrPush (ESelf)) {
ParkEvent::Release (ESelf) ; // surrender the ParkEvent
goto Exeunt ;
ParkEvent::Release(ESelf); // surrender the ParkEvent
goto Exeunt;
}
// At any given time there is at most one ondeck thread.
@ -1050,37 +1050,37 @@ void Monitor::jvm_raw_lock() {
// Only the OnDeck thread can try to acquire -- contended for -- the lock.
// CONSIDER: use Self->OnDeck instead of m->OnDeck.
for (;;) {
if (_OnDeck == ESelf && TrySpin(NULL)) break ;
ParkCommon (ESelf, 0) ;
if (_OnDeck == ESelf && TrySpin(NULL)) break;
ParkCommon(ESelf, 0);
}
assert (_OnDeck == ESelf, "invariant") ;
_OnDeck = NULL ;
ParkEvent::Release (ESelf) ; // surrender the ParkEvent
goto Exeunt ;
assert(_OnDeck == ESelf, "invariant");
_OnDeck = NULL;
ParkEvent::Release(ESelf); // surrender the ParkEvent
goto Exeunt;
}
void Monitor::jvm_raw_unlock() {
// Nearly the same as Monitor::unlock() ...
// directly set _owner instead of using set_owner(null)
_owner = NULL ;
_owner = NULL;
if (_snuck) { // ???
assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
_snuck = false;
return ;
return;
}
IUnlock(false) ;
IUnlock(false);
}
bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
Thread * const Self = Thread::current() ;
assert (_owner == Self, "invariant") ;
assert (ILocked(), "invariant") ;
Thread * const Self = Thread::current();
assert(_owner == Self, "invariant");
assert(ILocked(), "invariant");
// as_suspend_equivalent logically implies !no_safepoint_check
guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
// !no_safepoint_check logically implies java_thread
guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;
guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");
#ifdef ASSERT
Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
@ -1093,14 +1093,14 @@ bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equiva
}
#endif // ASSERT
int wait_status ;
int wait_status;
// conceptually set the owner to NULL in anticipation of
// abdicating the lock in wait
set_owner(NULL);
if (no_safepoint_check) {
wait_status = IWait (Self, timeout) ;
wait_status = IWait(Self, timeout);
} else {
assert (Self->is_Java_thread(), "invariant") ;
assert(Self->is_Java_thread(), "invariant");
JavaThread *jt = (JavaThread *)Self;
// Enter safepoint region - ornate and Rococo ...
@ -1113,7 +1113,7 @@ bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equiva
// java_suspend_self()
}
wait_status = IWait (Self, timeout) ;
wait_status = IWait(Self, timeout);
// were we externally suspended while we were waiting?
if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
@ -1121,67 +1121,67 @@ bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equiva
// while we were waiting another thread suspended us. We don't
// want to hold the lock while suspended because that
// would surprise the thread that suspended us.
assert (ILocked(), "invariant") ;
IUnlock (true) ;
assert(ILocked(), "invariant");
IUnlock(true);
jt->java_suspend_self();
ILock (Self) ;
assert (ILocked(), "invariant") ;
ILock(Self);
assert(ILocked(), "invariant");
}
}
// Conceptually reestablish ownership of the lock.
// The "real" lock -- the LockByte -- was reacquired by IWait().
assert (ILocked(), "invariant") ;
assert (_owner == NULL, "invariant") ;
set_owner (Self) ;
return wait_status != 0 ; // return true IFF timeout
assert(ILocked(), "invariant");
assert(_owner == NULL, "invariant");
set_owner(Self);
return wait_status != 0; // return true IFF timeout
}
Monitor::~Monitor() {
assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "");
}
void Monitor::ClearMonitor (Monitor * m, const char *name) {
m->_owner = NULL ;
m->_snuck = false ;
m->_owner = NULL;
m->_snuck = false;
if (name == NULL) {
strcpy(m->_name, "UNKNOWN") ;
strcpy(m->_name, "UNKNOWN");
} else {
strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
m->_name[MONITOR_NAME_LEN - 1] = '\0';
}
m->_LockWord.FullWord = 0 ;
m->_EntryList = NULL ;
m->_OnDeck = NULL ;
m->_WaitSet = NULL ;
m->_WaitLock[0] = 0 ;
m->_LockWord.FullWord = 0;
m->_EntryList = NULL;
m->_OnDeck = NULL;
m->_WaitSet = NULL;
m->_WaitLock[0] = 0;
}
Monitor::Monitor() { ClearMonitor(this); }
Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
ClearMonitor (this, name) ;
ClearMonitor(this, name);
#ifdef ASSERT
_allow_vm_block = allow_vm_block;
_rank = Rank ;
_rank = Rank;
#endif
}
Mutex::~Mutex() {
assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "");
}
Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
ClearMonitor ((Monitor *) this, name) ;
ClearMonitor((Monitor *) this, name);
#ifdef ASSERT
_allow_vm_block = allow_vm_block;
_rank = Rank ;
_rank = Rank;
#endif
}
bool Monitor::owned_by_self() const {
bool ret = _owner == Thread::current();
assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
assert(!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant");
return ret;
}

File diff suppressed because it is too large

View File

@ -37,17 +37,17 @@
class ObjectWaiter : public StackObj {
public:
enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
enum Sorted { PREPEND, APPEND, SORTED } ;
enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ };
enum Sorted { PREPEND, APPEND, SORTED };
ObjectWaiter * volatile _next;
ObjectWaiter * volatile _prev;
Thread* _thread;
jlong _notifier_tid;
ParkEvent * _event;
volatile int _notified ;
volatile TStates TState ;
Sorted _Sorted ; // List placement disposition
bool _active ; // Contention monitoring is enabled
volatile int _notified;
volatile TStates TState;
Sorted _Sorted; // List placement disposition
bool _active; // Contention monitoring is enabled
public:
ObjectWaiter(Thread* thread);
@ -92,19 +92,19 @@ class ObjectMonitor {
static int owner_offset_in_bytes() { return offset_of(ObjectMonitor, _owner); }
static int count_offset_in_bytes() { return offset_of(ObjectMonitor, _count); }
static int recursions_offset_in_bytes() { return offset_of(ObjectMonitor, _recursions); }
static int cxq_offset_in_bytes() { return offset_of(ObjectMonitor, _cxq) ; }
static int succ_offset_in_bytes() { return offset_of(ObjectMonitor, _succ) ; }
static int cxq_offset_in_bytes() { return offset_of(ObjectMonitor, _cxq); }
static int succ_offset_in_bytes() { return offset_of(ObjectMonitor, _succ); }
static int EntryList_offset_in_bytes() { return offset_of(ObjectMonitor, _EntryList); }
static int FreeNext_offset_in_bytes() { return offset_of(ObjectMonitor, FreeNext); }
static int WaitSet_offset_in_bytes() { return offset_of(ObjectMonitor, _WaitSet) ; }
static int Responsible_offset_in_bytes() { return offset_of(ObjectMonitor, _Responsible);}
static int WaitSet_offset_in_bytes() { return offset_of(ObjectMonitor, _WaitSet); }
static int Responsible_offset_in_bytes() { return offset_of(ObjectMonitor, _Responsible); }
static int Spinner_offset_in_bytes() { return offset_of(ObjectMonitor, _Spinner); }
public:
// Eventually we'll make provisions for multiple callbacks, but
// now one will suffice.
static int (*SpinCallbackFunction)(intptr_t, int) ;
static intptr_t SpinCallbackArgument ;
static int (*SpinCallbackFunction)(intptr_t, int);
static intptr_t SpinCallbackArgument;
public:
@ -115,7 +115,7 @@ class ObjectMonitor {
// TODO-FIXME: merge _count and _waiters.
// TODO-FIXME: assert _owner == null implies _recursions = 0
// TODO-FIXME: assert _WaitSet != null implies _count > 0
return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList);
}
intptr_t is_entered(Thread* current) const;
@ -127,7 +127,7 @@ class ObjectMonitor {
intptr_t count() const;
void set_count(intptr_t count);
intptr_t contentions() const ;
intptr_t contentions() const;
intptr_t recursions() const { return _recursions; }
// JVM/DI GetMonitorInfo() needs this
@ -145,15 +145,15 @@ class ObjectMonitor {
_object = NULL;
_owner = NULL;
_WaitSet = NULL;
_WaitSetLock = 0 ;
_Responsible = NULL ;
_succ = NULL ;
_cxq = NULL ;
FreeNext = NULL ;
_EntryList = NULL ;
_SpinFreq = 0 ;
_SpinClock = 0 ;
OwnerIsThread = 0 ;
_WaitSetLock = 0;
_Responsible = NULL;
_succ = NULL;
_cxq = NULL;
FreeNext = NULL;
_EntryList = NULL;
_SpinFreq = 0;
_SpinClock = 0;
OwnerIsThread = 0;
_previous_owner_tid = 0;
}
@ -164,20 +164,20 @@ class ObjectMonitor {
}
private:
void Recycle () {
void Recycle() {
// TODO: add stronger asserts ...
// _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
// _count == 0 EntryList == NULL
// _recursions == 0 _WaitSet == NULL
// TODO: assert (is_busy()|_recursions) == 0
_succ = NULL ;
_EntryList = NULL ;
_cxq = NULL ;
_WaitSet = NULL ;
_recursions = 0 ;
_SpinFreq = 0 ;
_SpinClock = 0 ;
OwnerIsThread = 0 ;
_succ = NULL;
_EntryList = NULL;
_cxq = NULL;
_WaitSet = NULL;
_recursions = 0;
_SpinFreq = 0;
_SpinClock = 0;
OwnerIsThread = 0;
}
public:
@ -194,7 +194,7 @@ public:
void print();
#endif
bool try_enter (TRAPS) ;
bool try_enter(TRAPS);
void enter(TRAPS);
void exit(bool not_suspended, TRAPS);
void wait(jlong millis, bool interruptable, TRAPS);
@ -206,22 +206,22 @@ public:
void reenter(intptr_t recursions, TRAPS);
private:
void AddWaiter (ObjectWaiter * waiter) ;
void AddWaiter(ObjectWaiter * waiter);
static void DeferredInitialize();
ObjectWaiter * DequeueWaiter () ;
void DequeueSpecificWaiter (ObjectWaiter * waiter) ;
void EnterI (TRAPS) ;
void ReenterI (Thread * Self, ObjectWaiter * SelfNode) ;
void UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode) ;
int TryLock (Thread * Self) ;
int NotRunnable (Thread * Self, Thread * Owner) ;
int TrySpin_Fixed (Thread * Self) ;
int TrySpin_VaryFrequency (Thread * Self) ;
int TrySpin_VaryDuration (Thread * Self) ;
void ctAsserts () ;
void ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ;
bool ExitSuspendEquivalent (JavaThread * Self) ;
ObjectWaiter * DequeueWaiter();
void DequeueSpecificWaiter(ObjectWaiter * waiter);
void EnterI(TRAPS);
void ReenterI(Thread * Self, ObjectWaiter * SelfNode);
void UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
int TryLock(Thread * Self);
int NotRunnable(Thread * Self, Thread * Owner);
int TrySpin_Fixed(Thread * Self);
int TrySpin_VaryFrequency(Thread * Self);
int TrySpin_VaryDuration(Thread * Self);
void ctAsserts();
void ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
bool ExitSuspendEquivalent(JavaThread * Self);
void post_monitor_wait_event(EventJavaMonitorWait * event,
jlong notifier_tid,
jlong timeout,
@ -240,7 +240,7 @@ public:
volatile markOop _header; // displaced object header word - mark
void* volatile _object; // backward object pointer - strong root
double SharingPad [1] ; // temp to reduce false sharing
double SharingPad[1]; // temp to reduce false sharing
// All the following fields must be machine word aligned
// The VM assumes write ordering wrt these fields, which can be
@ -251,22 +251,22 @@ public:
volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
volatile intptr_t _recursions; // recursion count, 0 for first entry
private:
int OwnerIsThread ; // _owner is (Thread *) vs SP/BasicLock
ObjectWaiter * volatile _cxq ; // LL of recently-arrived threads blocked on entry.
int OwnerIsThread; // _owner is (Thread *) vs SP/BasicLock
ObjectWaiter * volatile _cxq; // LL of recently-arrived threads blocked on entry.
// The list is actually composed of WaitNodes, acting
// as proxies for Threads.
protected:
ObjectWaiter * volatile _EntryList ; // Threads blocked on entry or reentry.
ObjectWaiter * volatile _EntryList; // Threads blocked on entry or reentry.
private:
Thread * volatile _succ ; // Heir presumptive thread - used for futile wakeup throttling
Thread * volatile _Responsible ;
int _PromptDrain ; // rqst to drain cxq into EntryList ASAP
Thread * volatile _succ; // Heir presumptive thread - used for futile wakeup throttling
Thread * volatile _Responsible;
int _PromptDrain; // rqst to drain cxq into EntryList ASAP
volatile int _Spinner ; // for exit->spinner handoff optimization
volatile int _SpinFreq ; // Spin 1-out-of-N attempts: success rate
volatile int _SpinClock ;
volatile int _SpinDuration ;
volatile intptr_t _SpinState ; // MCS/CLH list of spinners
volatile int _Spinner; // for exit->spinner handoff optimization
volatile int _SpinFreq; // Spin 1-out-of-N attempts: success rate
volatile int _SpinClock;
volatile int _SpinDuration;
volatile intptr_t _SpinState; // MCS/CLH list of spinners
// TODO-FIXME: _count, _waiters and _recursions should be of
// type int, or int32_t but not intptr_t. There's no reason
@ -284,30 +284,30 @@ public:
volatile int _WaitSetLock; // protects Wait Queue - simple spinlock
public:
int _QMix ; // Mixed prepend queue discipline
ObjectMonitor * FreeNext ; // Free list linkage
intptr_t StatA, StatsB ;
int _QMix; // Mixed prepend queue discipline
ObjectMonitor * FreeNext; // Free list linkage
intptr_t StatA, StatsB;
public:
static void Initialize () ;
static PerfCounter * _sync_ContendedLockAttempts ;
static PerfCounter * _sync_FutileWakeups ;
static PerfCounter * _sync_Parks ;
static PerfCounter * _sync_EmptyNotifications ;
static PerfCounter * _sync_Notifications ;
static PerfCounter * _sync_SlowEnter ;
static PerfCounter * _sync_SlowExit ;
static PerfCounter * _sync_SlowNotify ;
static PerfCounter * _sync_SlowNotifyAll ;
static PerfCounter * _sync_FailedSpins ;
static PerfCounter * _sync_SuccessfulSpins ;
static PerfCounter * _sync_PrivateA ;
static PerfCounter * _sync_PrivateB ;
static PerfCounter * _sync_MonInCirculation ;
static PerfCounter * _sync_MonScavenged ;
static PerfCounter * _sync_Inflations ;
static PerfCounter * _sync_Deflations ;
static PerfLongVariable * _sync_MonExtant ;
static void Initialize();
static PerfCounter * _sync_ContendedLockAttempts;
static PerfCounter * _sync_FutileWakeups;
static PerfCounter * _sync_Parks;
static PerfCounter * _sync_EmptyNotifications;
static PerfCounter * _sync_Notifications;
static PerfCounter * _sync_SlowEnter;
static PerfCounter * _sync_SlowExit;
static PerfCounter * _sync_SlowNotify;
static PerfCounter * _sync_SlowNotifyAll;
static PerfCounter * _sync_FailedSpins;
static PerfCounter * _sync_SuccessfulSpins;
static PerfCounter * _sync_PrivateA;
static PerfCounter * _sync_PrivateB;
static PerfCounter * _sync_MonInCirculation;
static PerfCounter * _sync_MonScavenged;
static PerfCounter * _sync_Inflations;
static PerfCounter * _sync_Deflations;
static PerfLongVariable * _sync_MonExtant;
public:
static int Knob_Verbose;
@ -329,7 +329,7 @@ public:
#undef TEVENT
#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
#define FEVENT(nom) { static volatile int ctr = 0; int v = ++ctr; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
#undef TEVENT
#define TEVENT(nom) {;}
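The FEVENT macro above throttles its own output: the counter is bumped on every hit, but a line is printed only when the new value v satisfies (v & (v-1)) == 0, i.e. when v is a power of two, so a hot event logs on occurrences 1, 2, 4, 8, ... instead of every time. A minimal standalone sketch of the same trick (the helper name is illustrative, not part of the VM):

  #include <cstdio>

  // Logs on the 1st, 2nd, 4th, 8th, ... occurrence only, because
  // (v & (v - 1)) == 0 holds exactly when v is a power of two.
  static void count_event(const char* name) {
    static volatile int ctr = 0;
    int v = ++ctr;
    if ((v & (v - 1)) == 0) {
      ::printf("%s : %d\n", name, v);
      ::fflush(stdout);
    }
  }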

View File

@ -32,6 +32,9 @@
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#ifdef ASSERT
#include "memory/guardedMemory.hpp"
#endif
#include "oops/oop.inline.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
@ -523,121 +526,20 @@ char *os::strdup(const char *str, MEMFLAGS flags) {
}
#ifdef ASSERT
#define space_before (MallocCushion + sizeof(double))
#define space_after MallocCushion
#define size_addr_from_base(p) (size_t*)(p + space_before - sizeof(size_t))
#define size_addr_from_obj(p) ((size_t*)p - 1)
// MallocCushion: size of extra cushion allocated around objects with +UseMallocOnly
// NB: cannot be debug variable, because these aren't set from the command line until
// *after* the first few allocs already happened
#define MallocCushion 16
#else
#define space_before 0
#define space_after 0
#define size_addr_from_base(p) should not use w/o ASSERT
#define size_addr_from_obj(p) should not use w/o ASSERT
#define MallocCushion 0
#endif
#define paranoid 0 /* only set to 1 if you suspect checking code has bug */
#ifdef ASSERT
inline size_t get_size(void* obj) {
size_t size = *size_addr_from_obj(obj);
if (size < 0) {
fatal(err_msg("free: size field of object #" PTR_FORMAT " was overwritten ("
SIZE_FORMAT ")", obj, size));
}
return size;
}
u_char* find_cushion_backwards(u_char* start) {
u_char* p = start;
while (p[ 0] != badResourceValue || p[-1] != badResourceValue ||
p[-2] != badResourceValue || p[-3] != badResourceValue) p--;
// ok, we have four consecutive marker bytes; find start
u_char* q = p - 4;
while (*q == badResourceValue) q--;
return q + 1;
}
u_char* find_cushion_forwards(u_char* start) {
u_char* p = start;
while (p[0] != badResourceValue || p[1] != badResourceValue ||
p[2] != badResourceValue || p[3] != badResourceValue) p++;
// ok, we have four consecutive marker bytes; find end of cushion
u_char* q = p + 4;
while (*q == badResourceValue) q++;
return q - MallocCushion;
}
void print_neighbor_blocks(void* ptr) {
// find block allocated before ptr (not entirely crash-proof)
if (MallocCushion < 4) {
tty->print_cr("### cannot find previous block (MallocCushion < 4)");
return;
}
u_char* start_of_this_block = (u_char*)ptr - space_before;
u_char* end_of_prev_block_data = start_of_this_block - space_after -1;
// look for cushion in front of prev. block
u_char* start_of_prev_block = find_cushion_backwards(end_of_prev_block_data);
ptrdiff_t size = *size_addr_from_base(start_of_prev_block);
u_char* obj = start_of_prev_block + space_before;
if (size <= 0 ) {
// start is bad; may have been confused by OS data in between objects
// search one more backwards
start_of_prev_block = find_cushion_backwards(start_of_prev_block);
size = *size_addr_from_base(start_of_prev_block);
obj = start_of_prev_block + space_before;
}
if (start_of_prev_block + space_before + size + space_after == start_of_this_block) {
tty->print_cr("### previous object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size);
} else {
tty->print_cr("### previous object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size);
}
// now find successor block
u_char* start_of_next_block = (u_char*)ptr + *size_addr_from_obj(ptr) + space_after;
start_of_next_block = find_cushion_forwards(start_of_next_block);
u_char* next_obj = start_of_next_block + space_before;
ptrdiff_t next_size = *size_addr_from_base(start_of_next_block);
if (start_of_next_block[0] == badResourceValue &&
start_of_next_block[1] == badResourceValue &&
start_of_next_block[2] == badResourceValue &&
start_of_next_block[3] == badResourceValue) {
tty->print_cr("### next object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size);
} else {
tty->print_cr("### next object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size);
static void verify_memory(void* ptr) {
GuardedMemory guarded(ptr);
if (!guarded.verify_guards()) {
tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
tty->print_cr("## memory stomp:");
guarded.print_on(tty);
fatal("memory stomping error");
}
}
void report_heap_error(void* memblock, void* bad, const char* where) {
tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
tty->print_cr("## memory stomp: byte at " PTR_FORMAT " %s object " PTR_FORMAT, bad, where, memblock);
print_neighbor_blocks(memblock);
fatal("memory stomping error");
}
void verify_block(void* memblock) {
size_t size = get_size(memblock);
if (MallocCushion) {
u_char* ptr = (u_char*)memblock - space_before;
for (int i = 0; i < MallocCushion; i++) {
if (ptr[i] != badResourceValue) {
report_heap_error(memblock, ptr+i, "in front of");
}
}
u_char* end = (u_char*)memblock + size + space_after;
for (int j = -MallocCushion; j < 0; j++) {
if (end[j] != badResourceValue) {
report_heap_error(memblock, end+j, "after");
}
}
}
}
#endif
//
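The rewrite above replaces the hand-rolled MallocCushion bookkeeping with the GuardedMemory helper: each allocation is wrapped with guard bytes, the user size is recorded, and verify_memory() checks that the guards are still intact before reporting a memory stomp. As a rough illustration of that layout only (a hypothetical header-plus-canary scheme, not the actual GuardedMemory implementation, and ignoring the alignment padding a real version would add):

  #include <cstdint>
  #include <cstring>

  // Hypothetical layout: [size_t size][front canary][user data][tail canary]
  static const size_t  kPad    = 16;    // canary bytes on each side
  static const uint8_t kCanary = 0xAB;  // fill value checked later

  // Total bytes needed to wrap a user request of 'size' bytes.
  static size_t total_size(size_t size) {
    return sizeof(size_t) + kPad + size + kPad;
  }

  // Write header and canaries into 'base' (a block of total_size(size) bytes)
  // and return the pointer that is handed to the caller.
  static void* wrap(void* base, size_t size) {
    uint8_t* p = (uint8_t*)base;
    *(size_t*)p = size;
    memset(p + sizeof(size_t), kCanary, kPad);
    memset(p + sizeof(size_t) + kPad + size, kCanary, kPad);
    return p + sizeof(size_t) + kPad;
  }

  // True if neither canary around the user block has been overwritten.
  static bool verify(void* user) {
    uint8_t* front = (uint8_t*)user - kPad;
    size_t   size  = *(size_t*)(front - sizeof(size_t));
    uint8_t* tail  = (uint8_t*)user + size;
    for (size_t i = 0; i < kPad; i++) {
      if (front[i] != kCanary || tail[i] != kCanary) return false;
    }
    return true;
  }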
@ -686,16 +588,18 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
size = 1;
}
const size_t alloc_size = size + space_before + space_after;
#ifndef ASSERT
const size_t alloc_size = size;
#else
const size_t alloc_size = GuardedMemory::get_total_size(size);
if (size > alloc_size) { // Check for rollover.
return NULL;
}
#endif
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
u_char* ptr;
if (MallocMaxTestWords > 0) {
ptr = testMalloc(alloc_size);
} else {
@ -703,28 +607,26 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
}
#ifdef ASSERT
if (ptr == NULL) return NULL;
if (MallocCushion) {
for (u_char* p = ptr; p < ptr + MallocCushion; p++) *p = (u_char)badResourceValue;
u_char* end = ptr + space_before + size;
for (u_char* pq = ptr+MallocCushion; pq < end; pq++) *pq = (u_char)uninitBlockPad;
for (u_char* q = end; q < end + MallocCushion; q++) *q = (u_char)badResourceValue;
if (ptr == NULL) {
return NULL;
}
// put size just before data
*size_addr_from_base(ptr) = size;
// Wrap memory with guard
GuardedMemory guarded(ptr, size);
ptr = guarded.get_user_ptr();
#endif
u_char* memblock = ptr + space_before;
if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
breakpoint();
}
debug_only(if (paranoid) verify_block(memblock));
if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
debug_only(if (paranoid) verify_memory(ptr));
if (PrintMalloc && tty != NULL) {
tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
}
// we do not track MallocCushion memory
MemTracker::record_malloc((address)memblock, size, memflags, caller == 0 ? CALLER_PC : caller);
// we do not track guard memory
MemTracker::record_malloc((address)ptr, size, memflags, caller == 0 ? CALLER_PC : caller);
return memblock;
return ptr;
}
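The size > alloc_size test in os::malloc above is a wraparound guard: GuardedMemory::get_total_size() adds the guard overhead to the request, and since size_t arithmetic is modular, a huge request can wrap around to a small total. Rejecting the allocation when the padded total ends up smaller than the original request catches exactly that case; a minimal sketch of the pattern (the constant and helper name are illustrative):

  #include <cstdlib>

  static const size_t kOverhead = 48;   // assumed guard + header overhead

  static void* checked_alloc(size_t size) {
    size_t total = size + kOverhead;    // unsigned addition wraps on overflow
    if (total < size) {                 // rollover: the request is too large
      return NULL;
    }
    return ::malloc(total);
  }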
@ -743,27 +645,32 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller
return ptr;
#else
if (memblock == NULL) {
return malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
return os::malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
}
if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
breakpoint();
}
verify_block(memblock);
verify_memory(memblock);
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
if (size == 0) return NULL;
if (size == 0) {
return NULL;
}
// always move the block
void* ptr = malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
if (PrintMalloc) tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
void* ptr = os::malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
if (PrintMalloc) {
tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
}
// Copy to new memory if malloc didn't fail
if ( ptr != NULL ) {
memcpy(ptr, memblock, MIN2(size, get_size(memblock)));
if (paranoid) verify_block(ptr);
GuardedMemory guarded(memblock);
memcpy(ptr, memblock, MIN2(size, guarded.get_user_size()));
if (paranoid) verify_memory(ptr);
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
breakpoint();
}
free(memblock);
os::free(memblock);
}
return ptr;
#endif
@ -771,6 +678,7 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller
void os::free(void *memblock, MEMFLAGS memflags) {
address trackp = (address) memblock;
NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
#ifdef ASSERT
if (memblock == NULL) return;
@ -778,34 +686,20 @@ void os::free(void *memblock, MEMFLAGS memflags) {
if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
breakpoint();
}
verify_block(memblock);
verify_memory(memblock);
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
// Added by detlefs.
if (MallocCushion) {
u_char* ptr = (u_char*)memblock - space_before;
for (u_char* p = ptr; p < ptr + MallocCushion; p++) {
guarantee(*p == badResourceValue,
"Thing freed should be malloc result.");
*p = (u_char)freeBlockPad;
}
size_t size = get_size(memblock);
inc_stat_counter(&free_bytes, size);
u_char* end = ptr + space_before + size;
for (u_char* q = end; q < end + MallocCushion; q++) {
guarantee(*q == badResourceValue,
"Thing freed should be malloc result.");
*q = (u_char)freeBlockPad;
}
if (PrintMalloc && tty != NULL)
GuardedMemory guarded(memblock);
size_t size = guarded.get_user_size();
inc_stat_counter(&free_bytes, size);
memblock = guarded.release_for_freeing();
if (PrintMalloc && tty != NULL) {
fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock);
} else if (PrintMalloc && tty != NULL) {
// tty->print_cr("os::free %p", memblock);
fprintf(stderr, "os::free " PTR_FORMAT "\n", (uintptr_t)memblock);
}
#endif
MemTracker::record_free((address)memblock, memflags);
MemTracker::record_free(trackp, memflags);
::free((char*)memblock - space_before);
::free(memblock);
}
void os::init_random(long initval) {

View File

@ -410,49 +410,6 @@ oop Reflection::array_component_type(oop mirror, TRAPS) {
}
bool Reflection::reflect_check_access(Klass* field_class, AccessFlags acc, Klass* target_class, bool is_method_invoke, TRAPS) {
// field_class : declaring class
// acc : declared field access
// target_class : for protected
// Check if field or method is accessible to client. Throw an
// IllegalAccessException and return false if not.
// The "client" is the class associated with the nearest real frame
// getCallerClass already skips Method.invoke frames, so pass 0 in
// that case (same as classic).
ResourceMark rm(THREAD);
assert(THREAD->is_Java_thread(), "sanity check");
Klass* client_class = ((JavaThread *)THREAD)->security_get_caller_class(is_method_invoke ? 0 : 1);
if (client_class != field_class) {
if (!verify_class_access(client_class, field_class, false)
|| !verify_field_access(client_class,
field_class,
field_class,
acc,
false)) {
THROW_(vmSymbols::java_lang_IllegalAccessException(), false);
}
}
// Additional test for protected members: JLS 6.6.2
if (acc.is_protected()) {
if (target_class != client_class) {
if (!is_same_class_package(client_class, field_class)) {
if (!target_class->is_subclass_of(client_class)) {
THROW_(vmSymbols::java_lang_IllegalAccessException(), false);
}
}
}
}
// Passed all tests
return true;
}
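The protected-member branch of the removed check encodes JLS 6.6.2: outside the declaring class's package, a protected instance member may only be accessed through a reference whose class is the accessing (client) class or one of its subclasses. Condensed into a single hypothetical predicate using the same names as the code above (this helper does not exist in the VM):

  // true -> access permitted, false -> IllegalAccessException is thrown
  static bool protected_access_ok(Klass* client_class, Klass* field_class,
                                  Klass* target_class) {
    if (target_class == client_class)                      return true;
    if (is_same_class_package(client_class, field_class))  return true;
    return target_class->is_subclass_of(client_class);
  }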
bool Reflection::verify_class_access(Klass* current_class, Klass* new_class, bool classloader_only) {
// Verify that current_class can access new_class. If the classloader_only
// flag is set, we automatically allow any accesses in which current_class
@ -463,10 +420,9 @@ bool Reflection::verify_class_access(Klass* current_class, Klass* new_class, boo
is_same_class_package(current_class, new_class)) {
return true;
}
// New (1.4) reflection implementation. Allow all accesses from
// sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
if ( JDK_Version::is_gte_jdk14x_version()
&& current_class->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
// Allow all accesses from sun/reflect/MagicAccessorImpl subclasses to
// succeed trivially.
if (current_class->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
return true;
}
@ -567,10 +523,9 @@ bool Reflection::verify_field_access(Klass* current_class,
return true;
}
// New (1.4) reflection implementation. Allow all accesses from
// sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
if ( JDK_Version::is_gte_jdk14x_version()
&& current_class->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
// Allow all accesses from sun/reflect/MagicAccessorImpl subclasses to
// succeed trivially.
if (current_class->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
return true;
}
@ -707,12 +662,10 @@ Handle Reflection::new_type(Symbol* signature, KlassHandle k, TRAPS) {
oop Reflection::new_method(methodHandle method, bool for_constant_pool_access, TRAPS) {
// In jdk1.2.x, getMethods on an interface erroneously includes <clinit>, thus the complicated assert.
// Also allow sun.reflect.ConstantPool to refer to <clinit> methods as java.lang.reflect.Methods.
// Allow sun.reflect.ConstantPool to refer to <clinit> methods as java.lang.reflect.Methods.
assert(!method()->is_initializer() ||
(for_constant_pool_access && method()->is_static()) ||
(method()->name() == vmSymbols::class_initializer_name()
&& method()->method_holder()->is_interface() && JDK_Version::is_jdk12x_version()), "should call new_constructor instead");
(for_constant_pool_access && method()->is_static()),
"should call new_constructor instead");
instanceKlassHandle holder (THREAD, method->method_holder());
int slot = method->method_idnum();
@ -978,22 +931,6 @@ oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
reflected_method->signature()));
}
// In the JDK 1.4 reflection implementation, the security check is
// done at the Java level
if (!JDK_Version::is_gte_jdk14x_version()) {
// Access checking (unless overridden by Method)
if (!override) {
if (!(klass->is_public() && reflected_method->is_public())) {
bool access = Reflection::reflect_check_access(klass(), reflected_method->access_flags(), target_klass(), is_method_invoke, CHECK_NULL);
if (!access) {
return NULL; // exception
}
}
}
} // !Universe::is_gte_jdk14x_version()
assert(ptypes->is_objArray(), "just checking");
int args_len = args.is_null() ? 0 : args->length();
// Check number of arguments

View File

@ -44,9 +44,6 @@ class FieldStream;
class Reflection: public AllStatic {
private:
// Access checking
static bool reflect_check_access(Klass* field_class, AccessFlags acc, Klass* target_class, bool is_method_invoke, TRAPS);
// Conversion
static Klass* basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS);
static oop basic_type_arrayklass_to_mirror(Klass* basic_type_arrayklass, TRAPS);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -76,15 +76,10 @@ void FilteredFieldsMap::initialize() {
int offset;
offset = java_lang_Throwable::get_backtrace_offset();
_filtered_fields->append(new FilteredField(SystemDictionary::Throwable_klass(), offset));
// The latest version of vm may be used with old jdk.
if (JDK_Version::is_gte_jdk16x_version()) {
// The following class fields do not exist in
// previous version of jdk.
offset = sun_reflect_ConstantPool::oop_offset();
_filtered_fields->append(new FilteredField(SystemDictionary::reflect_ConstantPool_klass(), offset));
offset = sun_reflect_UnsafeStaticFieldAccessorImpl::base_offset();
_filtered_fields->append(new FilteredField(SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass(), offset));
}
offset = sun_reflect_ConstantPool::oop_offset();
_filtered_fields->append(new FilteredField(SystemDictionary::reflect_ConstantPool_klass(), offset));
offset = sun_reflect_UnsafeStaticFieldAccessorImpl::base_offset();
_filtered_fields->append(new FilteredField(SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass(), offset));
}
int FilteredFieldStream::field_count() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,8 +41,7 @@ void ServiceThread::initialize() {
instanceKlassHandle klass (THREAD, SystemDictionary::Thread_klass());
instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
const char* name = JDK_Version::is_gte_jdk17x_version() ?
"Service Thread" : "Low Memory Detector";
const char* name = "Service Thread";
Handle string = java_lang_String::create_from_str(name, CHECK);

View File

@ -198,13 +198,13 @@ void SharedRuntime::trace_ic_miss(address at) {
void SharedRuntime::print_ic_miss_histogram() {
if (ICMissHistogram) {
tty->print_cr ("IC Miss Histogram:");
tty->print_cr("IC Miss Histogram:");
int tot_misses = 0;
for (int i = 0; i < _ICmiss_index; i++) {
tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
tot_misses += _ICmiss_count[i];
}
tty->print_cr ("Total IC misses: %7d", tot_misses);
tty->print_cr("Total IC misses: %7d", tot_misses);
}
}
#endif // PRODUCT
@ -266,7 +266,7 @@ JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
xbits.f = x;
ybits.f = y;
// x Mod Infinity == x unless x is infinity
if ( ((xbits.i & float_sign_mask) != float_infinity) &&
if (((xbits.i & float_sign_mask) != float_infinity) &&
((ybits.i & float_sign_mask) == float_infinity) ) {
return x;
}
@ -281,7 +281,7 @@ JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
xbits.d = x;
ybits.d = y;
// x Mod Infinity == x unless x is infinity
if ( ((xbits.l & double_sign_mask) != double_infinity) &&
if (((xbits.l & double_sign_mask) != double_infinity) &&
((ybits.l & double_sign_mask) == double_infinity) ) {
return x;
}
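Both frem and drem above special-case the identity stated in the comments: for any finite x, x mod ±Infinity is x itself, which the code detects by masking off the sign bit and comparing the remaining bits against the infinity encoding. The same behaviour is observable from plain C++ (a self-contained check, independent of the VM):

  #include <cassert>
  #include <cmath>
  #include <limits>

  int main() {
    double inf = std::numeric_limits<double>::infinity();
    assert(std::fmod(5.0, inf) == 5.0);       // finite x mod +inf == x
    assert(std::fmod(-2.5, -inf) == -2.5);    // sign of the divisor is ignored
    assert(std::isnan(std::fmod(inf, 2.0)));  // infinite dividend -> NaN
    return 0;
  }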
@ -537,13 +537,13 @@ address SharedRuntime::get_poll_stub(address pc) {
CodeBlob *cb = CodeCache::find_blob(pc);
// Should be an nmethod
assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );
assert(cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");
// Look up the relocation information
assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
"safepoint polling: type must be poll" );
assert(((nmethod*)cb)->is_at_poll_or_poll_return(pc),
"safepoint polling: type must be poll");
assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
assert(((NativeInstruction*)pc)->is_safepoint_poll(),
"Only polling locations are used for safepoint");
bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
@ -562,7 +562,7 @@ address SharedRuntime::get_poll_stub(address pc) {
stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
}
#ifndef PRODUCT
if( TraceSafepoint ) {
if (TraceSafepoint) {
char buf[256];
jio_snprintf(buf, sizeof(buf),
"... found polling page %s exception at pc = "
@ -1474,7 +1474,7 @@ methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
should_be_mono = true;
} else if (inline_cache->is_icholder_call()) {
CompiledICHolder* ic_oop = inline_cache->cached_icholder();
if ( ic_oop != NULL) {
if (ic_oop != NULL) {
if (receiver()->klass() == ic_oop->holder_klass()) {
// This isn't a real miss. We must have seen that compiled code
@ -1728,7 +1728,7 @@ IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal
iter.next();
assert(iter.has_current(), "must have a reloc at java call site");
relocInfo::relocType typ = iter.reloc()->type();
if ( typ != relocInfo::static_call_type &&
if (typ != relocInfo::static_call_type &&
typ != relocInfo::opt_virtual_call_type &&
typ != relocInfo::static_stub_type) {
return;
@ -1784,7 +1784,7 @@ JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
// The copy_array mechanism is awkward and could be removed, but
// the compilers don't call this function except as a last resort,
// so it probably doesn't matter.
src->klass()->copy_array((arrayOopDesc*)src, src_pos,
src->klass()->copy_array((arrayOopDesc*)src, src_pos,
(arrayOopDesc*)dest, dest_pos,
length, thread);
}
@ -1891,8 +1891,8 @@ void SharedRuntime::print_statistics() {
ttyLocker ttyl;
if (xtty != NULL) xtty->head("statistics type='SharedRuntime'");
if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow", _monitor_enter_ctr);
if (_monitor_exit_ctr ) tty->print_cr("%5d monitor exit slow", _monitor_exit_ctr);
if (_monitor_enter_ctr) tty->print_cr("%5d monitor enter slow", _monitor_enter_ctr);
if (_monitor_exit_ctr) tty->print_cr("%5d monitor exit slow", _monitor_exit_ctr);
if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
SharedRuntime::print_ic_miss_histogram();
@ -1905,36 +1905,36 @@ void SharedRuntime::print_statistics() {
}
// Dump the JRT_ENTRY counters
if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr);
if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
if (_new_instance_ctr) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
if (_new_array_ctr) tty->print_cr("%5d new array requires GC", _new_array_ctr);
if (_multi1_ctr) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
if (_multi2_ctr) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
if (_multi3_ctr) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
if (_multi4_ctr) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
if (_multi5_ctr) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr );
tty->print_cr("%5d wrong method", _wrong_method_ctr );
tty->print_cr("%5d unresolved static call site", _resolve_static_ctr );
tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr );
tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr );
tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr);
tty->print_cr("%5d wrong method", _wrong_method_ctr);
tty->print_cr("%5d unresolved static call site", _resolve_static_ctr);
tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr);
tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr);
if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr );
if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr );
if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr );
if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr );
if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr );
if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr );
if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr );
if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
if (_mon_enter_stub_ctr) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr);
if (_mon_exit_stub_ctr) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr);
if (_mon_enter_ctr) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr);
if (_mon_exit_ctr) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr);
if (_partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr);
if (_jbyte_array_copy_ctr) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr);
if (_jshort_array_copy_ctr) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr);
if (_jint_array_copy_ctr) tty->print_cr("%5d int array copies", _jint_array_copy_ctr);
if (_jlong_array_copy_ctr) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr);
if (_oop_array_copy_ctr) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr);
if (_checkcast_array_copy_ctr) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr);
if (_unsafe_array_copy_ctr) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr);
if (_generic_array_copy_ctr) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr);
if (_slow_array_copy_ctr) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr);
if (_find_handler_ctr) tty->print_cr("%5d find exception handler", _find_handler_ctr);
if (_rethrow_ctr) tty->print_cr("%5d rethrow handler", _rethrow_ctr);
AdapterHandlerLibrary::print_statistics();
@ -1997,7 +1997,7 @@ class MethodArityHistogram {
MethodArityHistogram() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
_max_arity = _max_size = 0;
for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0;
for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
CodeCache::nmethods_do(add_method_to_histogram);
print_histogram();
}
@ -2062,7 +2062,7 @@ class AdapterFingerPrint : public CHeapObj<mtCode> {
// These are correct for the current system but someday it might be
// necessary to make this mapping platform dependent.
static int adapter_encoding(BasicType in) {
switch(in) {
switch (in) {
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
@ -2479,7 +2479,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
_adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
method->signature()->as_C_string(), insts_size);
tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
tty->print_cr("c2i argument handler starts at %p", entry->get_c2i_entry());
if (Verbose || PrintStubCode) {
address first_pc = entry->base_address();
if (first_pc != NULL) {
@ -2504,7 +2504,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
new_adapter->name(),
fingerprint->as_string(),
new_adapter->content_begin());
Forte::register_stub(blob_id, new_adapter->content_begin(),new_adapter->content_end());
Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
@ -2605,12 +2605,12 @@ void AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
int i=0;
if( !method->is_static() ) // Pass in receiver first
if (!method->is_static()) // Pass in receiver first
sig_bt[i++] = T_OBJECT;
SignatureStream ss(method->signature());
for( ; !ss.at_return_type(); ss.next()) {
for (; !ss.at_return_type(); ss.next()) {
sig_bt[i++] = ss.type(); // Collect remaining bits of signature
if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
}
assert(i == total_args_passed, "");
@ -2762,10 +2762,10 @@ void SharedRuntime::convert_ints_to_longints(int i2l_argcnt, int& in_args_count,
case T_SHORT:
case T_INT:
// Convert (bt) to (T_LONG,bt).
new_in_sig_bt[argcnt ] = T_LONG;
new_in_sig_bt[argcnt] = T_LONG;
new_in_sig_bt[argcnt+1] = bt;
assert(reg.first()->is_valid() && !reg.second()->is_valid(), "");
new_in_regs[argcnt ].set2(reg.first());
new_in_regs[argcnt].set2(reg.first());
new_in_regs[argcnt+1].set_bad();
argcnt++;
break;
@ -2808,17 +2808,17 @@ VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver,
int len = (int)strlen(s);
s++; len--; // Skip opening paren
char *t = s+len;
while( *(--t) != ')' ) ; // Find close paren
while (*(--t) != ')'); // Find close paren
BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
int cnt = 0;
if (has_receiver) {
sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
}
while( s < t ) {
switch( *s++ ) { // Switch on signature character
while (s < t) {
switch (*s++) { // Switch on signature character
case 'B': sig_bt[cnt++] = T_BYTE; break;
case 'C': sig_bt[cnt++] = T_CHAR; break;
case 'D': sig_bt[cnt++] = T_DOUBLE; sig_bt[cnt++] = T_VOID; break;
@ -2829,16 +2829,16 @@ VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver,
case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
case 'V': sig_bt[cnt++] = T_VOID; break;
case 'L': // Oop
while( *s++ != ';' ) ; // Skip signature
while (*s++ != ';'); // Skip signature
sig_bt[cnt++] = T_OBJECT;
break;
case '[': { // Array
do { // Skip optional size
while( *s >= '0' && *s <= '9' ) s++;
} while( *s++ == '[' ); // Nested arrays?
while (*s >= '0' && *s <= '9') s++;
} while (*s++ == '['); // Nested arrays?
// Skip element type
if( s[-1] == 'L' )
while( *s++ != ';' ) ; // Skip signature
if (s[-1] == 'L')
while (*s++ != ';'); // Skip signature
sig_bt[cnt++] = T_ARRAY;
break;
}
@ -2850,7 +2850,7 @@ VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver,
sig_bt[cnt++] = T_OBJECT;
}
assert( cnt < 256, "grow table size" );
assert(cnt < 256, "grow table size");
int comp_args_on_stack;
comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
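find_callee_arguments above walks the method descriptor one character at a time: primitive codes map directly to BasicTypes (longs and doubles also get a trailing T_VOID slot), 'L...;' collapses to a single T_OBJECT, and '[' skips the element type and records T_ARRAY. As a purely illustrative worked example, the descriptor "(I[JLjava/lang/String;D)V" with has_receiver = true fills sig_bt as follows:

  //   sig_bt[0] = T_OBJECT   // receiver, prepended, not part of the descriptor
  //   sig_bt[1] = T_INT      // 'I'
  //   sig_bt[2] = T_ARRAY    // '[J' -- the element type is skipped
  //   sig_bt[3] = T_OBJECT   // 'Ljava/lang/String;'
  //   sig_bt[4] = T_DOUBLE   // 'D' ...
  //   sig_bt[5] = T_VOID     //     ... doubles occupy two Java slots
  //   cnt = 6; the 'V' return type after ')' is never reached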
@ -2861,12 +2861,12 @@ VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver,
if (comp_args_on_stack) {
for (int i = 0; i < cnt; i++) {
VMReg reg1 = regs[i].first();
if( reg1->is_stack()) {
if (reg1->is_stack()) {
// Yuck
reg1 = reg1->bias(out_preserve_stack_slots());
}
VMReg reg2 = regs[i].second();
if( reg2->is_stack()) {
if (reg2->is_stack()) {
// Yuck
reg2 = reg2->bias(out_preserve_stack_slots());
}
@ -2904,15 +2904,15 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
// frame accessor methods and be platform independent.
frame fr = thread->last_frame();
assert( fr.is_interpreted_frame(), "" );
assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" );
assert(fr.is_interpreted_frame(), "");
assert(fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks");
// Figure out how many monitors are active.
int active_monitor_count = 0;
for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
kptr < fr.interpreter_frame_monitor_begin();
kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
if( kptr->obj() != NULL ) active_monitor_count++;
if (kptr->obj() != NULL) active_monitor_count++;
}
// QQQ we could place number of active monitors in the array so that compiled code
@ -2926,17 +2926,17 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
// Copy the locals. Order is preserved so that loading of longs works.
// Since there's no GC I can copy the oops blindly.
assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
assert(sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
(HeapWord*)&buf[0],
max_locals);
// Inflate locks. Copy the displaced headers. Be careful, there can be holes.
int i = max_locals;
for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
kptr2 < fr.interpreter_frame_monitor_begin();
kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
if( kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array
if (kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array
BasicLock *lock = kptr2->lock();
// Inflate so the displaced header becomes position-independent
if (lock->displaced_header()->is_unlocked())
@ -2946,20 +2946,20 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
}
}
assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
return buf;
JRT_END
JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
FREE_C_HEAP_ARRAY(intptr_t,buf, mtCode);
FREE_C_HEAP_ARRAY(intptr_t, buf, mtCode);
JRT_END
bool AdapterHandlerLibrary::contains(CodeBlob* b) {
AdapterHandlerTableIterator iter(_adapters);
while (iter.has_next()) {
AdapterHandlerEntry* a = iter.next();
if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
if (b == CodeCache::find_blob(a->get_i2c_entry())) return true;
}
return false;
}

View File

@ -217,7 +217,7 @@ class SharedRuntime: AllStatic {
static UncommonTrapBlob* uncommon_trap_blob() { return _uncommon_trap_blob; }
#endif // COMPILER2
static address get_resolve_opt_virtual_call_stub(){
static address get_resolve_opt_virtual_call_stub() {
assert(_resolve_opt_virtual_call_blob != NULL, "oops");
return _resolve_opt_virtual_call_blob->entry_point();
}
@ -253,7 +253,7 @@ class SharedRuntime: AllStatic {
// bytecode tracing is only used by the TraceBytecodes
static intptr_t trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2) PRODUCT_RETURN0;
static oop retrieve_receiver( Symbol* sig, frame caller );
static oop retrieve_receiver(Symbol* sig, frame caller);
static void register_finalizer(JavaThread* thread, oopDesc* obj);
@ -446,8 +446,8 @@ class SharedRuntime: AllStatic {
static bool is_wide_vector(int size);
// Save and restore a native result
static void save_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots );
static void restore_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots );
static void save_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);
static void restore_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
@ -463,7 +463,7 @@ class SharedRuntime: AllStatic {
int compile_id,
BasicType* sig_bt,
VMRegPair* regs,
BasicType ret_type );
BasicType ret_type);
// Block before entering a JNI critical method
static void block_for_jni_critical(JavaThread* thread);

View File

@ -26,6 +26,7 @@
#include "prims/jni.h"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sharedRuntimeMath.hpp"
// This file contains copies of the fdlibm routines used by
// StrictMath. It turns out that it is almost always required to use
@ -36,35 +37,6 @@
// pointer out to libjava.so in SharedRuntime speeds these routines up
// by roughly 15% on both Win32/x86 and Solaris/SPARC.
// Enabling optimizations in this file causes incorrect code to be
// generated; can not figure out how to turn down optimization for one
// file in the IDE on Windows
#ifdef WIN32
# pragma optimize ( "", off )
#endif
/* The above workaround now causes more problems with the latest MS compiler.
* Visual Studio 2010's /GS option tries to guard against buffer overruns.
* /GS is on by default if you specify optimizations, which we do globally
* via /W3 /O2. However the above selective turning off of optimizations means
* that /GS issues a warning "4748". And since we treat warnings as errors (/WX)
* then the compilation fails. There are several possible solutions
* (1) Remove that pragma above as obsolete with VS2010 - requires testing.
* (2) Stop treating warnings as errors - would be a backward step
* (3) Disable /GS - may help performance but you lose the security checks
* (4) Disable the warning with "#pragma warning( disable : 4748 )"
* (5) Disable planting the code with __declspec(safebuffers)
* I've opted for (5) although we should investigate the local performance
* benefits of (1) and global performance benefit of (3).
*/
#if defined(WIN32) && (defined(_MSC_VER) && (_MSC_VER >= 1600))
#define SAFEBUF __declspec(safebuffers)
#else
#define SAFEBUF
#endif
#include "runtime/sharedRuntimeMath.hpp"
/*
* __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
* double x[],y[]; int e0,nx,prec; int ipio2[];
@ -201,7 +173,7 @@ one = 1.0,
two24B = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
twon24 = 5.96046447753906250000e-08; /* 0x3E700000, 0x00000000 */
static SAFEBUF int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec, const int *ipio2) {
static int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec, const int *ipio2) {
int jz,jx,jv,jp,jk,carry,n,iq[20],i,j,k,m,q0,ih;
double z,fw,f[20],fq[20],q[20];
@ -417,7 +389,7 @@ pio2_2t = 2.02226624879595063154e-21, /* 0x3BA3198A, 0x2E037073 */
pio2_3 = 2.02226624871116645580e-21, /* 0x3BA3198A, 0x2E000000 */
pio2_3t = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
static SAFEBUF int __ieee754_rem_pio2(double x, double *y) {
static int __ieee754_rem_pio2(double x, double *y) {
double z,w,t,r,fn;
double tx[3];
int e0,i,j,nx,n,ix,hx,i0;
@ -916,8 +888,3 @@ JRT_LEAF(jdouble, SharedRuntime::dtan(jdouble x))
-1 -- n odd */
}
JRT_END
#ifdef WIN32
# pragma optimize ( "", on )
#endif

File diff suppressed because it is too large

View File

@ -75,7 +75,7 @@ class ObjectSynchronizer : AllStatic {
// Special internal-use-only method for use by JVM infrastructure
// that needs to wait() on a java-level object but that can't risk
// throwing unexpected InterruptedExecutionExceptions.
static void waitUninterruptibly (Handle obj, jlong Millis, Thread * THREAD) ;
static void waitUninterruptibly(Handle obj, jlong Millis, Thread * THREAD);
// used by classloading to free classloader object lock,
// wait on an internal lock, and reclaim original lock
@ -85,9 +85,9 @@ class ObjectSynchronizer : AllStatic {
// thread-specific and global objectMonitor free list accessors
// static void verifyInUse (Thread * Self) ; too slow for general assert/debug
static ObjectMonitor * omAlloc (Thread * Self) ;
static void omRelease (Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc) ;
static void omFlush (Thread * Self) ;
static ObjectMonitor * omAlloc(Thread * Self);
static void omRelease(Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc);
static void omFlush(Thread * Self);
// Inflate light weight monitor to heavy weight monitor
static ObjectMonitor* inflate(Thread * Self, oop obj);
@ -97,7 +97,7 @@ class ObjectSynchronizer : AllStatic {
// Returns the identity hash value for an oop
// NOTE: It may cause monitor inflation
static intptr_t identity_hash_value_for(Handle obj);
static intptr_t FastHashCode (Thread * Self, oop obj) ;
static intptr_t FastHashCode(Thread * Self, oop obj);
// java.lang.Thread support
static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
@ -124,7 +124,7 @@ class ObjectSynchronizer : AllStatic {
static void verify() PRODUCT_RETURN;
static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
static void RegisterSpinCallback(int(*)(intptr_t, int), intptr_t);
private:
enum { _BLOCKSIZE = 128 };
@ -155,7 +155,7 @@ class ObjectLocker : public StackObj {
// Monitor behavior
void wait (TRAPS) { ObjectSynchronizer::wait (_obj, 0, CHECK); } // wait forever
void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
void waitUninterruptibly (TRAPS) { ObjectSynchronizer::waitUninterruptibly (_obj, 0, CHECK);}
void waitUninterruptibly (TRAPS) { ObjectSynchronizer::waitUninterruptibly (_obj, 0, CHECK); }
// complete_exit gives up lock completely, returning recursion count
// reenter reclaims lock with original recursion count
intptr_t complete_exit(TRAPS) { return ObjectSynchronizer::complete_exit(_obj, CHECK_0); }

View File

@ -225,11 +225,11 @@ Thread::Thread() {
_current_pending_monitor_is_from_java = true;
_current_waiting_monitor = NULL;
_num_nested_signal = 0;
omFreeList = NULL ;
omFreeCount = 0 ;
omFreeProvision = 32 ;
omInUseList = NULL ;
omInUseCount = 0 ;
omFreeList = NULL;
omFreeCount = 0;
omFreeProvision = 32;
omInUseList = NULL;
omInUseCount = 0;
#ifdef ASSERT
_visited_for_critical_count = false;
@ -239,15 +239,15 @@ Thread::Thread() {
_suspend_flags = 0;
// thread-specific hashCode stream generator state - Marsaglia shift-xor form
_hashStateX = os::random() ;
_hashStateY = 842502087 ;
_hashStateZ = 0x8767 ; // (int)(3579807591LL & 0xffff) ;
_hashStateW = 273326509 ;
_hashStateX = os::random();
_hashStateY = 842502087;
_hashStateZ = 0x8767; // (int)(3579807591LL & 0xffff) ;
_hashStateW = 273326509;
_OnTrap = 0 ;
_schedctl = NULL ;
_Stalled = 0 ;
_TypeTag = 0x2BAD ;
_OnTrap = 0;
_schedctl = NULL;
_Stalled = 0;
_TypeTag = 0x2BAD;
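The four _hashStateX.._hashStateW words seeded above are per-thread state for a Marsaglia shift-xor generator, used when the synchronization code needs a fresh identity hash value. Roughly, each draw performs the classic xor128 update below (a sketch of the scheme, not the VM's exact routine):

  #include <cstdint>

  struct HashState { uint32_t x, y, z, w; };  // mirrors _hashStateX.._hashStateW

  static uint32_t next_hash(HashState* s) {
    uint32_t t = s->x;
    t ^= (t << 11);
    s->x = s->y; s->y = s->z; s->z = s->w;
    s->w = (s->w ^ (s->w >> 19)) ^ (t ^ (t >> 8));
    return s->w;
  }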
// Many of the following fields are effectively final - immutable
// Note that nascent threads can't use the Native Monitor-Mutex
@ -256,10 +256,10 @@ Thread::Thread() {
// we might instead use a stack of ParkEvents that we could provision on-demand.
// The stack would act as a cache to avoid calls to ParkEvent::Allocate()
// and ::Release()
_ParkEvent = ParkEvent::Allocate (this) ;
_SleepEvent = ParkEvent::Allocate (this) ;
_MutexEvent = ParkEvent::Allocate (this) ;
_MuxEvent = ParkEvent::Allocate (this) ;
_ParkEvent = ParkEvent::Allocate(this);
_SleepEvent = ParkEvent::Allocate(this);
_MutexEvent = ParkEvent::Allocate(this);
_MuxEvent = ParkEvent::Allocate(this);
#ifdef CHECK_UNHANDLED_OOPS
if (CheckUnhandledOops) {
@ -314,7 +314,7 @@ void Thread::record_stack_base_and_size() {
Thread::~Thread() {
// Reclaim the objectmonitors from the omFreeList of the moribund thread.
ObjectSynchronizer::omFlush (this) ;
ObjectSynchronizer::omFlush(this);
EVENT_THREAD_DESTRUCT(this);
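The destructor's first act above is ObjectSynchronizer::omFlush(this), which hands the dying thread's private omFreeList of ObjectMonitors back to a global pool so they can be reused. The general shape of such a per-thread cache with a global fallback looks roughly like this (a hypothetical illustration, not HotSpot's actual lists or locking):

  #include <cstddef>

  struct Monitor { Monitor* next; };

  struct ThreadCache {                 // stands in for omFreeList / omFreeCount
    Monitor* free_list;
    int      free_count;
  };

  static Monitor* global_free_list;    // a real implementation guards this with a lock

  // On thread exit, splice the thread-local monitors back onto the global
  // list so other threads can allocate them again.
  static void flush(ThreadCache* t) {
    while (t->free_list != NULL) {
      Monitor* m = t->free_list;
      t->free_list = m->next;
      m->next = global_free_list;
      global_free_list = m;
      t->free_count--;
    }
  }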
@ -342,10 +342,10 @@ Thread::~Thread() {
// It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
// We NULL out the fields for good hygiene.
ParkEvent::Release (_ParkEvent) ; _ParkEvent = NULL ;
ParkEvent::Release (_SleepEvent) ; _SleepEvent = NULL ;
ParkEvent::Release (_MutexEvent) ; _MutexEvent = NULL ;
ParkEvent::Release (_MuxEvent) ; _MuxEvent = NULL ;
ParkEvent::Release(_ParkEvent); _ParkEvent = NULL;
ParkEvent::Release(_SleepEvent); _SleepEvent = NULL;
ParkEvent::Release(_MutexEvent); _MutexEvent = NULL;
ParkEvent::Release(_MuxEvent); _MuxEvent = NULL;
delete handle_area();
delete metadata_handles();
@ -844,7 +844,7 @@ void Thread::print_on(outputStream* st) const {
// Thread::print_on_error() is called by fatal error handler. Don't use
// any lock or allocate memory.
void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
if (is_VM_thread()) st->print("VMThread");
if (is_VM_thread()) st->print("VMThread");
else if (is_Compiler_thread()) st->print("CompilerThread");
else if (is_Java_thread()) st->print("JavaThread");
else if (is_GC_task_thread()) st->print("GCTaskThread");
@ -867,7 +867,7 @@ void Thread::print_owned_locks_on(outputStream* st) const {
st->print(" (no locks) ");
} else {
st->print_cr(" Locks owned:");
while(cur) {
while (cur) {
cur->print_on(st);
cur = cur->next();
}
@ -877,7 +877,7 @@ void Thread::print_owned_locks_on(outputStream* st) const {
static int ref_use_count = 0;
bool Thread::owns_locks_but_compiled_lock() const {
for(Monitor *cur = _owned_locks; cur; cur = cur->next()) {
for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
if (cur != Compile_lock) return true;
}
return false;
@ -904,12 +904,12 @@ void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
&& !Universe::is_bootstrapping()) {
// Make sure we do not hold any locks that the VM thread also uses.
// This could potentially lead to deadlocks
for(Monitor *cur = _owned_locks; cur; cur = cur->next()) {
for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
// Threads_lock is special, since the safepoint synchronization will not start before this is
// acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
// since it is used to transfer control between JavaThreads and the VMThread
// Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
if ( (cur->allow_vm_block() &&
if ((cur->allow_vm_block() &&
cur != Threads_lock &&
cur != Compile_lock && // Temporary: should not be necessary when we get separate compilation
cur != VMOperationRequest_lock &&
@ -1291,9 +1291,9 @@ void WatcherThread::run() {
this->record_stack_base_and_size();
this->initialize_thread_local_storage();
this->set_active_handles(JNIHandleBlock::allocate_block());
while(!_should_terminate) {
assert(watcher_thread() == Thread::current(), "thread consistency check");
assert(watcher_thread() == this, "thread consistency check");
while (!_should_terminate) {
assert(watcher_thread() == Thread::current(), "thread consistency check");
assert(watcher_thread() == this, "thread consistency check");
// Calculate how long it'll be until the next PeriodicTask work
// should be done, and sleep that amount of time.
@ -1370,7 +1370,7 @@ void WatcherThread::stop() {
// it is ok to take late safepoints here, if needed
MutexLocker mu(Terminator_lock);
while(watcher_thread() != NULL) {
while (watcher_thread() != NULL) {
// This wait should make safepoint checks, wait without a timeout,
// and wait as a suspend-equivalent condition.
//
@ -1448,13 +1448,14 @@ void JavaThread::initialize() {
_thread_stat = new ThreadStatistics();
_blocked_on_compilation = false;
_jni_active_critical = 0;
_pending_jni_exception_check_fn = NULL;
_do_not_unlock_if_synchronized = false;
_cached_monitor_info = NULL;
_parker = Parker::Allocate(this) ;
_parker = Parker::Allocate(this);
#ifndef PRODUCT
_jmp_ring_index = 0;
for (int ji = 0 ; ji < jump_ring_buffer_size ; ji++ ) {
for (int ji = 0; ji < jump_ring_buffer_size; ji++) {
record_jump(NULL, NULL, NULL, 0);
}
#endif /* PRODUCT */
@ -1591,7 +1592,7 @@ JavaThread::~JavaThread() {
// JSR166 -- return the parker to the free list
Parker::Release(_parker);
_parker = NULL ;
_parker = NULL;
// Free any remaining previous UnrollBlock
vframeArray* old_array = vframe_array_last();
@ -1717,7 +1718,7 @@ static void ensure_join(JavaThread* thread) {
// For any new cleanup additions, please check to see if they need to be applied to
// cleanup_failed_attach_current_thread as well.
void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
assert(this == JavaThread::current(), "thread consistency check");
assert(this == JavaThread::current(), "thread consistency check");
HandleMark hm(this);
Handle uncaught_exception(this, this->pending_exception());
@ -1738,55 +1739,26 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
CLEAR_PENDING_EXCEPTION;
}
// FIXIT: The is_null check is only so it works better on JDK1.2 VM's. This
// has to be fixed by a runtime query method
if (!destroy_vm || JDK_Version::is_jdk12x_version()) {
// JSR-166: change call from from ThreadGroup.uncaughtException to
// java.lang.Thread.dispatchUncaughtException
if (!destroy_vm) {
if (uncaught_exception.not_null()) {
Handle group(this, java_lang_Thread::threadGroup(threadObj()));
{
EXCEPTION_MARK;
// Check if the method Thread.dispatchUncaughtException() exists. If so
// call it. Otherwise we have an older library without the JSR-166 changes,
// so call ThreadGroup.uncaughtException()
KlassHandle recvrKlass(THREAD, threadObj->klass());
CallInfo callinfo;
KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
LinkResolver::resolve_virtual_call(callinfo, threadObj, recvrKlass, thread_klass,
vmSymbols::dispatchUncaughtException_name(),
vmSymbols::throwable_void_signature(),
KlassHandle(), false, false, THREAD);
EXCEPTION_MARK;
// Call method Thread.dispatchUncaughtException().
KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
JavaValue result(T_VOID);
JavaCalls::call_virtual(&result,
threadObj, thread_klass,
vmSymbols::dispatchUncaughtException_name(),
vmSymbols::throwable_void_signature(),
uncaught_exception,
THREAD);
if (HAS_PENDING_EXCEPTION) {
ResourceMark rm(this);
jio_fprintf(defaultStream::error_stream(),
"\nException: %s thrown from the UncaughtExceptionHandler"
" in thread \"%s\"\n",
pending_exception()->klass()->external_name(),
get_thread_name());
CLEAR_PENDING_EXCEPTION;
methodHandle method = callinfo.selected_method();
if (method.not_null()) {
JavaValue result(T_VOID);
JavaCalls::call_virtual(&result,
threadObj, thread_klass,
vmSymbols::dispatchUncaughtException_name(),
vmSymbols::throwable_void_signature(),
uncaught_exception,
THREAD);
} else {
KlassHandle thread_group(THREAD, SystemDictionary::ThreadGroup_klass());
JavaValue result(T_VOID);
JavaCalls::call_virtual(&result,
group, thread_group,
vmSymbols::uncaughtException_name(),
vmSymbols::thread_throwable_void_signature(),
threadObj, // Arg 1
uncaught_exception, // Arg 2
THREAD);
}
if (HAS_PENDING_EXCEPTION) {
ResourceMark rm(this);
jio_fprintf(defaultStream::error_stream(),
"\nException: %s thrown from the UncaughtExceptionHandler"
" in thread \"%s\"\n",
pending_exception()->klass()->external_name(),
get_thread_name());
CLEAR_PENDING_EXCEPTION;
}
}
}
@ -2086,7 +2058,7 @@ void JavaThread::check_and_handle_async_exceptions(bool check_unsafe_error) {
if (TraceExceptions) {
ResourceMark rm;
tty->print("Async. exception installed at runtime exit (" INTPTR_FORMAT ")", this);
if (has_last_Java_frame() ) {
if (has_last_Java_frame()) {
frame f = last_frame();
tty->print(" (pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " )", f.pc(), f.sp());
}
@ -2330,11 +2302,11 @@ int JavaThread::java_suspend_self() {
void JavaThread::verify_not_published() {
if (!Threads_lock->owned_by_self()) {
MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
assert( !Threads::includes(this),
assert(!Threads::includes(this),
"java thread shouldn't have been published yet!");
}
else {
assert( !Threads::includes(this),
assert(!Threads::includes(this),
"java thread shouldn't have been published yet!");
}
}
@ -2403,7 +2375,7 @@ void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread
thread->clear_deopt_suspend();
RegisterMap map(thread, false);
frame f = thread->last_frame();
while ( f.id() != thread->must_deopt_id() && ! f.is_first_frame()) {
while (f.id() != thread->must_deopt_id() && ! f.is_first_frame()) {
f = f.sender(&map);
}
if (f.id() == thread->must_deopt_id()) {
@ -2527,8 +2499,8 @@ void JavaThread::enable_stack_yellow_zone() {
// We need to adjust it to work correctly with guard_memory()
address base = stack_yellow_zone_base() - stack_yellow_zone_size();
guarantee(base < stack_base(),"Error calculating stack yellow zone");
guarantee(base < os::current_stack_pointer(),"Error calculating stack yellow zone");
guarantee(base < stack_base(), "Error calculating stack yellow zone");
guarantee(base < os::current_stack_pointer(), "Error calculating stack yellow zone");
if (os::guard_memory((char *) base, stack_yellow_zone_size())) {
_stack_guard_state = stack_guard_enabled;
@ -2563,10 +2535,10 @@ void JavaThread::enable_stack_red_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
address base = stack_red_zone_base() - stack_red_zone_size();
guarantee(base < stack_base(),"Error calculating stack red zone");
guarantee(base < os::current_stack_pointer(),"Error calculating stack red zone");
guarantee(base < stack_base(), "Error calculating stack red zone");
guarantee(base < os::current_stack_pointer(), "Error calculating stack red zone");
if(!os::guard_memory((char *) base, stack_red_zone_size())) {
if (!os::guard_memory((char *) base, stack_red_zone_size())) {
warning("Attempt to guard stack red zone failed.");
}
}
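Both enable_stack_yellow_zone() and enable_stack_red_zone() compute the base of a zone just below the usable stack and ask os::guard_memory() to make those pages inaccessible, so an overflowing stack faults at a well-defined spot instead of silently corrupting memory below it. On POSIX-like platforms such a guard is typically a thin wrapper around mprotect; a hedged sketch of that idea (not the VM's actual os::guard_memory implementation):

  #include <sys/mman.h>
  #include <cstddef>

  // Revoke all access to [addr, addr + bytes); addr must be page-aligned.
  // Any subsequent touch of the range raises SIGSEGV.
  static bool guard_pages(char* addr, size_t bytes) {
    return ::mprotect(addr, bytes, PROT_NONE) == 0;
  }

  // Restore normal access once the guard zone is disabled again.
  static bool unguard_pages(char* addr, size_t bytes) {
    return ::mprotect(addr, bytes, PROT_READ | PROT_WRITE) == 0;
  }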
@ -2585,7 +2557,7 @@ void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) {
// ignore is there is no stack
if (!has_last_Java_frame()) return;
// traverse the stack frames. Starts from top frame.
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
frame* fr = fst.current();
f(fr, fst.register_map());
}
@ -2601,8 +2573,8 @@ void JavaThread::deoptimize() {
bool deopt = false; // Dump stack only if a deopt actually happens.
bool only_at = strlen(DeoptimizeOnlyAt) > 0;
// Iterate over all frames in the thread and deoptimize
for(; !fst.is_done(); fst.next()) {
if(fst.current()->can_be_deoptimized()) {
for (; !fst.is_done(); fst.next()) {
if (fst.current()->can_be_deoptimized()) {
if (only_at) {
// Deoptimize only at particular bcis. DeoptimizeOnlyAt
@ -2647,7 +2619,7 @@ void JavaThread::deoptimize() {
// Make zombies
void JavaThread::make_zombies() {
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
if (fst.current()->can_be_deoptimized()) {
// it is a Java nmethod
nmethod* nm = CodeCache::find_nmethod(fst.current()->pc());
@ -2662,7 +2634,7 @@ void JavaThread::deoptimized_wrt_marked_nmethods() {
if (!has_last_Java_frame()) return;
// BiasedLocking needs an updated RegisterMap for the revoke monitors pass
StackFrameStream fst(this, UseBiasedLocking);
for(; !fst.is_done(); fst.next()) {
for (; !fst.is_done(); fst.next()) {
if (fst.current()->should_be_deoptimized()) {
if (LogCompilation && xtty != NULL) {
nmethod* nm = fst.current()->cb()->as_nmethod_or_null();
@ -2722,7 +2694,7 @@ void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf)
// Traverse the GCHandles
Thread::oops_do(f, cld_f, cf);
assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
assert((!has_last_Java_frame() && java_call_counter() == 0) ||
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
if (has_last_Java_frame()) {
@ -2747,7 +2719,7 @@ void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf)
}
// Traverse the execution stack
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
fst.current()->oops_do(f, cld_f, cf, fst.register_map());
}
}
@ -2782,12 +2754,12 @@ void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf)
void JavaThread::nmethods_do(CodeBlobClosure* cf) {
Thread::nmethods_do(cf); // (super method is a no-op)
assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
assert((!has_last_Java_frame() && java_call_counter() == 0) ||
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
if (has_last_Java_frame()) {
// Traverse the execution stack
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
fst.current()->nmethods_do(cf);
}
}
@ -2797,7 +2769,7 @@ void JavaThread::metadata_do(void f(Metadata*)) {
Thread::metadata_do(f);
if (has_last_Java_frame()) {
// Traverse the execution stack to call f() on the methods in the stack
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
fst.current()->metadata_do(f);
}
} else if (is_Compiler_thread()) {
@ -2848,7 +2820,7 @@ void JavaThread::print_on(outputStream *st) const {
Thread::print_on(st);
// print guess for valid stack memory region (assume 4K pages); helps lock debugging
st->print_cr("[" INTPTR_FORMAT "]", (intptr_t)last_Java_sp() & ~right_n_bits(12));
if (thread_oop != NULL && JDK_Version::is_gte_jdk15x_version()) {
if (thread_oop != NULL) {
st->print_cr(" java.lang.Thread.State: %s", java_lang_Thread::thread_status_name(thread_oop));
}
#ifndef PRODUCT
@ -2860,7 +2832,7 @@ void JavaThread::print_on(outputStream *st) const {
// Called by fatal error handler. The difference between this and
// JavaThread::print() is that we can't grab lock or allocate memory.
void JavaThread::print_on_error(outputStream* st, char *buf, int buflen) const {
st->print("JavaThread \"%s\"", get_thread_name_string(buf, buflen));
st->print("JavaThread \"%s\"", get_thread_name_string(buf, buflen));
oop thread_obj = threadObj();
if (thread_obj != NULL) {
if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon");
@ -3043,7 +3015,7 @@ void JavaThread::print_stack_on(outputStream* st) {
RegisterMap reg_map(this);
vframe* start_vf = last_java_vframe(&reg_map);
int count = 0;
for (vframe* f = start_vf; f; f = f->sender() ) {
for (vframe* f = start_vf; f; f = f->sender()) {
if (f->is_java_frame()) {
javaVFrame* jvf = javaVFrame::cast(f);
java_lang_Throwable::print_stack_element(st, jvf->method(), jvf->bci());
@ -3099,9 +3071,9 @@ void JavaThread::popframe_free_preserved_args() {
void JavaThread::trace_frames() {
tty->print_cr("[Describe stack]");
int frame_no = 1;
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
tty->print(" %d. ", frame_no++);
fst.current()->print_value_on(tty,this);
fst.current()->print_value_on(tty, this);
tty->cr();
}
}
@ -3152,7 +3124,7 @@ void JavaThread::print_frame_layout(int depth, bool validate_only) {
PRESERVE_EXCEPTION_MARK;
FrameValues values;
int frame_no = 0;
for(StackFrameStream fst(this, false); !fst.is_done(); fst.next()) {
for (StackFrameStream fst(this, false); !fst.is_done(); fst.next()) {
fst.current()->describe(values, ++frame_no);
if (depth == frame_no) break;
}
@ -3168,7 +3140,7 @@ void JavaThread::print_frame_layout(int depth, bool validate_only) {
void JavaThread::trace_stack_from(vframe* start_vf) {
ResourceMark rm;
int vframe_no = 1;
for (vframe* f = start_vf; f; f = f->sender() ) {
for (vframe* f = start_vf; f; f = f->sender()) {
if (f->is_java_frame()) {
javaVFrame::cast(f)->print_activation(vframe_no++);
} else {
@ -3197,7 +3169,7 @@ void JavaThread::trace_stack() {
javaVFrame* JavaThread::last_java_vframe(RegisterMap *reg_map) {
assert(reg_map != NULL, "a map must be given");
frame f = last_frame();
for (vframe* vf = vframe::new_vframe(&f, reg_map, this); vf; vf = vf->sender() ) {
for (vframe* vf = vframe::new_vframe(&f, reg_map, this); vf; vf = vf->sender()) {
if (vf->is_java_frame()) return javaVFrame::cast(vf);
}
return NULL;
@ -3319,7 +3291,7 @@ void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
// The VM preresolves methods to these classes. Make sure that they get initialized
initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK);
initialize_class(vmSymbols::java_lang_ref_Finalizer(), CHECK);
initialize_class(vmSymbols::java_lang_ref_Finalizer(), CHECK);
call_initializeSystemClass(CHECK);
// get the Java runtime name after java.lang.System is initialized
@ -3453,7 +3425,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
main_thread->create_stack_guard_pages();
// Initialize Java-Level synchronization subsystem
ObjectMonitor::Initialize() ;
ObjectMonitor::Initialize();
// Second phase of bootstrapping, VM is about entering multi-thread mode
MemTracker::bootstrap_multi_thread();
@ -3501,7 +3473,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
}
}
assert (Universe::is_fully_initialized(), "not initialized");
assert(Universe::is_fully_initialized(), "not initialized");
if (VerifyDuringStartup) {
// Make sure we're starting with a clean slate.
VM_Verify verify_op;
@ -3927,7 +3899,7 @@ bool Threads::destroy_vm() {
#endif
// Wait until we are the last non-daemon thread to execute
{ MutexLocker nu(Threads_lock);
while (Threads::number_of_non_daemon_threads() > 1 )
while (Threads::number_of_non_daemon_threads() > 1)
// This wait should make safepoint checks, wait without a timeout,
// and wait as a suspend-equivalent condition.
//
@ -3948,15 +3920,8 @@ bool Threads::destroy_vm() {
}
os::wait_for_keypress_at_exit();
if (JDK_Version::is_jdk12x_version()) {
// We are the last thread running, so check if finalizers should be run.
// For 1.3 or later this is done in thread->invoke_shutdown_hooks()
HandleMark rm(thread);
Universe::run_finalizers_on_exit();
} else {
// run Java level shutdown hooks
thread->invoke_shutdown_hooks();
}
// run Java level shutdown hooks
thread->invoke_shutdown_hooks();
before_exit(thread);
@ -3968,14 +3933,8 @@ bool Threads::destroy_vm() {
// GC vm_operations can get caught at the safepoint, and the
// heap is unparseable if they are caught. Grab the Heap_lock
// to prevent this. The GC vm_operations will not be able to
// queue until after the vm thread is dead.
// After this point, we'll never emerge out of the safepoint before
// the VM exits, so concurrent GC threads do not need to be explicitly
// stopped; they remain inactive until the process exits.
// Note: some concurrent G1 threads may be running during a safepoint,
// but these will not be accessing the heap, just some G1-specific side
// data structures that are not accessed by any other threads but them
// after this point in a terminal safepoint.
// queue until after the vm thread is dead. After this point,
// we'll never emerge out of the safepoint before the VM exits.
MutexLocker ml(Heap_lock);
@ -4113,7 +4072,7 @@ void Threads::remove(JavaThread* p) {
bool Threads::includes(JavaThread* p) {
assert(Threads_lock->is_locked(), "sanity check");
ALL_JAVA_THREADS(q) {
if (q == p ) {
if (q == p) {
return true;
}
}
@ -4398,43 +4357,43 @@ void Threads::print_on_error(outputStream* st, Thread* current, char* buf, int b
// cache-coherency traffic.
typedef volatile int SpinLockT ;
typedef volatile int SpinLockT;
void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
if (Atomic::cmpxchg (1, adr, 0) == 0) {
return ; // normal fast-path return
return; // normal fast-path return
}
// Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
TEVENT (SpinAcquire - ctx) ;
int ctr = 0 ;
int Yields = 0 ;
TEVENT(SpinAcquire - ctx);
int ctr = 0;
int Yields = 0;
for (;;) {
while (*adr != 0) {
++ctr ;
++ctr;
if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
if (Yields > 5) {
os::naked_short_sleep(1);
} else {
os::NakedYield() ;
++Yields ;
os::NakedYield();
++Yields;
}
} else {
SpinPause() ;
SpinPause();
}
}
if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
if (Atomic::cmpxchg(1, adr, 0) == 0) return;
}
}
void Thread::SpinRelease (volatile int * adr) {
assert (*adr != 0, "invariant") ;
OrderAccess::fence() ; // guarantee at least release consistency.
assert(*adr != 0, "invariant");
OrderAccess::fence(); // guarantee at least release consistency.
// Roach-motel semantics.
// It's safe if subsequent LDs and STs float "up" into the critical section,
// but prior LDs and STs within the critical section can't be allowed
// to reorder or float past the ST that releases the lock.
*adr = 0 ;
*adr = 0;
}
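SpinAcquire's contention handling above (a brief spin, occasional yields, then a short sleep) can be sketched with java.util.concurrent primitives. The following is an illustration of the same shape, not HotSpot code; the class name and thresholds are arbitrary:

import java.util.concurrent.atomic.AtomicInteger;

final class SpinYieldLock {
    private final AtomicInteger state = new AtomicInteger(0);

    void acquire() throws InterruptedException {
        if (state.compareAndSet(0, 1)) return;      // fast path, like the leading cmpxchg
        int spins = 0, yields = 0;
        for (;;) {
            while (state.get() != 0) {
                if ((++spins & 0xFFF) == 0) {       // back off occasionally
                    if (yields > 5) {
                        Thread.sleep(1);            // roughly os::naked_short_sleep(1)
                    } else {
                        Thread.yield();             // roughly os::NakedYield()
                        yields++;
                    }
                } else {
                    Thread.onSpinWait();            // roughly SpinPause(); Java 9+
                }
            }
            if (state.compareAndSet(0, 1)) return;
        }
    }

    void release() {
        state.set(0);                               // volatile store gives release semantics
    }
}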
// muxAcquire and muxRelease:
@ -4487,111 +4446,111 @@ void Thread::SpinRelease (volatile int * adr) {
//
typedef volatile intptr_t MutexT ; // Mux Lock-word
enum MuxBits { LOCKBIT = 1 } ;
typedef volatile intptr_t MutexT; // Mux Lock-word
enum MuxBits { LOCKBIT = 1 };
void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
if (w == 0) return ;
intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0);
if (w == 0) return;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
return ;
return;
}
TEVENT (muxAcquire - Contention) ;
ParkEvent * const Self = Thread::current()->_MuxEvent ;
assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
TEVENT(muxAcquire - Contention);
ParkEvent * const Self = Thread::current()->_MuxEvent;
assert((intptr_t(Self) & LOCKBIT) == 0, "invariant");
for (;;) {
int its = (os::is_MP() ? 100 : 0) + 1 ;
int its = (os::is_MP() ? 100 : 0) + 1;
// Optional spin phase: spin-then-park strategy
while (--its >= 0) {
w = *Lock ;
w = *Lock;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
return ;
return;
}
}
Self->reset() ;
Self->OnList = intptr_t(Lock) ;
Self->reset();
Self->OnList = intptr_t(Lock);
// The following fence() isn't _strictly necessary as the subsequent
// CAS() both serializes execution and ratifies the fetched *Lock value.
OrderAccess::fence();
for (;;) {
w = *Lock ;
w = *Lock;
if ((w & LOCKBIT) == 0) {
if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
Self->OnList = 0 ; // hygiene - allows stronger asserts
return ;
Self->OnList = 0; // hygiene - allows stronger asserts
return;
}
continue ; // Interference -- *Lock changed -- Just retry
continue; // Interference -- *Lock changed -- Just retry
}
assert (w & LOCKBIT, "invariant") ;
Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
assert(w & LOCKBIT, "invariant");
Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
}
while (Self->OnList != 0) {
Self->park() ;
Self->park();
}
}
}
void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
if (w == 0) return ;
intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0);
if (w == 0) return;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
return ;
return;
}
TEVENT (muxAcquire - Contention) ;
ParkEvent * ReleaseAfter = NULL ;
TEVENT(muxAcquire - Contention);
ParkEvent * ReleaseAfter = NULL;
if (ev == NULL) {
ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
ev = ReleaseAfter = ParkEvent::Allocate(NULL);
}
assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
assert((intptr_t(ev) & LOCKBIT) == 0, "invariant");
for (;;) {
guarantee (ev->OnList == 0, "invariant") ;
int its = (os::is_MP() ? 100 : 0) + 1 ;
guarantee(ev->OnList == 0, "invariant");
int its = (os::is_MP() ? 100 : 0) + 1;
// Optional spin phase: spin-then-park strategy
while (--its >= 0) {
w = *Lock ;
w = *Lock;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
if (ReleaseAfter != NULL) {
ParkEvent::Release (ReleaseAfter) ;
ParkEvent::Release(ReleaseAfter);
}
return ;
return;
}
}
ev->reset() ;
ev->OnList = intptr_t(Lock) ;
ev->reset();
ev->OnList = intptr_t(Lock);
// The following fence() isn't _strictly necessary as the subsequent
// CAS() both serializes execution and ratifies the fetched *Lock value.
OrderAccess::fence();
for (;;) {
w = *Lock ;
w = *Lock;
if ((w & LOCKBIT) == 0) {
if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
ev->OnList = 0 ;
ev->OnList = 0;
// We call ::Release while holding the outer lock, thus
// artificially lengthening the critical section.
// Consider deferring the ::Release() until the subsequent unlock(),
// after we've dropped the outer lock.
if (ReleaseAfter != NULL) {
ParkEvent::Release (ReleaseAfter) ;
ParkEvent::Release(ReleaseAfter);
}
return ;
return;
}
continue ; // Interference -- *Lock changed -- Just retry
continue; // Interference -- *Lock changed -- Just retry
}
assert (w & LOCKBIT, "invariant") ;
ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
assert(w & LOCKBIT, "invariant");
ev->ListNext = (ParkEvent *) (w & ~LOCKBIT);
if (Atomic::cmpxchg_ptr(intptr_t(ev)|LOCKBIT, Lock, w) == w) break;
}
while (ev->OnList != 0) {
ev->park() ;
ev->park();
}
}
}
@ -4618,22 +4577,22 @@ void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
void Thread::muxRelease (volatile intptr_t * Lock) {
for (;;) {
const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
assert (w & LOCKBIT, "invariant") ;
if (w == LOCKBIT) return ;
ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
assert (List != NULL, "invariant") ;
assert (List->OnList == intptr_t(Lock), "invariant") ;
ParkEvent * nxt = List->ListNext ;
const intptr_t w = Atomic::cmpxchg_ptr(0, Lock, LOCKBIT);
assert(w & LOCKBIT, "invariant");
if (w == LOCKBIT) return;
ParkEvent * List = (ParkEvent *)(w & ~LOCKBIT);
assert(List != NULL, "invariant");
assert(List->OnList == intptr_t(Lock), "invariant");
ParkEvent * nxt = List->ListNext;
// The following CAS() releases the lock and pops the head element.
if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
continue ;
continue;
}
List->OnList = 0 ;
OrderAccess::fence() ;
List->unpark () ;
return ;
List->OnList = 0;
OrderAccess::fence();
List->unpark();
return;
}
}
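muxAcquire/muxRelease combine a bounded spin with parking on a per-thread event chained off the LOCKBIT-tagged lock word. A rough sketch of the same spin-then-park idea, using LockSupport and an explicit waiter queue instead of the intrusive list; this is a simplification for illustration only and is not equivalent to the HotSpot code:

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.LockSupport;

final class SpinThenParkLock {
    private final AtomicBoolean locked = new AtomicBoolean(false);
    private final ConcurrentLinkedQueue<Thread> waiters = new ConcurrentLinkedQueue<>();

    void acquire() {
        for (int its = 0; its < 100; its++) {          // optional spin phase, like "its"
            if (locked.compareAndSet(false, true)) return;
        }
        Thread self = Thread.currentThread();
        waiters.add(self);                             // publish ourselves, then re-check
        while (!locked.compareAndSet(false, true)) {
            LockSupport.park(this);                    // roughly ParkEvent::park()
        }
        waiters.remove(self);                          // leave the queue only once we own the lock
    }

    void release() {
        locked.set(false);                             // drop the lock first
        Thread next = waiters.peek();
        if (next != null) {
            LockSupport.unpark(next);                  // roughly List->unpark()
        }
    }
}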

View File

@ -244,7 +244,7 @@ class Thread: public ThreadShadow {
// The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
//
NOT_PRODUCT(int _allow_safepoint_count;) // If 0, the thread is allowed to reach a safepoint
debug_only (int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops.
debug_only(int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops.
// Used by SkipGCALot class.
NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
@ -593,12 +593,12 @@ public:
bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
// Code generation
static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file ); }
static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line ); }
static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles ); }
static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file); }
static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line); }
static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles); }
static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base ); }
static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size ); }
static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
#define TLAB_FIELD_OFFSET(name) \
static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
@ -615,35 +615,35 @@ public:
#undef TLAB_FIELD_OFFSET
static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes ); }
static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes); }
public:
volatile intptr_t _Stalled ;
volatile int _TypeTag ;
ParkEvent * _ParkEvent ; // for synchronized()
ParkEvent * _SleepEvent ; // for Thread.sleep
ParkEvent * _MutexEvent ; // for native internal Mutex/Monitor
ParkEvent * _MuxEvent ; // for low-level muxAcquire-muxRelease
int NativeSyncRecursion ; // diagnostic
volatile intptr_t _Stalled;
volatile int _TypeTag;
ParkEvent * _ParkEvent; // for synchronized()
ParkEvent * _SleepEvent; // for Thread.sleep
ParkEvent * _MutexEvent; // for native internal Mutex/Monitor
ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease
int NativeSyncRecursion; // diagnostic
volatile int _OnTrap ; // Resume-at IP delta
jint _hashStateW ; // Marsaglia Shift-XOR thread-local RNG
jint _hashStateX ; // thread-specific hashCode generator state
jint _hashStateY ;
jint _hashStateZ ;
void * _schedctl ;
volatile int _OnTrap; // Resume-at IP delta
jint _hashStateW; // Marsaglia Shift-XOR thread-local RNG
jint _hashStateX; // thread-specific hashCode generator state
jint _hashStateY;
jint _hashStateZ;
void * _schedctl;
volatile jint rng [4] ; // RNG for spin loop
volatile jint rng[4]; // RNG for spin loop
// Low-level leaf-lock primitives used to implement synchronization
// and native monitor-mutex infrastructure.
// Not for general synchronization use.
static void SpinAcquire (volatile int * Lock, const char * Name) ;
static void SpinRelease (volatile int * Lock) ;
static void muxAcquire (volatile intptr_t * Lock, const char * Name) ;
static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ;
static void muxRelease (volatile intptr_t * Lock) ;
static void SpinAcquire(volatile int * Lock, const char * Name);
static void SpinRelease(volatile int * Lock);
static void muxAcquire(volatile intptr_t * Lock, const char * Name);
static void muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev);
static void muxRelease(volatile intptr_t * Lock);
};
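The _hashStateX.._hashStateW fields above hold a Marsaglia shift-XOR (xorshift) generator used to randomize spin loops and hash codes. The core step, sketched as standalone Java; the seed values here are Marsaglia's published defaults, not necessarily what HotSpot seeds with:

final class XorShift128 {
    // four 32-bit state words, in the role of _hashStateX/Y/Z/W
    private int x = 123456789, y = 362436069, z = 521288629, w = 88675123;

    int next() {
        int t = x ^ (x << 11);
        x = y; y = z; z = w;
        w = w ^ (w >>> 19) ^ (t ^ (t >>> 8));
        return w;
    }

    public static void main(String[] args) {
        XorShift128 rng = new XorShift128();
        for (int i = 0; i < 5; i++) {
            System.out.println(rng.next());
        }
    }
}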
// Inline implementation of Thread::current()
@ -915,6 +915,9 @@ class JavaThread: public Thread {
// support for JNI critical regions
jint _jni_active_critical; // count of entries into JNI critical region
// Checked JNI: function name requires exception check
char* _pending_jni_exception_check_fn;
// For deadlock detection.
int _depth_first_number;
@ -930,7 +933,7 @@ class JavaThread: public Thread {
intptr_t _instruction;
const char* _file;
int _line;
} _jmp_ring[ jump_ring_buffer_size ];
} _jmp_ring[jump_ring_buffer_size];
#endif /* PRODUCT */
#if INCLUDE_ALL_GCS
@ -1333,34 +1336,34 @@ class JavaThread: public Thread {
#endif /* PRODUCT */
// For assembly stub generation
static ByteSize threadObj_offset() { return byte_offset_of(JavaThread, _threadObj ); }
static ByteSize threadObj_offset() { return byte_offset_of(JavaThread, _threadObj); }
#ifndef PRODUCT
static ByteSize jmp_ring_index_offset() { return byte_offset_of(JavaThread, _jmp_ring_index ); }
static ByteSize jmp_ring_offset() { return byte_offset_of(JavaThread, _jmp_ring ); }
static ByteSize jmp_ring_index_offset() { return byte_offset_of(JavaThread, _jmp_ring_index); }
static ByteSize jmp_ring_offset() { return byte_offset_of(JavaThread, _jmp_ring); }
#endif /* PRODUCT */
static ByteSize jni_environment_offset() { return byte_offset_of(JavaThread, _jni_environment ); }
static ByteSize last_Java_sp_offset() {
static ByteSize jni_environment_offset() { return byte_offset_of(JavaThread, _jni_environment); }
static ByteSize last_Java_sp_offset() {
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
}
static ByteSize last_Java_pc_offset() {
static ByteSize last_Java_pc_offset() {
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
}
static ByteSize frame_anchor_offset() {
static ByteSize frame_anchor_offset() {
return byte_offset_of(JavaThread, _anchor);
}
static ByteSize callee_target_offset() { return byte_offset_of(JavaThread, _callee_target ); }
static ByteSize vm_result_offset() { return byte_offset_of(JavaThread, _vm_result ); }
static ByteSize vm_result_2_offset() { return byte_offset_of(JavaThread, _vm_result_2 ); }
static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state ); }
static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc ); }
static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread ); }
static ByteSize exception_oop_offset() { return byte_offset_of(JavaThread, _exception_oop ); }
static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc ); }
static ByteSize callee_target_offset() { return byte_offset_of(JavaThread, _callee_target); }
static ByteSize vm_result_offset() { return byte_offset_of(JavaThread, _vm_result); }
static ByteSize vm_result_2_offset() { return byte_offset_of(JavaThread, _vm_result_2); }
static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state); }
static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc); }
static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread); }
static ByteSize exception_oop_offset() { return byte_offset_of(JavaThread, _exception_oop); }
static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc); }
static ByteSize exception_handler_pc_offset() { return byte_offset_of(JavaThread, _exception_handler_pc); }
static ByteSize stack_overflow_limit_offset() { return byte_offset_of(JavaThread, _stack_overflow_limit); }
static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state ); }
static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags ); }
static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state); }
static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags); }
static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
static ByteSize should_post_on_exceptions_flag_offset() {
@ -1400,6 +1403,12 @@ class JavaThread: public Thread {
assert(_jni_active_critical >= 0,
"JNI critical nesting problem?"); }
// Checked JNI, is the programmer required to check for exceptions, specify which function name
bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; }
const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; }
void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; }
// For deadlock detection
int depth_first_number() { return _depth_first_number; }
void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
@ -1413,7 +1422,7 @@ class JavaThread: public Thread {
void remove_monitor_chunk(MonitorChunk* chunk);
bool in_deopt_handler() const { return _in_deopt_handler > 0; }
void inc_in_deopt_handler() { _in_deopt_handler++; }
void dec_in_deopt_handler() {
void dec_in_deopt_handler() {
assert(_in_deopt_handler > 0, "mismatched deopt nesting");
if (_in_deopt_handler > 0) { // robustness
_in_deopt_handler--;
@ -1767,7 +1776,7 @@ private:
uint _claimed_par_id;
public:
uint get_claimed_par_id() { return _claimed_par_id; }
void set_claimed_par_id(uint id) { _claimed_par_id = id;}
void set_claimed_par_id(uint id) { _claimed_par_id = id; }
};
// Inline implementation of JavaThread::current
@ -1802,7 +1811,7 @@ inline bool JavaThread::stack_yellow_zone_enabled() {
inline size_t JavaThread::stack_available(address cur_sp) {
// This code assumes java stacks grow down
address low_addr; // Limit on the address for deepest stack depth
if ( _stack_guard_state == stack_guard_unused) {
if (_stack_guard_state == stack_guard_unused) {
low_addr = stack_base() - stack_size();
} else {
low_addr = stack_yellow_zone_base();

View File

@ -480,9 +480,8 @@ void vframeStreamCommon::skip_prefixed_method_and_wrappers() {
void vframeStreamCommon::skip_reflection_related_frames() {
while (!at_end() &&
(JDK_Version::is_gte_jdk14x_version() &&
(method()->method_holder()->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass()) ||
method()->method_holder()->is_subclass_of(SystemDictionary::reflect_ConstructorAccessorImpl_klass())))) {
method()->method_holder()->is_subclass_of(SystemDictionary::reflect_ConstructorAccessorImpl_klass()))) {
next();
}
}
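The frames skipped above are the reflection accessor frames that Method.invoke() inserts between caller and callee. A small, self-contained illustration of where they show up in a stack trace (class and method names are arbitrary):

import java.lang.reflect.Method;

public class ReflectFrames {
    public static void dump() {
        for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
            System.out.println("  at " + e);
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = ReflectFrames.class.getMethod("dump");
        m.invoke(null);   // the printed trace contains the reflection accessor frames
    }
}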

View File

@ -184,9 +184,7 @@ bool VM_PrintThreads::doit_prologue() {
assert(Thread::current()->is_Java_thread(), "just checking");
// Make sure AbstractOwnableSynchronizer is loaded
if (JDK_Version::is_gte_jdk16x_version()) {
java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
}
java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
// Get Heap_lock if concurrent locks will be dumped
if (_print_concurrent_locks) {
@ -225,7 +223,7 @@ bool VM_FindDeadlocks::doit_prologue() {
assert(Thread::current()->is_Java_thread(), "just checking");
// Load AbstractOwnableSynchronizer class
if (_concurrent_locks && JDK_Version::is_gte_jdk16x_version()) {
if (_concurrent_locks) {
java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
}
@ -283,9 +281,7 @@ bool VM_ThreadDump::doit_prologue() {
assert(Thread::current()->is_Java_thread(), "just checking");
// Load AbstractOwnableSynchronizer class before taking thread snapshots
if (JDK_Version::is_gte_jdk16x_version()) {
java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
}
java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
if (_with_locked_synchronizers) {
// Acquire Heap_lock to dump concurrent locks

View File

@ -98,6 +98,7 @@
template(LinuxDllLoad) \
template(RotateGCLog) \
template(WhiteBoxOperation) \
template(ClassLoaderStatsOperation) \
class VM_Operation: public CHeapObj<mtInternal> {
public:

View File

@ -160,8 +160,7 @@ const char* Abstract_VM_Version::vm_vendor() {
#ifdef VENDOR
return XSTR(VENDOR);
#else
return JDK_Version::is_gte_jdk17x_version() ?
"Oracle Corporation" : "Sun Microsystems Inc.";
return "Oracle Corporation";
#endif
}
@ -222,20 +221,12 @@ const char* Abstract_VM_Version::internal_vm_info_string() {
#ifndef HOTSPOT_BUILD_COMPILER
#ifdef _MSC_VER
#if _MSC_VER == 1100
#define HOTSPOT_BUILD_COMPILER "MS VC++ 5.0"
#elif _MSC_VER == 1200
#define HOTSPOT_BUILD_COMPILER "MS VC++ 6.0"
#elif _MSC_VER == 1310
#define HOTSPOT_BUILD_COMPILER "MS VC++ 7.1 (VS2003)"
#elif _MSC_VER == 1400
#define HOTSPOT_BUILD_COMPILER "MS VC++ 8.0 (VS2005)"
#elif _MSC_VER == 1500
#define HOTSPOT_BUILD_COMPILER "MS VC++ 9.0 (VS2008)"
#elif _MSC_VER == 1600
#if _MSC_VER == 1600
#define HOTSPOT_BUILD_COMPILER "MS VC++ 10.0 (VS2010)"
#elif _MSC_VER == 1700
#define HOTSPOT_BUILD_COMPILER "MS VC++ 11.0 (VS2012)"
#elif _MSC_VER == 1800
#define HOTSPOT_BUILD_COMPILER "MS VC++ 12.0 (VS2013)"
#else
#define HOTSPOT_BUILD_COMPILER "unknown MS VC++:" XSTR(_MSC_VER)
#endif

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderStats.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
@ -58,6 +59,7 @@ void DCmdRegistrant::register_dcmds(){
#endif // INCLUDE_SERVICES
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(full_export, true, false));
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RotateGCLogDCmd>(full_export, true, false));
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassLoaderStatsDCmd>(full_export, true, false));
// Enhanced JMX Agent Support
// These commands won't be exported via the DiagnosticCommandMBean until an

View File

@ -1229,10 +1229,8 @@ JVM_ENTRY(jint, jmm_GetThreadInfo(JNIEnv *env, jlongArray ids, jint maxDepth, jo
"The length of the given ThreadInfo array does not match the length of the given array of thread IDs", -1);
}
if (JDK_Version::is_gte_jdk16x_version()) {
// make sure the AbstractOwnableSynchronizer klass is loaded before taking thread snapshots
java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(CHECK_0);
}
// make sure the AbstractOwnableSynchronizer klass is loaded before taking thread snapshots
java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(CHECK_0);
// Must use ThreadDumpResult to store the ThreadSnapshot.
// GC may occur after the thread snapshots are taken but before
@ -1303,10 +1301,8 @@ JVM_END
JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboolean locked_monitors, jboolean locked_synchronizers))
ResourceMark rm(THREAD);
if (JDK_Version::is_gte_jdk16x_version()) {
// make sure the AbstractOwnableSynchronizer klass is loaded before taking thread snapshots
java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(CHECK_NULL);
}
// make sure the AbstractOwnableSynchronizer klass is loaded before taking thread snapshots
java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(CHECK_NULL);
typeArrayOop ta = typeArrayOop(JNIHandles::resolve(thread_ids));
int num_threads = (ta != NULL ? ta->length() : 0);

View File

@ -665,17 +665,15 @@ void ConcurrentLocksDump::dump_at_safepoint() {
// dump all locked concurrent locks
assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
if (JDK_Version::is_gte_jdk16x_version()) {
ResourceMark rm;
ResourceMark rm;
GrowableArray<oop>* aos_objects = new GrowableArray<oop>(INITIAL_ARRAY_SIZE);
GrowableArray<oop>* aos_objects = new GrowableArray<oop>(INITIAL_ARRAY_SIZE);
// Find all instances of AbstractOwnableSynchronizer
HeapInspection::find_instances_at_safepoint(SystemDictionary::abstract_ownable_synchronizer_klass(),
// Find all instances of AbstractOwnableSynchronizer
HeapInspection::find_instances_at_safepoint(SystemDictionary::abstract_ownable_synchronizer_klass(),
aos_objects);
// Build a map of thread to its owned AQS locks
build_map(aos_objects);
}
// Build a map of thread to its owned AQS locks
build_map(aos_objects);
}
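The AbstractOwnableSynchronizer pre-loading and AQS scanning above back the public java.lang.management thread-dump APIs that report owned j.u.c locks. A short usage sketch of those public APIs, not of the VM-internal path:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.util.concurrent.locks.ReentrantLock;

public class OwnedSyncDump {
    public static void main(String[] args) {
        ReentrantLock lock = new ReentrantLock();
        lock.lock();                                      // an AQS-based lock owned by this thread

        ThreadMXBean bean = ManagementFactory.getThreadMXBean();
        // the second argument asks for owned ownable synchronizers (AQS subclasses)
        for (ThreadInfo ti : bean.dumpAllThreads(true, true)) {
            if (ti.getLockedSynchronizers().length > 0) {
                System.out.println(ti.getThreadName() + " owns "
                        + ti.getLockedSynchronizers().length + " synchronizer(s)");
            }
        }

        long[] deadlocked = bean.findDeadlockedThreads(); // also considers ownable synchronizers
        System.out.println("deadlocked: " + (deadlocked == null ? "none" : deadlocked.length));
    }
}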

View File

@ -0,0 +1,115 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* This test has a EnclosingMethod attribute with an illegal
* attribute_length field value of 6. This should cause a
* java.lang.ClassFormatError exception to be thrown.
*/
class EnclMethTest {
0xCAFEBABE;
0; // minor version
52; // version
[22] { // Constant Pool
; // first element is empty
Field #3 #14; // #1 at 0x0A
Method #4 #15; // #2 at 0x0F
class #16; // #3 at 0x14
class #19; // #4 at 0x17
Utf8 "this$0"; // #5 at 0x1A
Utf8 "La;"; // #6 at 0x23
Utf8 "Synthetic"; // #7 at 0x29
Utf8 "<init>"; // #8 at 0x35
Utf8 "(Ljava/lang/Object;)V"; // #9 at 0x3E
Utf8 "Code"; // #10 at 0x56
Utf8 "LineNumberTable"; // #11 at 0x5D
Utf8 "SourceFile"; // #12 at 0x6F
Utf8 "a.java"; // #13 at 0x7C
NameAndType #5 #6; // #14 at 0x85
NameAndType #8 #20; // #15 at 0x8A
Utf8 "EnclMethTest"; // #16 at 0x8F
Utf8 "Loc"; // #17 at 0x9E
Utf8 "InnerClasses"; // #18 at 0xA4
Utf8 "java/lang/Object"; // #19 at 0xB3
Utf8 "()V"; // #20 at 0xC6
Utf8 "EnclosingMethod"; // #21 at 0xCC
} // Constant Pool
0x0000; // access
#3;// this_cpx
#4;// super_cpx
[0] { // Interfaces
} // Interfaces
[1] { // fields
{ // Member at 0xE8
0x0000; // access
#5; // name_cpx
#6; // sig_cpx
[1] { // Attributes
Attr(#7, 0) { // Synthetic at 0xF0
} // end Synthetic
} // Attributes
} // Member
} // fields
[1] { // methods
{ // Member at 0xF8
0x0001; // access
#8; // name_cpx
#20; // sig_cpx
[1] { // Attributes
Attr(#10, 17) { // Code at 0x0100
2; // max_stack
2; // max_locals
Bytes[5]{
0x2AB70002B1;
};
[0] { // Traps
} // end Traps
[0] { // Attributes
} // Attributes
} // end Code
} // Attributes
} // Member
} // methods
[3] { // Attributes
Attr(#12, 2) { // SourceFile at 0x0119
#13;
} // end SourceFile
;
Attr(#18, 10) { // InnerClasses at 0x0121
[1] { // InnerClasses
#3 #0 #17 0; // at 0x0131
}
} // end InnerClasses
;
Attr(#21, 6) { // EnclosingMethod at 0x0131
// invalid length of EnclosingMethod attr: 6 (should be 4) !!
0x0004000F;
} // end EnclosingMethod
} // Attributes
} // end class EnclMethTest
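For contrast with the malformed attribute above, a well-formed EnclosingMethod attribute is what lets reflection recover the method that declares a local or anonymous class. A tiny example (names are arbitrary):

public class EnclosingMethodDemo {
    static Class<?> declaredInMethod() {
        class Local { }                 // javac emits an EnclosingMethod attribute for Local
        return Local.class;
    }

    public static void main(String[] args) {
        Class<?> c = declaredInMethod();
        // prints the java.lang.reflect.Method for declaredInMethod(); a corrupt
        // attribute_length instead makes class loading fail with ClassFormatError
        System.out.println("enclosing method: " + c.getEnclosingMethod());
    }
}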

View File

@ -0,0 +1,47 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8044738
* @library /testlibrary
* @summary Check attribute_length of EnclosingMethod attribute
* @run main EnclMethodAttr
*/
import java.io.File;
import com.oracle.java.testlibrary.*;
public class EnclMethodAttr {
static final String testsrc = System.getProperty("test.src");
public static void main(String args[]) throws Throwable {
System.out.println("Regression test for bug 8044738");
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-jar", testsrc + File.separator + "enclMethodAttr.jar");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("java.lang.ClassFormatError: Wrong EnclosingMethod");
}
}

View File

@ -24,6 +24,7 @@
/*
* @test
* @bug 8036823
* @bug 8046287
* @summary Creates two threads contending for the same lock and checks
* whether jstack reports "locked" by more than one thread.
*
@ -52,10 +53,13 @@ public class TestThreadDumpMonitorContention {
// looking for header lines with these patterns:
// "ContendingThread-1" #19 prio=5 os_prio=64 tid=0x000000000079c000 nid=0x23 runnable [0xffff80ffb8b87000]
// "ContendingThread-2" #21 prio=5 os_prio=64 tid=0x0000000000780000 nid=0x2f waiting for monitor entry [0xfffffd7fc1111000]
// "ContendingThread-2" #24 prio=5 os_prio=64 tid=0x0000000000ec8800 nid=0x31 waiting on condition [0xfffffd7bbfffe000]
final static Pattern HEADER_PREFIX_PATTERN = Pattern.compile(
"^\"ContendingThread-.*");
final static Pattern HEADER_WAITING_PATTERN = Pattern.compile(
final static Pattern HEADER_WAITING_PATTERN1 = Pattern.compile(
"^\"ContendingThread-.* waiting for monitor entry .*");
final static Pattern HEADER_WAITING_PATTERN2 = Pattern.compile(
"^\"ContendingThread-.* waiting on condition .*");
final static Pattern HEADER_RUNNABLE_PATTERN = Pattern.compile(
"^\"ContendingThread-.* runnable .*");
@ -80,17 +84,34 @@ public class TestThreadDumpMonitorContention {
final static Pattern WAITING_PATTERN = Pattern.compile(
".* waiting to lock \\<.*\\(a TestThreadDumpMonitorContention.*");
final static Object barrier = new Object();
volatile static boolean done = false;
static int barrier_cnt = 0;
static int blank_line_match_cnt = 0;
static int error_cnt = 0;
static String header_line = null;
static boolean have_header_line = false;
static boolean have_thread_state_line = false;
static int match_cnt = 0;
static String[] match_list = new String[2];
static String header_line = null;
static int header_prefix_match_cnt = 0;
static int locked_line_match_cnt = 0;
static String[] locked_match_list = new String[2];
static int n_samples = 15;
static int sum_both_running_cnt = 0;
static int sum_both_waiting_cnt = 0;
static int sum_contended_cnt = 0;
static int sum_locked_hdr_runnable_cnt = 0;
static int sum_locked_hdr_waiting1_cnt = 0;
static int sum_locked_hdr_waiting2_cnt = 0;
static int sum_locked_thr_state_blocked_cnt = 0;
static int sum_locked_thr_state_runnable_cnt = 0;
static int sum_one_waiting_cnt = 0;
static int sum_uncontended_cnt = 0;
static int sum_waiting_hdr_waiting1_cnt = 0;
static int sum_waiting_thr_state_blocked_cnt = 0;
static String thread_state_line = null;
static boolean verbose = false;
static int waiting_line_match_cnt = 0;
public static void main(String[] args) throws Exception {
if (args.length != 0) {
@ -110,6 +131,11 @@ public class TestThreadDumpMonitorContention {
Runnable runnable = new Runnable() {
public void run() {
synchronized (barrier) {
// let the main thread know we're running
barrier_cnt++;
barrier.notify();
}
while (!done) {
synchronized (this) { }
}
@ -118,8 +144,16 @@ public class TestThreadDumpMonitorContention {
Thread[] thread_list = new Thread[2];
thread_list[0] = new Thread(runnable, "ContendingThread-1");
thread_list[1] = new Thread(runnable, "ContendingThread-2");
thread_list[0].start();
thread_list[1].start();
synchronized (barrier) {
thread_list[0].start();
thread_list[1].start();
// Wait until the contending threads are running so that
// we don't sample any thread init states.
while (barrier_cnt < 2) {
barrier.wait();
}
}
doSamples();
@ -143,11 +177,12 @@ public class TestThreadDumpMonitorContention {
// Example:
// "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f runnable [0xfffffd7fc1111000]
// java.lang.Thread.State: RUNNABLE
// at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
// at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
// at java.lang.Thread.run(Thread.java:745)
//
static boolean checkBlankLine(String line) {
if (line.length() == 0) {
blank_line_match_cnt++;
have_header_line = false;
have_thread_state_line = false;
return true;
@ -161,49 +196,73 @@ public class TestThreadDumpMonitorContention {
// Example 1:
// "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f runnable [0xfffffd7fc1111000]
// java.lang.Thread.State: RUNNABLE
// at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
// at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
// - locked <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
// at java.lang.Thread.run(Thread.java:745)
//
// Example 2:
// "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f waiting for monitor entry [0xfffffd7fc1111000]
// java.lang.Thread.State: BLOCKED (on object monitor)
// at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
// at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
// - locked <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
// at java.lang.Thread.run(Thread.java:745)
//
// Example 3:
// "ContendingThread-2" #24 prio=5 os_prio=64 tid=0x0000000000ec8800 nid=0x31 waiting on condition [0xfffffd7bbfffe000]
// java.lang.Thread.State: RUNNABLE
// JavaThread state: _thread_blocked
// Thread: 0x0000000000ec8800 [0x31] State: _at_safepoint _has_called_back 0 _at_poll_safepoint 0
// JavaThread state: _thread_blocked
// at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
// - locked <0xfffffd7e6d03eb28> (a TestThreadDumpMonitorContention$1)
// at java.lang.Thread.run(Thread.java:745)
//
static boolean checkLockedLine(String line) {
Matcher matcher = LOCK_PATTERN.matcher(line);
if (matcher.matches()) {
if (verbose) {
System.out.println("locked_line='" + line + "'");
}
match_list[match_cnt] = new String(line);
match_cnt++;
locked_match_list[locked_line_match_cnt] = new String(line);
locked_line_match_cnt++;
matcher = HEADER_RUNNABLE_PATTERN.matcher(header_line);
if (!matcher.matches()) {
if (matcher.matches()) {
sum_locked_hdr_runnable_cnt++;
} else {
// It's strange, but a locked line can also
// match the HEADER_WAITING_PATTERN.
matcher = HEADER_WAITING_PATTERN.matcher(header_line);
if (!matcher.matches()) {
System.err.println();
System.err.println("ERROR: header line does " +
"not match runnable or waiting patterns.");
System.err.println("ERROR: header_line='" +
header_line + "'");
System.err.println("ERROR: locked_line='" + line + "'");
error_cnt++;
// match the HEADER_WAITING_PATTERN{1,2}.
matcher = HEADER_WAITING_PATTERN1.matcher(header_line);
if (matcher.matches()) {
sum_locked_hdr_waiting1_cnt++;
} else {
matcher = HEADER_WAITING_PATTERN2.matcher(header_line);
if (matcher.matches()) {
sum_locked_hdr_waiting2_cnt++;
} else {
System.err.println();
System.err.println("ERROR: header line does " +
"not match runnable or waiting patterns.");
System.err.println("ERROR: header_line='" +
header_line + "'");
System.err.println("ERROR: locked_line='" + line +
"'");
error_cnt++;
}
}
}
matcher = THREAD_STATE_RUNNABLE_PATTERN.matcher(thread_state_line);
if (!matcher.matches()) {
if (matcher.matches()) {
sum_locked_thr_state_runnable_cnt++;
} else {
// It's strange, but a locked line can also
// match the THREAD_STATE_BLOCKED_PATTERN.
matcher = THREAD_STATE_BLOCKED_PATTERN.matcher(
thread_state_line);
if (!matcher.matches()) {
if (matcher.matches()) {
sum_locked_thr_state_blocked_cnt++;
} else {
System.err.println();
System.err.println("ERROR: thread state line does not " +
"match runnable or waiting patterns.");
@ -229,19 +288,22 @@ public class TestThreadDumpMonitorContention {
// Example:
// "ContendingThread-2" #22 prio=5 os_prio=64 tid=0x00000000007b9800 nid=0x30 waiting for monitor entry [0xfffffd7fc1010000]
// java.lang.Thread.State: BLOCKED (on object monitor)
// at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
// at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
// - waiting to lock <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
// at java.lang.Thread.run(Thread.java:745)
//
static boolean checkWaitingLine(String line) {
Matcher matcher = WAITING_PATTERN.matcher(line);
if (matcher.matches()) {
waiting_line_match_cnt++;
if (verbose) {
System.out.println("waiting_line='" + line + "'");
}
matcher = HEADER_WAITING_PATTERN.matcher(header_line);
if (!matcher.matches()) {
matcher = HEADER_WAITING_PATTERN1.matcher(header_line);
if (matcher.matches()) {
sum_waiting_hdr_waiting1_cnt++;
} else {
System.err.println();
System.err.println("ERROR: header line does " +
"not match a waiting pattern.");
@ -251,7 +313,9 @@ public class TestThreadDumpMonitorContention {
}
matcher = THREAD_STATE_BLOCKED_PATTERN.matcher(thread_state_line);
if (!matcher.matches()) {
if (matcher.matches()) {
sum_waiting_thr_state_blocked_cnt++;
} else {
System.err.println();
System.err.println("ERROR: thread state line " +
"does not match a waiting pattern.");
@ -273,7 +337,10 @@ public class TestThreadDumpMonitorContention {
static void doSamples() throws Exception {
for (int count = 0; count < n_samples; count++) {
match_cnt = 0;
blank_line_match_cnt = 0;
header_prefix_match_cnt = 0;
locked_line_match_cnt = 0;
waiting_line_match_cnt = 0;
// verbose mode or an error has a lot of output so add more space
if (verbose || error_cnt > 0) System.out.println();
System.out.println("Sample #" + count);
@ -290,12 +357,12 @@ public class TestThreadDumpMonitorContention {
// a failure and we report it
// - for a stack trace that matches LOCKED_PATTERN, we verify:
// - the header line matches HEADER_RUNNABLE_PATTERN
// or HEADER_WAITING_PATTERN
// or HEADER_WAITING_PATTERN{1,2}
// - the thread state line matches THREAD_STATE_BLOCKED_PATTERN
// or THREAD_STATE_RUNNABLE_PATTERN
// - we report any mismatches as failures
// - for a stack trace that matches WAITING_PATTERN, we verify:
// - the header line matches HEADER_WAITING_PATTERN
// - the header line matches HEADER_WAITING_PATTERN1
// - the thread state line matches THREAD_STATE_BLOCKED_PATTERN
// - we report any mismatches as failures
// - the stack traces that match HEADER_PREFIX_PATTERN may
@ -324,6 +391,7 @@ public class TestThreadDumpMonitorContention {
if (!have_header_line) {
matcher = HEADER_PREFIX_PATTERN.matcher(line);
if (matcher.matches()) {
header_prefix_match_cnt++;
if (verbose) {
System.out.println();
System.out.println("header='" + line + "'");
@ -366,19 +434,80 @@ public class TestThreadDumpMonitorContention {
}
process.waitFor();
if (match_cnt == 2) {
if (match_list[0].equals(match_list[1])) {
System.err.println();
System.err.println("ERROR: matching lock lines:");
System.err.println("ERROR: line[0]'" + match_list[0] + "'");
System.err.println("ERROR: line[1]'" + match_list[1] + "'");
error_cnt++;
}
}
if (header_prefix_match_cnt != 2) {
System.err.println();
System.err.println("ERROR: should match exactly two headers.");
System.err.println("ERROR: header_prefix_match_cnt=" +
header_prefix_match_cnt);
error_cnt++;
}
if (locked_line_match_cnt == 2) {
if (locked_match_list[0].equals(locked_match_list[1])) {
System.err.println();
System.err.println("ERROR: matching lock lines:");
System.err.println("ERROR: line[0]'" +
locked_match_list[0] + "'");
System.err.println("ERROR: line[1]'" +
locked_match_list[1] + "'");
error_cnt++;
}
}
if (locked_line_match_cnt == 1) {
// one thread has the lock
if (waiting_line_match_cnt == 1) {
// and the other contended for it
sum_contended_cnt++;
} else {
// and the other is just running
sum_uncontended_cnt++;
}
} else if (waiting_line_match_cnt == 1) {
// one thread is waiting
sum_one_waiting_cnt++;
} else if (waiting_line_match_cnt == 2) {
// both threads are waiting
sum_both_waiting_cnt++;
} else {
// both threads are running
sum_both_running_cnt++;
}
// slight delay between jstack launches
Thread.sleep(500);
}
if (error_cnt != 0) {
// skip summary info since there were errors
return;
}
System.out.println("INFO: Summary for all samples:");
System.out.println("INFO: both_running_cnt=" + sum_both_running_cnt);
System.out.println("INFO: both_waiting_cnt=" + sum_both_waiting_cnt);
System.out.println("INFO: contended_cnt=" + sum_contended_cnt);
System.out.println("INFO: one_waiting_cnt=" + sum_one_waiting_cnt);
System.out.println("INFO: uncontended_cnt=" + sum_uncontended_cnt);
System.out.println("INFO: locked_hdr_runnable_cnt=" +
sum_locked_hdr_runnable_cnt);
System.out.println("INFO: locked_hdr_waiting1_cnt=" +
sum_locked_hdr_waiting1_cnt);
System.out.println("INFO: locked_hdr_waiting2_cnt=" +
sum_locked_hdr_waiting2_cnt);
System.out.println("INFO: locked_thr_state_blocked_cnt=" +
sum_locked_thr_state_blocked_cnt);
System.out.println("INFO: locked_thr_state_runnable_cnt=" +
sum_locked_thr_state_runnable_cnt);
System.out.println("INFO: waiting_hdr_waiting1_cnt=" +
sum_waiting_hdr_waiting1_cnt);
System.out.println("INFO: waiting_thr_state_blocked_cnt=" +
sum_waiting_thr_state_blocked_cnt);
if (sum_contended_cnt == 0) {
System.err.println("WARNING: the primary scenario for 8036823" +
" has not been exercised by this test run.");
}
}
// This helper relies on RuntimeMXBean.getName() returning a string
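The helper itself is not shown in this hunk. On HotSpot JVMs of this vintage RuntimeMXBean.getName() conventionally returns "<pid>@<hostname>", so a hypothetical version of such a helper might look like the following; the format is a convention, not a specification, and this is not the test's actual code:

import java.lang.management.ManagementFactory;

public class PidFromRuntimeName {
    static String currentPid() {
        String name = ManagementFactory.getRuntimeMXBean().getName();
        int at = name.indexOf('@');
        return (at > 0) ? name.substring(0, at) : name;   // fall back to the raw name
    }

    public static void main(String[] args) {
        System.out.println("pid = " + currentPid());
    }
}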

View File

@ -0,0 +1,155 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
*
* @build ClassLoaderStatsTest DcmdUtil
* @run main ClassLoaderStatsTest
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.StringReader;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class ClassLoaderStatsTest {
// ClassLoader Parent CLD* Classes ChunkSz BlockSz Type
// 0x00000007c0215928 0x0000000000000000 0x0000000000000000 0 0 0 org.eclipse.osgi.baseadaptor.BaseAdaptor$1
// 0x00000007c0009868 0x0000000000000000 0x00007fc52aebcc80 1 6144 3768 sun.reflect.DelegatingClassLoader
// 0x00000007c0009868 0x0000000000000000 0x00007fc52b8916d0 1 6144 3688 sun.reflect.DelegatingClassLoader
// 0x00000007c0009868 0x00000007c0038ba8 0x00007fc52afb8760 1 6144 3688 sun.reflect.DelegatingClassLoader
// 0x00000007c0009868 0x0000000000000000 0x00007fc52afbb1a0 1 6144 3688 sun.reflect.DelegatingClassLoader
// 0x0000000000000000 0x0000000000000000 0x00007fc523416070 5019 30060544 29956216 <boot classloader>
// 455 1210368 672848 + unsafe anonymous classes
// 0x00000007c016b5c8 0x00000007c0038ba8 0x00007fc52a995000 5 8192 5864 org.netbeans.StandardModule$OneModuleClassLoader
// 0x00000007c0009868 0x00000007c016b5c8 0x00007fc52ac13640 1 6144 3896 sun.reflect.DelegatingClassLoader
// ...
static Pattern clLine = Pattern.compile("0x\\p{XDigit}*\\s*0x\\p{XDigit}*\\s*0x\\p{XDigit}*\\s*(\\d*)\\s*(\\d*)\\s*(\\d*)\\s*(.*)");
static Pattern anonLine = Pattern.compile("\\s*(\\d*)\\s*(\\d*)\\s*(\\d*)\\s*.*");
public static DummyClassLoader dummyloader;
public static void main(String arg[]) throws Exception {
// create a classloader and load our special class
dummyloader = new DummyClassLoader();
Class<?> c = Class.forName("TestClass", true, dummyloader);
if (c.getClassLoader() != dummyloader) {
throw new RuntimeException("TestClass defined by wrong classloader: " + c.getClassLoader());
}
String result = DcmdUtil.executeDcmd("VM.classloader_stats");
BufferedReader r = new BufferedReader(new StringReader(result));
String line;
while((line = r.readLine()) != null) {
Matcher m = clLine.matcher(line);
if (m.matches()) {
// verify that DummyClassLoader has loaded 1 class and 1 anonymous class
if (m.group(4).equals("ClassLoaderStatsTest$DummyClassLoader")) {
System.out.println("line: " + line);
if (!m.group(1).equals("1")) {
throw new Exception("Should have loaded 1 class: " + line);
}
checkPositiveInt(m.group(2));
checkPositiveInt(m.group(3));
String next = r.readLine();
System.out.println("next: " + next);
Matcher m1 = anonLine.matcher(next);
m1.matches();
if (!m1.group(1).equals("1")) {
throw new Exception("Should have loaded 1 anonymous class, but found : " + m1.group(1));
}
checkPositiveInt(m1.group(2));
checkPositiveInt(m1.group(3));
}
}
}
}
private static void checkPositiveInt(String s) throws Exception {
if (Integer.parseInt(s) <= 0) {
throw new Exception("Value should have been > 0: " + s);
}
}
public static class DummyClassLoader extends ClassLoader {
public static final String CLASS_NAME = "TestClass";
static ByteBuffer readClassFile(String name)
{
File f = new File(System.getProperty("test.classes", "."),
name);
try (FileInputStream fin = new FileInputStream(f);
FileChannel fc = fin.getChannel())
{
return fc.map(FileChannel.MapMode.READ_ONLY, 0, fc.size());
} catch (IOException e) {
throw new RuntimeException("Can't open file: " + name, e);
}
}
protected Class<?> loadClass(String name, boolean resolve)
throws ClassNotFoundException
{
Class<?> c;
if (!"TestClass".equals(name)) {
c = super.loadClass(name, resolve);
} else {
// should not delegate to the system class loader
c = findClass(name);
if (resolve) {
resolveClass(c);
}
}
return c;
}
protected Class<?> findClass(String name)
throws ClassNotFoundException
{
if (!"TestClass".equals(name)) {
throw new ClassNotFoundException("Unexpected class: " + name);
}
return defineClass(name, readClassFile(name + ".class"), null);
}
} /* DummyClassLoader */
}
class TestClass {
static {
// force creation of anonymous class (for the lambdaform)
Runnable r = () -> System.out.println("Hello");
r.run();
}
}

View File

@ -26,6 +26,7 @@
* @bug 8028623
* @summary Test hashing of extended characters in Serviceability Agent.
* @library /testlibrary
* @ignore 8044416
* @build com.oracle.java.testlibrary.*
* @compile -encoding utf8 Test8028623.java
* @run main Test8028623

View File

@ -25,7 +25,7 @@
* @test BooleanTest
* @bug 8028756
* @library /testlibrary /testlibrary/whitebox
* @build BooleanTest
* @build BooleanTest ClassFileInstaller sun.hotspot.WhiteBox com.oracle.java.testlibrary.*
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI BooleanTest
* @summary testing of WB::set/getBooleanVMFlag()

Some files were not shown because too many files have changed in this diff.