J. Duke 2017-07-05 19:35:54 +02:00
commit b17c6d889b
1217 changed files with 70103 additions and 60325 deletions
.hgtags-top-repo
common
corba
hotspot

@ -249,3 +249,4 @@ fd8d51bdf9aadf7ae83e65e8655c53581017c363 jdk9-b03
cb4c3440bc2748101923e2488506e61009ab1bf5 jdk9-b04
8c63f0b6ada282f27e3a80125e53c3be603f9af7 jdk9-b05
d0b525cd31b87abeb6d5b7e3516953eeb13b323c jdk9-b06
0ea015c298b201c07fa33990f2445b6d0ef3566d jdk9-b07

@ -46,10 +46,24 @@ AC_DEFUN([ADD_JVM_ARG_IF_OK],
# Appends a string to a path variable, only adding the : when needed.
AC_DEFUN([BASIC_APPEND_TO_PATH],
[
if test "x[$]$1" = x; then
$1="$2"
else
$1="[$]$1:$2"
if test "x$2" != x; then
if test "x[$]$1" = x; then
$1="$2"
else
$1="[$]$1:$2"
fi
fi
])
# Prepends a string to a path variable, only adding the : when needed.
AC_DEFUN([BASIC_PREPEND_TO_PATH],
[
if test "x$2" != x; then
if test "x[$]$1" = x; then
$1="$2"
else
$1="$2:[$]$1"
fi
fi
])
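For reference, after m4 expansion these macros reduce to plain shell. A minimal sketch of the resulting behavior (the variable names below are illustrative, not taken from the build):
# Sketch of the expanded append/prepend logic.
MYPATH="/usr/bin"
EXTRA=""
# Appending an empty value is now a no-op, so no stray ':' is introduced.
if test "x$EXTRA" != x; then
  if test "x$MYPATH" = x; then
    MYPATH="$EXTRA"
  else
    MYPATH="$MYPATH:$EXTRA"
  fi
fi
# Prepending puts the new entry first instead.
NEWDIR="/opt/toolchain/bin"
if test "x$NEWDIR" != x; then
  if test "x$MYPATH" = x; then
    MYPATH="$NEWDIR"
  else
    MYPATH="$NEWDIR:$MYPATH"
  fi
fi
echo "$MYPATH"   # prints /opt/toolchain/bin:/usr/bin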
@ -442,43 +456,95 @@ AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
# Locate the directory of this script.
AUTOCONF_DIR=$TOPDIR/common/autoconf
])
AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
[
AC_ARG_WITH([devkit], [AS_HELP_STRING([--with-devkit],
[use this devkit for compilers, tools and resources])],
[
BASIC_FIXUP_PATH([with_devkit])
DEVKIT_ROOT="$with_devkit"
# Check for a meta data info file in the root of the devkit
if test -f "$DEVKIT_ROOT/devkit.info"; then
# This potentially sets the following:
# DEVKIT_NAME: A descriptive name of the devkit
# DEVKIT_TOOLCHAIN_PATH: Corresponds to --with-toolchain-path
# DEVKIT_EXTRA_PATH: Corresponds to --with-extra-path
# DEVKIT_SYSROOT: Corresponds to --with-sysroot
. $DEVKIT_ROOT/devkit.info
fi
AC_MSG_CHECKING([for devkit])
if test "x$DEVKIT_NAME" != x; then
AC_MSG_RESULT([$DEVKIT_NAME in $DEVKIT_ROOT])
else
AC_MSG_RESULT([$DEVKIT_ROOT])
fi
if test "x$DEVKIT_EXTRA_PATH" != x; then
BASIC_PREPEND_TO_PATH([EXTRA_PATH],$DEVKIT_EXTRA_PATH)
fi
# Fallback default of just /bin if DEVKIT_PATH is not defined
if test "x$DEVKIT_TOOLCHAIN_PATH" = x; then
DEVKIT_TOOLCHAIN_PATH="$DEVKIT_ROOT/bin"
fi
BASIC_PREPEND_TO_PATH([TOOLCHAIN_PATH],$DEVKIT_TOOLCHAIN_PATH)
# If DEVKIT_SYSROOT is set, use that, otherwise try a couple of known
# places for backwards compatibility.
if test "x$DEVKIT_SYSROOT" != x; then
SYSROOT="$DEVKIT_SYSROOT"
elif test -d "$DEVKIT_ROOT/$host_alias/libc"; then
SYSROOT="$DEVKIT_ROOT/$host_alias/libc"
elif test -d "$DEVKIT_ROOT/$host/sys-root"; then
SYSROOT="$DEVKIT_ROOT/$host/sys-root"
fi
]
)
# You can force the sysroot if the sysroot encoded into the compiler tools
# is not correct.
AC_ARG_WITH(sys-root, [AS_HELP_STRING([--with-sys-root],
[alias for --with-sysroot for backwards compatibility])],
[SYSROOT=$with_sys_root]
)
AC_ARG_WITH(sysroot, [AS_HELP_STRING([--with-sysroot],
[use this directory as sysroot])],
[SYSROOT=$with_sysroot]
)
AC_ARG_WITH([tools-dir], [AS_HELP_STRING([--with-tools-dir],
[alias for --with-toolchain-path for backwards compatibility])],
[BASIC_PREPEND_TO_PATH([TOOLCHAIN_PATH],$with_tools_dir)]
)
AC_ARG_WITH([toolchain-path], [AS_HELP_STRING([--with-toolchain-path],
[prepend these directories when searching for toolchain binaries (compilers etc)])],
[BASIC_PREPEND_TO_PATH([TOOLCHAIN_PATH],$with_toolchain_path)]
)
AC_ARG_WITH([extra-path], [AS_HELP_STRING([--with-extra-path],
[prepend these directories to the default path])],
[BASIC_PREPEND_TO_PATH([EXTRA_PATH],$with_extra_path)]
)
# Prepend the extra path to the global path
BASIC_PREPEND_TO_PATH([PATH],$EXTRA_PATH)
if test "x$OPENJDK_BUILD_OS" = "xsolaris"; then
# Add extra search paths on solaris for utilities like ar and as etc...
PATH="$PATH:/usr/ccs/bin:/usr/sfw/bin:/opt/csw/bin"
fi
# You can force the sys-root if the sys-root encoded into the cross compiler tools
# is not correct.
AC_ARG_WITH(sys-root, [AS_HELP_STRING([--with-sys-root],
[pass this sys-root to the compilers and tools (for cross-compiling)])])
if test "x$with_sys_root" != x; then
SYS_ROOT=$with_sys_root
else
SYS_ROOT=/
fi
AC_SUBST(SYS_ROOT)
AC_ARG_WITH([tools-dir], [AS_HELP_STRING([--with-tools-dir],
[search this directory for compilers and tools (for cross-compiling)])],
[TOOLS_DIR=$with_tools_dir]
)
AC_ARG_WITH([devkit], [AS_HELP_STRING([--with-devkit],
[use this directory as base for tools-dir and sys-root (for cross-compiling)])],
[
if test "x$with_sys_root" != x; then
AC_MSG_ERROR([Cannot specify both --with-devkit and --with-sys-root at the same time])
fi
BASIC_FIXUP_PATH([with_devkit])
BASIC_APPEND_TO_PATH([TOOLS_DIR],$with_devkit/bin)
if test -d "$with_devkit/$host_alias/libc"; then
SYS_ROOT=$with_devkit/$host_alias/libc
elif test -d "$with_devkit/$host/sys-root"; then
SYS_ROOT=$with_devkit/$host/sys-root
fi
])
AC_MSG_CHECKING([for sysroot])
AC_MSG_RESULT([$SYSROOT])
AC_MSG_CHECKING([for toolchain path])
AC_MSG_RESULT([$TOOLCHAIN_PATH])
AC_MSG_CHECKING([for extra path])
AC_MSG_RESULT([$EXTRA_PATH])
])
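Since devkit.info is sourced as plain shell, a minimal example of such a file (hypothetical name and paths, using only the variables listed in the comment above; DEVKIT_ROOT is set by configure before sourcing) could look like:
# Hypothetical devkit.info placed in the root of a devkit directory.
DEVKIT_NAME="Example cross-compilation devkit"
DEVKIT_TOOLCHAIN_PATH="$DEVKIT_ROOT/bin"
DEVKIT_EXTRA_PATH="$DEVKIT_ROOT/extra/bin"
DEVKIT_SYSROOT="$DEVKIT_ROOT/sysroot"
A devkit prepared this way would then be picked up with something like: bash configure --with-devkit=/path/to/devkit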
AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
@ -648,10 +714,10 @@ AC_DEFUN([BASIC_CHECK_GNU_MAKE],
fi
if test "x$FOUND_MAKE" = x; then
if test "x$TOOLS_DIR" != x; then
# We have a tools-dir, check that as well before giving up.
if test "x$TOOLCHAIN_PATH" != x; then
# We have a toolchain path, check that as well before giving up.
OLD_PATH=$PATH
PATH=$TOOLS_DIR:$PATH
PATH=$TOOLCHAIN_PATH:$PATH
AC_PATH_PROGS(CHECK_TOOLSDIR_GMAKE, gmake)
BASIC_CHECK_MAKE_VERSION("$CHECK_TOOLSDIR_GMAKE", [gmake in tools-dir])
if test "x$FOUND_MAKE" = x; then

@ -76,4 +76,14 @@ if test $? = 0; then
OUT=powerpc$KERNEL_BITMODE`echo $OUT | sed -e 's/[^-]*//'`
fi
# Test and fix little endian PowerPC64.
# TODO: should be handled by autoconf-config.guess.
if [ "x$OUT" = x ]; then
if [ `uname -m` = ppc64le ]; then
if [ `uname -s` = Linux ]; then
OUT=powerpc64le-unknown-linux-gnu
fi
fi
fi
echo $OUT

@ -169,8 +169,8 @@ AC_DEFUN([BPERF_SETUP_CCACHE],
if test "x$enable_ccache" = xyes; then
AC_MSG_RESULT([yes])
OLD_PATH="$PATH"
if test "x$TOOLS_DIR" != x; then
PATH=$TOOLS_DIR:$PATH
if test "x$TOOLCHAIN_PATH" != x; then
PATH=$TOOLCHAIN_PATH:$PATH
fi
BASIC_REQUIRE_PROGS(CCACHE, ccache)
CCACHE_STATUS="enabled"

@ -100,6 +100,9 @@ JDKOPT_SETUP_DEBUG_LEVEL
# With basic setup done, call the custom early hook.
CUSTOM_EARLY_HOOK
# Check if we have devkits, extra paths or sysroot set.
BASIC_SETUP_DEVKIT
# To properly create a configuration name, we need to have the OpenJDK target
# and options (variants and debug level) parsed.
BASIC_SETUP_OUTPUT_DIR

@ -119,6 +119,32 @@ AC_DEFUN_ONCE([FLAGS_SETUP_INIT_FLAGS],
# FIXME: likely bug, should be CCXXFLAGS_JDK? or one for C or CXX.
CCXXFLAGS="$CCXXFLAGS -nologo"
fi
if test "x$SYSROOT" != "x"; then
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# Solaris Studio does not have a concept of sysroot. Instead we must
# make sure the default include and lib dirs are appended to each
# compile and link command line.
SYSROOT_CFLAGS="-I$SYSROOT/usr/include"
SYSROOT_LDFLAGS="-L$SYSROOT/usr/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$SYSROOT/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$SYSROOT/usr/ccs/lib$OPENJDK_TARGET_CPU_ISADIR"
fi
elif test "x$TOOLCHAIN_TYPE" = xgcc; then
SYSROOT_CFLAGS="--sysroot=\"$SYSROOT\""
SYSROOT_LDFLAGS="--sysroot=\"$SYSROOT\""
elif test "x$TOOLCHAIN_TYPE" = xclang; then
SYSROOT_CFLAGS="-isysroot \"$SYSROOT\""
SYSROOT_LDFLAGS="-isysroot \"$SYSROOT\""
fi
# Propagate the sysroot args to hotspot
LEGACY_EXTRA_CFLAGS="$LEGACY_EXTRA_CFLAGS $SYSROOT_CFLAGS"
LEGACY_EXTRA_CXXFLAGS="$LEGACY_EXTRA_CXXFLAGS $SYSROOT_CFLAGS"
LEGACY_EXTRA_LDFLAGS="$LEGACY_EXTRA_LDFLAGS $SYSROOT_LDFLAGS"
fi
AC_SUBST(SYSROOT_CFLAGS)
AC_SUBST(SYSROOT_LDFLAGS)
])
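As an illustration (hypothetical sysroot path), with a gcc toolchain the block above ends up producing flags along these lines:
# Assumed: TOOLCHAIN_TYPE=gcc and a devkit-provided sysroot.
SYSROOT=/opt/devkit/x86_64-linux-gnu/sysroot
SYSROOT_CFLAGS="--sysroot=\"$SYSROOT\""
SYSROOT_LDFLAGS="--sysroot=\"$SYSROOT\""
# The same flags are appended to LEGACY_EXTRA_CFLAGS/CXXFLAGS/LDFLAGS so the
# HotSpot build sees the same sysroot.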
AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_LIBS],
@ -421,9 +447,9 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
LDFLAGS_JDK="${LDFLAGS_JDK} $with_extra_ldflags"
# Hotspot needs these set in their legacy form
LEGACY_EXTRA_CFLAGS=$with_extra_cflags
LEGACY_EXTRA_CXXFLAGS=$with_extra_cxxflags
LEGACY_EXTRA_LDFLAGS=$with_extra_ldflags
LEGACY_EXTRA_CFLAGS="$LEGACY_EXTRA_CFLAGS $with_extra_cflags"
LEGACY_EXTRA_CXXFLAGS="$LEGACY_EXTRA_CXXFLAGS $with_extra_cxxflags"
LEGACY_EXTRA_LDFLAGS="$LEGACY_EXTRA_LDFLAGS $with_extra_ldflags"
AC_SUBST(LEGACY_EXTRA_CFLAGS)
AC_SUBST(LEGACY_EXTRA_CXXFLAGS)
@ -521,7 +547,13 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_LITTLE_ENDIAN"
fi
else
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_BIG_ENDIAN"
# Same goes for _BIG_ENDIAN. Do we really need to set *ENDIAN on Solaris if they
# are defined in the system?
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_BIG_ENDIAN="
else
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_BIG_ENDIAN"
fi
fi
# Setup target OS define. Use OS target name but in upper case.
@ -735,4 +767,20 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_MISC],
[COMPILER_SUPPORTS_TARGET_BITS_FLAG=true],
[COMPILER_SUPPORTS_TARGET_BITS_FLAG=false])
AC_SUBST(COMPILER_SUPPORTS_TARGET_BITS_FLAG)
case "${TOOLCHAIN_TYPE}" in
microsoft)
CFLAGS_WARNINGS_ARE_ERRORS="/WX"
;;
solstudio)
CFLAGS_WARNINGS_ARE_ERRORS="-errtags -errwarn=%all"
;;
gcc)
CFLAGS_WARNINGS_ARE_ERRORS="-Werror"
;;
clang)
CFLAGS_WARNINGS_ARE_ERRORS="-Werror"
;;
esac
AC_SUBST(CFLAGS_WARNINGS_ARE_ERRORS)
])

File diff suppressed because it is too large

@ -200,6 +200,7 @@ AC_DEFUN_ONCE([HELP_PRINT_SUMMARY_AND_WARNINGS],
printf "\n"
printf "Configuration summary:\n"
printf "* Debug level: $DEBUG_LEVEL\n"
printf "* HS debug level: $HOTSPOT_DEBUG_LEVEL\n"
printf "* JDK variant: $JDK_VARIANT\n"
printf "* JVM variants: $with_jvm_variants\n"
printf "* OpenJDK target: OS: $OPENJDK_TARGET_OS, CPU architecture: $OPENJDK_TARGET_CPU_ARCH, address length: $OPENJDK_TARGET_CPU_BITS\n"

@ -176,6 +176,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_LEVEL],
#
# Set the debug level
# release: no debug information, all optimizations, no asserts.
# optimized: no debug information, all optimizations, no asserts, HotSpot target is 'optimized'.
# fastdebug: debug information (-g), all optimizations, all asserts
# slowdebug: debug information (-g), no optimizations, all asserts
#
@ -189,7 +190,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_LEVEL],
], [ENABLE_DEBUG="no"])
AC_ARG_WITH([debug-level], [AS_HELP_STRING([--with-debug-level],
[set the debug level (release, fastdebug, slowdebug) @<:@release@:>@])],
[set the debug level (release, fastdebug, slowdebug, optimized (HotSpot build only)) @<:@release@:>@])],
[
DEBUG_LEVEL="${withval}"
if test "x$ENABLE_DEBUG" = xyes; then
@ -199,6 +200,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_LEVEL],
AC_MSG_RESULT([$DEBUG_LEVEL])
if test "x$DEBUG_LEVEL" != xrelease && \
test "x$DEBUG_LEVEL" != xoptimized && \
test "x$DEBUG_LEVEL" != xfastdebug && \
test "x$DEBUG_LEVEL" != xslowdebug; then
AC_MSG_ERROR([Allowed debug levels are: release, fastdebug and slowdebug])
@ -235,8 +237,30 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_LEVEL],
HOTSPOT_DEBUG_LEVEL="jvmg"
HOTSPOT_EXPORT="debug"
;;
optimized )
VARIANT="OPT"
FASTDEBUG="false"
DEBUG_CLASSFILES="false"
BUILD_VARIANT_RELEASE="-optimized"
HOTSPOT_DEBUG_LEVEL="optimized"
HOTSPOT_EXPORT="optimized"
;;
esac
# The debug level 'optimized' is a little special because it is currently only
# applicable to the HotSpot build where it means to build a completely
# optimized version of the VM without any debugging code (like for the
# 'release' debug level which is called 'product' in the HotSpot build) but
# with the exception that it can contain additional code which is otherwise
# protected by '#ifndef PRODUCT' macros. These 'optimized' builds are used to
# test new and/or experimental features which are not intended for customer
# shipment. Because these new features need to be tested and benchmarked in
# real world scenarios, we want to build the containing JDK at the 'release'
# debug level.
if test "x$DEBUG_LEVEL" = xoptimized; then
DEBUG_LEVEL="release"
fi
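In practice the new level is requested at configure time; an illustrative invocation:
# Builds HotSpot at the 'optimized' level while the JDK itself is built at 'release'.
bash configure --with-debug-level=optimized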
#####
# Generate the legacy makefile targets for hotspot.
# The hotspot api for selecting the build artifacts, really, needs to be improved.

@ -110,21 +110,23 @@ AC_DEFUN_ONCE([LIB_SETUP_X11],
# Check if the user has specified sysroot, but not --x-includes or --x-libraries.
# Make a simple check for the libraries at the sysroot, and setup --x-includes and
# --x-libraries for the sysroot, if that seems to be correct.
if test "x$SYS_ROOT" != "x/"; then
if test "x$x_includes" = xNONE; then
if test -f "$SYS_ROOT/usr/X11R6/include/X11/Xlib.h"; then
x_includes="$SYS_ROOT/usr/X11R6/include"
elif test -f "$SYS_ROOT/usr/include/X11/Xlib.h"; then
x_includes="$SYS_ROOT/usr/include"
if test "x$OPENJDK_TARGET_OS" = "xlinux"; then
if test "x$SYSROOT" != "x"; then
if test "x$x_includes" = xNONE; then
if test -f "$SYSROOT/usr/X11R6/include/X11/Xlib.h"; then
x_includes="$SYSROOT/usr/X11R6/include"
elif test -f "$SYSROOT/usr/include/X11/Xlib.h"; then
x_includes="$SYSROOT/usr/include"
fi
fi
fi
if test "x$x_libraries" = xNONE; then
if test -f "$SYS_ROOT/usr/X11R6/lib/libX11.so"; then
x_libraries="$SYS_ROOT/usr/X11R6/lib"
elif test "$SYS_ROOT/usr/lib64/libX11.so" && test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
x_libraries="$SYS_ROOT/usr/lib64"
elif test -f "$SYS_ROOT/usr/lib/libX11.so"; then
x_libraries="$SYS_ROOT/usr/lib"
if test "x$x_libraries" = xNONE; then
if test -f "$SYSROOT/usr/X11R6/lib/libX11.so"; then
x_libraries="$SYSROOT/usr/X11R6/lib"
elif test "$SYSROOT/usr/lib64/libX11.so" && test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
x_libraries="$SYSROOT/usr/lib64"
elif test -f "$SYSROOT/usr/lib/libX11.so"; then
x_libraries="$SYSROOT/usr/lib"
fi
fi
fi
fi
@ -146,9 +148,12 @@ AC_DEFUN_ONCE([LIB_SETUP_X11],
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
OPENWIN_HOME="/usr/openwin"
X_CFLAGS="-I$SYSROOT$OPENWIN_HOME/include -I$SYSROOT$OPENWIN_HOME/include/X11/extensions"
X_LIBS="-L$SYSROOT$OPENWIN_HOME/sfw/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$SYSROOT$OPENWIN_HOME/lib$OPENJDK_TARGET_CPU_ISADIR \
-R$OPENWIN_HOME/sfw/lib$OPENJDK_TARGET_CPU_ISADIR \
-R$OPENWIN_HOME/lib$OPENJDK_TARGET_CPU_ISADIR"
fi
AC_SUBST(OPENWIN_HOME)
#
# Weird Sol10 something check...TODO change to try compile
@ -237,14 +242,14 @@ AC_DEFUN_ONCE([LIB_SETUP_CUPS],
# Getting nervous now? Lets poke around for standard Solaris third-party
# package installation locations.
AC_MSG_CHECKING([for cups headers])
if test -s /opt/sfw/cups/include/cups/cups.h; then
if test -s $SYSROOT/opt/sfw/cups/include/cups/cups.h; then
# An SFW package seems to be installed!
CUPS_FOUND=yes
CUPS_CFLAGS="-I/opt/sfw/cups/include"
elif test -s /opt/csw/include/cups/cups.h; then
CUPS_CFLAGS="-I$SYSROOT/opt/sfw/cups/include"
elif test -s $SYSROOT/opt/csw/include/cups/cups.h; then
# A CSW package seems to be installed!
CUPS_FOUND=yes
CUPS_CFLAGS="-I/opt/csw/include"
CUPS_CFLAGS="-I$SYSROOT/opt/csw/include"
fi
AC_MSG_RESULT([$CUPS_FOUND])
fi
@ -398,24 +403,27 @@ AC_DEFUN_ONCE([LIB_SETUP_FREETYPE],
fi
fi
if test "x$FOUND_FREETYPE" != xyes; then
# Check modules using pkg-config, but only if we have it (ugly output results otherwise)
if test "x$PKG_CONFIG" != x; then
PKG_CHECK_MODULES(FREETYPE, freetype2, [FOUND_FREETYPE=yes], [FOUND_FREETYPE=no])
if test "x$FOUND_FREETYPE" = xyes; then
# On solaris, pkg_check adds -lz to freetype libs, which isn't necessary for us.
FREETYPE_LIBS=`$ECHO $FREETYPE_LIBS | $SED 's/-lz//g'`
# 64-bit libs for Solaris x86 are installed in the amd64 subdirectory, change lib to lib/amd64
if test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64; then
FREETYPE_LIBS=`$ECHO $FREETYPE_LIBS | $SED 's?/lib?/lib/amd64?g'`
fi
# BDEPS_CHECK_MODULE will set FREETYPE_CFLAGS and _LIBS, but we don't get a lib path for bundling.
if test "x$BUNDLE_FREETYPE" = xyes; then
AC_MSG_NOTICE([Found freetype using pkg-config, but ignoring since we can not bundle that])
FOUND_FREETYPE=no
else
AC_MSG_CHECKING([for freetype])
AC_MSG_RESULT([yes (using pkg-config)])
# If we have a sysroot, assume that's where we are supposed to look and skip pkg-config.
if test "x$SYSROOT" = x; then
if test "x$FOUND_FREETYPE" != xyes; then
# Check modules using pkg-config, but only if we have it (ugly output results otherwise)
if test "x$PKG_CONFIG" != x; then
PKG_CHECK_MODULES(FREETYPE, freetype2, [FOUND_FREETYPE=yes], [FOUND_FREETYPE=no])
if test "x$FOUND_FREETYPE" = xyes; then
# On solaris, pkg_check adds -lz to freetype libs, which isn't necessary for us.
FREETYPE_LIBS=`$ECHO $FREETYPE_LIBS | $SED 's/-lz//g'`
# 64-bit libs for Solaris x86 are installed in the amd64 subdirectory, change lib to lib/amd64
if test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$OPENJDK_TARGET_CPU" = xx86_64; then
FREETYPE_LIBS=`$ECHO $FREETYPE_LIBS | $SED 's?/lib?/lib/amd64?g'`
fi
# BDEPS_CHECK_MODULE will set FREETYPE_CFLAGS and _LIBS, but we don't get a lib path for bundling.
if test "x$BUNDLE_FREETYPE" = xyes; then
AC_MSG_NOTICE([Found freetype using pkg-config, but ignoring since we can not bundle that])
FOUND_FREETYPE=no
else
AC_MSG_CHECKING([for freetype])
AC_MSG_RESULT([yes (using pkg-config)])
fi
fi
fi
fi
@ -433,21 +441,21 @@ AC_DEFUN_ONCE([LIB_SETUP_FREETYPE],
LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib], [well-known location])
fi
else
if test "x$SYS_ROOT" = "x/"; then
FREETYPE_ROOT=
else
FREETYPE_ROOT="$SYS_ROOT"
fi
FREETYPE_BASE_DIR="$FREETYPE_ROOT/usr"
FREETYPE_BASE_DIR="$SYSROOT/usr"
LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib], [well-known location])
if test "x$FOUND_FREETYPE" != xyes; then
FREETYPE_BASE_DIR="$FREETYPE_ROOT/usr/X11"
FREETYPE_BASE_DIR="$SYSROOT/usr/X11"
LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib], [well-known location])
fi
if test "x$FOUND_FREETYPE" != xyes; then
FREETYPE_BASE_DIR="$FREETYPE_ROOT/usr"
FREETYPE_BASE_DIR="$SYSROOT/usr/sfw"
LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib], [well-known location])
fi
if test "x$FOUND_FREETYPE" != xyes; then
FREETYPE_BASE_DIR="$SYSROOT/usr"
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib/x86_64-linux-gnu], [well-known location])
else
@ -577,8 +585,11 @@ AC_DEFUN_ONCE([LIB_SETUP_ALSA],
if test "x$ALSA_FOUND" = xno; then
BDEPS_CHECK_MODULE(ALSA, alsa, xxx, [ALSA_FOUND=yes], [ALSA_FOUND=no])
fi
if test "x$ALSA_FOUND" = xno; then
PKG_CHECK_MODULES(ALSA, alsa, [ALSA_FOUND=yes], [ALSA_FOUND=no])
# Do not try pkg-config if we have a sysroot set.
if test "x$SYSROOT" = x; then
if test "x$ALSA_FOUND" = xno; then
PKG_CHECK_MODULES(ALSA, alsa, [ALSA_FOUND=yes], [ALSA_FOUND=no])
fi
fi
if test "x$ALSA_FOUND" = xno; then
AC_CHECK_HEADERS([alsa/asoundlib.h],
@ -917,7 +928,7 @@ AC_DEFUN_ONCE([LIB_SETUP_STATIC_LINK_LIBSTDCPP],
# libCrun is the c++ runtime-library with SunStudio (roughly the equivalent of gcc's libstdc++.so)
if test "x$TOOLCHAIN_TYPE" = xsolstudio && test "x$LIBCXX" = x; then
LIBCXX="/usr/lib${OPENJDK_TARGET_CPU_ISADIR}/libCrun.so.1"
LIBCXX="${SYSROOT}/usr/lib${OPENJDK_TARGET_CPU_ISADIR}/libCrun.so.1"
fi
# TODO better (platform agnostic) test

@ -60,6 +60,12 @@ AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_CPU],
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=big
;;
powerpc64le)
VAR_CPU=ppc64
VAR_CPU_ARCH=ppc
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=little
;;
s390)
VAR_CPU=s390
VAR_CPU_ARCH=s390

@ -130,10 +130,8 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
export LIB:=@VS_LIB@
endif
# The sys root where standard headers and libraries are found.
# Usually not needed since the configure script should have
# taken it into account already when setting CFLAGS et al.
SYS_ROOT:=@SYS_ROOT@
SYSROOT_CFLAGS := @SYSROOT_CFLAGS@
SYSROOT_LDFLAGS := @SYSROOT_LDFLAGS@
# Paths to the source code
ADD_SRC_ROOT:=@ADD_SRC_ROOT@
@ -294,7 +292,6 @@ RMICONNECTOR_IIOP=@RMICONNECTOR_IIOP@
# Necessary additional compiler flags to compile X11
X_CFLAGS:=@X_CFLAGS@
X_LIBS:=@X_LIBS@
OPENWIN_HOME:=@OPENWIN_HOME@
# The lowest required version of macosx to enforce compatibility for
MACOSX_VERSION_MIN=@MACOSX_VERSION_MIN@
@ -324,6 +321,8 @@ CXX_O_FLAG_NONE:=@CXX_O_FLAG_NONE@
C_FLAG_DEPS:=@C_FLAG_DEPS@
CXX_FLAG_DEPS:=@CXX_FLAG_DEPS@
CFLAGS_WARNINGS_ARE_ERRORS:=@CFLAGS_WARNINGS_ARE_ERRORS@
# Tools that potentially need to be cross compilation aware.
CC:=@FIXPATH@ @CCACHE@ @CC@

@ -189,6 +189,12 @@ AC_DEFUN_ONCE([TOOLCHAIN_PRE_DETECTION],
# it for DLL resolution in runtime.
if test "x$OPENJDK_BUILD_OS" = "xwindows" && test "x$TOOLCHAIN_TYPE" = "xmicrosoft"; then
TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV
# Reset path to VS_PATH. It will include everything that was on PATH at the time we
# ran TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV.
PATH="$VS_PATH"
# The microsoft toolchain also requires INCLUDE and LIB to be set.
export INCLUDE="$VS_INCLUDE"
export LIB="$VS_LIB"
fi
# autoconf magic only relies on PATH, so update it if tools dir is specified
@ -202,29 +208,11 @@ AC_DEFUN_ONCE([TOOLCHAIN_PRE_DETECTION],
PATH="/usr/ccs/bin:$PATH"
fi
# Finally add TOOLS_DIR at the beginning, to allow --with-tools-dir to
# Finally add TOOLCHAIN_PATH at the beginning, to allow --with-tools-dir to
# override all other locations.
if test "x$TOOLS_DIR" != x; then
PATH=$TOOLS_DIR:$PATH
if test "x$TOOLCHAIN_PATH" != x; then
PATH=$TOOLCHAIN_PATH:$PATH
fi
# If a devkit is found on the builddeps server, then prepend its path to the
# PATH variable. If there are cross compilers available in the devkit, these
# will be found by AC_PROG_CC et al.
DEVKIT=
BDEPS_CHECK_MODULE(DEVKIT, devkit, xxx,
[
# Found devkit
PATH="$DEVKIT/bin:$PATH"
SYS_ROOT="$DEVKIT/${rewritten_target}/sys-root"
if test "x$x_includes" = "xNONE"; then
x_includes="$SYS_ROOT/usr/include/X11"
fi
if test "x$x_libraries" = "xNONE"; then
x_libraries="$SYS_ROOT/usr/lib"
fi
],
[])
])
# Restore path, etc
@ -396,15 +384,15 @@ AC_DEFUN([TOOLCHAIN_FIND_COMPILER],
# used.
$1=
# If TOOLS_DIR is set, check for all compiler names in there first
# If TOOLCHAIN_PATH is set, check for all compiler names in there first
# before checking the rest of the PATH.
# FIXME: Now that we prefix the TOOLS_DIR to the PATH in the PRE_DETECTION
# step, this should not be necessary.
if test -n "$TOOLS_DIR"; then
if test -n "$TOOLCHAIN_PATH"; then
PATH_save="$PATH"
PATH="$TOOLS_DIR"
AC_PATH_PROGS(TOOLS_DIR_$1, $SEARCH_LIST)
$1=$TOOLS_DIR_$1
PATH="$TOOLCHAIN_PATH"
AC_PATH_PROGS(TOOLCHAIN_PATH_$1, $SEARCH_LIST)
$1=$TOOLCHAIN_PATH_$1
PATH="$PATH_save"
fi

@ -141,46 +141,44 @@ AC_DEFUN([TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV],
# Lets extract the variables that are set by vcvarsall.bat/vsvars32.bat/vsvars64.bat
AC_MSG_NOTICE([Trying to extract Visual Studio environment variables])
cd $OUTPUT_ROOT
# FIXME: The code betweeen ---- was inlined from a separate script and is not properly adapted
# to autoconf standards.
#----
# We need to create a couple of temporary files.
VS_ENV_TMP_DIR="$OUTPUT_ROOT/vs-env"
$MKDIR -p $VS_ENV_TMP_DIR
# Cannot use the VS10 setup script directly (since it only updates the DOS subshell environment)
# but calculate the difference in Cygwin environment before/after running it and then
# apply the diff.
# Cannot use the VS10 setup script directly (since it only updates the DOS subshell environment).
# Instead create a shell script which will set the relevant variables when run.
WINPATH_VS_ENV_CMD="$VS_ENV_CMD"
BASIC_WINDOWS_REWRITE_AS_WINDOWS_MIXED_PATH([WINPATH_VS_ENV_CMD])
WINPATH_BASH="$BASH"
BASIC_WINDOWS_REWRITE_AS_WINDOWS_MIXED_PATH([WINPATH_BASH])
if test "x$OPENJDK_BUILD_OS_ENV" = xwindows.cygwin; then
_vs10varsall=`cygpath -a -m -s "$VS_ENV_CMD"`
_dosvs10varsall=`cygpath -a -w -s $_vs10varsall`
_dosbash=`cygpath -a -w -s \`which bash\`.*`
else
_dosvs10varsall=`cmd //c echo $VS_ENV_CMD`
_dosbash=`cmd //c echo \`which bash\``
fi
# generate the set of exported vars before/after the vs10 setup
$ECHO "@echo off" > localdevenvtmp.bat
$ECHO "$_dosbash -c \"export -p\" > localdevenvtmp.export0" >> localdevenvtmp.bat
$ECHO "call $_dosvs10varsall $VS_ENV_ARGS" >> localdevenvtmp.bat
$ECHO "$_dosbash -c \"export -p\" > localdevenvtmp.export1" >> localdevenvtmp.bat
# Generate a DOS batch file which runs $VS_ENV_CMD, and then creates a shell
# script (executable by bash) that will setup the important variables.
EXTRACT_VC_ENV_BAT_FILE="$VS_ENV_TMP_DIR/extract-vs-env.bat"
$ECHO "@echo off" > $EXTRACT_VC_ENV_BAT_FILE
# This will end up something like:
# call C:/progra~2/micros~2.0/vc/bin/amd64/vcvars64.bat
$ECHO "call $WINPATH_VS_ENV_CMD $VS_ENV_ARGS" >> $EXTRACT_VC_ENV_BAT_FILE
# These will end up something like:
# C:/CygWin/bin/bash -c 'echo VS_PATH=\"$PATH\" > localdevenv.sh
# The trailing space for everyone except PATH is no typo, but is needed due
# to trailing \ in the Windows paths. These will be stripped later.
$ECHO "$WINPATH_BASH -c 'echo VS_PATH="'\"$PATH\" > set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
$ECHO "$WINPATH_BASH -c 'echo VS_INCLUDE="'\"$INCLUDE \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
$ECHO "$WINPATH_BASH -c 'echo VS_LIB="'\"$LIB \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
$ECHO "$WINPATH_BASH -c 'echo VCINSTALLDIR="'\"$VCINSTALLDIR \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
$ECHO "$WINPATH_BASH -c 'echo WindowsSdkDir="'\"$WindowsSdkDir \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
$ECHO "$WINPATH_BASH -c 'echo WINDOWSSDKDIR="'\"$WINDOWSSDKDIR \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
# Now execute the newly created bat file.
# The | cat is to stop SetEnv.Cmd to mess with system colors on msys
cmd /c localdevenvtmp.bat | cat
# apply the diff (less some non-vs10 vars named by "!")
$SORT localdevenvtmp.export0 | $GREP -v "!" > localdevenvtmp.export0.sort
$SORT localdevenvtmp.export1 | $GREP -v "!" > localdevenvtmp.export1.sort
$COMM -1 -3 localdevenvtmp.export0.sort localdevenvtmp.export1.sort > localdevenv.sh
# cleanup
$RM localdevenvtmp*
#----
# The | cat is to stop SetEnv.Cmd to mess with system colors on msys.
# Change directory so we don't need to mess with Windows paths in redirects.
cd $VS_ENV_TMP_DIR
cmd /c extract-vs-env.bat | $CAT
cd $CURDIR
if test ! -s $OUTPUT_ROOT/localdevenv.sh; then
AC_MSG_RESULT([no])
if test ! -s $VS_ENV_TMP_DIR/set-vs-env.sh; then
AC_MSG_NOTICE([Could not successfully extract the environment variables needed for the VS setup.])
AC_MSG_NOTICE([Try setting --with-tools-dir to the VC/bin directory within the VS installation])
AC_MSG_NOTICE([or run "bash.exe -l" from a VS command prompt and then run configure from there.])
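For orientation, the generated set-vs-env.sh is just a list of shell assignments that configure later sources; it ends up looking roughly like this (all values are placeholders, not real output):
# Illustrative contents of $VS_ENV_TMP_DIR/set-vs-env.sh.
VS_PATH="/cygdrive/c/path/to/vc/bin/amd64:/usr/local/bin:/usr/bin"
VS_INCLUDE="C:\path\to\vc\include "
VS_LIB="C:\path\to\vc\lib\amd64 "
VCINSTALLDIR="C:\path\to\vc\ "
WindowsSdkDir="C:\path\to\winsdk\ "
WINDOWSSDKDIR="C:\path\to\winsdk\ "
# Note the trailing spaces/backslashes; they are stripped later in the macro.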
@ -190,30 +188,36 @@ AC_DEFUN([TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV],
# Now set all paths and other env variables. This will allow the rest of
# the configure script to find and run the compiler in the proper way.
AC_MSG_NOTICE([Setting extracted environment variables])
. $OUTPUT_ROOT/localdevenv.sh
. $VS_ENV_TMP_DIR/set-vs-env.sh
# Now we have VS_PATH, VS_INCLUDE, VS_LIB. For further checking, we
# also define VCINSTALLDIR, WindowsSdkDir and WINDOWSSDKDIR.
else
# We did not find a vsvars bat file, let's hope we are run from a VS command prompt.
AC_MSG_NOTICE([Cannot locate a valid Visual Studio installation, checking current environment])
fi
# At this point, we should have corrent variables in the environment, or we can't continue.
# At this point, we should have correct variables in the environment, or we can't continue.
AC_MSG_CHECKING([for Visual Studio variables])
if test "x$VCINSTALLDIR" != x || test "x$WindowsSDKDir" != x || test "x$WINDOWSSDKDIR" != x; then
if test "x$INCLUDE" = x || test "x$LIB" = x; then
if test "x$VS_INCLUDE" = x || test "x$VS_LIB" = x; then
AC_MSG_RESULT([present but broken])
AC_MSG_ERROR([Your VC command prompt seems broken, INCLUDE and/or LIB is missing.])
else
AC_MSG_RESULT([ok])
# Remove any trailing \ from INCLUDE and LIB to avoid trouble in spec.gmk.
VS_INCLUDE=`$ECHO "$INCLUDE" | $SED 's/\\\\$//'`
VS_LIB=`$ECHO "$LIB" | $SED 's/\\\\$//'`
# Remove any paths containing # (typically F#) as that messes up make
PATH=`$ECHO "$PATH" | $SED 's/[[^:#]]*#[^:]*://g'`
VS_PATH="$PATH"
# Remove any trailing "\" and " " from the variables.
VS_INCLUDE=`$ECHO "$VS_INCLUDE" | $SED 's/\\\\* *$//'`
VS_LIB=`$ECHO "$VS_LIB" | $SED 's/\\\\* *$//'`
VCINSTALLDIR=`$ECHO "$VCINSTALLDIR" | $SED 's/\\\\* *$//'`
WindowsSDKDir=`$ECHO "$WindowsSDKDir" | $SED 's/\\\\* *$//'`
WINDOWSSDKDIR=`$ECHO "$WINDOWSSDKDIR" | $SED 's/\\\\* *$//'`
# Remove any paths containing # (typically F#) as that messes up make. This
# is needed if visual studio was installed with F# support.
VS_PATH=`$ECHO "$VS_PATH" | $SED 's/[[^:#]]*#[^:]*://g'`
AC_SUBST(VS_PATH)
AC_SUBST(VS_INCLUDE)
AC_SUBST(VS_LIB)
AC_SUBST(VS_PATH)
fi
else
AC_MSG_RESULT([not found])

@ -114,7 +114,7 @@ diff_text() {
fi
if test "x$SUFFIX" = "xproperties"; then
# Run through nawk to add possibly missing newline at end of file.
$CAT $OTHER_FILE | $NAWK '{ print }' > $OTHER_FILE.cleaned
$CAT $OTHER_FILE | $NAWK '{ print }' | LC_ALL=C $SORT > $OTHER_FILE.cleaned
# Disable this exception since we aren't changing the properties cleaning method yet.
# $CAT $OTHER_FILE | $SED -e 's/\([^\\]\):/\1\\:/g' -e 's/\([^\\]\)=/\1\\=/g' -e 's/#.*/#/g' \
# | $SED -f "$SRC_ROOT/common/makefiles/support/unicode2x.sed" \

@ -24,12 +24,58 @@
#
# Shell script for a fast parallel forest command
command="$1"
pull_extra_base="$2"
if [ "" = "$command" ] ; then
echo No command to hg supplied!
exit 1
global_opts=""
status_output="/dev/stdout"
qflag="false"
vflag="false"
sflag="false"
while [ $# -gt 0 ]
do
case $1 in
-q | --quiet )
qflag="true"
global_opts="${global_opts} -q"
status_output="/dev/null"
;;
-v | --verbose )
vflag="true"
global_opts="${global_opts} -v"
;;
-s | --sequential )
sflag="true"
;;
'--' ) # no more options
shift; break
;;
-*) # bad option
usage
;;
* ) # non option
break
;;
esac
shift
done
command="$1"; shift
command_args="$@"
usage() {
echo "usage: $0 [-q|--quiet] [-v|--verbose] [-s|--sequential] [--] <command> [commands...]" > ${status_output}
exit 1
}
if [ "x" = "x$command" ] ; then
echo "ERROR: No command to hg supplied!"
usage
fi
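For example (invocations are illustrative; the path assumes the script's usual location under common/bin), the reworked option handling allows:
# Run 'hg status' across the whole forest, suppressing per-repository chatter:
sh common/bin/hgforest.sh --quiet status
# Pull one repository at a time, with verbose hg output:
sh common/bin/hgforest.sh -s -v pull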
# Clean out the temporary directory that stores the pid files.
@ -40,17 +86,17 @@ mkdir -p ${tmp}
safe_interrupt () {
if [ -d ${tmp} ]; then
if [ "`ls ${tmp}/*.pid`" != "" ]; then
echo "Waiting for processes ( `cat ${tmp}/*.pid | tr '\n' ' '`) to terminate nicely!"
echo "Waiting for processes ( `cat ${tmp}/*.pid | tr '\n' ' '`) to terminate nicely!" > ${status_output}
sleep 1
# Pipe stderr to dev/null to silence kill, that complains when trying to kill
# a subprocess that has already exited.
kill -TERM `cat ${tmp}/*.pid | tr '\n' ' '` 2> /dev/null
wait
echo Interrupt complete!
echo "Interrupt complete!" > ${status_output}
fi
rm -f -r ${tmp}
fi
rm -f -r ${tmp}
exit 1
exit 130
}
nice_exit () {
@ -58,39 +104,44 @@ nice_exit () {
if [ "`ls ${tmp}`" != "" ]; then
wait
fi
rm -f -r ${tmp}
fi
rm -f -r ${tmp}
}
trap 'safe_interrupt' INT QUIT
trap 'nice_exit' EXIT
subrepos="corba jaxp jaxws langtools jdk hotspot nashorn"
subrepos_extra="closed jdk/src/closed jdk/make/closed jdk/test/closed hotspot/make/closed hotspot/src/closed hotspot/test/closed deploy install sponsors pubs"
# Only look in specific locations for possible forests (avoids long searches)
pull_default=""
repos=""
repos_extra=""
if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then
subrepos="corba jaxp jaxws langtools jdk hotspot nashorn"
if [ -f .hg/hgrc ] ; then
pull_default=`hg paths default`
if [ "${pull_default}" = "" ] ; then
echo "ERROR: Need initial clone with 'hg paths default' defined"
exit 1
fi
fi
if [ "${pull_default}" = "" ] ; then
echo "ERROR: Need initial repository to use this script"
if [ "${command}" = "clone" -o "${command}" = "fclone" -o "${command}" = "tclone" ] ; then
if [ ! -f .hg/hgrc ] ; then
echo "ERROR: Need initial repository to use this script" > ${status_output}
exit 1
fi
pull_default=`hg paths default`
if [ "${pull_default}" = "" ] ; then
echo "ERROR: Need initial clone with 'hg paths default' defined" > ${status_output}
exit 1
fi
for i in ${subrepos} ; do
if [ ! -f ${i}/.hg/hgrc ] ; then
repos="${repos} ${i}"
fi
done
if [ "${pull_extra_base}" != "" ] ; then
subrepos_extra="closed jdk/src/closed jdk/make/closed jdk/test/closed hotspot/make/closed hotspot/src/closed hotspot/test/closed deploy install sponsors pubs"
if [ "${command_args}" != "" ] ; then
pull_default_tail=`echo ${pull_default} | sed -e 's@^.*://[^/]*/\(.*\)@\1@'`
pull_extra="${pull_extra_base}/${pull_default_tail}"
if [ "x${pull_default}" = "x${pull_default_tail}" ] ; then
echo "ERROR: Need initial clone from non-local source" > ${status_output}
exit 1
fi
pull_extra="${command_args}/${pull_default_tail}"
for i in ${subrepos_extra} ; do
if [ ! -f ${i}/.hg/hgrc ] ; then
repos_extra="${repos_extra} ${i}"
@ -100,78 +151,115 @@ if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then
at_a_time=2
# Any repos to deal with?
if [ "${repos}" = "" -a "${repos_extra}" = "" ] ; then
echo "No repositories to process." > ${status_output}
exit
fi
else
hgdirs=`ls -d ./.hg ./*/.hg ./*/*/.hg ./*/*/*/.hg ./*/*/*/*/.hg 2>/dev/null`
# Derive repository names from the .hg directory locations
for i in ${hgdirs} ; do
repos="${repos} `echo ${i} | sed -e 's@/.hg$@@'`"
for i in . ${subrepos} ${subrepos_extra} ; do
if [ -d ${i}/.hg ] ; then
repos="${repos} ${i}"
fi
done
# Any repos to deal with?
if [ "${repos}" = "" ] ; then
echo "No repositories to process." > ${status_output}
exit
fi
# any of the repos locked?
for i in ${repos} ; do
if [ -h ${i}/.hg/store/lock -o -f ${i}/.hg/store/lock ] ; then
locked="${i} ${locked}"
fi
done
at_a_time=8
# Any repos to deal with?
if [ "${repos}" = "" ] ; then
echo "No repositories to process."
exit
fi
if [ "${locked}" != "" ] ; then
echo "These repositories are locked: ${locked}"
exit
echo "ERROR: These repositories are locked: ${locked}" > ${status_output}
exit 1
fi
at_a_time=8
fi
# Echo out what repositories we do a command on.
echo "# Repositories: ${repos} ${repos_extra}"
echo
echo "# Repositories: ${repos} ${repos_extra}" > ${status_output}
# Run the supplied command on all repos in parallel.
n=0
for i in ${repos} ${repos_extra} ; do
n=`expr ${n} '+' 1`
repopidfile=`echo ${i} | sed -e 's@./@@' -e 's@/@_@g'`
reponame=`echo ${i} | sed -e :a -e 's/^.\{1,20\}$/ &/;ta'`
pull_base="${pull_default}"
for j in $repos_extra ; do
if [ "${command}" = "serve" ] ; then
# "serve" is run for all the repos.
(
(
(
echo "[web]"
echo "description = $(basename $(pwd))"
echo "allow_push = *"
echo "push_ssl = False"
echo "[paths]"
for i in ${repos} ${repos_extra} ; do
if [ "${i}" != "." ] ; then
echo "/$(basename $(pwd))/${i} = ${i}"
else
echo "/$(basename $(pwd)) = $(pwd)"
fi
done
) > ${tmp}/serve.web-conf
echo "serving root repo $(basename $(pwd))"
(PYTHONUNBUFFERED=true hg${global_opts} serve -A ${status_output} -E ${status_output} --pid-file ${tmp}/serve.pid --web-conf ${tmp}/serve.web-conf; echo "$?" > ${tmp}/serve.pid.rc ) 2>&1 &
) 2>&1 | sed -e "s@^@serve: @" > ${status_output}
) &
else
# Run the supplied command on all repos in parallel.
n=0
for i in ${repos} ${repos_extra} ; do
n=`expr ${n} '+' 1`
repopidfile=`echo ${i} | sed -e 's@./@@' -e 's@/@_@g'`
reponame=`echo ${i} | sed -e :a -e 's/^.\{1,20\}$/ &/;ta'`
pull_base="${pull_default}"
for j in $repos_extra ; do
if [ "$i" = "$j" ] ; then
pull_base="${pull_extra}"
fi
done
(
done
(
if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then
pull_newrepo="`echo ${pull_base}/${i} | sed -e 's@\([^:]/\)//*@\1@g'`"
echo hg clone ${pull_newrepo} ${i}
path="`dirname ${i}`"
if [ "${path}" != "." ] ; then
times=0
while [ ! -d "${path}" ] ## nested repo, ensure containing dir exists
do
times=`expr ${times} '+' 1`
if [ `expr ${times} '%' 10` -eq 0 ] ; then
echo ${path} still not created, waiting...
fi
sleep 5
done
(
if [ "${command}" = "clone" -o "${command}" = "fclone" -o "${command}" = "tclone" ] ; then
pull_newrepo="`echo ${pull_base}/${i} | sed -e 's@\([^:]/\)//*@\1@g'`"
path="`dirname ${i}`"
if [ "${path}" != "." ] ; then
times=0
while [ ! -d "${path}" ] ## nested repo, ensure containing dir exists
do
times=`expr ${times} '+' 1`
if [ `expr ${times} '%' 10` -eq 0 ] ; then
echo "${path} still not created, waiting..." > ${status_output}
fi
sleep 5
done
fi
echo "hg clone ${pull_newrepo} ${i}" > ${status_output}
(PYTHONUNBUFFERED=true hg${global_opts} clone ${pull_newrepo} ${i}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
else
echo "cd ${i} && hg${global_opts} ${command} ${command_args}" > ${status_output}
cd ${i} && (PYTHONUNBUFFERED=true hg${global_opts} ${command} ${command_args}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
fi
(PYTHONUNBUFFERED=true hg clone ${pull_newrepo} ${i}; echo "$?" > ${tmp}/${repopidfile}.pid.rc )&
else
echo "cd ${i} && hg $*"
cd ${i} && (PYTHONUNBUFFERED=true hg "$@"; echo "$?" > ${tmp}/${repopidfile}.pid.rc )&
fi
echo $! > ${tmp}/${repopidfile}.pid
) 2>&1 | sed -e "s@^@${reponame}: @") &
if [ `expr ${n} '%' ${at_a_time}` -eq 0 ] ; then
sleep 2
echo Waiting 5 secs before spawning next background command.
sleep 3
fi
done
echo $! > ${tmp}/${repopidfile}.pid
) 2>&1 | sed -e "s@^@${reponame}: @" > ${status_output}
) &
if [ `expr ${n} '%' ${at_a_time}` -eq 0 -a "${sflag}" = "false" ] ; then
sleep 2
echo "Waiting 5 secs before spawning next background command." > ${status_output}
sleep 3
fi
if [ "${sflag}" = "true" ] ; then
wait
fi
done
fi
# Wait for all hg commands to complete
wait
@ -181,7 +269,8 @@ if [ -d ${tmp} ]; then
for rc in ${tmp}/*.pid.rc ; do
exit_code=`cat ${rc} | tr -d ' \n\r'`
if [ "${exit_code}" != "0" ] ; then
echo "WARNING: ${rc} exited abnormally."
repo="`echo ${rc} | sed -e s@^${tmp}@@ -e 's@/*\([^/]*\)\.pid\.rc$@\1@' -e 's@_@/@g'`"
echo "WARNING: ${repo} exited abnormally." > ${status_output}
ec=1
fi
done

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include <string.h>
#include <malloc.h>
void report_error()
void report_error(char const * msg)
{
LPVOID lpMsgBuf;
DWORD dw = GetLastError();
@ -46,8 +46,8 @@ void report_error()
NULL);
fprintf(stderr,
"Could not start process! Failed with error %d: %s\n",
dw, lpMsgBuf);
"%s Failed with error %d: %s\n",
msg, dw, lpMsgBuf);
LocalFree(lpMsgBuf);
}
@ -56,7 +56,7 @@ void report_error()
* Test if pos points to /cygdrive/_/ where _ can
* be any character.
*/
int is_cygdrive_here(int pos, char *in, int len)
int is_cygdrive_here(int pos, char const *in, int len)
{
// Length of /cygdrive/c/ is 12
if (pos+12 > len) return 0;
@ -81,16 +81,17 @@ int is_cygdrive_here(int pos, char *in, int len)
* Works in place since drive letter is always
* shorter than /cygdrive/
*/
char *replace_cygdrive_cygwin(char *in)
char *replace_cygdrive_cygwin(char const *in)
{
int len = strlen(in);
char *out = malloc(len+1);
size_t len = strlen(in);
char *out = (char*) malloc(len+1);
int i,j;
if (len < 12) {
strcpy(out, in);
memmove(out, in, len + 1);
return out;
}
for (i = 0, j = 0; i<len;) {
if (is_cygdrive_here(i, in, len)) {
out[j++] = in[i+10];
@ -102,7 +103,7 @@ char *replace_cygdrive_cygwin(char *in)
j++;
}
}
out[j] = 0;
out[j] = '\0';
return out;
}
@ -110,7 +111,7 @@ void append(char **b, size_t *bl, size_t *u, char *add, size_t addlen)
{
while ( (addlen+*u+1) > *bl) {
*bl *= 2;
*b = realloc(*b, *bl);
*b = (char*) realloc(*b, *bl);
}
memcpy(*b+*u, add, addlen);
*u += addlen;
@ -125,7 +126,7 @@ char *replace_substring(char *in, char *sub, char *rep)
int in_len = strlen(in);
int sub_len = strlen(sub);
int rep_len = strlen(rep);
char *out = malloc(in_len - sub_len + rep_len + 1);
char *out = (char *) malloc(in_len - sub_len + rep_len + 1);
char *p;
if (!(p = strstr(in, sub))) {
@ -145,7 +146,7 @@ char *replace_substring(char *in, char *sub, char *rep)
char* msys_path_list; // @-separated list of paths prefix to look for
char* msys_path_list_end; // Points to last \0 in msys_path_list.
void setup_msys_path_list(char* argument)
void setup_msys_path_list(char const * argument)
{
char* p;
char* drive_letter_pos;
@ -173,7 +174,7 @@ void setup_msys_path_list(char* argument)
} while (p != NULL);
}
char *replace_cygdrive_msys(char *in)
char *replace_cygdrive_msys(char const *in)
{
char* str;
char* prefix;
@ -195,12 +196,12 @@ char *replace_cygdrive_msys(char *in)
return str;
}
char*(*replace_cygdrive)(char *in) = NULL;
char*(*replace_cygdrive)(char const *in) = NULL;
char *files_to_delete[1024];
int num_files_to_delete = 0;
char *fix_at_file(char *in)
char *fix_at_file(char const *in)
{
char *tmpdir;
char name[2048];
@ -222,9 +223,13 @@ char *fix_at_file(char *in)
exit(-1);
}
tmpdir = getenv("TMP");
tmpdir = getenv("TEMP");
if (tmpdir == NULL) {
#if _WIN64
tmpdir = "c:/cygwin64/tmp";
#else
tmpdir = "c:/cygwin/tmp";
#endif
}
_snprintf(name, sizeof(name), "%s\\atfile_XXXXXX", tmpdir);
@ -240,7 +245,7 @@ char *fix_at_file(char *in)
exit(-1);
}
buffer = malloc(buflen);
buffer = (char*) malloc(buflen);
while((blocklen = fread(block,1,sizeof(block),atin)) > 0) {
append(&buffer, &buflen, &used, block, blocklen);
}
@ -257,84 +262,229 @@ char *fix_at_file(char *in)
fclose(atout);
free(fixed);
free(buffer);
files_to_delete[num_files_to_delete] = malloc(strlen(name)+1);
files_to_delete[num_files_to_delete] = (char*) malloc(strlen(name)+1);
strcpy(files_to_delete[num_files_to_delete], name);
num_files_to_delete++;
atname = malloc(strlen(name)+2);
atname = (char*) malloc(strlen(name)+2);
atname[0] = '@';
strcpy(atname+1, name);
return atname;
}
int main(int argc, char **argv)
// given an argument, convert it to the windows command line safe quoted version
// using rules from:
// http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx
// caller is responsible for freeing both input and output.
char * quote_arg(char const * in_arg) {
char *quoted = NULL;
char *current = quoted;
int pass;
if(strpbrk(in_arg, " \t\n\v\r\\\"") == NULL) {
return _strdup(in_arg);
}
// process the arg twice. Once to calculate the size and then to copy it.
for(pass=1; pass<=2; pass++) {
char const *arg = in_arg;
// initial "
if(pass == 2) {
*current = '\"';
}
current++;
// process string to be quoted until NUL
do {
int escapes = 0;
while (*arg == '\\') {
// count escapes.
escapes++;
arg++;
}
if (*arg == '\0') {
// escape the escapes before final "
escapes *= 2;
} else if (*arg == '"') {
// escape the escapes and the "
escapes = escapes * 2 + 1;
} else {
// escapes aren't special, just echo them.
}
// emit some escapes
while (escapes > 0) {
if (pass == 2) {
*current = '\\';
}
current++;
escapes--;
}
// and the current char
if (pass == 2) {
*current = *arg;
}
current++;
} while( *arg++ != '\0');
// allocate the buffer
if (pass == 1) {
size_t alloc = (size_t) (current - quoted + (ptrdiff_t) 2);
current = quoted = (char*) calloc(alloc, sizeof(char));
}
}
// final " and \0
*(current - 1) = '"';
*current = '\0';
return quoted;
}
int main(int argc, char const ** argv)
{
STARTUPINFO si;
PROCESS_INFORMATION pi;
unsigned short rc;
char *new_at_file;
char *old_at_file;
char *line;
int i;
char *current;
int i, cmd;
DWORD exitCode;
if (argc<3 || argv[1][0] != '-' || (argv[1][1] != 'c' && argv[1][1] != 'm')) {
fprintf(stderr, "Usage: fixpath -c|m<path@path@...> /cygdrive/c/WINDOWS/notepad.exe /cygdrive/c/x/test.txt\n");
if (argc<2 || argv[1][0] != '-' || (argv[1][1] != 'c' && argv[1][1] != 'm')) {
fprintf(stderr, "Usage: fixpath -c|m<path@path@...> /cygdrive/c/WINDOWS/notepad.exe [/cygdrive/c/x/test.txt|@/cygdrive/c/x/atfile]\n");
exit(0);
}
if (getenv("DEBUG_FIXPATH") != NULL) {
fprintf(stderr, "fixpath input line >%s<\n", strstr(GetCommandLine(), argv[1]));
char const * cmdline = GetCommandLine();
fprintf(stderr, "fixpath input line >%s<\n", strstr( cmdline , argv[1]));
}
if (argv[1][1] == 'c' && argv[1][2] == '\0') {
if (getenv("DEBUG_FIXPATH") != NULL) {
fprintf(stderr, "using cygwin mode\n");
fprintf(stderr, "fixpath using cygwin mode\n");
}
replace_cygdrive = replace_cygdrive_cygwin;
} else if (argv[1][1] == 'm') {
if (getenv("DEBUG_FIXPATH") != NULL) {
fprintf(stderr, "using msys mode, with path list: %s\n", &argv[1][2]);
fprintf(stderr, "fixpath using msys mode, with path list: %s\n", &argv[1][2]);
}
setup_msys_path_list(argv[1]);
replace_cygdrive = replace_cygdrive_msys;
} else {
fprintf(stderr, "Unknown mode: %s\n", argv[1]);
fprintf(stderr, "fixpath Unknown mode: %s\n", argv[1]);
exit(-1);
}
line = replace_cygdrive(strstr(GetCommandLine(), argv[2]));
for (i=1; i<argc; ++i) {
if (argv[i][0] == '@') {
// Found at-file! Fix it!
old_at_file = replace_cygdrive(argv[i]);
new_at_file = fix_at_file(old_at_file);
line = replace_substring(line, old_at_file, new_at_file);
}
i = 2;
// handle assignments
while (i < argc) {
char const * assignment = strchr(argv[i], '=');
if (assignment != NULL && assignment != argv[i]) {
size_t var_len = (size_t) (assignment - argv[i] + (ptrdiff_t) 1);
char *var = (char *) calloc(var_len, sizeof(char));
char *val = replace_cygdrive(assignment + 1);
memmove(var, argv[i], var_len);
var[var_len - 1] = '\0';
strupr(var);
if (getenv("DEBUG_FIXPATH") != NULL) {
fprintf(stderr, "fixpath setting var >%s< to >%s<\n", var, val);
}
rc = SetEnvironmentVariable(var, val);
if(!rc) {
// Could not set var for some reason. Try to report why.
const int msg_len = 80 + var_len + strlen(val);
char * msg = (char *) alloca(msg_len);
_snprintf_s(msg, msg_len, _TRUNCATE, "Could not set environment variable [%s=%s]", var, val);
report_error(msg);
exit(1);
}
free(var);
free(val);
} else {
// no more assignments;
break;
}
i++;
}
// remember index of the command
cmd = i;
// handle the command and its args.
while (i < argc) {
char const *replaced = replace_cygdrive(argv[i]);
if(replaced[0] == '@') {
// Found at-file! Fix it!
replaced = fix_at_file(replaced);
}
argv[i] = quote_arg(replaced);
i++;
}
// determine the length of the line
line = NULL;
// args
for(i = cmd; i < argc; i++) {
line += (ptrdiff_t) strlen(argv[i]);
}
// spaces and null
line += (ptrdiff_t) (argc - cmd + 1);
// allocate
line = (char*) calloc(line - (char*) NULL, sizeof(char));
// copy in args.
current = line;
for(i = cmd; i < argc; i++) {
ptrdiff_t len = strlen(argv[i]);
if (i != cmd) {
*current++ = ' ';
}
memmove(current, argv[i], len);
current += len;
}
*current = '\0';
if (getenv("DEBUG_FIXPATH") != NULL) {
fprintf(stderr, "fixpath converted line >%s<\n", line);
}
if(cmd == argc) {
if (getenv("DEBUG_FIXPATH") != NULL) {
fprintf(stderr, "fixpath no command provided!\n");
}
exit(0);
}
ZeroMemory(&si,sizeof(si));
si.cb=sizeof(si);
ZeroMemory(&pi,sizeof(pi));
fflush(stderr);
fflush(stdout);
rc = CreateProcess(NULL,
line,
0,
0,
TRUE,
0,
0,
0,
NULL,
NULL,
&si,
&pi);
if(!rc) {
// Could not start process for some reason. Try to report why:
report_error();
exit(rc);
report_error("Could not start process!");
exit(126);
}
WaitForSingleObject(pi.hProcess,INFINITE);
@ -342,15 +492,21 @@ int main(int argc, char **argv)
if (getenv("DEBUG_FIXPATH") != NULL) {
for (i=0; i<num_files_to_delete; ++i) {
fprintf(stderr, "Not deleting temporary fixpath file %s\n",
fprintf(stderr, "fixpath Not deleting temporary file %s\n",
files_to_delete[i]);
}
}
else {
} else {
for (i=0; i<num_files_to_delete; ++i) {
remove(files_to_delete[i]);
}
}
if (exitCode != 0) {
if (getenv("DEBUG_FIXPATH") != NULL) {
fprintf(stderr, "fixpath exit code %d\n",
exitCode);
}
}
exit(exitCode);
}

@ -249,3 +249,4 @@ d338b892a13db19b093f85cf5f949a4504e4d31f jdk9-b03
1ed19de263e1e0772da0269118cdd9deeb9fff04 jdk9-b04
167c39eb44731a5d66770d0f00e231164653a2ff jdk9-b05
a4bf701ac316946c2e5e83138ad8e687da6a4b30 jdk9-b06
6c8563600a71394c949405189ddd66267a88d8cd jdk9-b07

@ -253,6 +253,13 @@ public class SocketOrChannelAcceptorImpl
// registered with the selector. Otherwise if the bytes
// are read on the connection it will attempt a time stamp
// but the cache will be null, resulting in NPE.
// A connection needs to be timestamped before putting to the cache.
// Otherwise the newly created connection (with 0 timestamp) could be
// incorrectly reclaimed by concurrent reclaim() call OR if there
// will be no events on this connection then it could be reclaimed
// by upcoming reclaim() call.
getConnectionCache().stampTime(connection);
getConnectionCache().put(this, connection);
if (connection.shouldRegisterServerReadEvent()) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,8 @@
package com.sun.corba.se.spi.orb;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Map ;
import java.util.HashMap ;
import java.util.Properties ;
@ -97,8 +99,7 @@ import com.sun.corba.se.impl.logging.OMGSystemException ;
import com.sun.corba.se.impl.presentation.rmi.PresentationManagerImpl ;
import sun.awt.AppContext;
import sun.corba.SharedSecrets;
import sun.misc.JavaAWTAccess;
public abstract class ORB extends com.sun.corba.se.org.omg.CORBA.ORB
implements Broker, TypeCodeFactory
@ -170,6 +171,13 @@ public abstract class ORB extends com.sun.corba.se.org.omg.CORBA.ORB
// representing LogDomain and ExceptionGroup.
private Map wrapperMap ;
static class Holder {
static final PresentationManager defaultPresentationManager =
setupPresentationManager();
}
private static final Map<Object, PresentationManager> pmContexts = new HashMap<>();
private static Map staticWrapperMap = new ConcurrentHashMap();
protected MonitoringManager monitoringManager;
@ -201,8 +209,9 @@ public abstract class ORB extends com.sun.corba.se.org.omg.CORBA.ORB
try {
// First try the configured class name, if any
Class<?> cls = SharedSecrets.getJavaCorbaAccess().loadClass( className ) ;
sff = (PresentationManager.StubFactoryFactory)cls.newInstance() ;
Class<?> cls =
sun.corba.SharedSecrets.getJavaCorbaAccess().loadClass(className);
sff = (PresentationManager.StubFactoryFactory)cls.newInstance();
} catch (Exception exc) {
// Use the default. Log the error as a warning.
staticWrapper.errorInSettingDynamicStubFactoryFactory(
@ -235,13 +244,34 @@ public abstract class ORB extends com.sun.corba.se.org.omg.CORBA.ORB
*/
public static PresentationManager getPresentationManager()
{
AppContext ac = AppContext.getAppContext();
PresentationManager pm = (PresentationManager) ac.get(PresentationManager.class);
if (pm == null) {
pm = setupPresentationManager();
ac.put(PresentationManager.class, pm);
SecurityManager sm = System.getSecurityManager();
JavaAWTAccess javaAwtAccess = sun.misc.SharedSecrets.getJavaAWTAccess();
if (sm != null && javaAwtAccess != null) {
Object appletContext;
try {
Class<?> clazz = JavaAWTAccess.class;
Method method = clazz.getMethod("getAppletContext");
appletContext = method.invoke(javaAwtAccess);
} catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
InternalError err = new InternalError();
err.initCause(e);
throw err;
}
if (appletContext != null) {
synchronized (pmContexts) {
PresentationManager pm = pmContexts.get(appletContext);
if (pm == null) {
pm = setupPresentationManager();
pmContexts.put(appletContext, pm);
}
return pm;
}
}
}
return pm;
// No security manager or AppletAppContext
return Holder.defaultPresentationManager;
}
/** Get the appropriate StubFactoryFactory. This

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -44,11 +44,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -48,11 +48,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

@ -18,26 +18,26 @@ href="http://www.omg.org/">www.omg.org</a> to search for the correct specificati
<ul>
<li>
CORBA 2.3.1 (<a
href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)</li>
href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)</li>
<li>
IDL to Java language mapping (<a
href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>)</li>
href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>)</li>
<li>Revised IDL to Java language mapping (<a
href="http://cgi.omg.org/cgi-bin/doc?ptc/00-11-03">ptc/00-11-03</a>)</li>
href="http://www.omg.org/cgi-bin/doc?ptc/00-11-03">ptc/00-11-03</a>)</li>
<li>
Java to IDL language mapping (<a
href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-06">ptc/00-01-06</a>)</li>
href="http://www.omg.org/cgi-bin/doc?ptc/00-01-06">ptc/00-01-06</a>)</li>
<li>
Interoperable Naming Service (<a
href="http://cgi.omg.org/cgi-bin/doc?ptc/00-08-07">ptc/00-08-07</a>)</li>
href="http://www.omg.org/cgi-bin/doc?ptc/00-08-07">ptc/00-08-07</a>)</li>
<li>
Portable Interceptors (<a
href="http://cgi.omg.org/cgi-bin/doc?ptc/2001-03-04">ptc/2001-03-04</a>)</li>
href="http://www.omg.org/cgi-bin/doc?ptc/2001-03-04">ptc/2001-03-04</a>)</li>
</ul>
These are the only specifications referenced by this document.

@ -8,7 +8,7 @@
<H1>IDL-to-Java Generated Files</H1>
<P>The files that are generated by the IDL-to-Java compiler, in accordance with the <em><a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">IDL-to-Java Language Mapping Specification</a></em>, which is implemented in Java<sup><font size="-2">TM</font></sup> SE 6 according to the <a href="compliance.html">compliance</a> document.
<P>The files that are generated by the IDL-to-Java compiler, in accordance with the <em><a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">IDL-to-Java Language Mapping Specification</a></em>, which is implemented in Java<sup><font size="-2">TM</font></sup> SE 6 according to the <a href="compliance.html">compliance</a> document.
<P>In general IDL names and identifiers are mapped to Java names and identifiers with no change. Because of the nature of the Java language, a single IDL construct may be mapped to several (differently named) Java constructs. The additional names are constructed by appending a descriptive suffix. For example, the IDL interface <tt>foo</tt> is mapped to the Java interfaces <tt>foo</tt> and <tt>fooOperations</tt>, and additional Java classes <tt>fooHelper</tt>, <tt>fooHolder</tt>, <tt>fooPOA</tt>, and optionally <tt>fooPOATie</tt>.
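As an illustrative sketch only (not actual compiler output; the IDL interface foo and the balance() operation are hypothetical), the derived Java types have roughly the following shape, with each type normally placed in its own source file:

    // Sketch of the names the IDL-to-Java compiler derives from a
    // hypothetical IDL interface "foo"; bodies are simplified.
    interface fooOperations {                      // operations interface: the IDL methods
        int balance();                             // hypothetical IDL operation
    }
    interface foo extends fooOperations,           // signature interface
            org.omg.CORBA.Object,
            org.omg.CORBA.portable.IDLEntity {
    }
    final class fooHelper {                        // narrowing and Any insertion/extraction
        private fooHelper() {}
        static foo narrow(org.omg.CORBA.Object obj) {
            return (foo) obj;                      // real helpers narrow via the generated stub
        }
    }
    final class fooHolder {                        // used for IDL out/inout parameters
        public foo value;
    }
    // fooPOA (and optionally fooPOATie) provide the server-side skeleton.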

@ -344,7 +344,7 @@ public final class AccountHolder implements
</PRE>
<P>For more information on Holder classes, see Chapter 1.4, <em>Mapping for
Basic Types</em> in the <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">
Basic Types</em> in the <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">
<em>OMG IDL to Java Language Mapping</em></a>. The Holder classes defined
in the package <TT>org.omg.CORBA</TT> are:
<PRE>
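A minimal, hedged sketch of how such a Holder carries an IDL out parameter in practice; fetchBalance is a hypothetical method standing in for a generated stub call, not part of the mapping itself:

    import org.omg.CORBA.IntHolder;

    public class HolderSketch {
        // Stands in for a stub method whose IDL signature has an "out long" parameter.
        static void fetchBalance(IntHolder balance) {
            balance.value = 42;                    // callee stores the out value in the holder
        }

        public static void main(String[] args) {
            IntHolder balance = new IntHolder();   // caller allocates the holder
            fetchBalance(balance);
            System.out.println("balance = " + balance.value);
        }
    }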

@ -331,7 +331,7 @@ Context.
<ul>
<li>Interoperable Naming Service (<a
href="http://cgi.omg.org/cgi-bin/doc?ptc/00-08-07">ptc/00-08-07</a>)
href="http://www.omg.org/cgi-bin/doc?ptc/00-08-07">ptc/00-08-07</a>)
</ul>
<h2>Related Documentation</h2>

@ -33,8 +33,8 @@
<body bgcolor="white">
<P>This package contains the <tt>Dynamic</tt> module specified in the OMG Portable
Interceptor specification,
<a href="http://cgi.omg.org/cgi-bin/doc?ptc/2000-08-06">
http://cgi.omg.org/cgi-bin/doc?ptc/2000-08-06</a>, section 21.9. Please
<a href="http://www.omg.org/cgi-bin/doc?ptc/2000-08-06">
ptc/2000-08-06</a>, section 21.9. Please
refer to that OMG specification for further details.

@ -38,8 +38,8 @@ interface of the
<tt>DynamicAny</tt> module
specified in the OMG <em>The Common Object Request Broker: Architecture and
Specification</em>,
<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">
http://cgi.omg.org/cgi-bin/doc?formal/99-10-07</a>, section 9.2.2. Please
<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">
formal/99-10-07</a>, section 9.2.2. Please
refer to that OMG specification for further details.
<H3>Package Specification</H3>

@ -34,8 +34,8 @@
interface of the <tt>DynamicAny</tt> module
specified in the OMG <em>The Common Object Request Broker: Architecture and
Specification</em>,
<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">
http://cgi.omg.org/cgi-bin/doc?formal/99-10-07</a>, section 9.2. Please
<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">
formal/99-10-07</a>, section 9.2. Please
refer to that OMG specification for further details.

@ -33,8 +33,8 @@ questions.
<P>This package contains the <TT>IOP</tt> module specified in the OMG document
<em>The Common
Object Request Broker: Architecture and Specification</em>,
<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">
http://cgi.omg.org/cgi-bin/doc?formal/99-10-07</a>, section 13.6. Please
<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">
formal/99-10-07</a>, section 13.6. Please
refer to that OMG specification for further details.
<P>Please note that we do not provide all parts of the <tt>IOP</tt> module from

@ -32,8 +32,8 @@ questions.
<body bgcolor="white">
<P>This package contains the <tt>Messaging</tt> module specified in the OMG CORBA
Messaging specification,
<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">
http://cgi.omg.org/cgi-bin/doc?formal/99-10-07</a>. Please refer to that OMG
<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">
formal/99-10-07</a>. Please refer to that OMG
specification for further details.
<P>Please note that we do not provide all parts of the <tt>Messaging</tt> module from

@ -33,8 +33,8 @@ questions.
<P>This package contains the exceptions and typedefs from the <tt>ORBInitInfo</tt>
local interface of the <tt>PortableInterceptor</tt> module specified in the OMG
Portable Interceptor specification,
<a href="http://cgi.omg.org/cgi-bin/doc?ptc/2000-08-06">
http://cgi.omg.org/cgi-bin/doc?ptc/2000-08-06</a>, section 21.7.2. Please
<a href="http://www.omg.org/cgi-bin/doc?ptc/2000-08-06">
ptc/2000-08-06</a>, section 21.7.2. Please
refer to that OMG specification for further details.

@ -409,3 +409,4 @@ b2fee789d23f3cdabb3db4e51af43038e5692d3a jdk9-b03
3812c088b9456ee22c933e88aee1ece71f4e783a jdk9-b04
bdc5311e1db7598589b77015119b821bf8c828bd jdk9-b05
52377a30a3f87b62d6135706997b8c7a47366e37 jdk9-b06
52f7edf2589d9f9d35db3008bc5377f279de9c18 jdk9-b07

@ -95,7 +95,9 @@ static task_t getTask(JNIEnv *env, jobject this_obj) {
#define CHECK_EXCEPTION_CLEAR_(value) if ((*env)->ExceptionOccurred(env)) { (*env)->ExceptionClear(env); return value; }
static void throw_new_debugger_exception(JNIEnv* env, const char* errMsg) {
(*env)->ThrowNew(env, (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException"), errMsg);
jclass exceptionClass = (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException");
CHECK_EXCEPTION;
(*env)->ThrowNew(env, exceptionClass, errMsg);
}
static struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
@ -129,6 +131,7 @@ static struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
JNIEXPORT void JNICALL
Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_init0(JNIEnv *env, jclass cls) {
symbolicatorID = (*env)->GetFieldID(env, cls, "symbolicator", "J");
CHECK_EXCEPTION;
taskID = (*env)->GetFieldID(env, cls, "task", "J");
CHECK_EXCEPTION;
@ -236,13 +239,16 @@ JNIEXPORT jobject JNICALL Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_loo
(JNIEnv *env, jobject this_obj, jlong addr) {
uintptr_t offset;
const char* sym = NULL;
jstring sym_string;
struct ps_prochandle* ph = get_proc_handle(env, this_obj);
if (ph != NULL && ph->core != NULL) {
sym = symbol_for_pc(ph, (uintptr_t) addr, &offset);
if (sym == NULL) return 0;
sym_string = (*env)->NewStringUTF(env, sym);
CHECK_EXCEPTION_(0);
return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID,
(*env)->NewStringUTF(env, sym), (jlong)offset);
sym_string, (jlong)offset);
}
return 0;
}
@ -749,11 +755,14 @@ static void fillLoadObjects(JNIEnv* env, jobject this_obj, struct ps_prochandle*
const char* name;
jobject loadObject;
jobject loadObjectList;
jstring nameString;
base = get_lib_base(ph, i);
name = get_lib_name(ph, i);
nameString = (*env)->NewStringUTF(env, name);
CHECK_EXCEPTION;
loadObject = (*env)->CallObjectMethod(env, this_obj, createLoadObject_ID,
(*env)->NewStringUTF(env, name), (jlong)0, (jlong)base);
nameString, (jlong)0, (jlong)base);
CHECK_EXCEPTION;
loadObjectList = (*env)->GetObjectField(env, this_obj, loadObjectList_ID);
CHECK_EXCEPTION;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -162,28 +162,27 @@ open_debug_file (const char *pathname, unsigned int crc)
static struct elf_section *find_section_by_name(char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
ELF_SHDR* cursct = NULL;
char *strtab;
int cnt;
int strtab_size;
// The section cache has to already contain data for the e_shstrndx section.
// If it does not, the ELF file is broken, so just bail out.
if (scn_cache[ehdr->e_shstrndx].c_data == NULL) {
if ((scn_cache[ehdr->e_shstrndx].c_data
= read_section_data(fd, ehdr, cursct)) == NULL) {
return NULL;
}
return NULL;
}
strtab = scn_cache[ehdr->e_shstrndx].c_data;
strtab_size = scn_cache[ehdr->e_shstrndx].c_shdr->sh_size;
for (cursct = shbuf, cnt = 0;
cnt < ehdr->e_shnum;
cnt++, cursct++) {
if (strcmp(cursct->sh_name + strtab, name) == 0) {
scn_cache[cnt].c_data = read_section_data(fd, ehdr, cursct);
return &scn_cache[cnt];
for (cnt = 0; cnt < ehdr->e_shnum; ++cnt) {
if (scn_cache[cnt].c_shdr->sh_name < strtab_size) {
if (strcmp(scn_cache[cnt].c_shdr->sh_name + strtab, name) == 0) {
scn_cache[cnt].c_data = read_section_data(fd, ehdr, scn_cache[cnt].c_shdr);
return &scn_cache[cnt];
}
}
}
@ -195,12 +194,11 @@ static struct elf_section *find_section_by_name(char *name,
static int open_file_from_debug_link(const char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
int debug_fd;
struct elf_section *debug_link = find_section_by_name(".gnu_debuglink", fd, ehdr,
shbuf, scn_cache);
scn_cache);
if (debug_link == NULL)
return -1;
char *debug_filename = debug_link->c_data;
@ -221,7 +219,6 @@ static int open_file_from_debug_link(const char *name,
/* Look in the same directory as the object. */
strcpy(last_slash+1, debug_filename);
debug_fd = open_debug_file(debug_pathname, crc);
if (debug_fd >= 0) {
free(debug_pathname);
@ -261,10 +258,9 @@ static struct symtab* build_symtab_internal(int fd, const char *filename, bool t
static struct symtab *build_symtab_from_debug_link(const char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
fd = open_file_from_debug_link(name, fd, ehdr, shbuf, scn_cache);
fd = open_file_from_debug_link(name, fd, ehdr, scn_cache);
if (fd >= 0) {
struct symtab *symtab = build_symtab_internal(fd, NULL, /* try_debuginfo */ false);
@ -463,7 +459,7 @@ static struct symtab* build_symtab_internal(int fd, const char *filename, bool t
// Then, if that doesn't work, the debug link
if (symtab == NULL) {
symtab = build_symtab_from_debug_link(filename, fd, &ehdr, shbuf,
symtab = build_symtab_from_debug_link(filename, fd, &ehdr,
scn_cache);
}

@ -51,9 +51,9 @@ public class G1CollectedHeap extends SharedHeap {
static private CIntegerField summaryBytesUsedField;
// G1MonitoringSupport* _g1mm;
static private AddressField g1mmField;
// MasterOldRegionSet _old_set;
// HeapRegionSet _old_set;
static private long oldSetFieldOffset;
// MasterHumongousRegionSet _humongous_set;
// HeapRegionSet _humongous_set;
static private long humongousSetFieldOffset;
static {

@ -40,12 +40,8 @@ import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionSetBase. Represents a group of regions.
public class HeapRegionSetBase extends VMObject {
// uint _length;
static private CIntegerField lengthField;
// uint _region_num;
static private CIntegerField regionNumField;
// size_t _total_used_bytes;
static private CIntegerField totalUsedBytesField;
static private long countField;
static {
VM.registerVMInitializedObserver(new Observer() {
@ -58,21 +54,13 @@ public class HeapRegionSetBase extends VMObject {
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegionSetBase");
lengthField = type.getCIntegerField("_length");
regionNumField = type.getCIntegerField("_region_num");
totalUsedBytesField = type.getCIntegerField("_total_used_bytes");
countField = type.getField("_count").getOffset();
}
public long length() {
return lengthField.getValue(addr);
}
public long regionNum() {
return regionNumField.getValue(addr);
}
public long totalUsedBytes() {
return totalUsedBytesField.getValue(addr);
public HeapRegionSetCount count() {
Address countFieldAddr = addr.addOffsetTo(countField);
return (HeapRegionSetCount) VMObjectFactory.newObject(HeapRegionSetCount.class, countFieldAddr);
}
public HeapRegionSetBase(Address addr) {

@ -0,0 +1,73 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.gc_implementation.g1;
import java.util.Iterator;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionSetCount. Holds the number of regions in a set and their total capacity in bytes.
public class HeapRegionSetCount extends VMObject {
static private CIntegerField lengthField;
static private CIntegerField capacityField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegionSetCount");
lengthField = type.getCIntegerField("_length");
capacityField = type.getCIntegerField("_capacity");
}
public long length() {
return lengthField.getValue(addr);
}
public long capacity() {
return capacityField.getValue(addr);
}
public HeapRegionSetCount(Address addr) {
super(addr);
}
}

@ -114,7 +114,8 @@ public class HeapSummary extends Tool {
long survivorRegionNum = g1mm.survivorRegionNum();
HeapRegionSetBase oldSet = g1h.oldSet();
HeapRegionSetBase humongousSet = g1h.humongousSet();
long oldRegionNum = oldSet.regionNum() + humongousSet.regionNum();
long oldRegionNum = oldSet.count().length()
+ humongousSet.count().capacity() / HeapRegion.grainBytes();
printG1Space("G1 Heap:", g1h.n_regions(),
g1h.used(), g1h.capacity());
System.out.println("G1 Young Generation:");

@ -287,8 +287,43 @@ else
@$(ECHO) "Error: trying to build a minimal target but JVM_VARIANT_MINIMAL1 is not true."
endif
remove_old_debuginfo:
ifeq ($(JVM_VARIANT_CLIENT), true)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
ifeq ($(OSNAME), windows)
$(RM) -f $(EXPORT_CLIENT_DIR)/jvm.map $(EXPORT_CLIENT_DIR)/jvm.pdb
else
$(RM) -f $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
endif
else
$(RM) -f $(EXPORT_CLIENT_DIR)/libjvm.diz
endif
endif
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
ifeq ($(OSNAME), windows)
$(RM) -f $(EXPORT_SERVER_DIR)/jvm.map $(EXPORT_SERVER_DIR)/jvm.pdb
else
ifeq ($(OS_VENDOR), Darwin)
$(RM) -rf $(EXPORT_SERVER_DIR)/libjvm.dylib.dSYM
else
$(RM) -f $(EXPORT_SERVER_DIR)/libjvm.debuginfo
endif
endif
else
$(RM) -f $(EXPORT_SERVER_DIR)/libjvm.diz
endif
endif
ifeq ($(JVM_VARIANT_MINIMAL1),true)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
$(RM) -f $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo
else
$(RM) -f $(EXPORT_MINIMAL_DIR)/libjvm.diz
endif
endif
# Export file rule
generic_export: $(EXPORT_LIST)
generic_export: $(EXPORT_LIST) remove_old_debuginfo
export_product:
$(MAKE) BUILD_FLAVOR=$(@:export_%=%) generic_export
@ -841,4 +876,4 @@ include $(GAMMADIR)/make/jprt.gmk
export_jdk_product export_jdk_fastdebug export_jdk_debug \
create_jdk copy_jdk update_jdk test_jdk \
copy_product_jdk copy_fastdebug_jdk copy_debug_jdk \
$(HS_ALT_MAKE)/Makefile.make
$(HS_ALT_MAKE)/Makefile.make remove_old_debuginfo

@ -101,7 +101,7 @@ CXXFLAGS = \
# This is VERY important! The version define must only be supplied to vm_version.o
# If not, ccache will not re-use the cache at all, since the version string might contain
# a time and date.
vm_version.o: CXXFLAGS += ${JRE_VERSION}
CXXFLAGS/vm_version.o += ${JRE_VERSION}
CXXFLAGS/BYFILE = $(CXXFLAGS/$@)

@ -1,5 +1,5 @@
#
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -87,9 +87,10 @@ ifeq ($(INCLUDE_ALL_GCS), false)
g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp g1OopClosures.cpp \
g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1StringDedup.cpp g1StringDedupStat.cpp \
g1StringDedupTable.cpp g1StringDedupThread.cpp g1StringDedupQueue.cpp g1_globals.cpp heapRegion.cpp \
g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp g1CodeCacheRemSet.cpp \
adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \

@ -33,7 +33,7 @@ jprt.need.sibling.build=false
# This tells jprt what default release we want to build
jprt.hotspot.default.release=jdk8
jprt.hotspot.default.release=jdk9
jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
@ -47,72 +47,50 @@ jprt.sync.push=false
# sparc etc.
# Define the Solaris platforms we want for the various releases
jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
jprt.my.solaris.sparcv9.jdk9=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
jprt.my.solaris.x64.jdk8=solaris_x64_5.10
jprt.my.solaris.x64.jdk7=solaris_x64_5.10
jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
jprt.my.solaris.x64.jdk9=solaris_x64_5.10
jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
jprt.my.linux.i586.jdk8=linux_i586_2.6
jprt.my.linux.i586.jdk7=linux_i586_2.6
jprt.my.linux.i586.jdk7u8=${jprt.my.linux.i586.jdk7}
jprt.my.linux.i586.jdk9=linux_i586_2.6
jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
jprt.my.linux.x64.jdk8=linux_x64_2.6
jprt.my.linux.x64.jdk7=linux_x64_2.6
jprt.my.linux.x64.jdk7u8=${jprt.my.linux.x64.jdk7}
jprt.my.linux.x64.jdk9=linux_x64_2.6
jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
jprt.my.linux.ppc.jdk8=linux_ppc_2.6
jprt.my.linux.ppc.jdk7=linux_ppc_2.6
jprt.my.linux.ppc.jdk7u8=${jprt.my.linux.ppc.jdk7}
jprt.my.linux.ppc.jdk9=linux_ppc_2.6
jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7u8=${jprt.my.linux.ppcv2.jdk7}
jprt.my.linux.ppcv2.jdk9=linux_ppcv2_2.6
jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.jdk7u8=${jprt.my.linux.ppcsflt.jdk7}
jprt.my.linux.ppcsflt.jdk9=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
jprt.my.linux.armvfpsflt.jdk8=linux_armvfpsflt_2.6
jprt.my.linux.armvfpsflt.jdk9=linux_armvfpsflt_2.6
jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}}
jprt.my.linux.armvfphflt.jdk8=linux_armvfphflt_2.6
jprt.my.linux.armvfphflt.jdk9=linux_armvfphflt_2.6
jprt.my.linux.armvfphflt=${jprt.my.linux.armvfphflt.${jprt.tools.default.release}}
# The ARM GP vfp-sflt build is not currently supported
#jprt.my.linux.armvs.jdk8=linux_armvs_2.6
#jprt.my.linux.armvs.jdk9=linux_armvs_2.6
#jprt.my.linux.armvs=${jprt.my.linux.armvs.${jprt.tools.default.release}}
jprt.my.linux.armvh.jdk8=linux_armvh_2.6
jprt.my.linux.armvh.jdk9=linux_armvh_2.6
jprt.my.linux.armvh=${jprt.my.linux.armvh.${jprt.tools.default.release}}
jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
jprt.my.linux.armsflt.jdk7u8=${jprt.my.linux.armsflt.jdk7}
jprt.my.linux.armsflt.jdk9=linux_armsflt_2.6
jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
jprt.my.macosx.x64.jdk8=macosx_x64_10.7
jprt.my.macosx.x64.jdk7=macosx_x64_10.7
jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
jprt.my.macosx.x64.jdk9=macosx_x64_10.7
jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
jprt.my.windows.i586.jdk8=windows_i586_6.1
jprt.my.windows.i586.jdk7=windows_i586_6.1
jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
jprt.my.windows.i586.jdk9=windows_i586_6.1
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
jprt.my.windows.x64.jdk8=windows_x64_6.1
jprt.my.windows.x64.jdk7=windows_x64_6.1
jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
jprt.my.windows.x64.jdk9=windows_x64_6.1
jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
# Standard list of jprt build targets for this source tree
@ -143,9 +121,7 @@ jprt.build.targets.embedded= \
jprt.build.targets.all=${jprt.build.targets.standard}, \
${jprt.build.targets.embedded}, ${jprt.build.targets.open}
jprt.build.targets.jdk8=${jprt.build.targets.all}
jprt.build.targets.jdk7=${jprt.build.targets.all}
jprt.build.targets.jdk7u8=${jprt.build.targets.all}
jprt.build.targets.jdk9=${jprt.build.targets.all}
jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
# Subset lists of test targets for this source tree
@ -349,9 +325,7 @@ jprt.test.targets.embedded= \
${jprt.my.windows.i586.test.targets}, \
${jprt.my.windows.x64.test.targets}
jprt.test.targets.jdk8=${jprt.test.targets.standard}
jprt.test.targets.jdk7=${jprt.test.targets.standard}
jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
jprt.test.targets.jdk9=${jprt.test.targets.standard}
jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
# The default test/Makefile targets that should be run
@ -399,9 +373,7 @@ jprt.make.rule.test.targets.standard = \
jprt.make.rule.test.targets.embedded = \
${jprt.make.rule.test.targets.standard.client}
jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7}
jprt.make.rule.test.targets.jdk9=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
# 7155453: Work-around to prevent popups on OSX from blocking test completion

@ -66,8 +66,8 @@ ifndef CC_INTERP
FORCE_TIERED=1
endif
endif
# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
ifneq (,$(filter $(ARCH),ppc64 pp64le))
# C1 is not ported on ppc64, so we cannot build a tiered VM:
ifeq ($(ARCH),ppc64)
FORCE_TIERED=0
endif

@ -33,6 +33,11 @@ SLASH_JAVA ?= /java
# ARCH can be set explicitly in spec.gmk
ifndef ARCH
ARCH := $(shell uname -m)
# Fold little endian PowerPC64 into big-endian (if ARCH is set in
# hotspot-spec.gmk, this will be done by the configure script).
ifeq ($(ARCH),ppc64le)
ARCH := ppc64
endif
endif
PATH_SEP ?= :

@ -337,56 +337,20 @@ endif
ifeq ($(DEBUG_BINARIES), true)
CFLAGS += -g
else
# Use the stabs format for debugging information (this is the default
# on gcc-2.91). It's good enough, has all the information about line
# numbers and local variables, and libjvm.so is only about 16M.
# Change this back to "-g" if you want the most expressive format.
# (warning: that could easily inflate libjvm.so to 150M!)
# Note: The Itanium gcc compiler crashes when using -gstabs.
DEBUG_CFLAGS/ia64 = -g
DEBUG_CFLAGS/amd64 = -g
DEBUG_CFLAGS/arm = -g
DEBUG_CFLAGS/ppc = -g
DEBUG_CFLAGS/ppc64 = -g
DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
DEBUG_CFLAGS += -g
else
DEBUG_CFLAGS += -gstabs
endif
DEBUG_CFLAGS += -g
endif
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
FASTDEBUG_CFLAGS/ia64 = -g
FASTDEBUG_CFLAGS/amd64 = -g
FASTDEBUG_CFLAGS/arm = -g
FASTDEBUG_CFLAGS/ppc = -g
FASTDEBUG_CFLAGS/ppc64 = -g
FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
FASTDEBUG_CFLAGS += -g
else
FASTDEBUG_CFLAGS += -gstabs
endif
FASTDEBUG_CFLAGS += -g
endif
OPT_CFLAGS/ia64 = -g
OPT_CFLAGS/amd64 = -g
OPT_CFLAGS/arm = -g
OPT_CFLAGS/ppc = -g
OPT_CFLAGS/ppc64 = -g
OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
OPT_CFLAGS += -g
else
OPT_CFLAGS += -gstabs
endif
OPT_CFLAGS += -g
endif
endif
endif

@ -26,14 +26,26 @@
# make c code know it is on a 64 bit platform.
CFLAGS += -D_LP64=1
# fixes `relocation truncated to fit' error for gcc 4.1.
CFLAGS += -mminimal-toc
ifeq ($(origin OPENJDK_TARGET_CPU_ENDIAN),undefined)
# This can happen during hotspot standalone build. Set endianness from
# uname. We assume build and target machines are the same.
OPENJDK_TARGET_CPU_ENDIAN:=$(if $(filter ppc64le,$(shell uname -m)),little,big)
endif
# finds use ppc64 instructions, but schedule for power5
CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
ifeq ($(filter $(OPENJDK_TARGET_CPU_ENDIAN),big little),)
$(error OPENJDK_TARGET_CPU_ENDIAN value should be 'big' or 'little')
endif
# let linker find external 64 bit libs.
LFLAGS_VM += -L/lib64
ifeq ($(OPENJDK_TARGET_CPU_ENDIAN),big)
# fixes `relocation truncated to fit' error for gcc 4.1.
CFLAGS += -mminimal-toc
# specify lib format.
LFLAGS_VM += -Wl,-melf64ppc
# finds use ppc64 instructions, but schedule for power5
CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
else
# Little endian machine uses ELFv2 ABI.
CFLAGS += -DVM_LITTLE_ENDIAN -DABI_ELFv2
# Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
CFLAGS += -mcpu=power7 -mtune=power8 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
endif

@ -260,7 +260,6 @@ ifeq ($(JVM_VARIANT_SERVER),true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.map
endif
endif
EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
endif
ifeq ($(JVM_VARIANT_CLIENT),true)
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
@ -275,6 +274,8 @@ ifeq ($(JVM_VARIANT_CLIENT),true)
endif
endif
EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
ifeq ($(BUILD_WIN_SA), 1)
EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,6 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@ -37,6 +36,7 @@
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
@ -384,10 +384,10 @@ int Assembler::load_const_optimized(Register d, long x, Register tmp, bool retur
bool load_xa = (xa != 0) || (xb < 0);
bool return_xd = false;
if (load_xa) lis(tmp, xa);
if (xc) lis(d, xc);
if (load_xa) { lis(tmp, xa); }
if (xc) { lis(d, xc); }
if (load_xa) {
if (xb) ori(tmp, tmp, xb); // No addi, we support tmp == R0.
if (xb) { ori(tmp, tmp, (unsigned short)xb); } // No addi, we support tmp == R0.
} else {
li(tmp, xb); // non-negative
}
@ -409,18 +409,18 @@ int Assembler::load_const_optimized(Register d, long x, Register tmp, bool retur
// opt 4: avoid adding 0
if (xa) { // Highest 16-bit needed?
lis(d, xa);
if (xb) addi(d, d, xb);
if (xb) { addi(d, d, xb); }
} else {
li(d, xb);
}
sldi(d, d, 32);
if (xc) addis(d, d, xc);
if (xc) { addis(d, d, xc); }
}
// opt 5: Return offset to be inserted into following instruction.
if (return_simm16_rest) return xd;
if (xd) addi(d, d, xd);
if (xd) { addi(d, d, xd); }
return 0;
}
@ -696,4 +696,5 @@ void Assembler::test_asm() {
tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", code()->insts_begin(), code()->insts_end());
code()->decode();
}
#endif // !PRODUCT

@ -1025,15 +1025,14 @@ class Assembler : public AbstractAssembler {
}
static void set_imm(int* instr, short s) {
short* p = ((short *)instr) + 1;
*p = s;
// imm is always in the lower 16 bits of the instruction,
// so this is endian-neutral. Same for the get_imm below.
uint32_t w = *(uint32_t *)instr;
*instr = (int)((w & ~0x0000FFFF) | (s & 0x0000FFFF));
}
static int get_imm(address a, int instruction_number) {
short imm;
short *p =((short *)a)+2*instruction_number+1;
imm = *p;
return (int)imm;
return (short)((int *)a)[instruction_number];
}
static inline int hi16_signed( int x) { return (int)(int16_t)(x >> 16); }

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -139,7 +139,8 @@ inline void Assembler::cmpldi(ConditionRegister crx, Register a, int ui16) { A
inline void Assembler::cmplw( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 0, a, b); }
inline void Assembler::cmpld( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 1, a, b); }
inline void Assembler::isel(Register d, Register a, Register b, int c) { emit_int32(ISEL_OPCODE | rt(d) | ra(a) | rb(b) | bc(c)); }
inline void Assembler::isel(Register d, Register a, Register b, int c) { guarantee(VM_Version::has_isel(), "opcode not supported on this hardware");
emit_int32(ISEL_OPCODE | rt(d) | ra(a) | rb(b) | bc(c)); }
// PPC 1, section 3.3.11, Fixed-Point Logical Instructions
inline void Assembler::andi_( Register a, Register s, int ui16) { emit_int32(ANDI_OPCODE | rta(a) | rs(s) | uimm(ui16, 16)); }
@ -531,9 +532,12 @@ inline void Assembler::fmr_(FloatRegister d, FloatRegister b) { emit_int32( FMR_
//inline void Assembler::mffgpr( FloatRegister d, Register b) { emit_int32( MFFGPR_OPCODE | frt(d) | rb(b) | rc(0)); }
//inline void Assembler::mftgpr( Register d, FloatRegister b) { emit_int32( MFTGPR_OPCODE | rt(d) | frb(b) | rc(0)); }
// add cmpb and popcntb to detect ppc power version.
inline void Assembler::cmpb( Register a, Register s, Register b) { emit_int32( CMPB_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::popcntb(Register a, Register s) { emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); };
inline void Assembler::popcntw(Register a, Register s) { emit_int32( POPCNTW_OPCODE | rta(a) | rs(s)); };
inline void Assembler::cmpb( Register a, Register s, Register b) { guarantee(VM_Version::has_cmpb(), "opcode not supported on this hardware");
emit_int32( CMPB_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::popcntb(Register a, Register s) { guarantee(VM_Version::has_popcntb(), "opcode not supported on this hardware");
emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); };
inline void Assembler::popcntw(Register a, Register s) { guarantee(VM_Version::has_popcntw(), "opcode not supported on this hardware");
emit_int32( POPCNTW_OPCODE | rta(a) | rs(s)); };
inline void Assembler::popcntd(Register a, Register s) { emit_int32( POPCNTD_OPCODE | rta(a) | rs(s)); };
inline void Assembler::fneg( FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE | frt(d) | frb(b) | rc(0)); }
@ -568,14 +572,17 @@ inline void Assembler::fctidz(FloatRegister d, FloatRegister b) { emit_int32( FC
inline void Assembler::fctiw( FloatRegister d, FloatRegister b) { emit_int32( FCTIW_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fctiwz(FloatRegister d, FloatRegister b) { emit_int32( FCTIWZ_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fcfid( FloatRegister d, FloatRegister b) { emit_int32( FCFID_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fcfids(FloatRegister d, FloatRegister b) { emit_int32( FCFIDS_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fcfids(FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fcfids(), "opcode not supported on this hardware");
emit_int32( FCFIDS_OPCODE | frt(d) | frb(b) | rc(0)); }
// PPC 1, section 4.6.7 Floating-Point Compare Instructions
inline void Assembler::fcmpu( ConditionRegister crx, FloatRegister a, FloatRegister b) { emit_int32( FCMPU_OPCODE | bf(crx) | fra(a) | frb(b)); }
// PPC 1, section 5.2.1 Floating-Point Arithmetic Instructions
inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { emit_int32( FSQRT_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fsqrt(), "opcode not supported on this hardware");
emit_int32( FSQRT_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fsqrts(), "opcode not supported on this hardware");
emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
// Vector instructions for >= Power6.
inline void Assembler::lvebx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEBX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
@ -703,7 +710,8 @@ inline void Assembler::vcmpgtsw_(VectorRegister d,VectorRegister a, VectorRegist
inline void Assembler::vcmpgtub_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtuh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtuw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vand( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAND_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vand( VectorRegister d, VectorRegister a, VectorRegister b) { guarantee(VM_Version::has_vand(), "opcode not supported on this hardware");
emit_int32( VAND_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vandc( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VANDC_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vnor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }

@ -35,6 +35,126 @@ class Bytes: AllStatic {
// Can I count on address always being a pointer to an unsigned char? Yes.
#if defined(VM_LITTLE_ENDIAN)
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine. For example, true for Intel x86, false for Solaris on SPARC.
static inline bool is_Java_byte_ordering_different() { return true; }
// Forward declarations of the compiler-dependent implementation
static inline u2 swap_u2(u2 x);
static inline u4 swap_u4(u4 x);
static inline u8 swap_u8(u8 x);
static inline u2 get_native_u2(address p) {
return (intptr_t(p) & 1) == 0
? *(u2*)p
: ( u2(p[1]) << 8 )
| ( u2(p[0]) );
}
static inline u4 get_native_u4(address p) {
switch (intptr_t(p) & 3) {
case 0: return *(u4*)p;
case 2: return ( u4( ((u2*)p)[1] ) << 16 )
| ( u4( ((u2*)p)[0] ) );
default: return ( u4(p[3]) << 24 )
| ( u4(p[2]) << 16 )
| ( u4(p[1]) << 8 )
| u4(p[0]);
}
}
static inline u8 get_native_u8(address p) {
switch (intptr_t(p) & 7) {
case 0: return *(u8*)p;
case 4: return ( u8( ((u4*)p)[1] ) << 32 )
| ( u8( ((u4*)p)[0] ) );
case 2: return ( u8( ((u2*)p)[3] ) << 48 )
| ( u8( ((u2*)p)[2] ) << 32 )
| ( u8( ((u2*)p)[1] ) << 16 )
| ( u8( ((u2*)p)[0] ) );
default: return ( u8(p[7]) << 56 )
| ( u8(p[6]) << 48 )
| ( u8(p[5]) << 40 )
| ( u8(p[4]) << 32 )
| ( u8(p[3]) << 24 )
| ( u8(p[2]) << 16 )
| ( u8(p[1]) << 8 )
| u8(p[0]);
}
}
static inline void put_native_u2(address p, u2 x) {
if ( (intptr_t(p) & 1) == 0 ) *(u2*)p = x;
else {
p[1] = x >> 8;
p[0] = x;
}
}
static inline void put_native_u4(address p, u4 x) {
switch ( intptr_t(p) & 3 ) {
case 0: *(u4*)p = x;
break;
case 2: ((u2*)p)[1] = x >> 16;
((u2*)p)[0] = x;
break;
default: ((u1*)p)[3] = x >> 24;
((u1*)p)[2] = x >> 16;
((u1*)p)[1] = x >> 8;
((u1*)p)[0] = x;
break;
}
}
static inline void put_native_u8(address p, u8 x) {
switch ( intptr_t(p) & 7 ) {
case 0: *(u8*)p = x;
break;
case 4: ((u4*)p)[1] = x >> 32;
((u4*)p)[0] = x;
break;
case 2: ((u2*)p)[3] = x >> 48;
((u2*)p)[2] = x >> 32;
((u2*)p)[1] = x >> 16;
((u2*)p)[0] = x;
break;
default: ((u1*)p)[7] = x >> 56;
((u1*)p)[6] = x >> 48;
((u1*)p)[5] = x >> 40;
((u1*)p)[4] = x >> 32;
((u1*)p)[3] = x >> 24;
((u1*)p)[2] = x >> 16;
((u1*)p)[1] = x >> 8;
((u1*)p)[0] = x;
}
}
// Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering),
// which requires a byte-order reversal on this little-endian configuration.
static inline u2 get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
static inline u4 get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
static inline u8 get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }
static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, swap_u2(x)); }
static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, swap_u4(x)); }
static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, swap_u8(x)); }
#else // !defined(VM_LITTLE_ENDIAN)
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine. For example, true for Intel x86, false for Solaris on SPARC.
static inline bool is_Java_byte_ordering_different() { return false; }
@ -150,6 +270,12 @@ class Bytes: AllStatic {
static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, x); }
static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, x); }
static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, x); }
#endif // VM_LITTLE_ENDIAN
};
#if defined(TARGET_OS_ARCH_linux_ppc)
#include "bytes_linux_ppc.inline.hpp"
#endif
#endif // CPU_PPC_VM_BYTES_PPC_HPP

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -87,7 +87,7 @@ define_pd_global(uint64_t,MaxRAM, 4ULL*G);
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, false);
define_pd_global(bool, TrapBasedRangeChecks, true);
// Heap related flags
define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,8 +24,6 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
@ -1120,7 +1118,7 @@ address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
}
return _last_calls_return_pc;
}
#endif
#endif // ABI_ELFv2
void MacroAssembler::call_VM_base(Register oop_result,
Register last_java_sp,
@ -1794,7 +1792,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
bne(cr_reg, cas_label);
load_klass_with_trap_null_check(temp_reg, obj_reg);
load_klass(temp_reg, obj_reg);
load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
@ -1891,7 +1889,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// the bias from one thread to another directly in this situation.
andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
orr(temp_reg, R16_thread, temp_reg);
load_klass_with_trap_null_check(temp2_reg, obj_reg);
load_klass(temp2_reg, obj_reg);
ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
orr(temp_reg, temp_reg, temp2_reg);
@ -1927,7 +1925,7 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// that another thread raced us for the privilege of revoking the
// bias of this particular object, so it's okay to continue in the
// normal locking code.
load_klass_with_trap_null_check(temp_reg, obj_reg);
load_klass(temp_reg, obj_reg);
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
orr(temp_reg, temp_reg, temp2_reg);
@ -2213,8 +2211,7 @@ void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Regis
stbx(R0, Rtmp, Robj);
}
#ifndef SERIALGC
#if INCLUDE_ALL_GCS
// General G1 pre-barrier generator.
// Goal: record the previous value if it is not null.
void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
@ -2328,14 +2325,17 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_v
// Get the address of the card.
lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
beq(CCR0, filtered);
assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
cmpwi(CCR0, Rtmp3 /* card value */, 0);
membar(Assembler::StoreLoad);
lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr); // Reload after membar.
cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
beq(CCR0, filtered);
// Storing a region crossing, non-NULL oop, card is clean.
// Dirty card and log.
li(Rtmp3, 0); // dirty
li(Rtmp3, CardTableModRefBS::dirty_card_val());
//release(); // G1: oops are allowed to get visible after dirty marking.
stbx(Rtmp3, Rbase, Rcard_addr);
@ -2362,7 +2362,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_v
bind(filtered_int);
}
#endif // SERIALGC
#endif // INCLUDE_ALL_GCS
// Values for last_Java_pc, and last_Java_sp must comply to the rules
// in frame_ppc64.hpp.
@ -2453,7 +2453,8 @@ void MacroAssembler::get_vm_result_2(Register metadata_result) {
void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
if (Universe::narrow_klass_base() != 0) {
load_const(R0, Universe::narrow_klass_base(), (dst != current) ? dst : noreg); // Use dst as temp if it is free.
// Use dst as temp if it is free.
load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg);
sub(dst, current, R0);
current = dst;
}

@ -514,14 +514,14 @@ class MacroAssembler: public Assembler {
void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
#ifndef SERIALGC
#if INCLUDE_ALL_GCS
// General G1 pre-barrier generator.
void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
Register Rtmp1, Register Rtmp2, bool needs_frame = false);
// General G1 post-barrier generator
void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
#endif // SERIALGC
#endif
// Support for managing the JavaThread pointer (i.e.; the reference to
// thread-local information).

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -119,6 +119,7 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
bool for_compiler_entry) {
Label L_no_such_method;
assert(method == R19_method, "interpreter calling convention");
assert_different_registers(method, target, temp);
@ -131,17 +132,31 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
__ lwz(temp, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
__ cmplwi(CCR0, temp, 0);
__ beq(CCR0, run_compiled_code);
// Null method test is replicated below in compiled case,
// it might be able to address across the verify_thread()
__ cmplwi(CCR0, R19_method, 0);
__ beq(CCR0, L_no_such_method);
__ ld(target, in_bytes(Method::interpreter_entry_offset()), R19_method);
__ mtctr(target);
__ bctr();
__ BIND(run_compiled_code);
}
// Compiled case, either static or fall-through from runtime conditional
__ cmplwi(CCR0, R19_method, 0);
__ beq(CCR0, L_no_such_method);
const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
Method::from_interpreted_offset();
__ ld(target, in_bytes(entry_offset), R19_method);
__ mtctr(target);
__ bctr();
__ bind(L_no_such_method);
assert(StubRoutines::throw_AbstractMethodError_entry() != NULL, "not yet generated!");
__ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry());
__ mtctr(target);
__ bctr();
}

@ -891,6 +891,13 @@ definitions %{
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description.
source_hpp %{
// Header information of the source block.
// Method declarations/definitions which are used outside
// the ad-scope can conveniently be defined here.
//
// To keep related declarations/definitions/uses close together,
// we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Returns true if Node n is followed by a MemBar node that
// will do an acquire. If so, this node must not do the acquire
// operation.
@ -1114,6 +1121,40 @@ static inline void emit_long(CodeBuffer &cbuf, int value) {
//=============================================================================
%} // interrupt source
source_hpp %{ // Header information of the source block.
//--------------------------------------------------------------
//---< Used for optimization in Compile::Shorten_branches >---
//--------------------------------------------------------------
const uint trampoline_stub_size = 6 * BytesPerInstWord;
class CallStubImpl {
public:
static void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);
// Size of call trampoline stub.
// This doesn't need to be accurate to the byte, but it
// must be larger than or equal to the real size of the stub.
static uint size_call_trampoline() {
return trampoline_stub_size;
}
// number of relocations needed by a call trampoline stub
static uint reloc_call_trampoline() {
return 5;
}
};
%} // end source_hpp
source %{
// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
@ -1125,9 +1166,7 @@ static inline void emit_long(CodeBuffer &cbuf, int value) {
// load the call target from the constant pool
// branch via CTR (LR/link still points to the call-site above)
const uint trampoline_stub_size = 6 * BytesPerInstWord;
void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
void CallStubImpl::emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
// Start the stub.
address stub = __ start_a_stub(Compile::MAX_stubs_size/2);
if (stub == NULL) {
@ -1170,19 +1209,6 @@ void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int
__ end_a_stub();
}
// Size of trampoline stub, this doesn't need to be accurate but it must
// be larger or equal to the real size of the stub.
// Used for optimization in Compile::Shorten_branches.
uint size_call_trampoline() {
return trampoline_stub_size;
}
// Number of relocation entries needed by trampoline stub.
// Used for optimization in Compile::Shorten_branches.
uint reloc_call_trampoline() {
return 5;
}
//=============================================================================
// Emit an inline branch-and-link call and a related trampoline stub.
@ -1221,7 +1247,7 @@ EmitCallOffsets emit_call_with_trampoline_stub(MacroAssembler &_masm, address en
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below.
emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
__ relocate(rtype);
}
@ -2023,17 +2049,34 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
uint size_exception_handler() {
// The exception_handler is a b64_patchable.
return MacroAssembler::b64_patchable_size;
}
%} // interrupt source
uint size_deopt_handler() {
// The deopt_handler is a bl64_patchable.
return MacroAssembler::bl64_patchable_size;
}
source_hpp %{ // Header information of the source block.
int emit_exception_handler(CodeBuffer &cbuf) {
class HandlerImpl {
public:
static int emit_exception_handler(CodeBuffer &cbuf);
static int emit_deopt_handler(CodeBuffer& cbuf);
static uint size_exception_handler() {
// The exception_handler is a b64_patchable.
return MacroAssembler::b64_patchable_size;
}
static uint size_deopt_handler() {
// The deopt_handler is a bl64_patchable.
return MacroAssembler::bl64_patchable_size;
}
};
%} // end source_hpp
source %{
int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
@ -2050,7 +2093,7 @@ int emit_exception_handler(CodeBuffer &cbuf) {
// The deopt_handler is like the exception handler, but it calls to
// the deoptimization blob instead of jumping to the exception blob.
int emit_deopt_handler(CodeBuffer& cbuf) {
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
@ -3438,7 +3481,7 @@ encode %{
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below.
emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
__ relocate(_optimized_virtual ?
relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
}
@ -3481,7 +3524,7 @@ encode %{
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below.
emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
assert(_optimized_virtual, "methodHandle call should be a virtual call");
__ relocate(relocInfo::opt_virtual_call_type);
}
@ -3531,7 +3574,7 @@ encode %{
const address entry_point = !($meth$$method) ? 0 : (address)$meth$$method;
const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
if (ra_->C->env()->failing())
return;
@ -8755,6 +8798,7 @@ instruct sqrtD_reg(regD dst, regD src) %{
// Single-precision sqrt.
instruct sqrtF_reg(regF dst, regF src) %{
match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
predicate(VM_Version::has_fsqrts());
ins_cost(DEFAULT_COST);
format %{ "FSQRTS $dst, $src" %}
@ -11550,8 +11594,7 @@ instruct safePoint_poll_conPollAddr(rscratch2RegP poll) %{
// effect no longer needs to be mentioned, since r0 is not contained
// in a reg_class.
format %{ "LD R12, addr of polling page\n\t"
"LD R0, #0, R12 \t// Safepoint poll for GC" %}
format %{ "LD R0, #0, R12 \t// Safepoint poll for GC" %}
ins_encode( enc_poll(0x0, poll) );
ins_pipe(pipe_class_default);
%}

@ -34,6 +34,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_ppc.inline.hpp"
#include "adfiles/ad_ppc_64.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
@ -52,10 +53,6 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Used by generate_deopt_blob. Defined in .ad file.
extern uint size_deopt_handler();
class RegisterSaver {
// Used for saving volatile registers.
public:
@ -2782,7 +2779,7 @@ void SharedRuntime::generate_deopt_blob() {
// We can't grab a free register here, because all registers may
// contain live values, so let the RegisterSaver do the adjustment
// of the return pc.
const int return_pc_adjustment_no_exception = -size_deopt_handler();
const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler();
// Push the "unpack frame"
// Save everything in sight.

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,17 +23,6 @@
*
*/
#include "precompiled.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"
#ifdef TARGET_OS_FAMILY_aix
# include "thread_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.

@ -1672,7 +1672,7 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
//__ flush_bundle();
address entry = __ pc();
char *bname = NULL;
const char *bname = NULL;
uint tsize = 0;
switch(state) {
case ftos:

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -402,6 +402,9 @@ void VM_Version::determine_features() {
CodeBuffer cb("detect_cpu_features", code_size, 0);
MacroAssembler* a = new MacroAssembler(&cb);
// Must be set to true so we can generate the test code.
_features = VM_Version::all_features_m;
// Emit code.
void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
uint32_t *code = (uint32_t *)a->pc();
@ -409,14 +412,15 @@ void VM_Version::determine_features() {
// Keep R3_ARG1 unmodified, it contains &field (see below).
// Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
a->fsqrt(F3, F4); // code[0] -> fsqrt_m
a->isel(R7, R5, R6, 0); // code[1] -> isel_m
a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[2] -> lxarx_m
a->cmpb(R7, R5, R6); // code[3] -> bcmp
//a->mftgpr(R7, F3); // code[4] -> mftgpr
a->popcntb(R7, R5); // code[5] -> popcntb
a->popcntw(R7, R5); // code[6] -> popcntw
a->fcfids(F3, F4); // code[7] -> fcfids
a->vand(VR0, VR0, VR0); // code[8] -> vand
a->fsqrts(F3, F4); // code[1] -> fsqrts_m
a->isel(R7, R5, R6, 0); // code[2] -> isel_m
a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3] -> lxarx_m
a->cmpb(R7, R5, R6); // code[4] -> bcmp
//a->mftgpr(R7, F3); // code[5] -> mftgpr
a->popcntb(R7, R5); // code[6] -> popcntb
a->popcntw(R7, R5); // code[7] -> popcntw
a->fcfids(F3, F4); // code[8] -> fcfids
a->vand(VR0, VR0, VR0); // code[9] -> vand
a->blr();
// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@ -426,6 +430,7 @@ void VM_Version::determine_features() {
uint32_t *code_end = (uint32_t *)a->pc();
a->flush();
_features = VM_Version::unknown_m;
// Print the detection code.
if (PrintAssembly) {
@ -450,6 +455,7 @@ void VM_Version::determine_features() {
// determine which instructions are legal.
int feature_cntr = 0;
if (code[feature_cntr++]) features |= fsqrt_m;
if (code[feature_cntr++]) features |= fsqrts_m;
if (code[feature_cntr++]) features |= isel_m;
if (code[feature_cntr++]) features |= lxarxeh_m;
if (code[feature_cntr++]) features |= cmpb_m;

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@ class VM_Version: public Abstract_VM_Version {
protected:
enum Feature_Flag {
fsqrt,
fsqrts,
isel,
lxarxeh,
cmpb,
@ -46,6 +47,7 @@ protected:
enum Feature_Flag_Set {
unknown_m = 0,
fsqrt_m = (1 << fsqrt ),
fsqrts_m = (1 << fsqrts ),
isel_m = (1 << isel ),
lxarxeh_m = (1 << lxarxeh),
cmpb_m = (1 << cmpb ),
@ -72,6 +74,7 @@ public:
static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
// CPU instruction support
static bool has_fsqrt() { return (_features & fsqrt_m) != 0; }
static bool has_fsqrts() { return (_features & fsqrts_m) != 0; }
static bool has_isel() { return (_features & isel_m) != 0; }
static bool has_lxarxeh() { return (_features & lxarxeh_m) !=0; }
static bool has_cmpb() { return (_features & cmpb_m) != 0; }

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -79,7 +79,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
address npe_addr = __ pc(); // npe = null pointer exception
__ load_klass_with_trap_null_check(rcvr_klass, R3);
// Set methodOop (in case of interpreted method), and destination address.
// Set method (in case of interpreted method), and destination address.
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
#ifndef PRODUCT
@ -161,8 +161,6 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
address npe_addr = __ pc(); // npe = null pointer exception
__ load_klass_with_trap_null_check(rcvr_klass, R3_ARG1);
//__ ld(rcvr_klass, oopDesc::klass_offset_in_bytes(), R3_ARG1);
BLOCK_COMMENT("Load start of itable entries into itable_entry.");
__ lwz(vtable_len, InstanceKlass::vtable_length_offset() * wordSize, rcvr_klass);
__ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size() * wordSize));
@ -199,7 +197,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
itable_offset_search_inc;
__ lwz(vtable_offset, vtable_offset_offset, itable_entry_addr);
// Compute itableMethodEntry and get methodOop and entry point for compiler.
// Compute itableMethodEntry and get method and entry point for compiler.
const int method_offset = (itableMethodEntry::size() * wordSize * vtable_index) +
itableMethodEntry::method_offset_in_bytes();
@ -211,7 +209,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
Label ok;
__ cmpd(CCR0, R19_method, 0);
__ bne(CCR0, ok);
__ stop("methodOop is null", 103);
__ stop("method is null", 103);
__ bind(ok);
}
#endif

@ -3320,7 +3320,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
// if tmp is invalid, then the function being called doesn't destroy the thread
if (tmp->is_valid()) {
__ save_thread(tmp->as_register());
__ save_thread(tmp->as_pointer_register());
}
__ call(dest, relocInfo::runtime_call_type);
__ delayed()->nop();
@ -3328,7 +3328,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
add_call_info_here(info);
}
if (tmp->is_valid()) {
__ restore_thread(tmp->as_register());
__ restore_thread(tmp->as_pointer_register());
}
#ifdef ASSERT

@ -69,7 +69,7 @@ void LIRItem::load_nonconstant() {
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::Oexception_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::Oissuing_pc_opr; }
LIR_Opr LIRGenerator::syncTempOpr() { return new_register(T_OBJECT); }
LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(T_INT); }
LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); }
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
LIR_Opr opr;

@ -66,6 +66,4 @@ define_pd_global(bool, OptimizeSinglePrecision, false);
define_pd_global(bool, CSEArrayLength, true );
define_pd_global(bool, TwoOperandLIRForm, false);
define_pd_global(intx, SafepointPollOffset, 0 );
#endif // CPU_SPARC_VM_C1_GLOBALS_SPARC_HPP

@ -457,6 +457,13 @@ definitions %{
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source_hpp %{
// Header information of the source block.
// Method declarations/definitions which are used outside
// the ad-scope can conveniently be defined here.
//
// To keep related declarations/definitions/uses close together,
// we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Must be visible to the DFA in dfa_sparc.cpp
extern bool can_branch_register( Node *bol, Node *cmp );
@ -468,6 +475,46 @@ extern bool use_block_zeroing(Node* count);
#define LONG_HI_REG(x) (x)
#define LONG_LO_REG(x) (x)
class CallStubImpl {
//--------------------------------------------------------------
//---< Used for optimization in Compile::Shorten_branches >---
//--------------------------------------------------------------
public:
// Size of call trampoline stub.
static uint size_call_trampoline() {
return 0; // no call trampolines on this platform
}
// number of relocations needed by a call trampoline stub
static uint reloc_call_trampoline() {
return 0; // no call trampolines on this platform
}
};
class HandlerImpl {
public:
static int emit_exception_handler(CodeBuffer &cbuf);
static int emit_deopt_handler(CodeBuffer& cbuf);
static uint size_exception_handler() {
if (TraceJumps) {
return (400); // just a guess
}
return ( NativeJump::instruction_size ); // sethi;jmp;nop
}
static uint size_deopt_handler() {
if (TraceJumps) {
return (400); // just a guess
}
return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
}
};
%}
source %{
@ -1710,22 +1757,9 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
uint size_exception_handler() {
if (TraceJumps) {
return (400); // just a guess
}
return ( NativeJump::instruction_size ); // sethi;jmp;nop
}
uint size_deopt_handler() {
if (TraceJumps) {
return (400); // just a guess
}
return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
}
// Emit exception handler code.
int emit_exception_handler(CodeBuffer& cbuf) {
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
Register temp_reg = G3;
AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
MacroAssembler _masm(&cbuf);
@ -1746,7 +1780,7 @@ int emit_exception_handler(CodeBuffer& cbuf) {
return offset;
}
int emit_deopt_handler(CodeBuffer& cbuf) {
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
// Can't use any of the current frame's registers as we may have deopted
// at a poll and everything (including G3) can be live.
Register temp_reg = L0;

@ -1112,7 +1112,6 @@ void Assembler::bsfl(Register dst, Register src) {
}
void Assembler::bsrl(Register dst, Register src) {
assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
emit_int8((unsigned char)0xBD);
@ -2343,6 +2342,11 @@ void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector25
emit_int8(imm8);
}
void Assembler::pause() {
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)0x90);
}
void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse4_2(), "");
InstructionMark im(this);
@ -2667,6 +2671,11 @@ void Assembler::rcll(Register dst, int imm8) {
}
}
void Assembler::rdtsc() {
emit_int8((unsigned char)0x0F);
emit_int8((unsigned char)0x31);
}
// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
@ -2976,6 +2985,11 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
}
void Assembler::xabort(int8_t imm8) {
emit_int8((unsigned char)0xC6);
emit_int8((unsigned char)0xF8);
emit_int8((unsigned char)(imm8 & 0xFF));
}
void Assembler::xaddl(Address dst, Register src) {
InstructionMark im(this);
@ -2985,6 +2999,24 @@ void Assembler::xaddl(Address dst, Register src) {
emit_operand(src, dst);
}
void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
InstructionMark im(this);
relocate(rtype);
if (abort.is_bound()) {
address entry = target(abort);
assert(entry != NULL, "abort entry NULL");
intptr_t offset = entry - pc();
emit_int8((unsigned char)0xC7);
emit_int8((unsigned char)0xF8);
emit_int32(offset - 6); // 2 opcode + 4 address
} else {
abort.add_patch_at(code(), locator());
emit_int8((unsigned char)0xC7);
emit_int8((unsigned char)0xF8);
emit_int32(0);
}
}
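
As a sanity check on the bound-label arithmetic above, a minimal sketch; the addresses are made up, only the offset algebra is taken from the code:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t entry  = 0x1090;                 // target(abort), already bound
  const intptr_t pc     = 0x1100;                 // pc() before emitting xbegin
  const intptr_t offset = entry - pc;             // as computed above
  const int32_t  rel32  = (int32_t)(offset - 6);  // displacement stored after C7 F8
  // The CPU resolves the fallback address relative to the end of the
  // 6-byte instruction (2 opcode bytes + 4 displacement bytes):
  assert(pc + 6 + rel32 == entry);
  return 0;
}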
void Assembler::xchgl(Register dst, Address src) { // xchg
InstructionMark im(this);
prefix(src, dst);
@ -2998,6 +3030,12 @@ void Assembler::xchgl(Register dst, Register src) {
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::xend() {
emit_int8((unsigned char)0x0F);
emit_int8((unsigned char)0x01);
emit_int8((unsigned char)0xD5);
}
void Assembler::xgetbv() {
emit_int8(0x0F);
emit_int8(0x01);
@ -4938,7 +4976,6 @@ void Assembler::bsfq(Register dst, Register src) {
}
void Assembler::bsrq(Register dst, Register src) {
assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
emit_int8((unsigned char)0xBD);

@ -1451,6 +1451,8 @@ private:
// Pemutation of 64bit words
void vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256);
void pause();
// SSE4.2 string instructions
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);
@ -1535,6 +1537,8 @@ private:
void rclq(Register dst, int imm8);
void rdtsc();
void ret(int imm16);
void sahf();
@ -1632,16 +1636,22 @@ private:
void ucomiss(XMMRegister dst, Address src);
void ucomiss(XMMRegister dst, XMMRegister src);
void xabort(int8_t imm8);
void xaddl(Address dst, Register src);
void xaddq(Address dst, Register src);
void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);
void xchgl(Register reg, Address adr);
void xchgl(Register dst, Register src);
void xchgq(Register reg, Address adr);
void xchgq(Register dst, Register src);
void xend();
// Get Value of Extended Control Register
void xgetbv();

@ -604,8 +604,7 @@ void LIR_Assembler::return_op(LIR_Opr result) {
// Note: we do not need to round double result; float result has the right precision
// the poll sets the condition code, but no data registers
AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
relocInfo::poll_return_type);
AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
if (Assembler::is_polling_page_far()) {
__ lea(rscratch1, polling_page);
@ -619,8 +618,7 @@ void LIR_Assembler::return_op(LIR_Opr result) {
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
relocInfo::poll_type);
AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
guarantee(info != NULL, "Shouldn't be NULL");
int offset = __ offset();
if (Assembler::is_polling_page_far()) {
@ -801,7 +799,13 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
if (UseCompressedOops && !wide) {
__ movl(as_Address(addr), (int32_t)NULL_WORD);
} else {
#ifdef _LP64
__ xorptr(rscratch1, rscratch1);
null_check_here = code_offset();
__ movptr(as_Address(addr), rscratch1);
#else
__ movptr(as_Address(addr), NULL_WORD);
#endif
}
} else {
if (is_literal_address(addr)) {

@ -65,6 +65,4 @@ define_pd_global(bool, OptimizeSinglePrecision, true );
define_pd_global(bool, CSEArrayLength, false);
define_pd_global(bool, TwoOperandLIRForm, true );
define_pd_global(intx, SafepointPollOffset, 256 );
#endif // CPU_X86_VM_C1_GLOBALS_X86_HPP

@ -129,6 +129,42 @@ define_pd_global(uintx, TypeProfileLevel, 111);
product(bool, UseFastStosb, false, \
"Use fast-string operation for zeroing: rep stosb") \
\
/* Use Restricted Transactional Memory for lock eliding */ \
product(bool, UseRTMLocking, false, \
"Enable RTM lock eliding for inflated locks in compiled code") \
\
experimental(bool, UseRTMForStackLocks, false, \
"Enable RTM lock eliding for stack locks in compiled code") \
\
product(bool, UseRTMDeopt, false, \
"Perform deopt and recompilation based on RTM abort ratio") \
\
product(uintx, RTMRetryCount, 5, \
"Number of RTM retries on lock abort or busy") \
\
experimental(intx, RTMSpinLoopCount, 100, \
"Spin count for lock to become free before RTM retry") \
\
experimental(intx, RTMAbortThreshold, 1000, \
"Calculate abort ratio after this number of aborts") \
\
experimental(intx, RTMLockingThreshold, 10000, \
"Lock count at which to do RTM lock eliding without " \
"abort ratio calculation") \
\
experimental(intx, RTMAbortRatio, 50, \
"Lock abort ratio at which to stop use RTM lock eliding") \
\
experimental(intx, RTMTotalCountIncrRate, 64, \
"Increment total RTM attempted lock count once every n times") \
\
experimental(intx, RTMLockingCalculationDelay, 0, \
"Number of milliseconds to wait before start calculating aborts " \
"for RTM locking") \
\
experimental(bool, UseRTMXendForLockBusy, false, \
"Use RTM Xend instead of Xabort when lock busy") \
\
/* assembler */ \
product(bool, Use486InstrsOnly, false, \
"Use 80486 Compliant instruction subset") \

@ -301,7 +301,9 @@ void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
// scratch register is not used,
// it is defined to match parameters of 64-bit version of this method.
if (src.is_lval()) {
mov_literal32(dst, (intptr_t)src.target(), src.rspec());
} else {
@ -613,6 +615,15 @@ void MacroAssembler::decrementq(Address dst, int value) {
/* else */ { subq(dst, value) ; return; }
}
void MacroAssembler::incrementq(AddressLiteral dst) {
if (reachable(dst)) {
incrementq(as_Address(dst));
} else {
lea(rscratch1, dst);
incrementq(Address(rscratch1, 0));
}
}
void MacroAssembler::incrementq(Register reg, int value) {
if (value == min_jint) { addq(reg, value); return; }
if (value < 0) { decrementq(reg, -value); return; }
@ -681,15 +692,15 @@ void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
movq(dst, rscratch1);
}
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
if (src.is_lval()) {
mov_literal64(dst, (intptr_t)src.target(), src.rspec());
} else {
if (reachable(src)) {
movq(dst, as_Address(src));
} else {
lea(rscratch1, src);
movq(dst, Address(rscratch1,0));
lea(scratch, src);
movq(dst, Address(scratch, 0));
}
}
}
@ -988,21 +999,38 @@ void MacroAssembler::andptr(Register dst, int32_t imm32) {
LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}
void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
pushf();
if (reachable(counter_addr)) {
if (os::is_MP())
lock();
incrementl(as_Address(counter_addr));
} else {
lea(rscratch1, counter_addr);
if (os::is_MP())
lock();
incrementl(Address(rscratch1, 0));
}
popf();
void MacroAssembler::atomic_incl(Address counter_addr) {
if (os::is_MP())
lock();
incrementl(counter_addr);
}
void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
if (reachable(counter_addr)) {
atomic_incl(as_Address(counter_addr));
} else {
lea(scr, counter_addr);
atomic_incl(Address(scr, 0));
}
}
#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
if (os::is_MP())
lock();
incrementq(counter_addr);
}
void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
if (reachable(counter_addr)) {
atomic_incq(as_Address(counter_addr));
} else {
lea(scr, counter_addr);
atomic_incq(Address(scr, 0));
}
}
#endif
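
For orientation only: the lock-prefixed increment emitted by atomic_incl/atomic_incq corresponds to a sequentially consistent fetch-add; a rough C++ equivalent (not HotSpot code) would be:

#include <atomic>
#include <cstdint>

inline void atomic_incl_equivalent(std::atomic<int32_t>& counter) {
  // On x86 this compiles down to a lock-prefixed read-modify-write,
  // just like the "lock; incl" sequence above.
  counter.fetch_add(1, std::memory_order_seq_cst);
}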
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
@ -1274,6 +1302,325 @@ void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, La
}
#ifdef COMPILER2
#if INCLUDE_RTM_OPT
// Update rtm_counters based on abort status
// input: abort_status
// rtm_counters (RTMLockingCounters*)
// flags are killed
void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {
atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
if (PrintPreciseRTMLockingStatistics) {
for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
Label check_abort;
testl(abort_status, (1<<i));
jccb(Assembler::equal, check_abort);
atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
bind(check_abort);
}
}
}
// Branch if (random & (count-1) != 0), count is 2^n
// tmp, scr and flags are killed
void MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
assert(tmp == rax, "");
assert(scr == rdx, "");
rdtsc(); // modifies EDX:EAX
andptr(tmp, count-1);
jccb(Assembler::notZero, brLabel);
}
// Perform abort ratio calculation, set no_rtm bit if high ratio
// input: rtm_counters_Reg (RTMLockingCounters* address)
// tmpReg, rtm_counters_Reg and flags are killed
void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
Register rtm_counters_Reg,
RTMLockingCounters* rtm_counters,
Metadata* method_data) {
Label L_done, L_check_always_rtm1, L_check_always_rtm2;
if (RTMLockingCalculationDelay > 0) {
// Delay calculation
movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()), tmpReg);
testptr(tmpReg, tmpReg);
jccb(Assembler::equal, L_done);
}
// Abort ratio calculation only if abort_count > RTMAbortThreshold
// Aborted transactions = abort_count * 100
// All transactions = total_count * RTMTotalCountIncrRate
// Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
cmpptr(tmpReg, RTMAbortThreshold);
jccb(Assembler::below, L_check_always_rtm2);
imulptr(tmpReg, tmpReg, 100);
Register scrReg = rtm_counters_Reg;
movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
imulptr(scrReg, scrReg, RTMAbortRatio);
cmpptr(tmpReg, scrReg);
jccb(Assembler::below, L_check_always_rtm1);
if (method_data != NULL) {
// set rtm_state to "no rtm" in MDO
mov_metadata(tmpReg, method_data);
if (os::is_MP()) {
lock();
}
orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
}
jmpb(L_done);
bind(L_check_always_rtm1);
// Reload RTMLockingCounters* address
lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
bind(L_check_always_rtm2);
movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
jccb(Assembler::below, L_done);
if (method_data != NULL) {
// set rtm_state to "always rtm" in MDO
mov_metadata(tmpReg, method_data);
if (os::is_MP()) {
lock();
}
orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
}
bind(L_done);
}
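
A minimal standalone restatement of the ratio test above, assuming the default RTMTotalCountIncrRate (64) and RTMAbortRatio (50) from the flag block earlier in this change; the constants and sample numbers below are illustrative only:

#include <cstdint>

static bool abort_ratio_too_high(uint64_t abort_count, uint64_t total_count) {
  const uint64_t kIncrRate   = 64;  // RTMTotalCountIncrRate default
  const uint64_t kAbortRatio = 50;  // RTMAbortRatio default, in percent
  // Aborted transactions = abort_count * 100
  // All transactions     = total_count * RTMTotalCountIncrRate
  // no_rtm is set when Aborted >= All * RTMAbortRatio
  return abort_count * 100 >= total_count * kIncrRate * kAbortRatio;
}
// e.g. abort_ratio_too_high(5000, 100) is true  (~78% of sampled locks aborted),
//      abort_ratio_too_high(1000, 100) is false (~16%).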
// Update counters and perform abort ratio calculation
// input: abort_status_Reg
// rtm_counters_Reg, flags are killed
void MacroAssembler::rtm_profiling(Register abort_status_Reg,
Register rtm_counters_Reg,
RTMLockingCounters* rtm_counters,
Metadata* method_data,
bool profile_rtm) {
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
// update rtm counters based on rax value at abort
// reads abort_status_Reg, updates flags
lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
if (profile_rtm) {
// Save abort status because abort_status_Reg is used by following code.
if (RTMRetryCount > 0) {
push(abort_status_Reg);
}
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
// restore abort status
if (RTMRetryCount > 0) {
pop(abort_status_Reg);
}
}
}
// Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
// inputs: retry_count_Reg
// : abort_status_Reg
// output: retry_count_Reg decremented by 1
// flags are killed
void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
Label doneRetry;
assert(abort_status_Reg == rax, "");
// The abort reason bits are in eax (see all states in rtmLocking.hpp)
// 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
// if reason is in 0x6 and retry count != 0 then retry
andptr(abort_status_Reg, 0x6);
jccb(Assembler::zero, doneRetry);
testl(retry_count_Reg, retry_count_Reg);
jccb(Assembler::zero, doneRetry);
pause();
decrementl(retry_count_Reg);
jmp(retryLabel);
bind(doneRetry);
}
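
The same retry predicate, written out in plain C++ for readability; the bit values are taken from the comment above, and the helper itself is illustrative:

#include <cstdint>

static bool should_retry_on_abort(uint32_t abort_status, int retry_count) {
  const uint32_t kRetryPossible  = 0x2;  // "can retry" bit of the abort status in EAX
  const uint32_t kMemoryConflict = 0x4;  // memory-conflict bit
  return (abort_status & (kRetryPossible | kMemoryConflict)) != 0
      && retry_count != 0;
}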
// Spin and retry if lock is busy,
// inputs: box_Reg (monitor address)
// : retry_count_Reg
// output: retry_count_Reg decremented by 1
// : clear z flag if retry count exceeded
// tmp_Reg, scr_Reg, flags are killed
void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
Label SpinLoop, SpinExit, doneRetry;
// Clean monitor_value bit to get valid pointer
int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
testl(retry_count_Reg, retry_count_Reg);
jccb(Assembler::zero, doneRetry);
decrementl(retry_count_Reg);
movptr(scr_Reg, RTMSpinLoopCount);
bind(SpinLoop);
pause();
decrementl(scr_Reg);
jccb(Assembler::lessEqual, SpinExit);
movptr(tmp_Reg, Address(box_Reg, owner_offset));
testptr(tmp_Reg, tmp_Reg);
jccb(Assembler::notZero, SpinLoop);
bind(SpinExit);
jmp(retryLabel);
bind(doneRetry);
incrementl(retry_count_Reg); // clear z flag
}
// Use RTM for normal stack locks
// Input: objReg (object to lock)
void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
Register retry_on_abort_count_Reg,
RTMLockingCounters* stack_rtm_counters,
Metadata* method_data, bool profile_rtm,
Label& DONE_LABEL, Label& IsInflated) {
assert(UseRTMForStackLocks, "why call this otherwise?");
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
assert(tmpReg == rax, "");
assert(scrReg == rdx, "");
Label L_rtm_retry, L_decrement_retry, L_on_abort;
if (RTMRetryCount > 0) {
movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
bind(L_rtm_retry);
}
if (!UseRTMXendForLockBusy) {
movptr(tmpReg, Address(objReg, 0));
testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
jcc(Assembler::notZero, IsInflated);
}
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
Label L_noincrement;
if (RTMTotalCountIncrRate > 1) {
// tmpReg, scrReg and flags are killed
branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
}
assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
bind(L_noincrement);
}
xbegin(L_on_abort);
movptr(tmpReg, Address(objReg, 0)); // fetch markword
andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
jcc(Assembler::equal, DONE_LABEL); // all done if unlocked
Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
if (UseRTMXendForLockBusy) {
xend();
movptr(tmpReg, Address(objReg, 0));
testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
jcc(Assembler::notZero, IsInflated);
movptr(abort_status_Reg, 0x1); // Set the abort status to 1 (as xabort does)
jmp(L_decrement_retry);
}
else {
xabort(0);
}
bind(L_on_abort);
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
}
bind(L_decrement_retry);
if (RTMRetryCount > 0) {
// retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
}
}
// Use RTM for inflated locks
// inputs: objReg (object to lock)
// boxReg (on-stack box address (displaced header location) - KILLED)
// tmpReg (ObjectMonitor address + 2(monitor_value))
void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
Register scrReg, Register retry_on_busy_count_Reg,
Register retry_on_abort_count_Reg,
RTMLockingCounters* rtm_counters,
Metadata* method_data, bool profile_rtm,
Label& DONE_LABEL) {
assert(UseRTMLocking, "why call this otherwise?");
assert(tmpReg == rax, "");
assert(scrReg == rdx, "");
Label L_rtm_retry, L_decrement_retry, L_on_abort;
// Clean monitor_value bit to get valid pointer
int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
// Without cast to int32_t a movptr will destroy r10 which is typically obj
movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
movptr(boxReg, tmpReg); // Save ObjectMonitor address
if (RTMRetryCount > 0) {
movl(retry_on_busy_count_Reg, RTMRetryCount); // Retry on lock busy
movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
bind(L_rtm_retry);
}
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
Label L_noincrement;
if (RTMTotalCountIncrRate > 1) {
// tmpReg, scrReg and flags are killed
branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
}
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
bind(L_noincrement);
}
xbegin(L_on_abort);
movptr(tmpReg, Address(objReg, 0));
movptr(tmpReg, Address(tmpReg, owner_offset));
testptr(tmpReg, tmpReg);
jcc(Assembler::zero, DONE_LABEL);
if (UseRTMXendForLockBusy) {
xend();
jmp(L_decrement_retry);
}
else {
xabort(0);
}
bind(L_on_abort);
Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
}
if (RTMRetryCount > 0) {
// retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
}
movptr(tmpReg, Address(boxReg, owner_offset)) ;
testptr(tmpReg, tmpReg) ;
jccb(Assembler::notZero, L_decrement_retry) ;
// Appears unlocked - try to swing _owner from null to non-null.
// Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
#ifdef _LP64
Register threadReg = r15_thread;
#else
get_thread(scrReg);
Register threadReg = scrReg;
#endif
if (os::is_MP()) {
lock();
}
cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
if (RTMRetryCount > 0) {
// success done else retry
jccb(Assembler::equal, DONE_LABEL) ;
bind(L_decrement_retry);
// Spin and retry if lock is busy.
rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
}
else {
bind(L_decrement_retry);
}
}
#endif // INCLUDE_RTM_OPT
// Fast_Lock and Fast_Unlock used by C2
// Because the transitions from emitted code to the runtime
@ -1350,17 +1697,26 @@ void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, La
// box: on-stack box address (displaced header location) - KILLED
// rax,: tmp -- KILLED
// scr: tmp -- KILLED
void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, Register scrReg, BiasedLockingCounters* counters) {
void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg,
Register scrReg, Register cx1Reg, Register cx2Reg,
BiasedLockingCounters* counters,
RTMLockingCounters* rtm_counters,
RTMLockingCounters* stack_rtm_counters,
Metadata* method_data,
bool use_rtm, bool profile_rtm) {
// Ensure the register assignments are disjoint
guarantee (objReg != boxReg, "");
guarantee (objReg != tmpReg, "");
guarantee (objReg != scrReg, "");
guarantee (boxReg != tmpReg, "");
guarantee (boxReg != scrReg, "");
guarantee (tmpReg == rax, "");
assert(tmpReg == rax, "");
if (use_rtm) {
assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg);
} else {
assert(cx1Reg == noreg, "");
assert(cx2Reg == noreg, "");
assert_different_registers(objReg, boxReg, tmpReg, scrReg);
}
if (counters != NULL) {
atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()));
atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
}
if (EmitSync & 1) {
// set box->dhw = unused_mark (3)
@ -1419,12 +1775,20 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
}
#if INCLUDE_RTM_OPT
if (UseRTMForStackLocks && use_rtm) {
rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
stack_rtm_counters, method_data, profile_rtm,
DONE_LABEL, IsInflated);
}
#endif // INCLUDE_RTM_OPT
movptr(tmpReg, Address(objReg, 0)); // [FETCH]
testl (tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
jccb (Assembler::notZero, IsInflated);
testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
jccb(Assembler::notZero, IsInflated);
// Attempt stack-locking ...
orptr (tmpReg, 0x1);
orptr (tmpReg, markOopDesc::unlocked_value);
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
if (os::is_MP()) {
lock();
@ -1434,19 +1798,32 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
cond_inc32(Assembler::equal,
ExternalAddress((address)counters->fast_path_entry_count_addr()));
}
jccb(Assembler::equal, DONE_LABEL);
jcc(Assembler::equal, DONE_LABEL); // Success
// Recursive locking
// Recursive locking.
// The object is stack-locked: markword contains stack pointer to BasicLock.
// Locked by current thread if difference with current SP is less than one page.
subptr(tmpReg, rsp);
// Next instruction sets ZFlag == 1 (Success) if the difference is less than one page.
andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
movptr(Address(boxReg, 0), tmpReg);
if (counters != NULL) {
cond_inc32(Assembler::equal,
ExternalAddress((address)counters->fast_path_entry_count_addr()));
}
jmpb(DONE_LABEL);
jmp(DONE_LABEL);
bind(IsInflated);
// The object is inflated. tmpReg contains pointer to ObjectMonitor* + 2(monitor_value)
#if INCLUDE_RTM_OPT
// Use the same RTM locking code in 32- and 64-bit VM.
if (use_rtm) {
rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
rtm_counters, method_data, profile_rtm, DONE_LABEL);
} else {
#endif // INCLUDE_RTM_OPT
#ifndef _LP64
// The object is inflated.
//
@ -1576,7 +1953,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
// Without cast to int32_t a movptr will destroy r10 which is typically obj
movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
mov (boxReg, tmpReg);
movptr (boxReg, tmpReg);
movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
testptr(tmpReg, tmpReg);
jccb (Assembler::notZero, DONE_LABEL);
@ -1587,9 +1964,11 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
}
cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
// Intentional fall-through into DONE_LABEL ...
#endif // _LP64
#if INCLUDE_RTM_OPT
} // use_rtm()
#endif
// DONE_LABEL is a hot target - we'd really like to place it at the
// start of cache line by padding with NOPs.
// See the AMD and Intel software optimization manuals for the
@ -1631,11 +2010,9 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
// should not be unlocked by "normal" java-level locking and vice-versa. The specification
// doesn't specify what will occur if a program engages in such mixed-mode locking, however.
void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg) {
guarantee (objReg != boxReg, "");
guarantee (objReg != tmpReg, "");
guarantee (boxReg != tmpReg, "");
guarantee (boxReg == rax, "");
void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
assert(boxReg == rax, "");
assert_different_registers(objReg, boxReg, tmpReg);
if (EmitSync & 4) {
// Disable - inhibit all inlining. Force control through the slow-path
@ -1667,14 +2044,41 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}
cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
movptr(tmpReg, Address(objReg, 0)); // Examine the object's markword
jccb (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
#if INCLUDE_RTM_OPT
if (UseRTMForStackLocks && use_rtm) {
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
Label L_regular_unlock;
movptr(tmpReg, Address(objReg, 0)); // fetch markword
andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
xend(); // otherwise end...
jmp(DONE_LABEL); // ... and we're done
bind(L_regular_unlock);
}
#endif
testptr(tmpReg, 0x02); // Inflated?
cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
movptr(tmpReg, Address(objReg, 0)); // Examine the object's markword
testptr(tmpReg, markOopDesc::monitor_value); // Inflated?
jccb (Assembler::zero, Stacked);
// It's inflated.
#if INCLUDE_RTM_OPT
if (use_rtm) {
Label L_regular_inflated_unlock;
// Clean monitor_value bit to get valid pointer
int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
movptr(boxReg, Address(tmpReg, owner_offset));
testptr(boxReg, boxReg);
jccb(Assembler::notZero, L_regular_inflated_unlock);
xend();
jmpb(DONE_LABEL);
bind(L_regular_inflated_unlock);
}
#endif
// Despite our balanced locking property we still check that m->_owner == Self
// as java routines or native JNI code called by this thread might
// have released the lock.
@ -2448,7 +2852,9 @@ void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
Condition negated_cond = negate_condition(cond);
Label L;
jcc(negated_cond, L);
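// Preserve flags explicitly here: atomic_incl() itself no longer saves and
// restores them (the pushf/popf pair moved out of it earlier in this change).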
pushf(); // Preserve flags
atomic_incl(counter_addr);
popf();
bind(L);
}

@ -27,6 +27,7 @@
#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
// MacroAssembler extends Assembler by frequently used macros.
@ -111,7 +112,8 @@ class MacroAssembler: public Assembler {
op == 0xE9 /* jmp */ ||
op == 0xEB /* short jmp */ ||
(op & 0xF0) == 0x70 /* short jcc */ ||
op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
"Invalid opcode at patch point");
if (op == 0xEB || (op & 0xF0) == 0x70) {
@ -121,7 +123,7 @@ class MacroAssembler: public Assembler {
guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
*disp = imm8;
} else {
int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
int imm32 = target - (address) &disp[1];
*disp = imm32;
}
@ -161,7 +163,6 @@ class MacroAssembler: public Assembler {
void incrementq(Register reg, int value = 1);
void incrementq(Address dst, int value = 1);
// Support optimal SSE move instructions.
void movflt(XMMRegister dst, XMMRegister src) {
if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
@ -187,6 +188,8 @@ class MacroAssembler: public Assembler {
void incrementl(AddressLiteral dst);
void incrementl(ArrayAddress dst);
void incrementq(AddressLiteral dst);
// Alignment
void align(int modulus);
@ -654,8 +657,36 @@ class MacroAssembler: public Assembler {
#ifdef COMPILER2
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
// See full description in macroAssembler_x86.cpp.
void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
void fast_unlock(Register obj, Register box, Register tmp);
void fast_lock(Register obj, Register box, Register tmp,
Register scr, Register cx1, Register cx2,
BiasedLockingCounters* counters,
RTMLockingCounters* rtm_counters,
RTMLockingCounters* stack_rtm_counters,
Metadata* method_data,
bool use_rtm, bool profile_rtm);
void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
#if INCLUDE_RTM_OPT
void rtm_counters_update(Register abort_status, Register rtm_counters);
void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
RTMLockingCounters* rtm_counters,
Metadata* method_data);
void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
void rtm_stack_locking(Register obj, Register tmp, Register scr,
Register retry_on_abort_count,
RTMLockingCounters* stack_rtm_counters,
Metadata* method_data, bool profile_rtm,
Label& DONE_LABEL, Label& IsInflated);
void rtm_inflated_locking(Register obj, Register box, Register tmp,
Register scr, Register retry_on_busy_count,
Register retry_on_abort_count,
RTMLockingCounters* rtm_counters,
Metadata* method_data, bool profile_rtm,
Label& DONE_LABEL);
#endif
#endif
Condition negate_condition(Condition cond);
@ -721,6 +752,7 @@ class MacroAssembler: public Assembler {
void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
@ -762,7 +794,14 @@ class MacroAssembler: public Assembler {
// Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
void cond_inc32(Condition cond, AddressLiteral counter_addr);
// Unconditional atomic increment.
void atomic_incl(AddressLiteral counter_addr);
void atomic_incl(Address counter_addr);
void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
void atomic_incq(Address counter_addr);
void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
void lea(Register dst, AddressLiteral adr);
void lea(Address dst, AddressLiteral adr);
@ -1074,7 +1113,11 @@ public:
void movptr(Register dst, Address src);
void movptr(Register dst, AddressLiteral src);
#ifdef _LP64
void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
#else
void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
#endif
void movptr(Register dst, intptr_t src);
void movptr(Register dst, Register src);

@ -0,0 +1,60 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/task.hpp"
#include "runtime/rtmLocking.hpp"
// One-shot PeriodicTask subclass for enabling RTM locking
uintx RTMLockingCounters::_calculation_flag = 0;
class RTMLockingCalculationTask : public PeriodicTask {
public:
RTMLockingCalculationTask(size_t interval_time) : PeriodicTask(interval_time){ }
virtual void task() {
RTMLockingCounters::_calculation_flag = 1;
// Reclaim our storage and disenroll ourself
delete this;
}
};
void RTMLockingCounters::init() {
if (UseRTMLocking && RTMLockingCalculationDelay > 0) {
RTMLockingCalculationTask* task = new RTMLockingCalculationTask(RTMLockingCalculationDelay);
task->enroll();
} else {
_calculation_flag = 1;
}
}
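
The class above is an instance of a one-shot PeriodicTask; a generic sketch of the same idiom, assuming the same HotSpot headers this file already includes (the class name and flag pointer are illustrative):

class OneShotFlagSetter : public PeriodicTask {
 public:
  OneShotFlagSetter(size_t delay_ms, volatile uintx* flag)
    : PeriodicTask(delay_ms), _flag(flag) {}
  virtual void task() {
    *_flag = 1;   // perform the deferred action exactly once
    delete this;  // as in the comment above, deleting the task disenrolls it
  }
 private:
  volatile uintx* _flag;
};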
//------------------------------print_on-------------------------------
void RTMLockingCounters::print_on(outputStream* st) {
tty->print_cr("# rtm locks total (estimated): " UINTX_FORMAT, _total_count * RTMTotalCountIncrRate);
tty->print_cr("# rtm lock aborts : " UINTX_FORMAT, _abort_count);
for (int i = 0; i < ABORT_STATUS_LIMIT; i++) {
tty->print_cr("# rtm lock aborts %d: " UINTX_FORMAT, i, _abortX_count[i]);
}
}

@ -1817,6 +1817,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
if (UseRTMLocking) {
// Abort RTM transaction before calling JNI
// because critical section will be large and will be
// aborted anyway. Also nmethod could be deoptimized.
__ xabort(0);
}
// Calculate the difference between rsp and rbp,. We need to know it
// after the native call because on windows Java Natives will pop
// the arguments and it is painful to do rsp relative addressing
@ -3170,6 +3177,12 @@ void SharedRuntime::generate_uncommon_trap_blob() {
};
address start = __ pc();
if (UseRTMLocking) {
// Abort RTM transaction before possible nmethod deoptimization.
__ xabort(0);
}
// Push self-frame.
__ subptr(rsp, return_off*wordSize); // Epilog!
@ -3355,6 +3368,14 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
address call_pc = NULL;
bool cause_return = (poll_type == POLL_AT_RETURN);
bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
if (UseRTMLocking) {
// Abort RTM transaction before calling runtime
// because critical section will be large and will be
// aborted anyway. Also nmethod could be deoptimized.
__ xabort(0);
}
// If cause_return is true we are at a poll_return and there is
// the return address on the stack to the caller on the nmethod
// that is safepoint. We can leave this return on the stack and

@ -2012,6 +2012,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
if (UseRTMLocking) {
// Abort RTM transaction before calling JNI
// because critical section will be large and will be
// aborted anyway. Also nmethod could be deoptimized.
__ xabort(0);
}
#ifdef ASSERT
{
Label L;
@ -3612,6 +3619,11 @@ void SharedRuntime::generate_uncommon_trap_blob() {
address start = __ pc();
if (UseRTMLocking) {
// Abort RTM transaction before possible nmethod deoptimization.
__ xabort(0);
}
// Push self-frame. We get here with a return address on the
// stack, so rsp is 8-byte aligned until we allocate our frame.
__ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!
@ -3792,6 +3804,13 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
bool cause_return = (poll_type == POLL_AT_RETURN);
bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
if (UseRTMLocking) {
// Abort RTM transaction before calling runtime
// because critical section will be large and will be
// aborted anyway. Also nmethod could be deoptimized.
__ xabort(0);
}
// Make room for return address (or push it again)
if (!cause_return) {
__ push(rbx);

@ -50,13 +50,18 @@ int VM_Version::_cpuFeatures;
const char* VM_Version::_features_str = "";
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;
static BufferBlob* stub_blob;
static const int stub_size = 550;
static const int stub_size = 600;
extern "C" {
typedef void (*getPsrInfo_stub_t)(void*);
typedef void (*get_cpu_info_stub_t)(void*);
}
static getPsrInfo_stub_t getPsrInfo_stub = NULL;
static get_cpu_info_stub_t get_cpu_info_stub = NULL;
class VM_Version_StubGenerator: public StubCodeGenerator {
@ -64,7 +69,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
address generate_getPsrInfo() {
address generate_get_cpu_info() {
// Flags to test CPU type.
const uint32_t HS_EFL_AC = 0x40000;
const uint32_t HS_EFL_ID = 0x200000;
@ -76,13 +81,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;
StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
# define __ _masm->
address start = __ pc();
//
// void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
// void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
//
// LP64: rcx and rdx are first and second argument registers on windows
@ -234,9 +239,9 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// Check if OS has enabled XGETBV instruction to access XCR0
// (OSXSAVE feature flag) and CPU supports AVX
//
__ andl(rcx, 0x18000000);
__ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
__ cmpl(rcx, 0x18000000);
__ jccb(Assembler::notEqual, sef_cpuid);
__ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported
//
// XCR0, XFEATURE_ENABLED_MASK register
@ -247,6 +252,47 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rdx);
__ andl(rax, 0x6); // xcr0 bits sse | ymm
__ cmpl(rax, 0x6);
__ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported
//
// Some OSes have a bug where the upper 128 bits of YMM
// registers are not restored after signal processing.
// Generate SEGV here (reference through NULL)
// and check upper YMM bits after it.
//
VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
// load value into all 32 bytes of ymm7 register
__ movl(rcx, VM_Version::ymm_test_value());
__ movdl(xmm0, rcx);
__ pshufd(xmm0, xmm0, 0x00);
__ vinsertf128h(xmm0, xmm0, xmm0);
__ vmovdqu(xmm7, xmm0);
#ifdef _LP64
__ vmovdqu(xmm8, xmm0);
__ vmovdqu(xmm15, xmm0);
#endif
__ xorl(rsi, rsi);
VM_Version::set_cpuinfo_segv_addr( __ pc() );
// Generate SEGV
__ movl(rax, Address(rsi, 0));
VM_Version::set_cpuinfo_cont_addr( __ pc() );
// Returns here after signal. Save xmm0 to check it later.
__ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
__ vmovdqu(Address(rsi, 0), xmm0);
__ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
__ vmovdqu(Address(rsi, 64), xmm8);
__ vmovdqu(Address(rsi, 96), xmm15);
#endif
VM_Version::clean_cpuFeatures();
//
// cpuid(0x7) Structured Extended Features
//
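
The values written out above are presumably what os_supports_avx_vectors(), referenced later in this change, inspects after the signal handler returns; a hypothetical shape of that check, with each register saved as eight 32-bit words:

#include <cstdint>

static bool ymm_register_survived(const uint32_t* saved_reg, uint32_t test_value) {
  for (int i = 0; i < 8; i++) {
    if (saved_reg[i] != test_value) return false;  // a lane was clobbered across the signal
  }
  return true;
}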
@ -339,6 +385,14 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
};
void VM_Version::get_cpu_info_wrapper() {
get_cpu_info_stub(&_cpuid_info);
}
#ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
#endif
void VM_Version::get_processor_features() {
_cpu = 4; // 486 by default
@ -349,7 +403,11 @@ void VM_Version::get_processor_features() {
if (!Use486InstrsOnly) {
// Get raw processor info
getPsrInfo_stub(&_cpuid_info);
// Some platforms (like Win*) need a wrapper around here
// in order to properly handle SEGV for YMM registers test.
CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper);
assert_is_initialized();
_cpu = extended_cpu_family();
_model = extended_cpu_model();
@ -429,7 +487,7 @@ void VM_Version::get_processor_features() {
}
char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""),
@ -446,8 +504,9 @@ void VM_Version::get_processor_features() {
(supports_avx() ? ", avx" : ""),
(supports_avx2() ? ", avx2" : ""),
(supports_aes() ? ", aes" : ""),
(supports_clmul() ? ", clmul" : ""),
(supports_clmul() ? ", clmul" : ""),
(supports_erms() ? ", erms" : ""),
(supports_rtm() ? ", rtm" : ""),
(supports_mmx_ext() ? ", mmxext" : ""),
(supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
(supports_lzcnt() ? ", lzcnt": ""),
@ -488,7 +547,7 @@ void VM_Version::get_processor_features() {
}
} else if (UseAES) {
if (!FLAG_IS_DEFAULT(UseAES))
warning("AES instructions not available on this CPU");
warning("AES instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
}
@ -521,10 +580,57 @@ void VM_Version::get_processor_features() {
}
} else if (UseAESIntrinsics) {
if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
warning("AES intrinsics not available on this CPU");
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
// Adjust RTM (Restricted Transactional Memory) flags
if (!supports_rtm() && UseRTMLocking) {
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
// setting during arguments processing. See use_biased_locking().
// VM_Version_init() is executed after UseBiasedLocking is used
// in Thread::allocate().
vm_exit_during_initialization("RTM instructions are not available on this CPU");
}
#if INCLUDE_RTM_OPT
if (UseRTMLocking) {
if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
// RTM locking should be used only for applications with
// high lock contention. For now we do not use it by default.
vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
}
if (!is_power_of_2(RTMTotalCountIncrRate)) {
warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
}
if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
FLAG_SET_DEFAULT(RTMAbortRatio, 50);
}
} else { // !UseRTMLocking
if (UseRTMForStackLocks) {
if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
}
FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
}
if (UseRTMDeopt) {
FLAG_SET_DEFAULT(UseRTMDeopt, false);
}
if (PrintPreciseRTMLockingStatistics) {
FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
}
}
#else
if (UseRTMLocking) {
// Only C2 does RTM locking optimization.
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
// setting during arguments processing. See use_biased_locking().
vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
}
#endif
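
For reference, a command line that exercises the validation above on RTM-capable hardware might look like the following; the flag names come from the globals block earlier in this change, and the experimental flags need explicit unlocking:

java -XX:+UseRTMLocking -XX:RTMRetryCount=10 \
     -XX:+UnlockExperimentalVMOptions -XX:+UseRTMForStackLocks -version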
#ifdef COMPILER2
if (UseFPUForSpilling) {
if (UseSSE < 2) {
@ -540,14 +646,28 @@ void VM_Version::get_processor_features() {
if (MaxVectorSize > 32) {
FLAG_SET_DEFAULT(MaxVectorSize, 32);
}
if (MaxVectorSize > 16 && UseAVX == 0) {
// Only supported with AVX+
if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
// 32-byte vectors (in YMM) are only supported with AVX+
FLAG_SET_DEFAULT(MaxVectorSize, 16);
}
if (UseSSE < 2) {
// Only supported with SSE2+
// Vectors (in XMM) are only supported with SSE2+
FLAG_SET_DEFAULT(MaxVectorSize, 0);
}
#ifdef ASSERT
if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
tty->print_cr("State of YMM registers after signal handle:");
int nreg = 2 LP64_ONLY(+2);
const char* ymm_name[4] = {"0", "7", "8", "15"};
for (int i = 0; i < nreg; i++) {
tty->print("YMM%s:", ymm_name[i]);
for (int j = 7; j >=0; j--) {
tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
}
tty->cr();
}
}
#endif
}
#endif
@ -678,14 +798,6 @@ void VM_Version::get_processor_features() {
}
}
}
#if defined(COMPILER2) && defined(_ALLBSD_SOURCE)
if (MaxVectorSize > 16) {
// Limit vectors size to 16 bytes on BSD until it fixes
// restoring upper 128bit of YMM registers on return
// from signal handler.
FLAG_SET_DEFAULT(MaxVectorSize, 16);
}
#endif // COMPILER2
// Use count leading zeros count instruction if available.
if (supports_lzcnt()) {
@ -814,6 +926,11 @@ void VM_Version::get_processor_features() {
if (UseAES) {
tty->print(" UseAES=1");
}
#ifdef COMPILER2
if (MaxVectorSize > 0) {
tty->print(" MaxVectorSize=%d", MaxVectorSize);
}
#endif
tty->cr();
tty->print("Allocation");
if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
@ -856,18 +973,39 @@ void VM_Version::get_processor_features() {
#endif // !PRODUCT
}
bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
// RTM locking is most useful when there is high lock contention and
// low data contention. With high lock contention the lock is usually
// inflated and biased locking is not suitable for that case.
// RTM locking code requires that biased locking is off.
// Note: we can't switch off UseBiasedLocking in get_processor_features()
// because it is used by Thread::allocate() which is called before
// VM_Version::initialize().
if (UseRTMLocking && UseBiasedLocking) {
if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
FLAG_SET_DEFAULT(UseBiasedLocking, false);
} else {
warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
UseBiasedLocking = false;
}
}
#endif
return UseBiasedLocking;
}
void VM_Version::initialize() {
ResourceMark rm;
// Making this stub must be FIRST use of assembler
stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
if (stub_blob == NULL) {
vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
}
CodeBuffer c(stub_blob);
VM_Version_StubGenerator g(&c);
getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
g.generate_getPsrInfo());
get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
g.generate_get_cpu_info());
get_processor_features();
}
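
For context, the RTM locking support enabled above ultimately rests on the XBEGIN/XEND/XABORT instructions: the lock is elided inside a hardware transaction and only taken for real when the transaction aborts. A minimal, hypothetical user-level sketch of that idea (not HotSpot code; it assumes a GCC/Clang toolchain with the RTM intrinsics from <immintrin.h>, built with -mrtm):

#include <immintrin.h>   // _xbegin/_xend/_xabort/_XBEGIN_STARTED, requires -mrtm
#include <atomic>

static std::atomic<int> lock_word(0);        // 0 = free, 1 = held (illustrative lock layout)

template <typename F>
void run_locked(F critical_section) {
  unsigned status = _xbegin();
  if (status == _XBEGIN_STARTED) {
    // Speculative path: read the lock word so that a real owner causes an abort.
    if (lock_word.load(std::memory_order_relaxed) != 0) {
      _xabort(0xff);                         // lock genuinely held -> take the slow path
    }
    critical_section();
    _xend();                                 // commit; the lock was never taken
    return;
  }
  // Fallback path: acquire the lock for real (fast_lock()/fast_unlock() do this far more carefully).
  while (lock_word.exchange(1, std::memory_order_acquire) != 0) { /* spin */ }
  critical_section();
  lock_word.store(0, std::memory_order_release);
}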

@ -207,7 +207,9 @@ public:
: 2,
bmi2 : 1,
erms : 1,
: 22;
: 1,
rtm : 1,
: 20;
} bits;
};
@ -229,6 +231,9 @@ protected:
// 0 if this instruction is not available
static const char* _features_str;
static address _cpuinfo_segv_addr; // address of instruction which causes SEGV
static address _cpuinfo_cont_addr; // address of instruction after the one which causes SEGV
enum {
CPU_CX8 = (1 << 0), // next bits are from cpuid 1 (EDX)
CPU_CMOV = (1 << 1),
@ -254,7 +259,8 @@ protected:
CPU_ERMS = (1 << 20), // enhanced 'rep movsb/stosb' instructions
CPU_CLMUL = (1 << 21), // carryless multiply for CRC
CPU_BMI1 = (1 << 22),
CPU_BMI2 = (1 << 23)
CPU_BMI2 = (1 << 23),
CPU_RTM = (1 << 24) // Restricted Transactional Memory instructions
} cpuFeatureFlags;
enum {
@ -361,6 +367,9 @@ protected:
// extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
XemXcr0Eax xem_xcr0_eax;
uint32_t xem_xcr0_edx; // reserved
// Space to save ymm registers after signal handling
int ymm_save[8*4]; // Save ymm0, ymm7, ymm8, ymm15
};
// The actual cpuid info block
@ -438,6 +447,8 @@ protected:
result |= CPU_ERMS;
if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
result |= CPU_CLMUL;
if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
result |= CPU_RTM;
// AMD features.
if (is_amd()) {
@ -460,6 +471,21 @@ protected:
return result;
}
static bool os_supports_avx_vectors() {
if (!supports_avx()) {
return false;
}
// Verify that the OS saves/restores all bits of the AVX registers
// during signal processing.
int nreg = 2 LP64_ONLY(+2);
for (int i = 0; i < 8 * nreg; i++) { // 32 bytes per ymm register
if (_cpuid_info.ymm_save[i] != ymm_test_value()) {
return false;
}
}
return true;
}
static void get_processor_features();
public:
@ -476,10 +502,27 @@ public:
static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
static ByteSize xem_xcr0_offset() { return byte_offset_of(CpuidInfo, xem_xcr0_eax); }
static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
// The value used to check ymm registers after signal handling
static int ymm_test_value() { return 0xCAFEBABE; }
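// (The get_cpu_info stub loads this pattern into ymm0/ymm7/ymm8/ymm15 before the
//  intentional fault; os_supports_avx_vectors() then checks that the values captured
//  in ymm_save after the signal handler ran are still intact.)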
static void get_cpu_info_wrapper();
static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
static bool is_cpuinfo_segv_addr(address pc) { return _cpuinfo_segv_addr == pc; }
static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }
static address cpuinfo_cont_addr() { return _cpuinfo_cont_addr; }
static void clean_cpuFeatures() { _cpuFeatures = 0; }
static void set_avx_cpuFeatures() { _cpuFeatures = (CPU_SSE | CPU_SSE2 | CPU_AVX); }
// Initialization
static void initialize();
// Override Abstract_VM_Version implementation
static bool use_biased_locking();
// Asserts
static void assert_is_initialized() {
assert(_cpuid_info.std_cpuid1_eax.bits.family != 0, "VM_Version not initialized");
@ -572,6 +615,7 @@ public:
static bool supports_aes() { return (_cpuFeatures & CPU_AES) != 0; }
static bool supports_erms() { return (_cpuFeatures & CPU_ERMS) != 0; }
static bool supports_clmul() { return (_cpuFeatures & CPU_CLMUL) != 0; }
static bool supports_rtm() { return (_cpuFeatures & CPU_RTM) != 0; }
static bool supports_bmi1() { return (_cpuFeatures & CPU_BMI1) != 0; }
static bool supports_bmi2() { return (_cpuFeatures & CPU_BMI2) != 0; }
// Intel features
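
The new CPU_RTM flag is derived from CPUID leaf 7, sub-leaf 0, where (per Intel's documentation) RTM support is reported in EBX bit 11; the rtm bit-field added to sef_cpuid7_ebx above mirrors that layout. A standalone sketch of the same query, assuming GCC/Clang's <cpuid.h> (hypothetical, not HotSpot code):

#include <cpuid.h>
#include <cstdio>

static bool cpu_has_rtm() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  __cpuid_count(7, 0, eax, ebx, ecx, edx);   // structured extended feature flags
  return (ebx & (1u << 11)) != 0;            // EBX bit 11 = RTM
}

int main() {
  std::printf("rtm supported: %s\n", cpu_has_rtm() ? "yes" : "no");
  return 0;
}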

@ -474,7 +474,125 @@ reg_class vectory_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM
%}
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source_hpp %{
// Header information of the source block.
// Method declarations/definitions which are used outside
// the ad-scope can conveniently be defined here.
//
// To keep related declarations/definitions/uses close together,
// we switch between source %{ }% and source_hpp %{ }% freely as needed.
class CallStubImpl {
//--------------------------------------------------------------
//---< Used for optimization in Compile::shorten_branches >---
//--------------------------------------------------------------
public:
// Size of call trampoline stub.
static uint size_call_trampoline() {
return 0; // no call trampolines on this platform
}
// number of relocations needed by a call trampoline stub
static uint reloc_call_trampoline() {
return 0; // no call trampolines on this platform
}
};
class HandlerImpl {
public:
static int emit_exception_handler(CodeBuffer &cbuf);
static int emit_deopt_handler(CodeBuffer& cbuf);
static uint size_exception_handler() {
// NativeCall instruction size is the same as NativeJump.
// The exception handler starts out as a jump and can be patched to
// a call by deoptimization. (4932387)
// Note that this value is also credited (in output.cpp) to
// the size of the code section.
return NativeJump::instruction_size;
}
#ifdef _LP64
static uint size_deopt_handler() {
// three 5 byte instructions
return 15;
}
#else
static uint size_deopt_handler() {
// NativeCall instruction size is the same as NativeJump.
// The exception handler starts out as a jump and can be patched to
// a call by deoptimization. (4932387)
// Note that this value is also credited (in output.cpp) to
// the size of the code section.
return 5 + NativeJump::instruction_size; // pushl(); jmp;
}
#endif
};
%} // end source_hpp
source %{
// Emit exception handler code.
// Stuff framesize into a register and call a VM stub routine.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
if (base == NULL) return 0; // CodeBuffer::expand failed
int offset = __ offset();
__ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
if (base == NULL) return 0; // CodeBuffer::expand failed
int offset = __ offset();
#ifdef _LP64
address the_pc = (address) __ pc();
Label next;
// push a "the_pc" on the stack without destroying any registers
// as they all may be live.
// push address of "next"
__ call(next, relocInfo::none); // reloc none is fine since it is a disp32
__ bind(next);
// adjust it so it matches "the_pc"
__ subptr(Address(rsp, 0), __ offset() - offset);
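// (The call pushes the address of "next", which is (__ offset() - offset) bytes
//  past "the_pc"; subtracting that delta from the value on the stack leaves
//  exactly "the_pc" there.)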
#else
InternalAddress here(__ pc());
__ pushptr(here.addr());
#endif
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
__ end_a_stub();
return offset;
}
//=============================================================================
// Float masks come from different places depending on platform.
#ifdef _LP64
static address float_signmask() { return StubRoutines::x86::float_sign_mask(); }

@ -1297,59 +1297,6 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
uint size_exception_handler() {
// NativeCall instruction size is the same as NativeJump.
// exception handler starts out as jump and can be patched to
// a call be deoptimization. (4932387)
// Note that this value is also credited (in output.cpp) to
// the size of the code section.
return NativeJump::instruction_size;
}
// Emit exception handler code. Stuff framesize into a register
// and call a VM stub routine.
int emit_exception_handler(CodeBuffer& cbuf) {
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
MacroAssembler _masm(&cbuf);
address base =
__ start_a_stub(size_exception_handler());
if (base == NULL) return 0; // CodeBuffer::expand failed
int offset = __ offset();
__ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
uint size_deopt_handler() {
// NativeCall instruction size is the same as NativeJump.
// exception handler starts out as jump and can be patched to
// a call be deoptimization. (4932387)
// Note that this value is also credited (in output.cpp) to
// the size of the code section.
return 5 + NativeJump::instruction_size; // pushl(); jmp;
}
// Emit deopt handler code.
int emit_deopt_handler(CodeBuffer& cbuf) {
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
MacroAssembler _masm(&cbuf);
address base =
__ start_a_stub(size_exception_handler());
if (base == NULL) return 0; // CodeBuffer::expand failed
int offset = __ offset();
InternalAddress here(__ pc());
__ pushptr(here.addr());
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
__ end_a_stub();
return offset;
}
int Matcher::regnum_to_fpu_offset(int regnum) {
return regnum - 32; // The FP registers are in the second chunk
@ -12925,13 +12872,31 @@ instruct RethrowException()
// inlined locking and unlocking
instruct cmpFastLockRTM(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eDXRegI scr, rRegI cx1, rRegI cx2) %{
predicate(Compile::current()->use_rtm());
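// This RTM form is matched only when C2 compiles the method with RTM locking
// enabled (Compile::use_rtm()); otherwise the plain cmpFastLock below is used.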
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, TEMP cx1, TEMP cx2, USE_KILL box);
ins_cost(300);
format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr,$cx1,$cx2" %}
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
$scr$$Register, $cx1$$Register, $cx2$$Register,
_counters, _rtm_counters, _stack_rtm_counters,
((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
true, ra_->C->profile_rtm());
%}
ins_pipe(pipe_slow);
%}
instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
predicate(!Compile::current()->use_rtm());
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, USE_KILL box);
ins_cost(300);
format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
$scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false);
%}
ins_pipe(pipe_slow);
%}
@ -12942,7 +12907,7 @@ instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
ins_cost(300);
format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
ins_encode %{
__ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
__ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, ra_->C->use_rtm());
%}
ins_pipe(pipe_slow);
%}

@ -1439,66 +1439,9 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
return MachNode::size(ra_); // too many variables; just compute it
// the hard way
}
//=============================================================================
uint size_exception_handler()
{
// NativeCall instruction size is the same as NativeJump.
// Note that this value is also credited (in output.cpp) to
// the size of the code section.
return NativeJump::instruction_size;
}
// Emit exception handler code.
int emit_exception_handler(CodeBuffer& cbuf)
{
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
MacroAssembler _masm(&cbuf);
address base =
__ start_a_stub(size_exception_handler());
if (base == NULL) return 0; // CodeBuffer::expand failed
int offset = __ offset();
__ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
uint size_deopt_handler()
{
// three 5 byte instructions
return 15;
}
// Emit deopt handler code.
int emit_deopt_handler(CodeBuffer& cbuf)
{
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
MacroAssembler _masm(&cbuf);
address base =
__ start_a_stub(size_deopt_handler());
if (base == NULL) return 0; // CodeBuffer::expand failed
int offset = __ offset();
address the_pc = (address) __ pc();
Label next;
// push a "the_pc" on the stack without destroying any registers
// as they all may be live.
// push address of "next"
__ call(next, relocInfo::none); // reloc none is fine since it is a disp32
__ bind(next);
// adjust it so it matches "the_pc"
__ subptr(Address(rsp, 0), __ offset() - offset);
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
__ end_a_stub();
return offset;
}
int Matcher::regnum_to_fpu_offset(int regnum)
{
@ -11387,13 +11330,31 @@ instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
// ============================================================================
// inlined locking and unlocking
instruct cmpFastLockRTM(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rdx_RegI scr, rRegI cx1, rRegI cx2) %{
predicate(Compile::current()->use_rtm());
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, TEMP cx1, TEMP cx2, USE_KILL box);
ins_cost(300);
format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr,$cx1,$cx2" %}
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
$scr$$Register, $cx1$$Register, $cx2$$Register,
_counters, _rtm_counters, _stack_rtm_counters,
((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
true, ra_->C->profile_rtm());
%}
ins_pipe(pipe_slow);
%}
instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr) %{
predicate(!Compile::current()->use_rtm());
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, USE_KILL box);
ins_cost(300);
format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %}
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
$scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false);
%}
ins_pipe(pipe_slow);
%}
@ -11404,7 +11365,7 @@ instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{
ins_cost(300);
format %{ "fastunlock $object,$box\t! kills $box,$tmp" %}
ins_encode %{
__ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
__ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, ra_->C->use_rtm());
%}
ins_pipe(pipe_slow);
%}

@ -1,6 +1,6 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,6 @@
#include "os_aix.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "thread_aix.inline.hpp"
#include "runtime/thread.inline.hpp"
#endif // OS_AIX_VM_MUTEX_AIX_INLINE_HPP

@ -1,6 +1,6 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,26 +60,16 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "thread_aix.inline.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
# include "nativeInst_ppc.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
// put OS-includes here (sorted alphabetically)
#include <errno.h>
@ -378,13 +368,14 @@ void os::Aix::query_multipage_support() {
assert(_page_size == SIZE_4K, "surprise!");
// query default data page size (default page size for C-Heap, pthread stacks and .bss).
// Query default data page size (default page size for C-Heap, pthread stacks and .bss).
// Default data page size is influenced either by linker options (-bdatapsize)
// or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
// default should be 4K.
size_t data_page_size = SIZE_4K;
{
void* p = ::malloc(SIZE_16M);
guarantee(p != NULL, "malloc failed");
data_page_size = os::Aix::query_pagesize(p);
::free(p);
}
@ -511,85 +502,76 @@ query_multipage_support_end:
} // end os::Aix::query_multipage_support()
// The code for this method was initially derived from the version in os_linux.cpp
// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {
// The next few definitions allow the code to be verbatim:
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
// Buffer that fits several sprintfs.
// Note that the space for the trailing null is provided
// by the nulls included by the sizeof operator.
const size_t bufsize =
MAX3((size_t)MAXPATHLEN, // For dll_dir & friends.
(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR), // extensions dir
(size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
// sysclasspath, java_home, dll_dir
char *home_path;
char *dll_path;
char *pslash;
char buf[MAXPATHLEN];
os::jvm_path(buf, sizeof(buf));
{
char *pslash;
os::jvm_path(buf, bufsize);
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // get rid of /libjvm.so
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // get rid of /{client|server|hotspot}
}
dll_path = malloc(strlen(buf) + 1);
strcpy(dll_path, buf);
Arguments::set_dll_dir(dll_path);
if (pslash != NULL) {
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // get rid of /<arch>
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
}
Arguments::set_dll_dir(buf);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // get rid of /lib
*pslash = '\0'; // Get rid of /<arch>.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /lib.
}
}
}
Arguments::set_java_home(buf);
set_boot_path('/', ':');
}
home_path = malloc(strlen(buf) + 1);
strcpy(home_path, buf);
Arguments::set_java_home(home_path);
// Where to look for native libraries.
if (!set_boot_path('/', ':')) return;
// Where to look for native libraries
// On Aix we get the user setting of LIBPATH
// On Aix we get the user setting of LIBPATH.
// Eventually, all the library path setting will be done here.
char *ld_library_path;
// Construct the invariant part of ld_library_path.
ld_library_path = (char *) malloc(sizeof(DEFAULT_LIBPATH));
sprintf(ld_library_path, DEFAULT_LIBPATH);
// Get the user setting of LIBPATH, and prepend it.
char *v = ::getenv("LIBPATH");
if (v == NULL) {
v = "";
}
char *t = ld_library_path;
// That's +1 for the colon and +1 for the trailing '\0'
ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
sprintf(ld_library_path, "%s:%s", v, t);
// Get the user setting of LIBPATH.
const char *v = ::getenv("LIBPATH");
const char *v_colon = ":";
if (v == NULL) { v = ""; v_colon = ""; }
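// (If LIBPATH is unset, both v and v_colon collapse to empty strings, so the
//  result below is just DEFAULT_LIBPATH with no stray leading colon.)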
// Concatenate user and invariant part of ld_library_path.
// That's +1 for the colon and +1 for the trailing '\0'.
char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
// Extensions directories
char* cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(EXTENSIONS_DIR));
sprintf(cbuf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(cbuf);
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
// Endorsed standards default directory.
cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
sprintf(cbuf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(cbuf);
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#undef malloc
#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
@ -3593,6 +3575,11 @@ void os::Aix::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
// When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN
if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
exception_name(sig, buf, O_BUFLEN));
}
} else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));

@ -1,6 +1,6 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "runtime/threadCritical.hpp"
#include "thread_aix.inline.hpp"
#include "runtime/thread.inline.hpp"
// put OS-includes here
# include <pthread.h>

@ -306,9 +306,6 @@ static const char *get_home() {
#endif
void os::init_system_properties_values() {
// char arch[12];
// sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
// The next steps are taken in the product version:
//
// Obtain the JAVA_HOME value from the location of libjvm.so.
@ -335,199 +332,205 @@ void os::init_system_properties_values() {
// Important note: if the location of libjvm.so changes this
// code needs to be changed accordingly.
// The next few definitions allow the code to be verbatim:
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define getenv(n) ::getenv(n)
/*
* See ld(1):
* The linker uses the following search paths to locate required
* shared libraries:
* 1: ...
* ...
* 7: The default directories, normally /lib and /usr/lib.
*/
// See ld(1):
// The linker uses the following search paths to locate required
// shared libraries:
// 1: ...
// ...
// 7: The default directories, normally /lib and /usr/lib.
#ifndef DEFAULT_LIBPATH
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif
// Base path of extensions installed on the system.
#define SYS_EXT_DIR "/usr/java/packages"
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
#define REG_DIR "/usr/java/packages"
#ifdef __APPLE__
#define SYS_EXTENSIONS_DIR "/Library/Java/Extensions"
#define SYS_EXTENSIONS_DIRS SYS_EXTENSIONS_DIR ":/Network" SYS_EXTENSIONS_DIR ":/System" SYS_EXTENSIONS_DIR ":/usr/lib/java"
const char *user_home_dir = get_home();
// the null in SYS_EXTENSIONS_DIRS accounts for the size of the colon after user_home_dir
int system_ext_size = strlen(user_home_dir) + sizeof(SYS_EXTENSIONS_DIR) +
sizeof(SYS_EXTENSIONS_DIRS);
#endif
{
/* sysclasspath, java_home, dll_dir */
{
char *home_path;
char *dll_path;
char *pslash;
char buf[MAXPATHLEN];
os::jvm_path(buf, sizeof(buf));
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */
pslash = strrchr(buf, '/');
if (pslash != NULL)
*pslash = '\0'; /* get rid of /{client|server|hotspot} */
dll_path = malloc(strlen(buf) + 1);
if (dll_path == NULL)
return;
strcpy(dll_path, buf);
Arguments::set_dll_dir(dll_path);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; /* get rid of /<arch> (/lib on macosx) */
#ifndef __APPLE__
pslash = strrchr(buf, '/');
if (pslash != NULL)
*pslash = '\0'; /* get rid of /lib */
#endif
}
}
home_path = malloc(strlen(buf) + 1);
if (home_path == NULL)
return;
strcpy(home_path, buf);
Arguments::set_java_home(home_path);
// Buffer that fits several sprintfs.
// Note that the space for the colon and the trailing null are provided
// by the nulls included by the sizeof operator.
const size_t bufsize =
MAX3((size_t)MAXPATHLEN, // For dll_dir & friends.
(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
(size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
if (!set_boot_path('/', ':'))
return;
// sysclasspath, java_home, dll_dir
{
char *pslash;
os::jvm_path(buf, bufsize);
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
}
Arguments::set_dll_dir(buf);
/*
* Where to look for native libraries
*
* Note: Due to a legacy implementation, most of the library path
* is set in the launcher. This was to accommodate linking restrictions
* on legacy Bsd implementations (which are no longer supported).
* Eventually, all the library path setting will be done here.
*
* However, to prevent the proliferation of improperly built native
* libraries, the new path component /usr/java/packages is added here.
* Eventually, all the library path setting will be done here.
*/
{
char *ld_library_path;
/*
* Construct the invariant part of ld_library_path. Note that the
* space for the colon and the trailing null are provided by the
* nulls included by the sizeof operator (so actually we allocate
* a byte more than necessary).
*/
#ifdef __APPLE__
ld_library_path = (char *) malloc(system_ext_size);
sprintf(ld_library_path, "%s" SYS_EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS, user_home_dir);
#else
ld_library_path = (char *) malloc(sizeof(REG_DIR) + sizeof("/lib/") +
strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH));
sprintf(ld_library_path, REG_DIR "/lib/%s:" DEFAULT_LIBPATH, cpu_arch);
#endif
/*
* Get the user setting of LD_LIBRARY_PATH, and prepend it. It
* should always exist (until the legacy problem cited above is
* addressed).
*/
#ifdef __APPLE__
// Prepend the default path with the JAVA_LIBRARY_PATH so that the app launcher code can specify a directory inside an app wrapper
char *l = getenv("JAVA_LIBRARY_PATH");
if (l != NULL) {
char *t = ld_library_path;
/* That's +1 for the colon and +1 for the trailing '\0' */
ld_library_path = (char *) malloc(strlen(l) + 1 + strlen(t) + 1);
sprintf(ld_library_path, "%s:%s", l, t);
free(t);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /<arch>.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /lib.
}
char *v = getenv("DYLD_LIBRARY_PATH");
#else
char *v = getenv("LD_LIBRARY_PATH");
#endif
if (v != NULL) {
char *t = ld_library_path;
/* That's +1 for the colon and +1 for the trailing '\0' */
ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
sprintf(ld_library_path, "%s:%s", v, t);
free(t);
}
#ifdef __APPLE__
// Apple's Java6 has "." at the beginning of java.library.path.
// OpenJDK on Windows has "." at the end of java.library.path.
// OpenJDK on Linux and Solaris doesn't have "." in java.library.path
// at all. To ease the transition from Apple's Java6 to OpenJDK7,
// "." is appended to the end of java.library.path. Yes, this
// could cause a change in behavior, but Apple's Java6 behavior
// can be achieved by putting "." at the beginning of the
// JAVA_LIBRARY_PATH environment variable.
{
char *t = ld_library_path;
// that's +3 for appending ":." and the trailing '\0'
ld_library_path = (char *) malloc(strlen(t) + 3);
sprintf(ld_library_path, "%s:%s", t, ".");
free(t);
}
#endif
Arguments::set_library_path(ld_library_path);
}
/*
* Extensions directories.
*
* Note that the space for the colon and the trailing null are provided
* by the nulls included by the sizeof operator (so actually one byte more
* than necessary is allocated).
*/
{
#ifdef __APPLE__
char *buf = malloc(strlen(Arguments::get_java_home()) +
sizeof(EXTENSIONS_DIR) + system_ext_size);
sprintf(buf, "%s" SYS_EXTENSIONS_DIR ":%s" EXTENSIONS_DIR ":"
SYS_EXTENSIONS_DIRS, user_home_dir, Arguments::get_java_home());
#else
char *buf = malloc(strlen(Arguments::get_java_home()) +
sizeof(EXTENSIONS_DIR) + sizeof(REG_DIR) + sizeof(EXTENSIONS_DIR));
sprintf(buf, "%s" EXTENSIONS_DIR ":" REG_DIR EXTENSIONS_DIR,
Arguments::get_java_home());
#endif
Arguments::set_ext_dirs(buf);
}
/* Endorsed standards default directory. */
{
char * buf;
buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
}
}
Arguments::set_java_home(buf);
set_boot_path('/', ':');
}
#ifdef __APPLE__
// Where to look for native libraries.
//
// Note: Due to a legacy implementation, most of the library path
// is set in the launcher. This was to accommodate linking restrictions
// on legacy Bsd implementations (which are no longer supported).
// Eventually, all the library path setting will be done here.
//
// However, to prevent the proliferation of improperly built native
// libraries, the new path component /usr/java/packages is added here.
// Eventually, all the library path setting will be done here.
{
// Get the user setting of LD_LIBRARY_PATH, and prepend it. It
// should always exist (until the legacy problem cited above is
// addressed).
const char *v = ::getenv("LD_LIBRARY_PATH");
const char *v_colon = ":";
if (v == NULL) { v = ""; v_colon = ""; }
// That's +1 for the colon and +1 for the trailing '\0'.
char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
strlen(v) + 1 +
sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1,
mtInternal);
sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
}
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
// Endorsed standards default directory.
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#else // __APPLE__
#define SYS_EXTENSIONS_DIR "/Library/Java/Extensions"
#define SYS_EXTENSIONS_DIRS SYS_EXTENSIONS_DIR ":/Network" SYS_EXTENSIONS_DIR ":/System" SYS_EXTENSIONS_DIR ":/usr/lib/java"
const char *user_home_dir = get_home();
// The null in SYS_EXTENSIONS_DIRS accounts for the size of the colon after user_home_dir.
size_t system_ext_size = strlen(user_home_dir) + sizeof(SYS_EXTENSIONS_DIR) +
sizeof(SYS_EXTENSIONS_DIRS);
// Buffer that fits several sprintfs.
// Note that the space for the colon and the trailing null are provided
// by the nulls included by the sizeof operator.
const size_t bufsize =
MAX3((size_t)MAXPATHLEN, // for dll_dir & friends.
(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + system_ext_size, // extensions dir
(size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
// sysclasspath, java_home, dll_dir
{
char *pslash;
os::jvm_path(buf, bufsize);
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
}
Arguments::set_dll_dir(buf);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /lib.
}
}
Arguments::set_java_home(buf);
set_boot_path('/', ':');
}
// Where to look for native libraries.
//
// Note: Due to a legacy implementation, most of the library path
// is set in the launcher. This was to accommodate linking restrictions
// on legacy Bsd implementations (which are no longer supported).
// Eventually, all the library path setting will be done here.
//
// However, to prevent the proliferation of improperly built native
// libraries, the new path component /usr/java/packages is added here.
// Eventually, all the library path setting will be done here.
{
// Get the user setting of LD_LIBRARY_PATH, and prepend it. It
// should always exist (until the legacy problem cited above is
// addressed).
// Prepend the default path with the JAVA_LIBRARY_PATH so that the app launcher code
// can specify a directory inside an app wrapper
const char *l = ::getenv("JAVA_LIBRARY_PATH");
const char *l_colon = ":";
if (l == NULL) { l = ""; l_colon = ""; }
const char *v = ::getenv("DYLD_LIBRARY_PATH");
const char *v_colon = ":";
if (v == NULL) { v = ""; v_colon = ""; }
// Apple's Java6 has "." at the beginning of java.library.path.
// OpenJDK on Windows has "." at the end of java.library.path.
// OpenJDK on Linux and Solaris doesn't have "." in java.library.path
// at all. To ease the transition from Apple's Java6 to OpenJDK7,
// "." is appended to the end of java.library.path. Yes, this
// could cause a change in behavior, but Apple's Java6 behavior
// can be achieved by putting "." at the beginning of the
// JAVA_LIBRARY_PATH environment variable.
char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
strlen(v) + 1 + strlen(l) + 1 +
system_ext_size + 3,
mtInternal);
sprintf(ld_library_path, "%s%s%s%s%s" SYS_EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS ":.",
v, v_colon, l, l_colon, user_home_dir);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
}
// Extensions directories.
//
// Note that the space for the colon and the trailing null are provided
// by the nulls included by the sizeof operator (so actually one byte more
// than necessary is allocated).
sprintf(buf, "%s" SYS_EXTENSIONS_DIR ":%s" EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS,
user_home_dir, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
// Endorsed standards default directory.
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#undef SYS_EXTENSIONS_DIR
#endif
#undef malloc
#undef getenv
#undef SYS_EXTENSIONS_DIRS
#endif // __APPLE__
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
// Done
return;
}
////////////////////////////////////////////////////////////////////////////////
@ -3091,7 +3094,7 @@ void os::Bsd::set_signal_handler(int sig, bool set_installed) {
sigAct.sa_sigaction = signalHandler;
sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
}
#if __APPLE__
#ifdef __APPLE__
// Needed for main thread as XNU (Mac OS X kernel) will only deliver SIGSEGV
// (which starts as SIGBUS) on main thread with faulting address inside "stack+guard pages"
// if the signal handler declares it will handle it on alternate stack.
@ -3374,6 +3377,11 @@ void os::Bsd::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
// When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN
if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
exception_name(sig, buf, O_BUFLEN));
}
} else if(os::Bsd::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Bsd::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Bsd::get_our_sigflags(sig));

@ -109,6 +109,8 @@
#define MAX_PATH (2 * K)
#define MAX_SECS 100000000
// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
@ -317,9 +319,6 @@ void os::Linux::initialize_system_info() {
}
void os::init_system_properties_values() {
// char arch[12];
// sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
// The next steps are taken in the product version:
//
// Obtain the JAVA_HOME value from the location of libjvm.so.
@ -346,140 +345,101 @@ void os::init_system_properties_values() {
// Important note: if the location of libjvm.so changes this
// code needs to be changed accordingly.
// The next few definitions allow the code to be verbatim:
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define getenv(n) ::getenv(n)
/*
* See ld(1):
* The linker uses the following search paths to locate required
* shared libraries:
* 1: ...
* ...
* 7: The default directories, normally /lib and /usr/lib.
*/
// See ld(1):
// The linker uses the following search paths to locate required
// shared libraries:
// 1: ...
// ...
// 7: The default directories, normally /lib and /usr/lib.
#if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
#define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
#else
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif
// Base path of extensions installed on the system.
#define SYS_EXT_DIR "/usr/java/packages"
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
#define REG_DIR "/usr/java/packages"
// Buffer that fits several sprintfs.
// Note that the space for the colon and the trailing null are provided
// by the nulls included by the sizeof operator.
const size_t bufsize =
MAX3((size_t)MAXPATHLEN, // For dll_dir & friends.
(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
(size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
// sysclasspath, java_home, dll_dir
{
/* sysclasspath, java_home, dll_dir */
{
char *home_path;
char *dll_path;
char *pslash;
char buf[MAXPATHLEN];
os::jvm_path(buf, sizeof(buf));
char *pslash;
os::jvm_path(buf, bufsize);
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
}
Arguments::set_dll_dir(buf);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /<arch>.
pslash = strrchr(buf, '/');
if (pslash != NULL)
*pslash = '\0'; /* get rid of /{client|server|hotspot} */
dll_path = malloc(strlen(buf) + 1);
if (dll_path == NULL)
return;
strcpy(dll_path, buf);
Arguments::set_dll_dir(dll_path);
if (pslash != NULL) {
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; /* get rid of /<arch> */
pslash = strrchr(buf, '/');
if (pslash != NULL)
*pslash = '\0'; /* get rid of /lib */
}
*pslash = '\0'; // Get rid of /lib.
}
home_path = malloc(strlen(buf) + 1);
if (home_path == NULL)
return;
strcpy(home_path, buf);
Arguments::set_java_home(home_path);
if (!set_boot_path('/', ':'))
return;
}
/*
* Where to look for native libraries
*
* Note: Due to a legacy implementation, most of the library path
* is set in the launcher. This was to accomodate linking restrictions
* on legacy Linux implementations (which are no longer supported).
* Eventually, all the library path setting will be done here.
*
* However, to prevent the proliferation of improperly built native
* libraries, the new path component /usr/java/packages is added here.
* Eventually, all the library path setting will be done here.
*/
{
char *ld_library_path;
/*
* Construct the invariant part of ld_library_path. Note that the
* space for the colon and the trailing null are provided by the
* nulls included by the sizeof operator (so actually we allocate
* a byte more than necessary).
*/
ld_library_path = (char *) malloc(sizeof(REG_DIR) + sizeof("/lib/") +
strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH));
sprintf(ld_library_path, REG_DIR "/lib/%s:" DEFAULT_LIBPATH, cpu_arch);
/*
* Get the user setting of LD_LIBRARY_PATH, and prepend it. It
* should always exist (until the legacy problem cited above is
* addressed).
*/
char *v = getenv("LD_LIBRARY_PATH");
if (v != NULL) {
char *t = ld_library_path;
/* That's +1 for the colon and +1 for the trailing '\0' */
ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
sprintf(ld_library_path, "%s:%s", v, t);
}
Arguments::set_library_path(ld_library_path);
}
/*
* Extensions directories.
*
* Note that the space for the colon and the trailing null are provided
* by the nulls included by the sizeof operator (so actually one byte more
* than necessary is allocated).
*/
{
char *buf = malloc(strlen(Arguments::get_java_home()) +
sizeof(EXTENSIONS_DIR) + sizeof(REG_DIR) + sizeof(EXTENSIONS_DIR));
sprintf(buf, "%s" EXTENSIONS_DIR ":" REG_DIR EXTENSIONS_DIR,
Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
}
/* Endorsed standards default directory. */
{
char * buf;
buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
}
}
Arguments::set_java_home(buf);
set_boot_path('/', ':');
}
#undef malloc
#undef getenv
// Where to look for native libraries.
//
// Note: Due to a legacy implementation, most of the library path
// is set in the launcher. This was to accommodate linking restrictions
// on legacy Linux implementations (which are no longer supported).
// Eventually, all the library path setting will be done here.
//
// However, to prevent the proliferation of improperly built native
// libraries, the new path component /usr/java/packages is added here.
// Eventually, all the library path setting will be done here.
{
// Get the user setting of LD_LIBRARY_PATH, and prepend it. It
// should always exist (until the legacy problem cited above is
// addressed).
const char *v = ::getenv("LD_LIBRARY_PATH");
const char *v_colon = ":";
if (v == NULL) { v = ""; v_colon = ""; }
// That's +1 for the colon and +1 for the trailing '\0'.
char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
strlen(v) + 1 +
sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1,
mtInternal);
sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
}
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
// Endorsed standards default directory.
sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
Arguments::set_endorsed_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#undef DEFAULT_LIBPATH
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
// Done
return;
}
////////////////////////////////////////////////////////////////////////////////
@ -1961,7 +1921,11 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
{EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
{EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
#if defined(VM_LITTLE_ENDIAN)
{EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64"},
#else
{EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
#endif
{EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
{EM_S390, EM_S390, ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
{EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
@ -2434,7 +2398,6 @@ class Semaphore : public StackObj {
sem_t _semaphore;
};
Semaphore::Semaphore() {
sem_init(&_semaphore, 0, 0);
}
@ -2456,8 +2419,22 @@ bool Semaphore::trywait() {
}
bool Semaphore::timedwait(unsigned int sec, int nsec) {
struct timespec ts;
unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
// Semaphores are always associated with CLOCK_REALTIME
os::Linux::clock_gettime(CLOCK_REALTIME, &ts);
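// sem_timedwait() expects an absolute CLOCK_REALTIME deadline, so start from
// "now" and add the (clamped) relative timeout below.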
// see unpackTime for discussion on overflow checking
if (sec >= MAX_SECS) {
ts.tv_sec += MAX_SECS;
ts.tv_nsec = 0;
} else {
ts.tv_sec += sec;
ts.tv_nsec += nsec;
if (ts.tv_nsec >= NANOSECS_PER_SEC) {
ts.tv_nsec -= NANOSECS_PER_SEC;
++ts.tv_sec; // note: this must be <= max_secs
}
}
while (1) {
int result = sem_timedwait(&_semaphore, &ts);
@ -4560,6 +4537,11 @@ void os::Linux::check_signal_handler(int sig) {
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
// When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN
if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
exception_name(sig, buf, O_BUFLEN));
}
} else if(os::Linux::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Linux::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:" PTR32_FORMAT, os::Linux::get_our_sigflags(sig));
@ -5656,7 +5638,6 @@ void os::PlatformEvent::unpark() {
* is no need to track notifications.
*/
#define MAX_SECS 100000000
/*
* This code is common to linux and solaris and will be moved to a
* common place in dolphin.

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,6 @@ void OSThread::pd_initialize() {
_thread_id = 0;
sigemptyset(&_caller_sigmask);
_saved_interrupt_thread_state = _thread_new;
_vm_created_thread = false;
}

Some files were not shown because too many files have changed in this diff.