Albert Noll 2014-02-26 02:38:46 -08:00
commit 1258452546
4501 changed files with 42318 additions and 43171 deletions


@ -244,3 +244,4 @@ f5b521ade7a35cea18df78ee86322207729f5611 jdk8-b118
a1ee9743f4ee165eae59389a020f2552f895dac8 jdk8-b120
13b877757b0b1c0d5813298df85364f41d7ba6fe jdk9-b00
f130ca87de6637acae7d99fcd7a8573eea1cbaed jdk9-b01
b32e2219736e42baaf45daf0ad67ed34f6033799 jdk9-b02


@ -244,3 +244,4 @@ a4afb0a8d55ef75aef5b0d77b434070468fb89f8 jdk8-b117
cd3825b2983045784d6fc6d1729c799b08215752 jdk8-b120
1e1f86d5d4e22c15a9bf9f1581acddb8c59abae2 jdk9-b00
50669e45cec4491de0d921d3118a3fe2e767020a jdk9-b01
135f0c7af57ebace31383d8877f47e32172759ff jdk9-b02


@ -236,35 +236,119 @@ AC_DEFUN_ONCE([BASIC_INIT],
# Test that variable $1 denoting a program is not empty. If empty, exit with an error.
# $1: variable to check
# $2: executable name to print in warning (optional)
AC_DEFUN([BASIC_CHECK_NONEMPTY],
[
if test "x[$]$1" = x; then
if test "x$2" = x; then
PROG_NAME=translit($1,A-Z,a-z)
else
PROG_NAME=$2
fi
AC_MSG_NOTICE([Could not find $PROG_NAME!])
AC_MSG_ERROR([Cannot continue])
AC_MSG_ERROR([Could not find required tool for $1])
fi
])
# Does AC_PATH_PROG followed by BASIC_CHECK_NONEMPTY.
# Arguments as AC_PATH_PROG:
# $1: variable to set
# $2: executable name to look for
AC_DEFUN([BASIC_REQUIRE_PROG],
# Check that there are no unprocessed overridden variables left.
# If so, they were probably misspelled by the user; issue a warning about them.
AC_DEFUN([BASIC_CHECK_LEFTOVER_OVERRIDDEN],
[
AC_PATH_PROGS($1, $2)
BASIC_CHECK_NONEMPTY($1, $2)
if test "x$CONFIGURE_OVERRIDDEN_VARIABLES" != x; then
# Replace the separating ! with spaces before presenting them to the end user.
unknown_variables=${CONFIGURE_OVERRIDDEN_VARIABLES//!/ }
AC_MSG_WARN([The following variables might be unknown to configure: $unknown_variables])
fi
])
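# For illustration (values hypothetical, not from this file): with
# CONFIGURE_OVERRIDDEN_VARIABLES="!CC!!MAKE!", the bash substitution
# ${CONFIGURE_OVERRIDDEN_VARIABLES//!/ } yields " CC  MAKE ", so the
# warning prints the variable names as a space-separated list.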
# Setup a tool for the given variable. If correctly specified by the user,
# use that value, otherwise search for the tool using the supplied code snippet.
# $1: variable to set
# $2: code snippet to call to look for the tool
AC_DEFUN([BASIC_SETUP_TOOL],
[
# Publish this variable in the help.
AC_ARG_VAR($1, [Override default value for $1])
if test "x[$]$1" = x; then
# The variable is not set by user, try to locate tool using the code snippet
$2
else
# The variable is set, but is it from the command line or the environment?
# Try to remove the string !$1! from our list.
try_remove_var=${CONFIGURE_OVERRIDDEN_VARIABLES//!$1!/}
if test "x$try_remove_var" = "x$CONFIGURE_OVERRIDDEN_VARIABLES"; then
# If it failed, the variable was not from the command line. Ignore it,
# but warn the user (except for BASH, which is always set by the calling BASH).
if test "x$1" != xBASH; then
AC_MSG_WARN([Ignoring value of $1 from the environment. Use command line variables instead.])
fi
# Try to locate tool using the code snippet
$2
else
# If it succeeded, then it was overridden by the user. We will use it
# for the tool.
# First remove it from the list of overridden variables, so we can test
# for unknown variables in the end.
CONFIGURE_OVERRIDDEN_VARIABLES="$try_remove_var"
# Check if the provided tool contains a complete path.
tool_specified="[$]$1"
tool_basename="${tool_specified##*/}"
if test "x$tool_basename" = "x$tool_specified"; then
# A command without a complete path is provided, search $PATH.
AC_MSG_NOTICE([Will search for user supplied tool $1=$tool_basename])
AC_PATH_PROG($1, $tool_basename)
if test "x[$]$1" = x; then
AC_MSG_ERROR([User supplied tool $tool_basename could not be found])
fi
else
# Otherwise we believe it is a complete path. Use it as it is.
AC_MSG_NOTICE([Will use user supplied tool $1=$tool_specified])
AC_MSG_CHECKING([for $1])
if test ! -x "$tool_specified"; then
AC_MSG_RESULT([not found])
AC_MSG_ERROR([User supplied tool $1=$tool_specified does not exist or is not executable])
fi
AC_MSG_RESULT([$tool_specified])
fi
fi
fi
])
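# A minimal standalone sketch of the !VAR! membership test used above
# (variable values hypothetical; ${var//pattern/} is a bash feature,
# which is fine since configure runs under bash):
CONFIGURE_OVERRIDDEN_VARIABLES="!CC!!MAKE!"
try_remove_var=${CONFIGURE_OVERRIDDEN_VARIABLES//!CC!/}
if test "x$try_remove_var" = "x$CONFIGURE_OVERRIDDEN_VARIABLES"; then
echo "CC was not overridden on the command line"
else
echo "CC was overridden; remaining list: $try_remove_var"
fi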
# Call BASIC_SETUP_TOOL with AC_PATH_PROGS to locate the tool
# $1: variable to set
# $2: executable name (or list of names) to look for
AC_DEFUN([BASIC_PATH_PROGS],
[
BASIC_SETUP_TOOL($1, [AC_PATH_PROGS($1, $2)])
])
# Call BASIC_SETUP_TOOL with AC_CHECK_TOOLS to locate the tool
# $1: variable to set
# $2: executable name (or list of names) to look for
AC_DEFUN([BASIC_CHECK_TOOLS],
[
BASIC_SETUP_TOOL($1, [AC_CHECK_TOOLS($1, $2)])
])
# Like BASIC_PATH_PROGS but fails if no tool was found.
# $1: variable to set
# $2: executable name (or list of names) to look for
AC_DEFUN([BASIC_REQUIRE_PROGS],
[
BASIC_PATH_PROGS($1, $2)
BASIC_CHECK_NONEMPTY($1)
])
# Like BASIC_SETUP_TOOL but fails if no tool was found.
# $1: variable to set
# $2: autoconf macro to call to look for the special tool
AC_DEFUN([BASIC_REQUIRE_SPECIAL],
[
BASIC_SETUP_TOOL($1, [$2])
BASIC_CHECK_NONEMPTY($1)
])
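# Usage sketch: a plain required tool versus one that needs an
# autoconf-provided probe (the GZIP example is hypothetical, not from
# this file; AC_PROG_LEX is a standard autoconf macro):
#   BASIC_REQUIRE_PROGS(GZIP, [pigz gzip])
#   BASIC_REQUIRE_SPECIAL(LEX, [AC_PROG_LEX])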
# Setup the most fundamental tools, which rely on not much else being set up
# but are used by much of the early bootstrap code.
AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
[
# Start with tools that do not need to have cross compilation support
# and can be expected to be found in the default PATH. These tools are
# used by configure. Nor are these tools expected to be found in the
@ -272,57 +356,50 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
# needed to download the devkit.
# First are all the simple required tools.
BASIC_REQUIRE_PROG(BASENAME, basename)
BASIC_REQUIRE_PROG(BASH, bash)
BASIC_REQUIRE_PROG(CAT, cat)
BASIC_REQUIRE_PROG(CHMOD, chmod)
BASIC_REQUIRE_PROG(CMP, cmp)
BASIC_REQUIRE_PROG(COMM, comm)
BASIC_REQUIRE_PROG(CP, cp)
BASIC_REQUIRE_PROG(CPIO, cpio)
BASIC_REQUIRE_PROG(CUT, cut)
BASIC_REQUIRE_PROG(DATE, date)
BASIC_REQUIRE_PROG(DIFF, [gdiff diff])
BASIC_REQUIRE_PROG(DIRNAME, dirname)
BASIC_REQUIRE_PROG(ECHO, echo)
BASIC_REQUIRE_PROG(EXPR, expr)
BASIC_REQUIRE_PROG(FILE, file)
BASIC_REQUIRE_PROG(FIND, find)
BASIC_REQUIRE_PROG(HEAD, head)
BASIC_REQUIRE_PROG(LN, ln)
BASIC_REQUIRE_PROG(LS, ls)
BASIC_REQUIRE_PROG(MKDIR, mkdir)
BASIC_REQUIRE_PROG(MKTEMP, mktemp)
BASIC_REQUIRE_PROG(MV, mv)
BASIC_REQUIRE_PROG(PRINTF, printf)
BASIC_REQUIRE_PROG(RM, rm)
BASIC_REQUIRE_PROG(SH, sh)
BASIC_REQUIRE_PROG(SORT, sort)
BASIC_REQUIRE_PROG(TAIL, tail)
BASIC_REQUIRE_PROG(TAR, tar)
BASIC_REQUIRE_PROG(TEE, tee)
BASIC_REQUIRE_PROG(TOUCH, touch)
BASIC_REQUIRE_PROG(TR, tr)
BASIC_REQUIRE_PROG(UNAME, uname)
BASIC_REQUIRE_PROG(UNIQ, uniq)
BASIC_REQUIRE_PROG(WC, wc)
BASIC_REQUIRE_PROG(WHICH, which)
BASIC_REQUIRE_PROG(XARGS, xargs)
BASIC_REQUIRE_PROGS(BASENAME, basename)
BASIC_REQUIRE_PROGS(BASH, bash)
BASIC_REQUIRE_PROGS(CAT, cat)
BASIC_REQUIRE_PROGS(CHMOD, chmod)
BASIC_REQUIRE_PROGS(CMP, cmp)
BASIC_REQUIRE_PROGS(COMM, comm)
BASIC_REQUIRE_PROGS(CP, cp)
BASIC_REQUIRE_PROGS(CPIO, cpio)
BASIC_REQUIRE_PROGS(CUT, cut)
BASIC_REQUIRE_PROGS(DATE, date)
BASIC_REQUIRE_PROGS(DIFF, [gdiff diff])
BASIC_REQUIRE_PROGS(DIRNAME, dirname)
BASIC_REQUIRE_PROGS(ECHO, echo)
BASIC_REQUIRE_PROGS(EXPR, expr)
BASIC_REQUIRE_PROGS(FILE, file)
BASIC_REQUIRE_PROGS(FIND, find)
BASIC_REQUIRE_PROGS(HEAD, head)
BASIC_REQUIRE_PROGS(LN, ln)
BASIC_REQUIRE_PROGS(LS, ls)
BASIC_REQUIRE_PROGS(MKDIR, mkdir)
BASIC_REQUIRE_PROGS(MKTEMP, mktemp)
BASIC_REQUIRE_PROGS(MV, mv)
BASIC_REQUIRE_PROGS(NAWK, [nawk gawk awk])
BASIC_REQUIRE_PROGS(PRINTF, printf)
BASIC_REQUIRE_PROGS(RM, rm)
BASIC_REQUIRE_PROGS(SH, sh)
BASIC_REQUIRE_PROGS(SORT, sort)
BASIC_REQUIRE_PROGS(TAIL, tail)
BASIC_REQUIRE_PROGS(TAR, tar)
BASIC_REQUIRE_PROGS(TEE, tee)
BASIC_REQUIRE_PROGS(TOUCH, touch)
BASIC_REQUIRE_PROGS(TR, tr)
BASIC_REQUIRE_PROGS(UNAME, uname)
BASIC_REQUIRE_PROGS(UNIQ, uniq)
BASIC_REQUIRE_PROGS(WC, wc)
BASIC_REQUIRE_PROGS(WHICH, which)
BASIC_REQUIRE_PROGS(XARGS, xargs)
# Then required tools that require some special treatment.
AC_PROG_AWK
BASIC_CHECK_NONEMPTY(AWK)
AC_PROG_GREP
BASIC_CHECK_NONEMPTY(GREP)
AC_PROG_EGREP
BASIC_CHECK_NONEMPTY(EGREP)
AC_PROG_FGREP
BASIC_CHECK_NONEMPTY(FGREP)
AC_PROG_SED
BASIC_CHECK_NONEMPTY(SED)
AC_PATH_PROGS(NAWK, [nawk gawk awk])
BASIC_CHECK_NONEMPTY(NAWK)
BASIC_REQUIRE_SPECIAL(AWK, [AC_PROG_AWK])
BASIC_REQUIRE_SPECIAL(GREP, [AC_PROG_GREP])
BASIC_REQUIRE_SPECIAL(EGREP, [AC_PROG_EGREP])
BASIC_REQUIRE_SPECIAL(FGREP, [AC_PROG_FGREP])
BASIC_REQUIRE_SPECIAL(SED, [AC_PROG_SED])
# Always force rm.
RM="$RM -f"
@ -332,10 +409,10 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
THEPWDCMD=pwd
# These are not required on all platforms
AC_PATH_PROG(CYGPATH, cygpath)
AC_PATH_PROG(READLINK, readlink)
AC_PATH_PROG(DF, df)
AC_PATH_PROG(SETFILE, SetFile)
BASIC_PATH_PROGS(CYGPATH, cygpath)
BASIC_PATH_PROGS(READLINK, [greadlink readlink])
BASIC_PATH_PROGS(DF, df)
BASIC_PATH_PROGS(SETFILE, SetFile)
])
# Setup basic configuration paths, and platform-specific stuff related to PATHs.
@ -622,26 +699,26 @@ AC_DEFUN_ONCE([BASIC_SETUP_COMPLEX_TOOLS],
# These tools might not be installed by default,
# need hint on how to install them.
BASIC_REQUIRE_PROG(UNZIP, unzip)
BASIC_REQUIRE_PROG(ZIP, zip)
BASIC_REQUIRE_PROGS(UNZIP, unzip)
BASIC_REQUIRE_PROGS(ZIP, zip)
# Non-required basic tools
AC_PATH_PROG(LDD, ldd)
BASIC_PATH_PROGS(LDD, ldd)
if test "x$LDD" = "x"; then
# Listing shared lib dependencies is used for
# debug output and for checking forbidden dependencies.
# We can build without it.
LDD="true"
fi
AC_PATH_PROG(OTOOL, otool)
BASIC_PATH_PROGS(OTOOL, otool)
if test "x$OTOOL" = "x"; then
OTOOL="true"
fi
AC_PATH_PROGS(READELF, [readelf greadelf])
AC_PATH_PROG(HG, hg)
AC_PATH_PROG(STAT, stat)
AC_PATH_PROG(TIME, time)
BASIC_PATH_PROGS(READELF, [greadelf readelf])
BASIC_PATH_PROGS(HG, hg)
BASIC_PATH_PROGS(STAT, stat)
BASIC_PATH_PROGS(TIME, time)
# Check if it's GNU time
IS_GNU_TIME=`$TIME --version 2>&1 | $GREP 'GNU time'`
if test "x$IS_GNU_TIME" != x; then
@ -652,13 +729,13 @@ AC_DEFUN_ONCE([BASIC_SETUP_COMPLEX_TOOLS],
AC_SUBST(IS_GNU_TIME)
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
BASIC_REQUIRE_PROG(COMM, comm)
BASIC_REQUIRE_PROGS(COMM, comm)
fi
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
BASIC_REQUIRE_PROG(DSYMUTIL, dsymutil)
BASIC_REQUIRE_PROG(XATTR, xattr)
AC_PATH_PROG(CODESIGN, codesign)
BASIC_REQUIRE_PROGS(DSYMUTIL, dsymutil)
BASIC_REQUIRE_PROGS(XATTR, xattr)
BASIC_PATH_PROGS(CODESIGN, codesign)
if test "x$CODESIGN" != "x"; then
# Verify that the openjdk_codesign certificate is present
AC_MSG_CHECKING([if openjdk_codesign certificate is present])
@ -720,6 +797,9 @@ AC_DEFUN_ONCE([BASIC_CHECK_SRC_PERMS],
AC_DEFUN_ONCE([BASIC_TEST_USABILITY_ISSUES],
[
# Did user specify any unknown variables?
BASIC_CHECK_LEFTOVER_OVERRIDDEN
AC_MSG_CHECKING([if build directory is on local disk])
BASIC_CHECK_DIR_ON_LOCAL_DISK($OUTPUT_ROOT,
[OUTPUT_DIR_IS_LOCAL="yes"],
@ -738,12 +818,4 @@ AC_DEFUN_ONCE([BASIC_TEST_USABILITY_ISSUES],
else
IS_RECONFIGURE=no
fi
if test -e $SRC_ROOT/build/.hide-configure-performance-hints; then
HIDE_PERFORMANCE_HINTS=yes
else
HIDE_PERFORMANCE_HINTS=no
# Hide it the next time around...
$TOUCH $SRC_ROOT/build/.hide-configure-performance-hints > /dev/null 2>&1
fi
])


@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,34 @@
# questions.
#
########################################################################
# This file handles detection of the Boot JDK. The Boot JDK detection
# process has been developed in response to a complex real-world
# problem. Initially it was simple, but it has grown as platform after
# platform, idiosyncrasy after idiosyncrasy, has been supported.
#
# The basic idea is this:
# 1) You need an acceptable *) JDK to use as a Boot JDK
# 2) There are several ways to locate a JDK, which are mostly platform
# dependent **)
# 3) You can have multiple JDKs installed
# 4) If possible, configure should try to dig out an acceptable JDK
# automatically, without having to resort to command-line options
#
# *) acceptable means e.g. JDK7 for building JDK8, a complete JDK (with
# javac) and not a JRE, etc.
#
# **) On Windows we typically use a well-known path.
# On MacOSX we typically use the tool java_home.
# On Linux we typically find javac in the $PATH, and then follow a
# chain of symlinks that often ends up in a real JDK.
#
# This leads to the code where we check in different ways to locate a
# JDK, and if one is found, check if it is acceptable. If not, we print
# our reasons for rejecting it (useful when debugging non-working
# configure situations) and continue checking the next one.
########################################################################
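# As a minimal sketch of the Linux case described above (paths are
# illustrative; 'readlink -f' is a GNU extension):
#   JAVAC_PATH=`command -v javac`            # e.g. /usr/bin/javac
#   REAL_JAVAC=`readlink -f "$JAVAC_PATH"`   # e.g. /opt/jdk7/bin/javac
#   BOOT_JDK=`dirname "$REAL_JAVAC"`/..      # candidate JDK home to verify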
# Execute the check given as argument, and verify the result
# If the Boot JDK was previously found, do nothing
# $1 A command line (typically autoconf macro) to execute
@ -54,10 +82,10 @@ AC_DEFUN([BOOTJDK_DO_CHECK],
BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" -version 2>&1 | head -n 1`
# Extra M4 quote needed to protect [] in grep expression.
[FOUND_VERSION_78=`echo $BOOT_JDK_VERSION | grep '\"1\.[78]\.'`]
if test "x$FOUND_VERSION_78" = x; then
[FOUND_CORRECT_VERSION=`echo $BOOT_JDK_VERSION | grep '\"1\.[789]\.'`]
if test "x$FOUND_CORRECT_VERSION" = x; then
AC_MSG_NOTICE([Potential Boot JDK found at $BOOT_JDK is incorrect JDK version ($BOOT_JDK_VERSION); ignoring])
AC_MSG_NOTICE([(Your Boot JDK must be version 7 or 8)])
AC_MSG_NOTICE([(Your Boot JDK must be version 7, 8 or 9)])
BOOT_JDK_FOUND=no
else
# We're done! :-)
@ -136,12 +164,26 @@ AC_DEFUN([BOOTJDK_CHECK_JAVA_IN_PATH_IS_SYMLINK],
])
# Test: Is there a /usr/libexec/java_home? (Typically on MacOSX)
# $1: Argument to the java_home binary (optional)
AC_DEFUN([BOOTJDK_CHECK_LIBEXEC_JAVA_HOME],
[
if test -x /usr/libexec/java_home; then
BOOT_JDK=`/usr/libexec/java_home`
BOOT_JDK=`/usr/libexec/java_home $1`
BOOT_JDK_FOUND=maybe
AC_MSG_NOTICE([Found potential Boot JDK using /usr/libexec/java_home])
AC_MSG_NOTICE([Found potential Boot JDK using /usr/libexec/java_home $1])
fi
])
# Test: On MacOS X, can we find a boot jdk using /usr/libexec/java_home?
AC_DEFUN([BOOTJDK_CHECK_MACOSX_JAVA_LOCATOR],
[
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# First check the user selected default
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_LIBEXEC_JAVA_HOME()])
# If that did not work out (e.g. too old), try explicit versions instead
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_LIBEXEC_JAVA_HOME([-v 1.9])])
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_LIBEXEC_JAVA_HOME([-v 1.8])])
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_LIBEXEC_JAVA_HOME([-v 1.7])])
fi
])
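# For reference, java_home can be exercised by hand on a machine with
# several JDKs installed (output paths below are illustrative):
#   $ /usr/libexec/java_home
#   /Library/Java/JavaVirtualMachines/jdk1.8.0.jdk/Contents/Home
#   $ /usr/libexec/java_home -v 1.7
#   /Library/Java/JavaVirtualMachines/jdk1.7.0.jdk/Contents/Home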
@ -201,14 +243,19 @@ AC_DEFUN([BOOTJDK_CHECK_WELL_KNOWN_LOCATIONS],
# $2 = name of binary
AC_DEFUN([BOOTJDK_CHECK_TOOL_IN_BOOTJDK],
[
AC_MSG_CHECKING([for $2 in Boot JDK])
$1=$BOOT_JDK/bin/$2
if test ! -x [$]$1; then
AC_MSG_RESULT(not found)
AC_MSG_NOTICE([Your Boot JDK seems broken. This might be fixed by explicitly setting --with-boot-jdk])
AC_MSG_ERROR([Could not find $2 in the Boot JDK])
fi
AC_MSG_RESULT(ok)
# Use user overridden value if available, otherwise locate tool in the Boot JDK.
BASIC_SETUP_TOOL($1,
[
AC_MSG_CHECKING([for $2 in Boot JDK])
$1=$BOOT_JDK/bin/$2
if test ! -x [$]$1; then
AC_MSG_RESULT(not found)
AC_MSG_NOTICE([Your Boot JDK seems broken. This might be fixed by explicitly setting --with-boot-jdk])
AC_MSG_ERROR([Could not find $2 in the Boot JDK])
fi
AC_MSG_RESULT(ok)
AC_SUBST($1)
])
])
###############################################################################
@ -238,12 +285,12 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK],
# Test: Is bootjdk available from builddeps?
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_BUILDDEPS])
# Test: On MacOS X, can we find a boot jdk using /usr/libexec/java_home?
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_MACOSX_JAVA_LOCATOR])
# Test: Is $JAVA_HOME set?
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_JAVA_HOME])
# Test: Is there a /usr/libexec/java_home? (Typically on MacOSX)
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_LIBEXEC_JAVA_HOME])
# Test: Is there a java or javac in the PATH, which is a symlink to the JDK?
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_JAVA_IN_PATH_IS_SYMLINK])
@ -275,13 +322,12 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK],
AC_SUBST(BOOT_JDK)
# Setup tools from the Boot JDK.
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVA,java)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAC,javac)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAH,javah)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAP,javap)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAR,jar)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(RMIC,rmic)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(NATIVE2ASCII,native2ascii)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVA, java)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAC, javac)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAH, javah)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAR, jar)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(NATIVE2ASCII, native2ascii)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JARSIGNER, jarsigner)
# Finally, set some other options...
@ -316,7 +362,7 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
# Minimum amount of heap memory.
ADD_JVM_ARG_IF_OK([-Xms64M],boot_jdk_jvmargs,[$JAVA])
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
if test "x$OPENJDK_TARGET_OS" = "xmacosx" || test "x$OPENJDK_TARGET_CPU" = "xppc64" ; then
# Why does macosx need more heap? It's the huge JDK batch.
ADD_JVM_ARG_IF_OK([-Xmx1600M],boot_jdk_jvmargs,[$JAVA])
else


@ -60,4 +60,20 @@ if test $? = 0; then
esac
fi
# Test and fix architecture string on AIX
# On AIX 'config.guess' returns 'powerpc' as the architecture, but 'powerpc' is
# implicitly handled as a 32-bit architecture in 'platform.m4', so we check
# the kernel mode and rewrite it to 'powerpc64' if we're running in 64-bit mode.
# The check could also be done with `/usr/sbin/prtconf | grep "Kernel Type" | grep "64-bit"`
echo $OUT | grep powerpc-ibm-aix > /dev/null 2> /dev/null
if test $? = 0; then
if [ -x /bin/getconf ] ; then
KERNEL_BITMODE=`getconf KERNEL_BITMODE`
if [ "$KERNEL_BITMODE" = "32" ]; then
KERNEL_BITMODE=""
fi
fi
OUT=powerpc$KERNEL_BITMODE`echo $OUT | sed -e 's/[^-]*//'`
fi
echo $OUT
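# Worked example of the rewrite above, assuming a 64-bit AIX kernel:
#   OUT=powerpc-ibm-aix7.1.0.0 and KERNEL_BITMODE=64 give
#   OUT=powerpc64-ibm-aix7.1.0.0
# because the sed expression 's/[^-]*//' strips everything up to the
# first '-' and 'powerpc64' is prepended in its place.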


@ -41,6 +41,9 @@ AC_DEFUN([BPERF_CHECK_CORES],
# Looks like a MacOSX system
NUM_CORES=`/usr/sbin/system_profiler -detailLevel full SPHardwareDataType | grep 'Cores' | awk '{print [$]5}'`
FOUND_CORES=yes
elif test "x$OPENJDK_BUILD_OS" = xaix ; then
NUM_CORES=`/usr/sbin/prtconf | grep "^Number Of Processors" | awk '{ print [$]4 }'`
FOUND_CORES=yes
elif test -n "$NUMBER_OF_PROCESSORS"; then
# On windows, look in the env
NUM_CORES=$NUMBER_OF_PROCESSORS
@ -68,8 +71,8 @@ AC_DEFUN([BPERF_CHECK_MEMORY_SIZE],
MEMORY_SIZE=`expr $MEMORY_SIZE / 1024`
FOUND_MEM=yes
elif test -x /usr/sbin/prtconf; then
# Looks like a Solaris system
MEMORY_SIZE=`/usr/sbin/prtconf | grep "Memory size" | awk '{ print [$]3 }'`
# Looks like a Solaris or AIX system
MEMORY_SIZE=`/usr/sbin/prtconf | grep "^Memory [[Ss]]ize" | awk '{ print [$]3 }'`
FOUND_MEM=yes
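# For example, AIX prtconf prints a line such as "Memory Size: 32768 MB"
# while Solaris prints "Memory size: 32768 Megabytes" (values
# illustrative); the [Ss] bracket expression matches both spellings and
# awk picks the third field, the size in megabytes.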
elif test -x /usr/sbin/system_profiler; then
# Looks like a MacOSX system
@ -157,20 +160,28 @@ AC_DEFUN_ONCE([BPERF_SETUP_BUILD_JOBS],
AC_DEFUN([BPERF_SETUP_CCACHE],
[
AC_ARG_ENABLE([ccache],
[AS_HELP_STRING([--disable-ccache],
[disable using ccache to speed up recompilations @<:@enabled@:>@])],
[ENABLE_CCACHE=${enable_ccache}], [ENABLE_CCACHE=yes])
if test "x$ENABLE_CCACHE" = xyes; then
[AS_HELP_STRING([--enable-ccache],
[enable using ccache to speed up recompilations @<:@disabled@:>@])])
CCACHE=
AC_MSG_CHECKING([is ccache enabled])
ENABLE_CCACHE=$enable_ccache
if test "x$enable_ccache" = xyes; then
AC_MSG_RESULT([yes])
OLD_PATH="$PATH"
if test "x$TOOLS_DIR" != x; then
PATH=$TOOLS_DIR:$PATH
fi
AC_PATH_PROG(CCACHE, ccache)
BASIC_REQUIRE_PROGS(CCACHE, ccache)
CCACHE_STATUS="enabled"
PATH="$OLD_PATH"
elif test "x$enable_ccache" = xno; then
AC_MSG_RESULT([no, explicitly disabled])
elif test "x$enable_ccache" = x; then
AC_MSG_RESULT([no])
else
AC_MSG_CHECKING([for ccache])
AC_MSG_RESULT([explicitly disabled])
CCACHE=
AC_MSG_RESULT([unknown])
AC_MSG_ERROR([--enable-ccache does not accept any parameters])
fi
AC_SUBST(CCACHE)
@ -182,8 +193,11 @@ AC_DEFUN([BPERF_SETUP_CCACHE],
# When using a non-home ccache directory, assume the intent is to share ccache files
# with other users. Thus change the umask.
SET_CCACHE_DIR="CCACHE_DIR=$with_ccache_dir CCACHE_UMASK=002"
if test "x$CCACHE" = x; then
AC_MSG_WARN([--with-ccache-dir has no meaning when ccache is not enabled])
fi
fi
CCACHE_FOUND=""
if test "x$CCACHE" != x; then
BPERF_SETUP_CCACHE_USAGE
fi
@ -192,7 +206,6 @@ AC_DEFUN([BPERF_SETUP_CCACHE],
AC_DEFUN([BPERF_SETUP_CCACHE_USAGE],
[
if test "x$CCACHE" != x; then
CCACHE_FOUND="true"
# Only use ccache if it is 3.1.4 or later, which supports
# precompiled headers.
AC_MSG_CHECKING([if ccache supports precompiled headers])
@ -200,6 +213,7 @@ AC_DEFUN([BPERF_SETUP_CCACHE_USAGE],
if test "x$HAS_GOOD_CCACHE" = x; then
AC_MSG_RESULT([no, disabling ccache])
CCACHE=
CCACHE_STATUS="disabled"
else
AC_MSG_RESULT([yes])
AC_MSG_CHECKING([if C-compiler supports ccache precompiled headers])
@ -212,6 +226,7 @@ AC_DEFUN([BPERF_SETUP_CCACHE_USAGE],
else
AC_MSG_RESULT([no, disabling ccaching of precompiled headers])
CCACHE=
CCACHE_STATUS="disabled"
fi
fi
fi


@ -121,15 +121,23 @@ do
case $conf_option in
--openjdk-target=*)
conf_openjdk_target=`expr "X$conf_option" : '[^=]*=\(.*\)'`
continue ;;
;;
--debug-configure)
if test "x$conf_debug_configure" != xrecursive; then
conf_debug_configure=true
export conf_debug_configure
fi
continue ;;
;;
[^-]*=*)
# Add name of variable to CONFIGURE_OVERRIDDEN_VARIABLES list inside !...!.
conf_env_var=`expr "x$conf_option" : 'x\([^=]*\)='`
CONFIGURE_OVERRIDDEN_VARIABLES="$CONFIGURE_OVERRIDDEN_VARIABLES!$conf_env_var!"
# ... and then process argument as usual
conf_processed_arguments=("${conf_processed_arguments[@]}" "$conf_option")
;;
*)
conf_processed_arguments=("${conf_processed_arguments[@]}" "$conf_option") ;;
conf_processed_arguments=("${conf_processed_arguments[@]}" "$conf_option")
;;
esac
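# Sketch of the variable-name extraction above: for a hypothetical
# argument CC=/usr/bin/gcc,
#   expr "xCC=/usr/bin/gcc" : 'x\([^=]*\)='
# prints "CC", so CONFIGURE_OVERRIDDEN_VARIABLES gains the entry !CC!.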
case $conf_option in
@ -212,6 +220,9 @@ Additional (non-autoconf) OpenJDK Options:
Please be aware that, when cross-compiling, the OpenJDK configure script will
generally use 'target' where autoconf traditionally uses 'host'.
Also note that variables must be passed on the command line. Variables in the
environment will generally be ignored, unlike traditional autoconf scripts.
EOT
fi
else


@ -88,6 +88,7 @@ JDKOPT_SETUP_OPEN_OR_CUSTOM
# These are needed to be able to create a configuration name (and thus the output directory)
JDKOPT_SETUP_JDK_VARIANT
JDKOPT_SETUP_JVM_INTERPRETER
JDKOPT_SETUP_JVM_VARIANTS
JDKOPT_SETUP_DEBUG_LEVEL

File diff suppressed because it is too large


@ -52,8 +52,6 @@ AC_DEFUN([HELP_MSG_MISSING_DEPENDENCY],
pkgutil_help $MISSING_DEPENDENCY ;;
pkgadd)
pkgadd_help $MISSING_DEPENDENCY ;;
* )
break ;;
esac
if test "x$PKGHANDLER_COMMAND" != x; then
@ -92,8 +90,6 @@ http://www.freetype.org/
If you put the resulting build in \"C:\Program Files\GnuWin32\", it will be found automatically."
fi
;;
* )
break ;;
esac
}
@ -119,8 +115,6 @@ apt_help() {
PKGHANDLER_COMMAND="sudo apt-get install libX11-dev libxext-dev libxrender-dev libxtst-dev libxt-dev" ;;
ccache)
PKGHANDLER_COMMAND="sudo apt-get install ccache" ;;
* )
break ;;
esac
}
@ -142,8 +136,6 @@ yum_help() {
PKGHANDLER_COMMAND="sudo yum install libXtst-devel libXt-devel libXrender-devel" ;;
ccache)
PKGHANDLER_COMMAND="sudo yum install ccache" ;;
* )
break ;;
esac
}
@ -163,22 +155,6 @@ AC_DEFUN_ONCE([HELP_PRINT_SUMMARY_AND_WARNINGS],
[
# Finally output some useful information to the user
if test "x$CCACHE_FOUND" != x; then
if test "x$HAS_GOOD_CCACHE" = x; then
CCACHE_STATUS="installed, but disabled (version older than 3.1.4)"
CCACHE_HELP_MSG="You have ccache installed, but it is a version prior to 3.1.4. Try upgrading."
else
CCACHE_STATUS="installed and in use"
fi
else
if test "x$GCC" = xyes; then
CCACHE_STATUS="not installed (consider installing)"
CCACHE_HELP_MSG="You do not have ccache installed. Try installing it."
else
CCACHE_STATUS="not available for your system"
fi
fi
printf "\n"
printf "====================================================\n"
printf "A new configuration has been successfully created in\n"
@ -209,16 +185,10 @@ AC_DEFUN_ONCE([HELP_PRINT_SUMMARY_AND_WARNINGS],
printf "Build performance summary:\n"
printf "* Cores to use: $JOBS\n"
printf "* Memory limit: $MEMORY_SIZE MB\n"
printf "* ccache status: $CCACHE_STATUS\n"
printf "\n"
if test "x$CCACHE_HELP_MSG" != x && test "x$HIDE_PERFORMANCE_HINTS" = "xno"; then
printf "Build performance tip: ccache gives a tremendous speedup for C++ recompilations.\n"
printf "$CCACHE_HELP_MSG\n"
HELP_MSG_MISSING_DEPENDENCY([ccache])
printf "$HELP_MSG\n"
printf "\n"
if test "x$CCACHE_STATUS" != "x"; then
printf "* ccache status: $CCACHE_STATUS\n"
fi
printf "\n"
if test "x$BUILDING_MULTIPLE_JVM_VARIANTS" = "xyes"; then
printf "NOTE: You have requested to build more than one version of the JVM, which\n"


@ -91,6 +91,11 @@ LLVM_LDFLAGS=@LLVM_LDFLAGS@
ALT_OUTPUTDIR=$(HOTSPOT_OUTPUTDIR)
ALT_EXPORT_PATH=$(HOTSPOT_DIST)
JVM_INTERPRETER:=@JVM_INTERPRETER@
ifeq ($(JVM_INTERPRETER), cpp)
CC_INTERP=true
endif
HOTSPOT_MAKE_ARGS:=@HOTSPOT_MAKE_ARGS@ @STATIC_CXX_SETTING@
# This is used from the libjvm build for C/C++ code.
HOTSPOT_BUILD_JOBS:=$(JOBS)


@ -51,6 +51,33 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_VARIANT],
AC_MSG_RESULT([$JDK_VARIANT])
])
AC_DEFUN_ONCE([JDKOPT_SETUP_JVM_INTERPRETER],
[
###############################################################################
#
# Check which interpreter of the JVM we want to build.
# Currently we have:
# template: Template interpreter (the default)
# cpp : C++ interpreter
AC_MSG_CHECKING([which interpreter of the JVM to build])
AC_ARG_WITH([jvm-interpreter], [AS_HELP_STRING([--with-jvm-interpreter],
[JVM interpreter to build (template, cpp) @<:@template@:>@])])
if test "x$with_jvm_interpreter" = x; then
with_jvm_interpreter="template"
fi
JVM_INTERPRETER="$with_jvm_interpreter"
if test "x$JVM_INTERPRETER" != xtemplate && test "x$JVM_INTERPRETER" != xcpp; then
AC_MSG_ERROR([The available JVM interpreters are: template, cpp])
fi
AC_SUBST(JVM_INTERPRETER)
AC_MSG_RESULT([$with_jvm_interpreter])
])
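# A hypothetical invocation selecting the C++ interpreter instead of the
# default template interpreter:
#   bash ./configure --with-jvm-interpreter=cpp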
AC_DEFUN_ONCE([JDKOPT_SETUP_JVM_VARIANTS],
[
@ -65,19 +92,20 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JVM_VARIANTS],
# i.e. normal interpreter and C1, only the serial GC, kernel jvmti etc
# zero: no machine code interpreter, no compiler
# zeroshark: zero interpreter and shark/llvm compiler backend
# core: interpreter only, no compiler (only works on some platforms)
AC_MSG_CHECKING([which variants of the JVM to build])
AC_ARG_WITH([jvm-variants], [AS_HELP_STRING([--with-jvm-variants],
[JVM variants (separated by commas) to build (server, client, minimal1, kernel, zero, zeroshark) @<:@server@:>@])])
[JVM variants (separated by commas) to build (server, client, minimal1, kernel, zero, zeroshark, core) @<:@server@:>@])])
if test "x$with_jvm_variants" = x; then
with_jvm_variants="server"
fi
JVM_VARIANTS=",$with_jvm_variants,"
TEST_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,//' -e 's/client,//' -e 's/minimal1,//' -e 's/kernel,//' -e 's/zero,//' -e 's/zeroshark,//'`
TEST_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,//' -e 's/client,//' -e 's/minimal1,//' -e 's/kernel,//' -e 's/zero,//' -e 's/zeroshark,//' -e 's/core,//'`
if test "x$TEST_VARIANTS" != "x,"; then
AC_MSG_ERROR([The available JVM variants are: server, client, minimal1, kernel, zero, zeroshark])
AC_MSG_ERROR([The available JVM variants are: server, client, minimal1, kernel, zero, zeroshark, core])
fi
AC_MSG_RESULT([$with_jvm_variants])
@ -87,6 +115,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JVM_VARIANTS],
JVM_VARIANT_KERNEL=`$ECHO "$JVM_VARIANTS" | $SED -e '/,kernel,/!s/.*/false/g' -e '/,kernel,/s/.*/true/g'`
JVM_VARIANT_ZERO=`$ECHO "$JVM_VARIANTS" | $SED -e '/,zero,/!s/.*/false/g' -e '/,zero,/s/.*/true/g'`
JVM_VARIANT_ZEROSHARK=`$ECHO "$JVM_VARIANTS" | $SED -e '/,zeroshark,/!s/.*/false/g' -e '/,zeroshark,/s/.*/true/g'`
JVM_VARIANT_CORE=`$ECHO "$JVM_VARIANTS" | $SED -e '/,core,/!s/.*/false/g' -e '/,core,/s/.*/true/g'`
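# Worked example of the sed idiom above: with JVM_VARIANTS=",server,core,",
#   echo ",server,core," | sed -e '/,core,/!s/.*/false/g' -e '/,core,/s/.*/true/g'
# prints "true"; the first expression rewrites non-matching input to
# "false" and the second rewrites matching input to "true".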
if test "x$JVM_VARIANT_CLIENT" = xtrue; then
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
@ -106,7 +135,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JVM_VARIANTS],
# Replace the commas with AND for use in the build directory name.
ANDED_JVM_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/^,//' -e 's/,$//' -e 's/,/AND/g'`
COUNT_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,/1/' -e 's/client,/1/' -e 's/minimal1,/1/' -e 's/kernel,/1/' -e 's/zero,/1/' -e 's/zeroshark,/1/'`
COUNT_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,/1/' -e 's/client,/1/' -e 's/minimal1,/1/' -e 's/kernel,/1/' -e 's/zero,/1/' -e 's/zeroshark,/1/' -e 's/core,/1/'`
if test "x$COUNT_VARIANTS" != "x,1"; then
BUILDING_MULTIPLE_JVM_VARIANTS=yes
else
@ -120,6 +149,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JVM_VARIANTS],
AC_SUBST(JVM_VARIANT_KERNEL)
AC_SUBST(JVM_VARIANT_ZERO)
AC_SUBST(JVM_VARIANT_ZEROSHARK)
AC_SUBST(JVM_VARIANT_CORE)
INCLUDE_SA=true
if test "x$JVM_VARIANT_ZERO" = xtrue ; then
@ -128,6 +158,9 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JVM_VARIANTS],
if test "x$JVM_VARIANT_ZEROSHARK" = xtrue ; then
INCLUDE_SA=false
fi
if test "x$VAR_CPU" = xppc64 ; then
INCLUDE_SA=false
fi
AC_SUBST(INCLUDE_SA)
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
@ -236,6 +269,10 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_LEVEL],
HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}shark "
fi
if test "x$JVM_VARIANT_CORE" = xtrue; then
HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}core "
fi
HOTSPOT_TARGET="$HOTSPOT_TARGET docs export_$HOTSPOT_EXPORT"
# On Macosx universal binaries are produced, but they only contain


@ -43,6 +43,14 @@ AC_DEFUN_ONCE([LIB_SETUP_INIT],
AC_MSG_RESULT([alsa pulse])
fi
if test "x$OPENJDK_TARGET_OS" = xaix; then
AC_MSG_CHECKING([what is not needed on AIX?])
ALSA_NOT_NEEDED=yes
PULSE_NOT_NEEDED=yes
AC_MSG_RESULT([alsa pulse])
fi
if test "x$OPENJDK_TARGET_OS" = xwindows; then
AC_MSG_CHECKING([what is not needed on Windows?])
CUPS_NOT_NEEDED=yes


@ -126,6 +126,11 @@ AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_OS],
VAR_OS_API=winapi
VAR_OS_ENV=windows.msys
;;
*aix*)
VAR_OS=aix
VAR_OS_API=posix
VAR_OS_ENV=aix
;;
*)
AC_MSG_ERROR([unsupported operating system $1])
;;
@ -432,9 +437,9 @@ AC_DEFUN([PLATFORM_SET_COMPILER_TARGET_BITS_FLAGS],
# keep track of these additions in ADDED_CFLAGS etc. These
# will later be checked to make sure only controlled additions
# have been made to CFLAGS etc.
ADDED_CFLAGS=" -m${OPENJDK_TARGET_CPU_BITS}"
ADDED_CXXFLAGS=" -m${OPENJDK_TARGET_CPU_BITS}"
ADDED_LDFLAGS=" -m${OPENJDK_TARGET_CPU_BITS}"
ADDED_CFLAGS=" ${COMPILER_TARGET_BITS_FLAG}${OPENJDK_TARGET_CPU_BITS}"
ADDED_CXXFLAGS=" ${COMPILER_TARGET_BITS_FLAG}${OPENJDK_TARGET_CPU_BITS}"
ADDED_LDFLAGS=" ${COMPILER_TARGET_BITS_FLAG}${OPENJDK_TARGET_CPU_BITS}"
CFLAGS="${CFLAGS}${ADDED_CFLAGS}"
CXXFLAGS="${CXXFLAGS}${ADDED_CXXFLAGS}"
@ -454,8 +459,9 @@ AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_TARGET_BITS],
# is made at runtime.)
#
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# Always specify -m flags on Solaris
if test "x$OPENJDK_TARGET_OS" = xsolaris || test "x$OPENJDK_TARGET_OS" = xaix; then
# Always specify -m flag on Solaris
# And -q on AIX because otherwise the compiler produces 32-bit objects by default
PLATFORM_SET_COMPILER_TARGET_BITS_FLAGS
elif test "x$COMPILE_TYPE" = xreduced; then
if test "x$OPENJDK_TARGET_OS" != xwindows; then
@ -477,19 +483,34 @@ AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_TARGET_BITS],
AC_CHECK_SIZEOF([int *], [1111])
if test "x$SIZEOF_INT_P" != "x$ac_cv_sizeof_int_p"; then
# Workaround autoconf bug, see http://lists.gnu.org/archive/html/autoconf/2010-07/msg00004.html
SIZEOF_INT_P="$ac_cv_sizeof_int_p"
fi
if test "x$SIZEOF_INT_P" = x; then
# AC_CHECK_SIZEOF defines 'ac_cv_sizeof_int_p' to hold the number of bytes used by an 'int*'
if test "x$ac_cv_sizeof_int_p" = x; then
# The test failed, lets stick to the assumed value.
AC_MSG_WARN([The number of bits in the target could not be determined, using $OPENJDK_TARGET_CPU_BITS.])
else
TESTED_TARGET_CPU_BITS=`expr 8 \* $SIZEOF_INT_P`
TESTED_TARGET_CPU_BITS=`expr 8 \* $ac_cv_sizeof_int_p`
if test "x$TESTED_TARGET_CPU_BITS" != "x$OPENJDK_TARGET_CPU_BITS"; then
AC_MSG_ERROR([The tested number of bits in the target ($TESTED_TARGET_CPU_BITS) differs from the number of bits expected to be found in the target ($OPENJDK_TARGET_CPU_BITS)])
# This situation may happen on 64-bit platforms where the compiler by default only generates 32-bit objects
# Let's try to explicitly set the compiler's target architecture and retry the test
AC_MSG_NOTICE([The tested number of bits in the target ($TESTED_TARGET_CPU_BITS) differs from the number of bits expected to be found in the target ($OPENJDK_TARGET_CPU_BITS).])
AC_MSG_NOTICE([I'll retry after setting the platform's compiler target bits flag to ${COMPILER_TARGET_BITS_FLAG}${OPENJDK_TARGET_CPU_BITS}])
PLATFORM_SET_COMPILER_TARGET_BITS_FLAGS
# We have to unset 'ac_cv_sizeof_int_p' first, otherwise AC_CHECK_SIZEOF will use the previously cached value!
unset ac_cv_sizeof_int_p
# And we have to undef the definition of SIZEOF_INT_P written to confdefs.h by the previous invocation of AC_CHECK_SIZEOF
cat >>confdefs.h <<_ACEOF
#undef SIZEOF_INT_P
_ACEOF
AC_CHECK_SIZEOF([int *], [1111])
TESTED_TARGET_CPU_BITS=`expr 8 \* $ac_cv_sizeof_int_p`
if test "x$TESTED_TARGET_CPU_BITS" != "x$OPENJDK_TARGET_CPU_BITS"; then
AC_MSG_ERROR([The tested number of bits in the target ($TESTED_TARGET_CPU_BITS) differs from the number of bits expected to be found in the target ($OPENJDK_TARGET_CPU_BITS)])
fi
fi
fi


@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,8 @@ X:=
SPACE:=$(X) $(X)
COMMA:=,
HASH:=\#
LEFT_PAREN:=(
RIGHT_PAREN:=)
SQUOTE:='
#'
DQUOTE:="
@ -208,6 +210,7 @@ JVM_VARIANT_MINIMAL1:=@JVM_VARIANT_MINIMAL1@
JVM_VARIANT_KERNEL:=@JVM_VARIANT_KERNEL@
JVM_VARIANT_ZERO:=@JVM_VARIANT_ZERO@
JVM_VARIANT_ZEROSHARK:=@JVM_VARIANT_ZEROSHARK@
JVM_VARIANT_CORE:=@JVM_VARIANT_CORE@
# Universal binaries on macosx
MACOSX_UNIVERSAL=@MACOSX_UNIVERSAL@
@ -297,6 +300,8 @@ MACOSX_VERSION_MIN=@MACOSX_VERSION_MIN@
COMPILER_TYPE:=@COMPILER_TYPE@
COMPILER_NAME:=@COMPILER_NAME@
# Option used to tell the compiler whether to create 32- or 64-bit executables
COMPILER_TARGET_BITS_FLAG:=@COMPILER_TARGET_BITS_FLAG@
COMPILER_SUPPORTS_TARGET_BITS_FLAG=@COMPILER_SUPPORTS_TARGET_BITS_FLAG@
CC_OUT_OPTION:=@CC_OUT_OPTION@
@ -340,6 +345,11 @@ CPP:=@FIXPATH@ @CPP@
# The linker can be gcc or ld on posix systems, or link.exe on windows systems.
LD:=@FIXPATH@ @LD@
# The linker on older SuSE distros (e.g. on SLES 10) complains with:
# "Invalid version tag `SUNWprivate_1.1'. Only anonymous version tag is allowed in executable."
# if fed a version script which contains named tags.
USING_BROKEN_SUSE_LD:=@USING_BROKEN_SUSE_LD@
# LDFLAGS used to link the jdk native libraries (C-code)
LDFLAGS_JDKLIB:=@LDFLAGS_JDKLIB@
LDFLAGS_JDKLIB_SUFFIX:=@LDFLAGS_JDKLIB_SUFFIX@
@ -430,28 +440,29 @@ POST_MCS_CMD:=@POST_MCS_CMD@
JAVA_FLAGS:=@BOOT_JDK_JVMARGS@
JAVA=@FIXPATH@ $(BOOT_JDK)/bin/java $(JAVA_FLAGS)
JAVA=@FIXPATH@ @JAVA@ $(JAVA_FLAGS)
JAVAC=@FIXPATH@ $(BOOT_JDK)/bin/javac
JAVAC:=@FIXPATH@ @JAVAC@
# Hotspot sets this variable before reading the SPEC when compiling sa-jdi.jar. Avoid
# overriding that value by using ?=.
JAVAC_FLAGS?=@JAVAC_FLAGS@
JAVAH=@FIXPATH@ $(BOOT_JDK)/bin/javah
JAVAH:=@FIXPATH@ @JAVAH@
JAR=@FIXPATH@ $(BOOT_JDK)/bin/jar
JAR:=@FIXPATH@ @JAR@
RMIC=@FIXPATH@ $(BOOT_JDK)/bin/rmic
NATIVE2ASCII:=@FIXPATH@ @NATIVE2ASCII@
NATIVE2ASCII=@FIXPATH@ $(BOOT_JDK)/bin/native2ascii
JARSIGNER=@FIXPATH@ $(BOOT_JDK)/bin/jarsigner
JARSIGNER:=@FIXPATH@ @JARSIGNER@
# You run the new javac using the boot jdk with $(BOOT_JDK)/bin/java $(NEW_JAVAC) ...
BOOTSTRAP_JAVAC_JAR:=$(LANGTOOLS_OUTPUTDIR)/dist/bootstrap/lib/javac.jar
BOOTSTRAP_JAVAC_ARGS:="-Xbootclasspath/p:$(BOOTSTRAP_JAVAC_JAR)" -cp $(BOOTSTRAP_JAVAC_JAR)
NEW_JAVAC = $(BOOTSTRAP_JAVAC_ARGS) com.sun.tools.javac.Main
NEW_JAVADOC = $(BOOTSTRAP_JAVAC_ARGS) com.sun.tools.javadoc.Main
INTERIM_LANGTOOLS_JAR := $(LANGTOOLS_OUTPUTDIR)/dist/interim_langtools.jar
INTERIM_LANGTOOLS_ARGS := "-Xbootclasspath/p:$(INTERIM_LANGTOOLS_JAR)" -cp $(INTERIM_LANGTOOLS_JAR)
NEW_JAVAC = $(INTERIM_LANGTOOLS_ARGS) com.sun.tools.javac.Main
NEW_JAVADOC = $(INTERIM_LANGTOOLS_ARGS) com.sun.tools.javadoc.Main
# The interim corba jar is needed for running rmic
INTERIM_CORBA_JAR := $(CORBA_OUTPUTDIR)/dist/interim_corba.jar
# Base flags for RC
# Guarding this against resetting value. Legacy make files include spec multiple


@ -44,6 +44,15 @@ AC_DEFUN([TOOLCHAIN_CHECK_COMPILER_VERSION],
COMPILER_VERSION=`$ECHO $COMPILER_VERSION_TEST | $SED -n "s/^.*@<:@ ,\t@:>@$COMPILER_NAME@<:@ ,\t@:>@\(@<:@1-9@:>@\.@<:@0-9@:>@@<:@0-9@:>@*\).*/\1/p"`
COMPILER_VENDOR="Sun Studio"
fi
elif test "x$OPENJDK_TARGET_OS" = xaix; then
COMPILER_VERSION_TEST=`$COMPILER -qversion 2>&1 | $TAIL -n 1`
$ECHO $COMPILER_VERSION_TEST | $GREP "^Version: " > /dev/null
if test $? -ne 0; then
AC_MSG_ERROR([Failed to detect the compiler version of $COMPILER ....])
else
COMPILER_VERSION=`$ECHO $COMPILER_VERSION_TEST | $SED -n 's/Version: \([0-9][0-9]\.[0-9][0-9]*\).*/\1/p'`
COMPILER_VENDOR='IBM'
fi
elif test "x$OPENJDK_TARGET_OS" = xwindows; then
# First line typically looks something like:
# Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.40219.01 for 80x86
@ -113,34 +122,62 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_SYSROOT_AND_OUT_OPTIONS],
AC_DEFUN([TOOLCHAIN_FIND_COMPILER],
[
COMPILER_NAME=$2
SEARCH_LIST="$3"
$1=
# If TOOLS_DIR is set, check for all compiler names in there first
# before checking the rest of the PATH.
if test -n "$TOOLS_DIR"; then
PATH_save="$PATH"
PATH="$TOOLS_DIR"
AC_PATH_PROGS(TOOLS_DIR_$1, $3)
$1=$TOOLS_DIR_$1
PATH="$PATH_save"
if test "x[$]$1" != x; then
# User has supplied compiler name already, always let that override.
AC_MSG_NOTICE([Will use user supplied compiler $1=[$]$1])
if test "x`basename [$]$1`" = "x[$]$1"; then
# A command without a complete path is provided, search $PATH.
AC_PATH_PROGS(POTENTIAL_$1, [$]$1)
if test "x$POTENTIAL_$1" != x; then
$1=$POTENTIAL_$1
else
AC_MSG_ERROR([User supplied compiler $1=[$]$1 could not be found])
fi
else
# Otherwise it might already be a complete path
if test ! -x "[$]$1"; then
AC_MSG_ERROR([User supplied compiler $1=[$]$1 does not exist])
fi
fi
else
# No user supplied value. Locate compiler ourselves
$1=
# If TOOLS_DIR is set, check for all compiler names in there first
# before checking the rest of the PATH.
if test -n "$TOOLS_DIR"; then
PATH_save="$PATH"
PATH="$TOOLS_DIR"
AC_PATH_PROGS(TOOLS_DIR_$1, $SEARCH_LIST)
$1=$TOOLS_DIR_$1
PATH="$PATH_save"
fi
# AC_PATH_PROGS can't be run multiple times with the same variable,
# so create a new name for this run.
if test "x[$]$1" = x; then
AC_PATH_PROGS(POTENTIAL_$1, $SEARCH_LIST)
$1=$POTENTIAL_$1
fi
if test "x[$]$1" = x; then
HELP_MSG_MISSING_DEPENDENCY([devkit])
AC_MSG_ERROR([Could not find a $COMPILER_NAME compiler. $HELP_MSG])
fi
fi
# AC_PATH_PROGS can't be run multiple times with the same variable,
# so create a new name for this run.
if test "x[$]$1" = x; then
AC_PATH_PROGS(POTENTIAL_$1, $3)
$1=$POTENTIAL_$1
fi
if test "x[$]$1" = x; then
HELP_MSG_MISSING_DEPENDENCY([devkit])
AC_MSG_ERROR([Could not find a $COMPILER_NAME compiler. $HELP_MSG])
fi
# Now we have a compiler binary in $1. Make sure it's okay.
BASIC_FIXUP_EXECUTABLE($1)
AC_MSG_CHECKING([resolved symbolic links for $1])
TEST_COMPILER="[$]$1"
BASIC_REMOVE_SYMBOLIC_LINKS(TEST_COMPILER)
AC_MSG_RESULT([$TEST_COMPILER])
# Don't remove symbolic links on AIX because 'xlc_r' and 'xlC_r' may both be links
# to 'xlc' but it is crucial that we invoke the compiler with the right name!
if test "x$OPENJDK_BUILD_OS" != xaix; then
AC_MSG_CHECKING([resolved symbolic links for $1])
BASIC_REMOVE_SYMBOLIC_LINKS(TEST_COMPILER)
AC_MSG_RESULT([$TEST_COMPILER])
fi
AC_MSG_CHECKING([if $1 is disguised ccache])
COMPILER_BASENAME=`$BASENAME "$TEST_COMPILER"`
@ -201,11 +238,11 @@ AC_DEFUN([TOOLCHAIN_SETUP_PATHS],
# otherwise we might pick up cross-compilers which don't use standard naming.
# Otherwise, we'll set the BUILD_tools to the native tools, but that'll have
# to wait until they are properly discovered.
AC_PATH_PROGS(BUILD_CC, [cl cc gcc])
BASIC_PATH_PROGS(BUILD_CC, [cl cc gcc])
BASIC_FIXUP_EXECUTABLE(BUILD_CC)
AC_PATH_PROGS(BUILD_CXX, [cl CC g++])
BASIC_PATH_PROGS(BUILD_CXX, [cl CC g++])
BASIC_FIXUP_EXECUTABLE(BUILD_CXX)
AC_PATH_PROG(BUILD_LD, ld)
BASIC_PATH_PROGS(BUILD_LD, ld)
BASIC_FIXUP_EXECUTABLE(BUILD_LD)
fi
AC_SUBST(BUILD_CC)
@ -248,12 +285,13 @@ AC_DEFUN([TOOLCHAIN_SETUP_PATHS],
# On Solaris, cc is preferred to gcc.
# Elsewhere, gcc is preferred to cc.
if test "x$CC" != x; then
COMPILER_CHECK_LIST="$CC"
elif test "x$OPENJDK_TARGET_OS" = "xwindows"; then
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
COMPILER_CHECK_LIST="cl"
elif test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
COMPILER_CHECK_LIST="cc gcc"
elif test "x$OPENJDK_TARGET_OS" = "xaix"; then
# Do not probe for cc on AIX.
COMPILER_CHECK_LIST="xlc_r"
else
COMPILER_CHECK_LIST="gcc cc"
fi
@ -262,14 +300,23 @@ AC_DEFUN([TOOLCHAIN_SETUP_PATHS],
# Now that we have resolved CC ourselves, let autoconf have its go at it
AC_PROG_CC([$CC])
# Option used to tell the compiler whether to create 32- or 64-bit executables
# Notice that CC contains the full compiler path at this point.
case $CC in
*xlc_r) COMPILER_TARGET_BITS_FLAG="-q";;
*) COMPILER_TARGET_BITS_FLAG="-m";;
esac
AC_SUBST(COMPILER_TARGET_BITS_FLAG)
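# For example (paths hypothetical): CC=/usr/vac/bin/xlc_r matches the
# first case and selects "-q", so a 64-bit AIX build passes -q64, while
# CC=/usr/bin/gcc falls through to "-m" and a 64-bit build passes -m64.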
### Locate C++ compiler (CXX)
if test "x$CXX" != x; then
COMPILER_CHECK_LIST="$CXX"
elif test "x$OPENJDK_TARGET_OS" = "xwindows"; then
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
COMPILER_CHECK_LIST="cl"
elif test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
COMPILER_CHECK_LIST="CC g++"
elif test "x$OPENJDK_TARGET_OS" = "xaix"; then
# Do not probe for CC on AIX.
COMPILER_CHECK_LIST="xlC_r"
else
COMPILER_CHECK_LIST="g++ CC"
fi
@ -306,11 +353,13 @@ AC_DEFUN([TOOLCHAIN_SETUP_PATHS],
AC_SUBST(LDEXECXX)
if test "x$OPENJDK_TARGET_OS" != xwindows; then
AC_CHECK_TOOL(AR, ar)
BASIC_CHECK_TOOLS(AR, ar)
BASIC_FIXUP_EXECUTABLE(AR)
fi
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
ARFLAGS="-r"
elif test "x$OPENJDK_TARGET_OS" = xaix; then
ARFLAGS="-X64"
else
ARFLAGS=""
fi
@ -431,7 +480,7 @@ AC_DEFUN([TOOLCHAIN_SETUP_PATHS],
# Find the right assembler.
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
AC_PATH_PROG(AS, as)
BASIC_PATH_PROGS(AS, as)
BASIC_FIXUP_EXECUTABLE(AS)
else
AS="$CC -c"
@ -439,41 +488,41 @@ AC_DEFUN([TOOLCHAIN_SETUP_PATHS],
AC_SUBST(AS)
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
AC_PATH_PROG(NM, nm)
BASIC_PATH_PROGS(NM, nm)
BASIC_FIXUP_EXECUTABLE(NM)
AC_PATH_PROG(GNM, gnm)
BASIC_PATH_PROGS(GNM, gnm)
BASIC_FIXUP_EXECUTABLE(GNM)
AC_PATH_PROG(STRIP, strip)
BASIC_PATH_PROGS(STRIP, strip)
BASIC_FIXUP_EXECUTABLE(STRIP)
AC_PATH_PROG(MCS, mcs)
BASIC_PATH_PROGS(MCS, mcs)
BASIC_FIXUP_EXECUTABLE(MCS)
elif test "x$OPENJDK_TARGET_OS" != xwindows; then
AC_CHECK_TOOL(NM, nm)
BASIC_CHECK_TOOLS(NM, nm)
BASIC_FIXUP_EXECUTABLE(NM)
GNM="$NM"
AC_SUBST(GNM)
AC_CHECK_TOOL(STRIP, strip)
BASIC_CHECK_TOOLS(STRIP, strip)
BASIC_FIXUP_EXECUTABLE(STRIP)
fi
# objcopy is used for moving debug symbols to separate files when
# full debug symbols are enabled.
if test "x$OPENJDK_TARGET_OS" = xsolaris || test "x$OPENJDK_TARGET_OS" = xlinux; then
AC_CHECK_TOOLS(OBJCOPY, [gobjcopy objcopy])
BASIC_CHECK_TOOLS(OBJCOPY, [gobjcopy objcopy])
# Only call fixup if objcopy was found.
if test -n "$OBJCOPY"; then
BASIC_FIXUP_EXECUTABLE(OBJCOPY)
fi
fi
AC_CHECK_TOOLS(OBJDUMP, [gobjdump objdump])
BASIC_CHECK_TOOLS(OBJDUMP, [gobjdump objdump])
if test "x$OBJDUMP" != x; then
# Only used for compare.sh; we can live without it. BASIC_FIXUP_EXECUTABLE bails if argument is missing.
BASIC_FIXUP_EXECUTABLE(OBJDUMP)
fi
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
AC_PATH_PROG(LIPO, lipo)
BASIC_PATH_PROGS(LIPO, lipo)
BASIC_FIXUP_EXECUTABLE(LIPO)
fi
@ -554,6 +603,29 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_LIBS],
POST_STRIP_CMD="$STRIP -x"
POST_MCS_CMD="$MCS -d -a \"JDK $FULL_VERSION\""
fi
if test "x$OPENJDK_TARGET_OS" = xaix; then
COMPILER_NAME=xlc
PICFLAG="-qpic=large"
LIBRARY_PREFIX=lib
SHARED_LIBRARY='lib[$]1.so'
STATIC_LIBRARY='lib[$]1.a'
SHARED_LIBRARY_FLAGS="-qmkshrobj"
SHARED_LIBRARY_SUFFIX='.so'
STATIC_LIBRARY_SUFFIX='.a'
OBJ_SUFFIX='.o'
EXE_SUFFIX=''
SET_SHARED_LIBRARY_NAME=''
SET_SHARED_LIBRARY_MAPFILE=''
C_FLAG_REORDER=''
CXX_FLAG_REORDER=''
SET_SHARED_LIBRARY_ORIGIN=''
SET_EXECUTABLE_ORIGIN=""
CFLAGS_JDK=""
CXXFLAGS_JDK=""
CFLAGS_JDKLIB_EXTRA=''
POST_STRIP_CMD="$STRIP -X32_64"
POST_MCS_CMD=""
fi
if test "x$OPENJDK_TARGET_OS" = xwindows; then
# If it is not gcc, then assume it is the MS Visual Studio compiler
COMPILER_NAME=cl
@ -730,6 +802,24 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
CFLAGS_DEBUG_SYMBOLS="-g -xs"
CXXFLAGS_DEBUG_SYMBOLS="-g0 -xs"
;;
xlc )
C_FLAG_DEPS="-qmakedep=gcc -MF"
CXX_FLAG_DEPS="-qmakedep=gcc -MF"
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3 -qstrict"
C_O_FLAG_NORM="-O2"
C_O_FLAG_NONE=""
CXX_O_FLAG_HIGHEST="-O3"
CXX_O_FLAG_HI="-O3 -qstrict"
CXX_O_FLAG_NORM="-O2"
CXX_O_FLAG_NONE=""
CFLAGS_DEBUG_SYMBOLS="-g"
CXXFLAGS_DEBUG_SYMBOLS="-g"
LDFLAGS_JDK="${LDFLAGS_JDK} -q64 -brtl -bnolibpath -liconv -bexpall"
CFLAGS_JDK="${CFLAGS_JDK} -qchars=signed -q64 -qfullpath -qsaveopt"
CXXFLAGS_JDK="${CXXFLAGS_JDK} -qchars=signed -q64 -qfullpath -qsaveopt"
;;
esac
;;
CL )
@ -840,6 +930,13 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_JDK],
LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext"
LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib"
;;
xlc )
CFLAGS_JDK="$CFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
CXXFLAGS_JDK="$CXXFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
LDFLAGS_JDK="$LDFLAGS_JDK"
LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK"
;;
cl )
CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -Zi -MD -Zc:wchar_t- -W3 -wd4800 \
-D_STATIC_CPPLIB -D_DISABLE_DEPRECATE_STATIC_CPPLIB -DWIN32_LEAN_AND_MEAN \
@ -909,6 +1006,9 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_JDK],
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DSOLARIS"
fi
if test "x$OPENJDK_TARGET_OS" = xaix; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DAIX -DPPC64"
fi
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DMACOSX -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
# Setting these parameters makes it an error to link to macosx APIs that are
@ -1076,20 +1176,38 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_COMPILER_FLAGS_MISC],
# ZERO_ARCHFLAG tells the compiler which mode to build for
case "${OPENJDK_TARGET_CPU}" in
s390)
ZERO_ARCHFLAG="-m31"
ZERO_ARCHFLAG="${COMPILER_TARGET_BITS_FLAG}31"
;;
*)
ZERO_ARCHFLAG="-m${OPENJDK_TARGET_CPU_BITS}"
ZERO_ARCHFLAG="${COMPILER_TARGET_BITS_FLAG}${OPENJDK_TARGET_CPU_BITS}"
esac
TOOLCHAIN_COMPILER_CHECK_ARGUMENTS([$ZERO_ARCHFLAG], [], [ZERO_ARCHFLAG=""])
AC_SUBST(ZERO_ARCHFLAG)
# Check that the compiler supports -mX flags
# Check that the compiler supports -mX (or -qX on AIX) flags
# Set COMPILER_SUPPORTS_TARGET_BITS_FLAG to 'true' if it does
TOOLCHAIN_COMPILER_CHECK_ARGUMENTS([-m${OPENJDK_TARGET_CPU_BITS}],
TOOLCHAIN_COMPILER_CHECK_ARGUMENTS([${COMPILER_TARGET_BITS_FLAG}${OPENJDK_TARGET_CPU_BITS}],
[COMPILER_SUPPORTS_TARGET_BITS_FLAG=true],
[COMPILER_SUPPORTS_TARGET_BITS_FLAG=false])
AC_SUBST(COMPILER_SUPPORTS_TARGET_BITS_FLAG)
# Check for a broken SuSE 'ld' which complains 'Only anonymous version tag is allowed in executable.'
USING_BROKEN_SUSE_LD=no
if test "x$OPENJDK_TARGET_OS" = xlinux && test "x$GCC" = xyes; then
AC_MSG_CHECKING([for broken SuSE 'ld' which only understands anonymous version tags in executables])
echo "SUNWprivate_1.1 { local: *; };" > version-script.map
echo "int main() { }" > main.c
if $CXX -Xlinker -version-script=version-script.map main.c 2>&AS_MESSAGE_LOG_FD >&AS_MESSAGE_LOG_FD; then
AC_MSG_RESULT(no)
USING_BROKEN_SUSE_LD=no
else
AC_MSG_RESULT(yes)
USING_BROKEN_SUSE_LD=yes
fi
rm -rf version-script.map main.c
fi
AC_SUBST(USING_BROKEN_SUSE_LD)
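# The check can be reproduced by hand on a suspect system (file names as
# in the test above; any C compiler driver should do):
#   echo "SUNWprivate_1.1 { local: *; };" > version-script.map
#   echo "int main() { }" > main.c
#   cc -Xlinker -version-script=version-script.map main.c && echo "ld is OK"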
])
# Setup the JTREG paths
@ -1126,7 +1244,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_JTREG],
AC_MSG_RESULT($JTREGEXE)
else
# try to find jtreg on path
BASIC_REQUIRE_PROG(JTREGEXE, jtreg)
BASIC_REQUIRE_PROGS(JTREGEXE, jtreg)
JT_HOME="`$DIRNAME $JTREGEXE`"
fi
fi


@ -244,3 +244,4 @@ d6820a414f182a011a53a29a52370c696cd58dab jdk8-b118
53fd772d28c8a9f0f43adfc06f75f6b3cfa93cb5 jdk8-b120
a7d3638deb2f4e33217b1ecf889479e90f9e5b50 jdk9-b00
79a8136b18c1c6848f500088f5a4b39f262f082d jdk9-b01
8394993063135a42b63a94473280399fb2a13aa7 jdk9-b02

File diff suppressed because one or more lines are too long


@ -0,0 +1,53 @@
#
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
################################################################################
# The Corba sources are old and generate a LOT of warnings.
# Disable these using Xlint, until someone cares to fix them.
DISABLE_CORBA_WARNINGS := -Xlint:all,-deprecation,-unchecked,-serial,-fallthrough,-cast,-rawtypes,-static,-dep-ann
# The "generate old bytecode" javac setup uses the new compiler to compile for the
# boot jdk to generate tools that need to be run with the boot jdk.
# Thus we force the target bytecode to the boot jdk bytecode.
$(eval $(call SetupJavaCompiler,GENERATE_OLDBYTECODE, \
JVM := $(JAVA), \
JAVAC := $(NEW_JAVAC), \
FLAGS := $(BOOT_JDK_SOURCETARGET) \
-bootclasspath "$(BOOT_RTJAR)$(PATH_SEP)$(BOOT_TOOLSJAR)" \
$(DISABLE_CORBA_WARNINGS), \
SERVER_DIR := $(SJAVAC_SERVER_DIR), \
SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
# The "generate new bytecode" uses the new compiler to generate bytecode
# for the new jdk that is being built. The code compiled by this setup
# cannot necessarily be run with the boot jdk.
$(eval $(call SetupJavaCompiler,GENERATE_NEWBYTECODE, \
JVM := $(JAVA), \
JAVAC := $(NEW_JAVAC), \
FLAGS := -cp $(BOOT_TOOLSJAR) -XDignore.symbol.file=true $(DISABLE_CORBA_WARNINGS), \
SERVER_DIR := $(SJAVAC_SERVER_DIR), \
SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
################################################################################


@ -0,0 +1,87 @@
#
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# This must be the first rule
default: all
include $(SPEC)
include MakeBase.gmk
include JavaCompilation.gmk
include CommonCorba.gmk
################################################################################
$(eval $(call SetupJavaCompilation,BUILD_CORBA, \
SETUP := GENERATE_NEWBYTECODE, \
SRC := $(CORBA_TOPDIR)/src/share/classes $(CORBA_OUTPUTDIR)/gensrc, \
EXCLUDES := com/sun/corba/se/PortableActivationIDL \
com/sun/tools/corba/se/logutil, \
EXCLUDE_FILES := com/sun/corba/se/impl/presentation/rmi/JNDIStateFactoryImpl.java \
com/sun/corba/se/spi/presentation/rmi/StubWrapper.java \
com/sun/org/omg/CORBA/IDLTypeOperations.java \
com/sun/org/omg/CORBA/IRObjectOperations.java \
org/omg/PortableInterceptor/UNKNOWN.java \
com/sun/tools/corba/se/idl/ResourceBundleUtil.java \
com/sun/corba/se/impl/presentation/rmi/jndi.properties, \
COPY := .prp LogStrings.properties, \
BIN := $(CORBA_OUTPUTDIR)/classes, \
JAR := $(CORBA_OUTPUTDIR)/dist/lib/classes.jar))
$(eval $(call SetupJavaCompilation,BUILD_INTERIM_CORBA, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := $(BUILD_CORBA_SRC), \
EXCLUDES := $(BUILD_CORBA_EXCLUDES), \
EXCLUDE_FILES := $(BUILD_CORBA_EXCLUDE_FILES), \
COPY := $(BUILD_CORBA_COPY), \
BIN := $(CORBA_OUTPUTDIR)/interim_classes, \
JAR := $(INTERIM_CORBA_JAR)))
# Separate src.zip call to include sources that were excluded in the build to
# mimic behavior in old build system.
$(eval $(call SetupZipArchive,ARCHIVE_CORBA_SRC, \
SRC := $(CORBA_TOPDIR)/src/share/classes $(CORBA_OUTPUTDIR)/gensrc, \
ZIP := $(CORBA_OUTPUTDIR)/dist/lib/src.zip))
################################################################################
# Create bin.zip containing the corba specific binaries: orb.idl, ir.idl
BIN_FILES := $(CORBA_TOPDIR)/src/share/classes/com/sun/tools/corba/se/idl/orb.idl \
$(CORBA_TOPDIR)/src/share/classes/com/sun/tools/corba/se/idl/ir.idl
$(CORBA_OUTPUTDIR)/dist/lib/bin.zip: $(BIN_FILES)
$(MKDIR) -p $(CORBA_OUTPUTDIR)/dist/lib
$(MKDIR) -p $(CORBA_OUTPUTDIR)/lib
$(RM) -f $@
$(ECHO) Creating `basename $@`
$(CP) $(BIN_FILES) $(CORBA_OUTPUTDIR)/lib
$(CHMOD) ug+w $(CORBA_OUTPUTDIR)/lib/*
(cd $(CORBA_OUTPUTDIR); $(ZIP) -q $@ lib/orb.idl lib/ir.idl)
################################################################################
all: $(BUILD_CORBA) $(BUILD_INTERIM_CORBA) $(ARCHIVE_CORBA_SRC) \
$(CORBA_OUTPUTDIR)/dist/lib/bin.zip

corba/make/GensrcCorba.gmk (new file, 153 lines): diff suppressed because one or more lines are too long


@ -404,3 +404,4 @@ ce42d815dd2130250acf6132b51b624001638f0d jdk8-b119
fca262db9c4309f99d2f5542ab0780e45c2f1578 jdk8-b120
ce2d7e46f3c7e41241f3b407705a4071323a11ab jdk9-b00
050a626a88951140df874f7b163e304d07b6c296 jdk9-b01
b188446de75bda5fc52d102cddf242c3ef5ecbdf jdk9-b02

File diff suppressed because it is too large


@ -29,54 +29,51 @@
#include <thread_db.h>
#include "libproc_impl.h"
static const char* alt_root = NULL;
static int alt_root_len = -1;
#define SA_ALTROOT "SA_ALTROOT"
static void init_alt_root() {
if (alt_root_len == -1) {
alt_root = getenv(SA_ALTROOT);
if (alt_root) {
alt_root_len = strlen(alt_root);
} else {
alt_root_len = 0;
}
}
}
int pathmap_open(const char* name) {
int fd;
char alt_path[PATH_MAX + 1];
static const char *alt_root = NULL;
static int alt_root_initialized = 0;
init_alt_root();
int fd;
char alt_path[PATH_MAX + 1], *alt_path_end;
const char *s;
if (alt_root_len > 0) {
strcpy(alt_path, alt_root);
strcat(alt_path, name);
fd = open(alt_path, O_RDONLY);
if (fd >= 0) {
print_debug("path %s substituted for %s\n", alt_path, name);
return fd;
}
if (!alt_root_initialized) {
alt_root_initialized = -1;
alt_root = getenv(SA_ALTROOT);
}
if (strrchr(name, '/')) {
strcpy(alt_path, alt_root);
strcat(alt_path, strrchr(name, '/'));
fd = open(alt_path, O_RDONLY);
if (fd >= 0) {
print_debug("path %s substituted for %s\n", alt_path, name);
return fd;
}
}
} else {
fd = open(name, O_RDONLY);
if (fd >= 0) {
return fd;
}
}
if (alt_root == NULL) {
return open(name, O_RDONLY);
}
return -1;
strcpy(alt_path, alt_root);
alt_path_end = alt_path + strlen(alt_path);
// Strip path items one by one and try to open file with alt_root prepended
s = name;
while (1) {
strcat(alt_path, s);
s += 1;
fd = open(alt_path, O_RDONLY);
if (fd >= 0) {
print_debug("path %s substituted for %s\n", alt_path, name);
return fd;
}
// The linker always puts the full path of a solib into the process, so we can
// rely on the presence of '/'. If no slash is present, the solib doesn't
// physically exist (e.g. linux-gate.so) and opening it would fail anyway
if ((s = strchr(s, '/')) == NULL) {
break;
}
*alt_path_end = 0;
}
return -1;
}
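A minimal sketch (not from the sources) of the candidate order the loop above produces; the method name and the use of plain strings are illustrative only:
  // For altRoot="/altroot" and name="/usr/lib/libfoo.so" this yields
  // /altroot/usr/lib/libfoo.so, /altroot/lib/libfoo.so, /altroot/libfoo.so.
  static java.util.List<String> candidates(String altRoot, String name) {
    java.util.List<String> out = new java.util.ArrayList<>();
    String s = name;
    while (true) {
      out.add(altRoot + s);            // alt_root prepended to the remaining tail
      int slash = s.indexOf('/', 1);   // strip one leading path item
      if (slash < 0) break;            // no '/' left, nothing more to try
      s = s.substring(slash);
    }
    return out;
  }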
static bool _libsaproc_debug;

View File

@ -55,31 +55,21 @@ class LinuxCDebugger implements CDebugger {
if (pc == null) {
return null;
}
List objs = getLoadObjectList();
Object[] arr = objs.toArray();
// load objects are sorted by base address, do binary search
int mid = -1;
int low = 0;
int high = arr.length - 1;
while (low <= high) {
mid = (low + high) >> 1;
LoadObject midVal = (LoadObject) arr[mid];
long cmp = pc.minus(midVal.getBase());
if (cmp < 0) {
high = mid - 1;
} else if (cmp > 0) {
long size = midVal.getSize();
if (cmp >= size) {
low = mid + 1;
} else {
return (LoadObject) arr[mid];
}
} else { // match found
return (LoadObject) arr[mid];
}
/* Typically we have about ten loaded objects here, so there is no reason to
   do a sort/binary search; linear search gives acceptable performance. */
List objs = getLoadObjectList();
for (int i = 0; i < objs.size(); i++) {
LoadObject ob = (LoadObject) objs.get(i);
Address base = ob.getBase();
long size = ob.getSize();
if ( pc.greaterThanOrEqual(base) && pc.lessThan(base.addOffsetTo(size))) {
return ob;
}
}
// no match found.
return null;
}

View File

@ -0,0 +1,77 @@
/*
* @(#)AdaptiveFreeList.java
*
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.memory;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
public class AdaptiveFreeList extends VMObject {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("AdaptiveFreeList<FreeChunk>");
sizeField = type.getCIntegerField("_size");
countField = type.getCIntegerField("_count");
headerSize = type.getSize();
}
// Fields
private static CIntegerField sizeField;
private static CIntegerField countField;
private static long headerSize;
//Constructor
public AdaptiveFreeList(Address address) {
super(address);
}
// Accessors
public long size() {
return sizeField.getValue(addr);
}
public long count() {
return countField.getValue(addr);
}
public static long sizeOf() {
return headerSize;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,25 +24,29 @@
package sun.jvm.hotspot.memory;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.debugger.Debugger;
import sun.jvm.hotspot.oops.ObjectHeap;
import sun.jvm.hotspot.oops.Oop;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
import sun.jvm.hotspot.utilities.Assert;
public class CompactibleFreeListSpace extends CompactibleSpace {
private static AddressField collectorField;
// for free size, three fields
// FreeBlockDictionary* _dictionary; // ptr to dictionary for large size blocks
// FreeList _indexedFreeList[IndexSetSize]; // indexed array for small size blocks
// LinearAllocBlock _smallLinearAllocBlock; // small linear alloc in TLAB
private static AddressField indexedFreeListField;
private static AddressField dictionaryField;
private static long smallLinearAllocBlockFieldOffset;
private static long indexedFreeListSizeOf;
private int heapWordSize; // 4 for 32bit, 8 for 64 bits
private int IndexSetStart; // for small indexed list
@ -109,11 +113,11 @@ public class CompactibleFreeListSpace extends CompactibleSpace {
// small chunks
long size = 0;
Address cur = addr.addOffsetTo( indexedFreeListField.getOffset() );
cur = cur.addOffsetTo(IndexSetStart*FreeList.sizeOf());
cur = cur.addOffsetTo(IndexSetStart*AdaptiveFreeList.sizeOf());
for (int i=IndexSetStart; i<IndexSetSize; i += IndexSetStride) {
FreeList freeList = (FreeList) VMObjectFactory.newObject(FreeList.class, cur);
AdaptiveFreeList freeList = (AdaptiveFreeList) VMObjectFactory.newObject(AdaptiveFreeList.class, cur);
size += i*freeList.count();
cur= cur.addOffsetTo(IndexSetStride*FreeList.sizeOf());
cur= cur.addOffsetTo(IndexSetStride*AdaptiveFreeList.sizeOf());
}
// large block

View File

@ -1,72 +0,0 @@
/*
* @(#)FreeList.java
*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.memory;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.runtime.*;
public class FreeList extends VMObject {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("FreeList<FreeChunk>");
sizeField = type.getCIntegerField("_size");
countField = type.getCIntegerField("_count");
headerSize = type.getSize();
}
// Fields
private static CIntegerField sizeField;
private static CIntegerField countField;
private static long headerSize;
//Constructor
public FreeList(Address address) {
super(address);
}
// Accessors
public long size() {
return sizeField.getValue(addr);
}
public long count() {
return countField.getValue(addr);
}
public static long sizeOf() {
return headerSize;
}
}

View File

@ -48,7 +48,7 @@ public class Block extends VMObject {
preOrderField = new CIntField(type.getCIntegerField("_pre_order"), 0);
domDepthField = new CIntField(type.getCIntegerField("_dom_depth"), 0);
idomField = type.getAddressField("_idom");
freqField = type.getJFloatField("_freq");
freqField = type.getJDoubleField("_freq");
}
private static AddressField nodesField;
@ -57,7 +57,7 @@ public class Block extends VMObject {
private static CIntField preOrderField;
private static CIntField domDepthField;
private static AddressField idomField;
private static JFloatField freqField;
private static JDoubleField freqField;
public Block(Address addr) {
super(addr);
@ -67,8 +67,8 @@ public class Block extends VMObject {
return (int)preOrderField.getValue(getAddress());
}
public float freq() {
return (float)freqField.getValue(getAddress());
public double freq() {
return (double)freqField.getValue(getAddress());
}
public Node_List nodes() {

View File

@ -371,19 +371,23 @@ function sym2addr(dso, sym) {
return sa.dbg.lookup(dso, sym);
}
// returns the ClosestSymbol or null
function closestSymbolFor(addr) {
if (sa.cdbg == null) {
function loadObjectContainingPC(addr) {
if (sa.cdbg == null) {
// no CDebugger support, return null
return null;
} else {
var dso = sa.cdbg.loadObjectContainingPC(addr);
if (dso != null) {
return dso.closestSymbolToPC(addr);
} else {
return null;
}
}
}
return sa.cdbg.loadObjectContainingPC(addr);
}
// returns the ClosestSymbol or null
function closestSymbolFor(addr) {
var dso = loadObjectContainingPC(addr);
if (dso != null) {
return dso.closestSymbolToPC(addr);
}
return null;
}
// Address-to-symbol
@ -804,6 +808,16 @@ delete tmp;
// VM type to SA class map
var vmType2Class = new Object();
// C2 only classes
try {
vmType2Class["ExceptionBlob"] = sapkg.code.ExceptionBlob;
vmType2Class["UncommonTrapBlob"] = sapkg.code.UncommonTrapBlob;
} catch(e) {
// Ignore the exception. C2-specific objects might not be
// available in a client VM.
}
// This is *not* exhaustive. Add more if needed.
// code blobs
vmType2Class["BufferBlob"] = sapkg.code.BufferBlob;
@ -812,10 +826,8 @@ vmType2Class["RuntimeStub"] = sapkg.code.RuntimeStub;
vmType2Class["SafepointBlob"] = sapkg.code.SafepointBlob;
vmType2Class["C2IAdapter"] = sapkg.code.C2IAdapter;
vmType2Class["DeoptimizationBlob"] = sapkg.code.DeoptimizationBlob;
vmType2Class["ExceptionBlob"] = sapkg.code.ExceptionBlob;
vmType2Class["I2CAdapter"] = sapkg.code.I2CAdapter;
vmType2Class["OSRAdapter"] = sapkg.code.OSRAdapter;
vmType2Class["UncommonTrapBlob"] = sapkg.code.UncommonTrapBlob;
vmType2Class["PCDesc"] = sapkg.code.PCDesc;
// interpreter
@ -876,21 +888,29 @@ function isOop(addr) {
// returns description of given pointer as a String
function whatis(addr) {
addr = any2addr(addr);
var ptrLoc = findPtr(addr);
if (ptrLoc.isUnknown()) {
var vmType = vmTypeof(addr);
if (vmType != null) {
return "pointer to " + vmType.name;
} else {
var sym = closestSymbolFor(addr);
if (sym != null) {
return sym.name + '+' + sym.offset;
} else {
return ptrLoc.toString();
}
}
} else {
return ptrLoc.toString();
}
addr = any2addr(addr);
var ptrLoc = findPtr(addr);
if (!ptrLoc.isUnknown()) {
return ptrLoc.toString();
}
var vmType = vmTypeof(addr);
if (vmType != null) {
return "pointer to " + vmType.name;
}
var dso = loadObjectContainingPC(addr);
if (dso == null) {
return ptrLoc.toString();
}
var sym = dso.closestSymbolToPC(addr);
if (sym != null) {
return sym.name + '+' + sym.offset;
}
var s = dso.getName();
var p = s.lastIndexOf("/");
var base = dso.getBase();
return s.substring(p+1, s.length) + '+' + addr.minus(base);
}

View File

@ -260,7 +260,7 @@ ifeq ($(USE_CLANG), true)
WARNINGS_ARE_ERRORS += -Wno-empty-body
endif
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wformat=2 -Wno-error=format-nonliteral
ifeq ($(USE_CLANG),)
# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@ -289,7 +289,7 @@ CFLAGS += -fno-strict-aliasing
# The flags to use for an Optimized g++ build
ifeq ($(OS_VENDOR), Darwin)
# use -Os by default, unless -O3 can be proved to be worth the cost, as per policy
# <http://wikis.sun.com/display/OpenJDK/Mac+OS+X+Port+Compilers>
# <https://wiki.openjdk.java.net/display/MacOSXPort/Compiler+Errata>
OPT_CFLAGS_DEFAULT ?= SIZE
else
OPT_CFLAGS_DEFAULT ?= SPEED

View File

@ -215,7 +215,7 @@ ifeq ($(USE_CLANG), true)
WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
endif
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wno-error=format-nonliteral
ifeq ($(USE_CLANG),)
# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit

View File

@ -118,7 +118,7 @@ endif
# Compiler warnings are treated as errors
WARNINGS_ARE_ERRORS = -Werror
# Enable these warnings. See 'info gcc' about details on these options
WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef -Wformat=2 -Wno-error=format-nonliteral
CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
# Special cases
CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
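# (Illustrative; the object name os.o is just an example.) If CFLAGS_WARN/os.o
# is set, $(CFLAGS_WARN/$@) supplies it for os.o and the second term collapses
# to an undefined variable name, expanding empty; for all other objects the
# name collapses to CFLAGS_WARN/DEFAULT and the default flags apply.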

View File

@ -2076,6 +2076,8 @@ const bool Matcher::match_rule_supported(int opcode) {
return false;
switch (opcode) {
case Op_SqrtD:
return VM_Version::has_fsqrt();
case Op_CountLeadingZerosI:
case Op_CountLeadingZerosL:
case Op_CountTrailingZerosI:
@ -8740,7 +8742,7 @@ instruct negD_absD_reg(regD dst, regD src) %{
ins_pipe(pipe_class_default);
%}
// VM_Version::has_sqrt() decides if this node will be used.
// VM_Version::has_fsqrt() decides if this node will be used.
// Sqrt float double precision
instruct sqrtD_reg(regD dst, regD src) %{
match(Set dst (SqrtD src));

View File

@ -2037,19 +2037,6 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return L7_REGP_mask();
}
const RegMask Matcher::mathExactI_result_proj_mask() {
return G1_REGI_mask();
}
const RegMask Matcher::mathExactL_result_proj_mask() {
return G1_REGL_mask();
}
const RegMask Matcher::mathExactI_flags_proj_mask() {
return INT_FLAGS_mask();
}
%}

File diff suppressed because it is too large

View File

@ -651,7 +651,12 @@ class MacroAssembler: public Assembler {
Label& done, Label* slow_case = NULL,
BiasedLockingCounters* counters = NULL);
void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
#ifdef COMPILER2
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
// See the full description in macroAssembler_x86.cpp.
void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
void fast_unlock(Register obj, Register box, Register tmp);
#endif
Condition negate_condition(Condition cond);

View File

@ -1542,19 +1542,6 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return EBP_REG_mask();
}
const RegMask Matcher::mathExactI_result_proj_mask() {
return EAX_REG_mask();
}
const RegMask Matcher::mathExactL_result_proj_mask() {
ShouldNotReachHere();
return RegMask();
}
const RegMask Matcher::mathExactI_flags_proj_mask() {
return INT_FLAGS_mask();
}
// Returns true if the high 32 bits of the value is known to be zero.
bool is_operand_hi32_zero(Node* n) {
int opc = n->Opcode();
@ -2918,542 +2905,6 @@ encode %{
emit_d8 (cbuf,0 );
%}
// Because the transitions from emitted code to the runtime
// monitorenter/exit helper stubs are so slow it's critical that
// we inline both the stack-locking fast-path and the inflated fast path.
//
// See also: cmpFastLock and cmpFastUnlock.
//
// What follows is a specialized inline transliteration of the code
// in slow_enter() and slow_exit(). If we're concerned about I$ bloat
// another option would be to emit TrySlowEnter and TrySlowExit methods
// at startup-time. These methods would accept arguments as
// (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
// indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply
// marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
// In practice, however, the # of lock sites is bounded and is usually small.
// Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
// if the processor uses simple bimodal branch predictors keyed by EIP,
// since the helper routines would be called from multiple synchronization
// sites.
//
// An even better approach would be write "MonitorEnter()" and "MonitorExit()"
// in java - using j.u.c and unsafe - and just bind the lock and unlock sites
// to those specialized methods. That'd give us a mostly platform-independent
// implementation that the JITs could optimize and inline at their pleasure.
// Done correctly, the only time we'd need to cross into native code would be
// to park() or unpark() threads. We'd also need a few more unsafe operators
// to (a) prevent compiler-JIT reordering of non-volatile accesses, and
// (b) explicit barriers or fence operations.
//
// TODO:
//
// * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
// This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
// Given TLAB allocation, Self is usually manifested in a register, so passing it into
// the lock operators would typically be faster than reifying Self.
//
// * Ideally I'd define the primitives as:
// fast_lock (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
// fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
// Unfortunately ADLC bugs prevent us from expressing the ideal form.
// Instead, we're stuck with the rather awkward and brittle register assignments below.
// Furthermore the register assignments are overconstrained, possibly resulting in
// sub-optimal code near the synchronization site.
//
// * Eliminate the sp-proximity tests and just use "== Self" tests instead.
// Alternately, use a better sp-proximity test.
//
// * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
// Either one is sufficient to uniquely identify a thread.
// TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
//
// * Intrinsify notify() and notifyAll() for the common cases where the
// object is locked by the calling thread but the waitlist is empty.
// avoid the expensive JNI call to JVM_Notify() and JVM_NotifyAll().
//
// * use jccb and jmpb instead of jcc and jmp to improve code density.
// But beware of excessive branch density on AMD Opterons.
//
// * Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
// or failure of the fast-path. If the fast-path fails then we pass
// control to the slow-path, typically in C. In Fast_Lock and
// Fast_Unlock we often branch to DONE_LABEL, just to find that C2
// will emit a conditional branch immediately after the node.
// So we have branches to branches and lots of ICC.ZF games.
// Instead, it might be better to have C2 pass a "FailureLabel"
// into Fast_Lock and Fast_Unlock. In the case of success, control
// will drop through the node. ICC.ZF is undefined at exit.
// In the case of failure, the node will branch directly to the
// FailureLabel
// obj: object to lock
// box: on-stack box address (displaced header location) - KILLED
// rax,: tmp -- KILLED
// scr: tmp -- KILLED
enc_class Fast_Lock( eRegP obj, eRegP box, eAXRegI tmp, eRegP scr ) %{
Register objReg = as_Register($obj$$reg);
Register boxReg = as_Register($box$$reg);
Register tmpReg = as_Register($tmp$$reg);
Register scrReg = as_Register($scr$$reg);
// Ensure the register assignments are disjoint
guarantee (objReg != boxReg, "") ;
guarantee (objReg != tmpReg, "") ;
guarantee (objReg != scrReg, "") ;
guarantee (boxReg != tmpReg, "") ;
guarantee (boxReg != scrReg, "") ;
guarantee (tmpReg == as_Register(EAX_enc), "") ;
MacroAssembler masm(&cbuf);
if (_counters != NULL) {
masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
}
if (EmitSync & 1) {
// set box->dhw = unused_mark (3)
// Force all sync thru slow-path: slow_enter() and slow_exit()
masm.movptr (Address(boxReg, 0), int32_t(markOopDesc::unused_mark())) ;
masm.cmpptr (rsp, (int32_t)0) ;
} else
if (EmitSync & 2) {
Label DONE_LABEL ;
if (UseBiasedLocking) {
// Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
}
masm.movptr(tmpReg, Address(objReg, 0)) ; // fetch markword
masm.orptr (tmpReg, 0x1);
masm.movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
masm.jcc(Assembler::equal, DONE_LABEL);
// Recursive locking
masm.subptr(tmpReg, rsp);
masm.andptr(tmpReg, (int32_t) 0xFFFFF003 );
masm.movptr(Address(boxReg, 0), tmpReg);
masm.bind(DONE_LABEL) ;
} else {
// Possible cases that we'll encounter in fast_lock
// ------------------------------------------------
// * Inflated
// -- unlocked
// -- Locked
// = by self
// = by other
// * biased
// -- by Self
// -- by other
// * neutral
// * stack-locked
// -- by self
// = sp-proximity test hits
// = sp-proximity test generates false-negative
// -- by other
//
Label IsInflated, DONE_LABEL, PopDone ;
// TODO: optimize away redundant LDs of obj->mark and improve the markword triage
// order to reduce the number of conditional branches in the most common cases.
// Beware -- there's a subtle invariant that fetch of the markword
// at [FETCH], below, will never observe a biased encoding (*101b).
// If this invariant is not held we risk exclusion (safety) failure.
if (UseBiasedLocking && !UseOptoBiasInlining) {
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
}
masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
masm.testptr(tmpReg, 0x02) ; // Inflated v (Stack-locked or neutral)
masm.jccb (Assembler::notZero, IsInflated) ;
// Attempt stack-locking ...
masm.orptr (tmpReg, 0x1);
masm.movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
if (_counters != NULL) {
masm.cond_inc32(Assembler::equal,
ExternalAddress((address)_counters->fast_path_entry_count_addr()));
}
masm.jccb (Assembler::equal, DONE_LABEL);
// Recursive locking
masm.subptr(tmpReg, rsp);
masm.andptr(tmpReg, 0xFFFFF003 );
masm.movptr(Address(boxReg, 0), tmpReg);
if (_counters != NULL) {
masm.cond_inc32(Assembler::equal,
ExternalAddress((address)_counters->fast_path_entry_count_addr()));
}
masm.jmp (DONE_LABEL) ;
masm.bind (IsInflated) ;
// The object is inflated.
//
// TODO-FIXME: eliminate the ugly use of manifest constants:
// Use markOopDesc::monitor_value instead of "2".
// use markOop::unused_mark() instead of "3".
// The tmpReg value is an objectMonitor reference ORed with
// markOopDesc::monitor_value (2). We can either convert tmpReg to an
// objectmonitor pointer by masking off the "2" bit or we can just
// use tmpReg as an objectmonitor pointer but bias the objectmonitor
// field offsets with "-2" to compensate for and annul the low-order tag bit.
//
// I use the latter as it avoids AGI stalls.
// As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
// instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
//
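// Worked example (illustrative): if the ObjectMonitor lives at address M, the
// fetched markword in tmpReg is M|2, so [tmpReg + OFFSETOF(owner) - 2] resolves
// to [M + OFFSETOF(owner)]; the -2 cancels the tag bit without an extra AND.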
#define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)
// boxReg refers to the on-stack BasicLock in the current frame.
// We'd like to write:
// set box->_displaced_header = markOop::unused_mark(). Any non-0 value suffices.
// This is convenient but results in a ST-before-CAS penalty. The following CAS suffers
// additional latency as we have another ST in the store buffer that must drain.
if (EmitSync & 8192) {
masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
masm.get_thread (scrReg) ;
masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
masm.movptr(tmpReg, NULL_WORD); // consider: xor vs mov
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
} else
if ((EmitSync & 128) == 0) { // avoid ST-before-CAS
masm.movptr(scrReg, boxReg) ;
masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
// Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
// prefetchw [eax + Offset(_owner)-2]
masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
}
if ((EmitSync & 64) == 0) {
// Optimistic form: consider XORL tmpReg,tmpReg
masm.movptr(tmpReg, NULL_WORD) ;
} else {
// Can suffer RTS->RTO upgrades on shared or cold $ lines
// Test-And-CAS instead of CAS
masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner
masm.testptr(tmpReg, tmpReg) ; // Locked ?
masm.jccb (Assembler::notZero, DONE_LABEL) ;
}
// Appears unlocked - try to swing _owner from null to non-null.
// Ideally, I'd manifest "Self" with get_thread and then attempt
// to CAS the register containing Self into m->Owner.
// But we don't have enough registers, so instead we can either try to CAS
// rsp or the address of the box (in scr) into &m->owner. If the CAS succeeds
// we later store "Self" into m->Owner. Transiently storing a stack address
// (rsp or the address of the box) into m->owner is harmless.
// Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
masm.movptr(Address(scrReg, 0), 3) ; // box->_displaced_header = 3
masm.jccb (Assembler::notZero, DONE_LABEL) ;
masm.get_thread (scrReg) ; // beware: clobbers ICCs
masm.movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg) ;
masm.xorptr(boxReg, boxReg) ; // set icc.ZFlag = 1 to indicate success
// If the CAS fails we can either retry or pass control to the slow-path.
// We use the latter tactic.
// Pass the CAS result in the icc.ZFlag into DONE_LABEL
// If the CAS was successful ...
// Self has acquired the lock
// Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
// Intentional fall-through into DONE_LABEL ...
} else {
masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
masm.movptr(boxReg, tmpReg) ;
// Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
// prefetchw [eax + Offset(_owner)-2]
masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
}
if ((EmitSync & 64) == 0) {
// Optimistic form
masm.xorptr (tmpReg, tmpReg) ;
} else {
// Can suffer RTS->RTO upgrades on shared or cold $ lines
masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner
masm.testptr(tmpReg, tmpReg) ; // Locked ?
masm.jccb (Assembler::notZero, DONE_LABEL) ;
}
// Appears unlocked - try to swing _owner from null to non-null.
// Use either "Self" (in scr) or rsp as thread identity in _owner.
// Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
masm.get_thread (scrReg) ;
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
// If the CAS fails we can either retry or pass control to the slow-path.
// We use the latter tactic.
// Pass the CAS result in the icc.ZFlag into DONE_LABEL
// If the CAS was successful ...
// Self has acquired the lock
// Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
// Intentional fall-through into DONE_LABEL ...
}
// DONE_LABEL is a hot target - we'd really like to place it at the
// start of cache line by padding with NOPs.
// See the AMD and Intel software optimization manuals for the
// most efficient "long" NOP encodings.
// Unfortunately none of our alignment mechanisms suffice.
masm.bind(DONE_LABEL);
// Avoid branch-to-branch on AMD processors
// This appears to be superstition.
if (EmitSync & 32) masm.nop() ;
// At DONE_LABEL the icc ZFlag is set as follows ...
// Fast_Unlock uses the same protocol.
// ZFlag == 1 -> Success
// ZFlag == 0 -> Failure - force control through the slow-path
}
%}
// obj: object to unlock
// box: box address (displaced header location), killed. Must be EAX.
// rbx,: killed tmp; cannot be obj nor box.
//
// Some commentary on balanced locking:
//
// Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
// Methods that don't have provably balanced locking are forced to run in the
// interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
// The interpreter provides two properties:
// I1: At return-time the interpreter automatically and quietly unlocks any
// objects acquired by the current activation (frame). Recall that the
// interpreter maintains an on-stack list of locks currently held by
// a frame.
// I2: If a method attempts to unlock an object that is not held by the
// frame, the interpreter throws IMSX.
//
// Let's say A(), which has provably balanced locking, acquires O and then calls B().
// B() doesn't have provably balanced locking so it runs in the interpreter.
// Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O
// is still locked by A().
//
// The only other source of unbalanced locking would be JNI. The "Java Native Interface:
// Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
// should not be unlocked by "normal" java-level locking and vice-versa. The specification
// doesn't specify what will occur if a program engages in such mixed-mode locking, however.
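A hedged illustration of the balanced case (ordinary Java source; the names are made up): a javac-generated synchronized region always pairs monitorenter with the matching monitorexit in the same activation, which is what makes its locking provably balanced and eligible for Fast_Lock/Fast_Unlock.
  static int counter;
  static void balanced(Object lock) {
    synchronized (lock) { // monitorenter here and the matching monitorexit
      counter++;          // are in the same frame: provably balanced locking
    }
  }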
enc_class Fast_Unlock( nabxRegP obj, eAXRegP box, eRegP tmp) %{
Register objReg = as_Register($obj$$reg);
Register boxReg = as_Register($box$$reg);
Register tmpReg = as_Register($tmp$$reg);
guarantee (objReg != boxReg, "") ;
guarantee (objReg != tmpReg, "") ;
guarantee (boxReg != tmpReg, "") ;
guarantee (boxReg == as_Register(EAX_enc), "") ;
MacroAssembler masm(&cbuf);
if (EmitSync & 4) {
// Disable - inhibit all inlining. Force control through the slow-path
masm.cmpptr (rsp, 0) ;
} else
if (EmitSync & 8) {
Label DONE_LABEL ;
if (UseBiasedLocking) {
masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}
// classic stack-locking code ...
masm.movptr(tmpReg, Address(boxReg, 0)) ;
masm.testptr(tmpReg, tmpReg) ;
masm.jcc (Assembler::zero, DONE_LABEL) ;
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses EAX which is box
masm.bind(DONE_LABEL);
} else {
Label DONE_LABEL, Stacked, CheckSucc, Inflated ;
// Critically, the biased locking test must have precedence over
// and appear before the (box->dhw == 0) recursive stack-lock test.
if (UseBiasedLocking && !UseOptoBiasInlining) {
masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}
masm.cmpptr(Address(boxReg, 0), 0) ; // Examine the displaced header
masm.movptr(tmpReg, Address(objReg, 0)) ; // Examine the object's markword
masm.jccb (Assembler::zero, DONE_LABEL) ; // 0 indicates recursive stack-lock
masm.testptr(tmpReg, 0x02) ; // Inflated?
masm.jccb (Assembler::zero, Stacked) ;
masm.bind (Inflated) ;
// It's inflated.
// Despite our balanced locking property we still check that m->_owner == Self
// as java routines or native JNI code called by this thread might
// have released the lock.
// Refer to the comments in synchronizer.cpp for how we might encode extra
// state in _succ so we can avoid fetching EntryList|cxq.
//
// I'd like to add more cases in fast_lock() and fast_unlock() --
// such as recursive enter and exit -- but we have to be wary of
// I$ bloat, T$ effects and BP$ effects.
//
// If there's no contention try a 1-0 exit. That is, exit without
// a costly MEMBAR or CAS. See synchronizer.cpp for details on how
// we detect and recover from the race that the 1-0 exit admits.
//
// Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
// before it STs null into _owner, releasing the lock. Updates
// to data protected by the critical section must be visible before
// we drop the lock (and thus before any other thread could acquire
// the lock and observe the fields protected by the lock).
// IA32's memory-model is SPO, so STs are ordered with respect to
// each other and there's no need for an explicit barrier (fence).
// See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
masm.get_thread (boxReg) ;
if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
// prefetchw [ebx + Offset(_owner)-2]
masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2));
}
// Note that we could employ various encoding schemes to reduce
// the number of loads below (currently 4) to just 2 or 3.
// Refer to the comments in synchronizer.cpp.
// In practice the chain of fetches doesn't seem to impact performance, however.
if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
// Attempt to reduce branch density - AMD's branch predictor.
masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
masm.jccb (Assembler::notZero, DONE_LABEL) ;
masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ;
masm.jmpb (DONE_LABEL) ;
} else {
masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
masm.jccb (Assembler::notZero, DONE_LABEL) ;
masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
masm.jccb (Assembler::notZero, CheckSucc) ;
masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ;
masm.jmpb (DONE_LABEL) ;
}
// The following code fragment (EmitSync & 65536) improves the performance of
// contended applications and contended synchronization microbenchmarks.
// Unfortunately the emission of the code - even though not executed - causes regressions
// in scimark and jetstream, evidently because of $ effects. Replacing the code
// with an equal number of never-executed NOPs results in the same regression.
// We leave it off by default.
if ((EmitSync & 65536) != 0) {
Label LSuccess, LGoSlowPath ;
masm.bind (CheckSucc) ;
// Optional pre-test ... it's safe to elide this
if ((EmitSync & 16) == 0) {
masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
masm.jccb (Assembler::zero, LGoSlowPath) ;
}
// We have a classic Dekker-style idiom:
// ST m->_owner = 0 ; MEMBAR; LD m->_succ
// There are a number of ways to implement the barrier:
// (1) lock:andl &m->_owner, 0
// is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
// LOCK: ANDL [ebx+Offset(_Owner)-2], 0
// Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
// (2) If supported, an explicit MFENCE is appealing.
// In older IA32 processors MFENCE is slower than lock:add or xchg
// particularly if the write-buffer is full, as might be the case
// if stores closely precede the fence or fence-equivalent instruction.
// In more modern implementations MFENCE appears faster, however.
// (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
// The $lines underlying the top-of-stack should be in M-state.
// The locked add instruction is serializing, of course.
// (4) Use xchg, which is serializing
// mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
// (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
// The integer condition codes will tell us if succ was 0.
// Since _succ and _owner should reside in the same $line and
// we just stored into _owner, it's likely that the $line
// remains in M-state for the lock:orl.
//
// We currently use (3), although it's likely that switching to (2)
// is correct for the future.
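// Illustrative note: the movptr/lock-addl sequence just below is option (3)
// from the list above -- ST null into _owner, then a serializing lock:addl of
// zero to top-of-stack -- with MFENCE (option (2)) selected via FenceInstruction.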
masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ;
if (os::is_MP()) {
if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
masm.mfence();
} else {
masm.lock () ; masm.addptr(Address(rsp, 0), 0) ;
}
}
// Ratify _succ remains non-null
masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
masm.jccb (Assembler::notZero, LSuccess) ;
masm.xorptr(boxReg, boxReg) ; // box is really EAX
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
masm.jccb (Assembler::notEqual, LSuccess) ;
// Since we're low on registers we installed rsp as a placeholder in _owner.
// Now install Self over rsp. This is safe as we're transitioning from
// non-null to non-null.
masm.get_thread (boxReg) ;
masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg) ;
// Intentional fall-through into LGoSlowPath ...
masm.bind (LGoSlowPath) ;
masm.orptr(boxReg, 1) ; // set ICC.ZF=0 to indicate failure
masm.jmpb (DONE_LABEL) ;
masm.bind (LSuccess) ;
masm.xorptr(boxReg, boxReg) ; // set ICC.ZF=1 to indicate success
masm.jmpb (DONE_LABEL) ;
}
masm.bind (Stacked) ;
// It's not inflated and it's not recursively stack-locked and it's not biased.
// It must be stack-locked.
// Try to reset the header to displaced header.
// The "box" value on the stack is stable, so we can reload
// and be assured we observe the same value as above.
masm.movptr(tmpReg, Address(boxReg, 0)) ;
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses EAX which is box
// Intentional fall-through into DONE_LABEL
// DONE_LABEL is a hot target - we'd really like to place it at the
// start of cache line by padding with NOPs.
// See the AMD and Intel software optimization manuals for the
// most efficient "long" NOP encodings.
// Unfortunately none of our alignment mechanisms suffice.
if ((EmitSync & 65536) == 0) {
masm.bind (CheckSucc) ;
}
masm.bind(DONE_LABEL);
// Avoid branch to branch on AMD processors
if (EmitSync & 32768) { masm.nop() ; }
}
%}
enc_class enc_pop_rdx() %{
emit_opcode(cbuf,0x5A);
%}
@ -7545,44 +6996,6 @@ instruct cmovL_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegL dst, eRegL src) %{
//----------Arithmetic Instructions--------------------------------------------
//----------Addition Instructions----------------------------------------------
instruct addExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);
format %{ "ADD $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct addExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);
format %{ "ADD $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct addExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
%{
match(AddExactI dst (LoadI src));
effect(DEF cr);
ins_cost(125);
format %{ "ADD $dst,$src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$Address);
%}
ins_pipe( ialu_reg_mem );
%}
// Integer Addition Instructions
instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
match(Set dst (AddI dst src));
@ -7892,43 +7305,6 @@ instruct xchgP( memory mem, pRegP newval) %{
//----------Subtraction Instructions-------------------------------------------
instruct subExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
%{
match(SubExactI dst src);
effect(DEF cr);
format %{ "SUB $dst, $src\t# subExact int" %}
ins_encode %{
__ subl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct subExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
%{
match(SubExactI dst src);
effect(DEF cr);
format %{ "SUB $dst, $src\t# subExact int" %}
ins_encode %{
__ subl($dst$$Register, $src$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct subExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
%{
match(SubExactI dst (LoadI src));
effect(DEF cr);
ins_cost(125);
format %{ "SUB $dst,$src\t# subExact int" %}
ins_encode %{
__ subl($dst$$Register, $src$$Address);
%}
ins_pipe( ialu_reg_mem );
%}
// Integer Subtraction Instructions
instruct subI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
match(Set dst (SubI dst src));
@ -7997,17 +7373,6 @@ instruct negI_eReg(rRegI dst, immI0 zero, eFlagsReg cr) %{
ins_pipe( ialu_reg );
%}
instruct negExactI_eReg(eAXRegI dst, eFlagsReg cr) %{
match(NegExactI dst);
effect(DEF cr);
format %{ "NEG $dst\t# negExact int"%}
ins_encode %{
__ negl($dst$$Register);
%}
ins_pipe(ialu_reg);
%}
//----------Multiplication/Division Instructions-------------------------------
// Integer Multiplication Instructions
// Multiply Register
@ -8219,46 +7584,6 @@ instruct mulL_eReg_con(eADXRegL dst, immL_127 src, rRegI tmp, eFlagsReg cr) %{
ins_pipe( pipe_slow );
%}
instruct mulExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
%{
match(MulExactI dst src);
effect(DEF cr);
ins_cost(300);
format %{ "IMUL $dst, $src\t# mulExact int" %}
ins_encode %{
__ imull($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
instruct mulExactI_eReg_imm(eAXRegI dst, rRegI src, immI imm, eFlagsReg cr)
%{
match(MulExactI src imm);
effect(DEF cr);
ins_cost(300);
format %{ "IMUL $dst, $src, $imm\t# mulExact int" %}
ins_encode %{
__ imull($dst$$Register, $src$$Register, $imm$$constant);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
instruct mulExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
%{
match(MulExactI dst (LoadI src));
effect(DEF cr);
ins_cost(350);
format %{ "IMUL $dst, $src\t# mulExact int" %}
ins_encode %{
__ imull($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem_alu0);
%}
// Integer DIV with Register
instruct divI_eReg(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{
match(Set rax (DivI rax div));
@ -9124,6 +8449,91 @@ instruct and_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{
instruct cadd_cmpLTMask_mem(ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr) %{
match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
*/
//----------Overflow Math Instructions-----------------------------------------
instruct overflowAddI_eReg(eFlagsReg cr, eAXRegI op1, rRegI op2)
%{
match(Set cr (OverflowAddI op1 op2));
effect(DEF cr, USE_KILL op1, USE op2);
format %{ "ADD $op1, $op2\t# overflow check int" %}
ins_encode %{
__ addl($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowAddI_rReg_imm(eFlagsReg cr, eAXRegI op1, immI op2)
%{
match(Set cr (OverflowAddI op1 op2));
effect(DEF cr, USE_KILL op1, USE op2);
format %{ "ADD $op1, $op2\t# overflow check int" %}
ins_encode %{
__ addl($op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowSubI_rReg(eFlagsReg cr, rRegI op1, rRegI op2)
%{
match(Set cr (OverflowSubI op1 op2));
format %{ "CMP $op1, $op2\t# overflow check int" %}
ins_encode %{
__ cmpl($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowSubI_rReg_imm(eFlagsReg cr, rRegI op1, immI op2)
%{
match(Set cr (OverflowSubI op1 op2));
format %{ "CMP $op1, $op2\t# overflow check int" %}
ins_encode %{
__ cmpl($op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowNegI_rReg(eFlagsReg cr, immI0 zero, eAXRegI op2)
%{
match(Set cr (OverflowSubI zero op2));
effect(DEF cr, USE_KILL op2);
format %{ "NEG $op2\t# overflow check int" %}
ins_encode %{
__ negl($op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowMulI_rReg(eFlagsReg cr, eAXRegI op1, rRegI op2)
%{
match(Set cr (OverflowMulI op1 op2));
effect(DEF cr, USE_KILL op1, USE op2);
format %{ "IMUL $op1, $op2\t# overflow check int" %}
ins_encode %{
__ imull($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
instruct overflowMulI_rReg_imm(eFlagsReg cr, rRegI op1, immI op2, rRegI tmp)
%{
match(Set cr (OverflowMulI op1 op2));
effect(DEF cr, TEMP tmp, USE op1, USE op2);
format %{ "IMUL $tmp, $op1, $op2\t# overflow check int" %}
ins_encode %{
__ imull($tmp$$Register, $op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
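A sketch of the Java-level pattern these overflow-check nodes plausibly serve (an assumption about usage, not taken from this change): Math.addExact and friends are intrinsified by C2, and the flags result of nodes like OverflowAddI feeds the branch to the ArithmeticException path. The method name saturatingAdd is made up for illustration.
  static int saturatingAdd(int a, int b) {
    try {
      return Math.addExact(a, b);  // add sets the overflow flag; jump-on-overflow
    } catch (ArithmeticException overflow) {
      return a > 0 ? Integer.MAX_VALUE : Integer.MIN_VALUE;
    }
  }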
//----------Long Instructions------------------------------------------------
// Add Long Register with Register
@ -13157,23 +12567,26 @@ instruct RethrowException()
// inlined locking and unlocking
instruct cmpFastLock( eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
match( Set cr (FastLock object box) );
effect( TEMP tmp, TEMP scr, USE_KILL box );
instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, USE_KILL box);
ins_cost(300);
format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
ins_encode( Fast_Lock(object,box,tmp,scr) );
ins_pipe( pipe_slow );
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
%}
ins_pipe(pipe_slow);
%}
instruct cmpFastUnlock( eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
match( Set cr (FastUnlock object box) );
effect( TEMP tmp, USE_KILL box );
instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
match(Set cr (FastUnlock object box));
effect(TEMP tmp, USE_KILL box);
ins_cost(300);
format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
ins_encode( Fast_Unlock(object,box,tmp) );
ins_pipe( pipe_slow );
ins_encode %{
__ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
%}
ins_pipe(pipe_slow);
%}

View File

@ -1657,18 +1657,6 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return PTR_RBP_REG_mask();
}
const RegMask Matcher::mathExactI_result_proj_mask() {
return INT_RAX_REG_mask();
}
const RegMask Matcher::mathExactL_result_proj_mask() {
return LONG_RAX_REG_mask();
}
const RegMask Matcher::mathExactI_flags_proj_mask() {
return INT_FLAGS_mask();
}
%}
//----------ENCODING BLOCK-----------------------------------------------------
@ -2599,231 +2587,6 @@ encode %{
%}
// obj: object to lock
// box: box address (header location) -- killed
// tmp: rax -- killed
// scr: rbx -- killed
//
// What follows is a direct transliteration of fast_lock() and fast_unlock()
// from i486.ad. See that file for comments.
// TODO: where possible switch from movq (r, 0) to movl(r,0) and
// use the shorter encoding. (Movl clears the high-order 32-bits).
enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
%{
Register objReg = as_Register((int)$obj$$reg);
Register boxReg = as_Register((int)$box$$reg);
Register tmpReg = as_Register($tmp$$reg);
Register scrReg = as_Register($scr$$reg);
MacroAssembler masm(&cbuf);
// Verify uniqueness of register assignments -- necessary but not sufficient
assert (objReg != boxReg && objReg != tmpReg &&
objReg != scrReg && tmpReg != scrReg, "invariant") ;
if (_counters != NULL) {
masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
}
if (EmitSync & 1) {
// Without cast to int32_t a movptr will destroy r10 which is typically obj
masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
} else
if (EmitSync & 2) {
Label DONE_LABEL;
if (UseBiasedLocking) {
// Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
}
// QQQ was movl...
masm.movptr(tmpReg, 0x1);
masm.orptr(tmpReg, Address(objReg, 0));
masm.movptr(Address(boxReg, 0), tmpReg);
if (os::is_MP()) {
masm.lock();
}
masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
masm.jcc(Assembler::equal, DONE_LABEL);
// Recursive locking
masm.subptr(tmpReg, rsp);
masm.andptr(tmpReg, 7 - os::vm_page_size());
masm.movptr(Address(boxReg, 0), tmpReg);
masm.bind(DONE_LABEL);
masm.nop(); // avoid branch to branch
} else {
Label DONE_LABEL, IsInflated, Egress;
masm.movptr(tmpReg, Address(objReg, 0)) ;
masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased
masm.jcc (Assembler::notZero, IsInflated) ;
// it's stack-locked, biased or neutral
// TODO: optimize markword triage order to reduce the number of
// conditional branches in the most common cases.
// Beware -- there's a subtle invariant that fetch of the markword
// at [FETCH], below, will never observe a biased encoding (*101b).
// If this invariant is not held we'll suffer exclusion (safety) failure.
if (UseBiasedLocking && !UseOptoBiasInlining) {
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
}
// was q will it destroy high?
masm.orl (tmpReg, 1) ;
masm.movptr(Address(boxReg, 0), tmpReg) ;
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
if (_counters != NULL) {
masm.cond_inc32(Assembler::equal,
ExternalAddress((address) _counters->fast_path_entry_count_addr()));
}
masm.jcc (Assembler::equal, DONE_LABEL);
// Recursive locking
masm.subptr(tmpReg, rsp);
masm.andptr(tmpReg, 7 - os::vm_page_size());
masm.movptr(Address(boxReg, 0), tmpReg);
if (_counters != NULL) {
masm.cond_inc32(Assembler::equal,
ExternalAddress((address) _counters->fast_path_entry_count_addr()));
}
masm.jmp (DONE_LABEL) ;
masm.bind (IsInflated) ;
// It's inflated
// TODO: someday avoid the ST-before-CAS penalty by
// relocating (deferring) the following ST.
// We should also think about trying a CAS without having
// fetched _owner. If the CAS is successful we may
// avoid an RTO->RTS upgrade on the $line.
// Without cast to int32_t a movptr will destroy r10 which is typically obj
masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
masm.mov (boxReg, tmpReg) ;
masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
masm.testptr(tmpReg, tmpReg) ;
masm.jcc (Assembler::notZero, DONE_LABEL) ;
// It's inflated and appears unlocked
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
// Intentional fall-through into DONE_LABEL ...
masm.bind (DONE_LABEL) ;
masm.nop () ; // avoid jmp to jmp
}
%}
// obj: object to unlock
// box: box address (displaced header location), killed
// RBX: killed tmp; cannot be obj nor box
enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
%{
Register objReg = as_Register($obj$$reg);
Register boxReg = as_Register($box$$reg);
Register tmpReg = as_Register($tmp$$reg);
MacroAssembler masm(&cbuf);
if (EmitSync & 4) {
masm.cmpptr(rsp, 0) ;
} else
if (EmitSync & 8) {
Label DONE_LABEL;
if (UseBiasedLocking) {
masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}
// Check whether the displaced header is 0
//(=> recursive unlock)
masm.movptr(tmpReg, Address(boxReg, 0));
masm.testptr(tmpReg, tmpReg);
masm.jcc(Assembler::zero, DONE_LABEL);
// If not recursive lock, reset the header to displaced header
if (os::is_MP()) {
masm.lock();
}
masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
masm.bind(DONE_LABEL);
masm.nop(); // avoid branch to branch
} else {
Label DONE_LABEL, Stacked, CheckSucc ;
if (UseBiasedLocking && !UseOptoBiasInlining) {
masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}
masm.movptr(tmpReg, Address(objReg, 0)) ;
masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
masm.jcc (Assembler::zero, DONE_LABEL) ;
masm.testl (tmpReg, 0x02) ;
masm.jcc (Assembler::zero, Stacked) ;
// It's inflated
masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
masm.xorptr(boxReg, r15_thread) ;
masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
masm.jcc (Assembler::notZero, DONE_LABEL) ;
masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
masm.jcc (Assembler::notZero, CheckSucc) ;
masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
masm.jmp (DONE_LABEL) ;
if ((EmitSync & 65536) == 0) {
Label LSuccess, LGoSlowPath ;
masm.bind (CheckSucc) ;
masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
masm.jcc (Assembler::zero, LGoSlowPath) ;
// I'd much rather use lock:andl m->_owner, 0 as it's faster than
// the explicit ST;MEMBAR combination, but masm doesn't currently support
// "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
// are all faster when the write buffer is populated.
masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
if (os::is_MP()) {
masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
}
masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
masm.jcc (Assembler::notZero, LSuccess) ;
masm.movptr (boxReg, (int32_t)NULL_WORD) ; // box is really EAX
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
masm.jcc (Assembler::notEqual, LSuccess) ;
// Intentional fall-through into slow-path
masm.bind (LGoSlowPath) ;
masm.orl (boxReg, 1) ; // set ICC.ZF=0 to indicate failure
masm.jmp (DONE_LABEL) ;
masm.bind (LSuccess) ;
masm.testl (boxReg, 0) ; // set ICC.ZF=1 to indicate success
masm.jmp (DONE_LABEL) ;
}
masm.bind (Stacked) ;
masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch
if (os::is_MP()) { masm.lock(); }
masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
if (EmitSync & 65536) {
masm.bind (CheckSucc) ;
}
masm.bind(DONE_LABEL);
if (EmitSync & 32768) {
masm.nop(); // avoid branch to branch
}
}
%}
enc_class enc_rethrow()
%{
cbuf.set_insts_mark();
@ -6963,82 +6726,6 @@ instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{
//----------Arithmetic Instructions--------------------------------------------
//----------Addition Instructions----------------------------------------------
instruct addExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);
format %{ "addl $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);
format %{ "addl $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct addExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
%{
match(AddExactI dst (LoadI src));
effect(DEF cr);
ins_cost(125); // XXX
format %{ "addl $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct addExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
%{
match(AddExactL dst src);
effect(DEF cr);
format %{ "addq $dst, $src\t# addExact long" %}
ins_encode %{
__ addq($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct addExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
%{
match(AddExactL dst src);
effect(DEF cr);
format %{ "addq $dst, $src\t# addExact long" %}
ins_encode %{
__ addq($dst$$Register, $src$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct addExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
%{
match(AddExactL dst (LoadL src));
effect(DEF cr);
ins_cost(125); // XXX
format %{ "addq $dst, $src\t# addExact long" %}
ins_encode %{
__ addq($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
match(Set dst (AddI dst src));
@ -7651,80 +7338,6 @@ instruct subI_mem_imm(memory dst, immI src, rFlagsReg cr)
ins_pipe(ialu_mem_imm);
%}
instruct subExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
%{
match(SubExactI dst src);
effect(DEF cr);
format %{ "subl $dst, $src\t# subExact int" %}
ins_encode %{
__ subl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct subExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
%{
match(SubExactI dst src);
effect(DEF cr);
format %{ "subl $dst, $src\t# subExact int" %}
ins_encode %{
__ subl($dst$$Register, $src$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct subExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
%{
match(SubExactI dst (LoadI src));
effect(DEF cr);
ins_cost(125);
format %{ "subl $dst, $src\t# subExact int" %}
ins_encode %{
__ subl($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct subExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
%{
match(SubExactL dst src);
effect(DEF cr);
format %{ "subq $dst, $src\t# subExact long" %}
ins_encode %{
__ subq($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct subExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
%{
match(SubExactL dst (LoadL src));
effect(DEF cr);
format %{ "subq $dst, $src\t# subExact long" %}
ins_encode %{
__ subq($dst$$Register, $src$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct subExactL_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
%{
match(SubExactI dst src);
effect(DEF cr);
ins_cost(125);
format %{ "subq $dst, $src\t# subExact long" %}
ins_encode %{
__ subq($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
%{
match(Set dst (SubL dst src));
@ -7841,31 +7454,6 @@ instruct negL_mem(memory dst, immL0 zero, rFlagsReg cr)
ins_pipe(ialu_reg);
%}
instruct negExactI_rReg(rax_RegI dst, rFlagsReg cr)
%{
match(NegExactI dst);
effect(KILL cr);
format %{ "negl $dst\t# negExact int" %}
ins_encode %{
__ negl($dst$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct negExactL_rReg(rax_RegL dst, rFlagsReg cr)
%{
match(NegExactL dst);
effect(KILL cr);
format %{ "negq $dst\t# negExact long" %}
ins_encode %{
__ negq($dst$$Register);
%}
ins_pipe(ialu_reg);
%}
//----------Multiplication/Division Instructions-------------------------------
// Integer Multiplication Instructions
// Multiply Register
@ -7982,86 +7570,6 @@ instruct mulHiL_rReg(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr)
ins_pipe(ialu_reg_reg_alu0);
%}
instruct mulExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
%{
match(MulExactI dst src);
effect(DEF cr);
ins_cost(300);
format %{ "imull $dst, $src\t# mulExact int" %}
ins_encode %{
__ imull($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
instruct mulExactI_rReg_imm(rax_RegI dst, rRegI src, immI imm, rFlagsReg cr)
%{
match(MulExactI src imm);
effect(DEF cr);
ins_cost(300);
format %{ "imull $dst, $src, $imm\t# mulExact int" %}
ins_encode %{
__ imull($dst$$Register, $src$$Register, $imm$$constant);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
instruct mulExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
%{
match(MulExactI dst (LoadI src));
effect(DEF cr);
ins_cost(350);
format %{ "imull $dst, $src\t# mulExact int" %}
ins_encode %{
__ imull($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem_alu0);
%}
instruct mulExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
%{
match(MulExactL dst src);
effect(DEF cr);
ins_cost(300);
format %{ "imulq $dst, $src\t# mulExact long" %}
ins_encode %{
__ imulq($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
instruct mulExactL_rReg_imm(rax_RegL dst, rRegL src, immL32 imm, rFlagsReg cr)
%{
match(MulExactL src imm);
effect(DEF cr);
ins_cost(300);
format %{ "imulq $dst, $src, $imm\t# mulExact long" %}
ins_encode %{
__ imulq($dst$$Register, $src$$Register, $imm$$constant);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
instruct mulExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
%{
match(MulExactL dst (LoadL src));
effect(DEF cr);
ins_cost(350);
format %{ "imulq $dst, $src\t# mulExact long" %}
ins_encode %{
__ imulq($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem_alu0);
%}
instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
rFlagsReg cr)
%{
@ -10670,6 +10178,174 @@ instruct encode_iso_array(rsi_RegP src, rdi_RegP dst, rdx_RegI len,
ins_pipe( pipe_slow );
%}
//----------Overflow Math Instructions-----------------------------------------
instruct overflowAddI_rReg(rFlagsReg cr, rax_RegI op1, rRegI op2)
%{
match(Set cr (OverflowAddI op1 op2));
effect(DEF cr, USE_KILL op1, USE op2);
format %{ "addl $op1, $op2\t# overflow check int" %}
ins_encode %{
__ addl($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowAddI_rReg_imm(rFlagsReg cr, rax_RegI op1, immI op2)
%{
match(Set cr (OverflowAddI op1 op2));
effect(DEF cr, USE_KILL op1, USE op2);
format %{ "addl $op1, $op2\t# overflow check int" %}
ins_encode %{
__ addl($op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowAddL_rReg(rFlagsReg cr, rax_RegL op1, rRegL op2)
%{
match(Set cr (OverflowAddL op1 op2));
effect(DEF cr, USE_KILL op1, USE op2);
format %{ "addq $op1, $op2\t# overflow check long" %}
ins_encode %{
__ addq($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowAddL_rReg_imm(rFlagsReg cr, rax_RegL op1, immL32 op2)
%{
match(Set cr (OverflowAddL op1 op2));
effect(DEF cr, USE_KILL op1, USE op2);
format %{ "addq $op1, $op2\t# overflow check long" %}
ins_encode %{
__ addq($op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowSubI_rReg(rFlagsReg cr, rRegI op1, rRegI op2)
%{
match(Set cr (OverflowSubI op1 op2));
format %{ "cmpl $op1, $op2\t# overflow check int" %}
ins_encode %{
__ cmpl($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowSubI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2)
%{
match(Set cr (OverflowSubI op1 op2));
format %{ "cmpl $op1, $op2\t# overflow check int" %}
ins_encode %{
__ cmpl($op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowSubL_rReg(rFlagsReg cr, rRegL op1, rRegL op2)
%{
match(Set cr (OverflowSubL op1 op2));
format %{ "cmpq $op1, $op2\t# overflow check long" %}
ins_encode %{
__ cmpq($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowSubL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2)
%{
match(Set cr (OverflowSubL op1 op2));
format %{ "cmpq $op1, $op2\t# overflow check long" %}
ins_encode %{
__ cmpq($op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowNegI_rReg(rFlagsReg cr, immI0 zero, rax_RegI op2)
%{
match(Set cr (OverflowSubI zero op2));
effect(DEF cr, USE_KILL op2);
format %{ "negl $op2\t# overflow check int" %}
ins_encode %{
__ negl($op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowNegL_rReg(rFlagsReg cr, immL0 zero, rax_RegL op2)
%{
match(Set cr (OverflowSubL zero op2));
effect(DEF cr, USE_KILL op2);
format %{ "negq $op2\t# overflow check long" %}
ins_encode %{
__ negq($op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct overflowMulI_rReg(rFlagsReg cr, rax_RegI op1, rRegI op2)
%{
match(Set cr (OverflowMulI op1 op2));
effect(DEF cr, USE_KILL op1, USE op2);
format %{ "imull $op1, $op2\t# overflow check int" %}
ins_encode %{
__ imull($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
instruct overflowMulI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2, rRegI tmp)
%{
match(Set cr (OverflowMulI op1 op2));
effect(DEF cr, TEMP tmp, USE op1, USE op2);
format %{ "imull $tmp, $op1, $op2\t# overflow check int" %}
ins_encode %{
__ imull($tmp$$Register, $op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
instruct overflowMulL_rReg(rFlagsReg cr, rax_RegL op1, rRegL op2)
%{
match(Set cr (OverflowMulL op1 op2));
effect(DEF cr, USE_KILL op1, USE op2);
format %{ "imulq $op1, $op2\t# overflow check long" %}
ins_encode %{
__ imulq($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
instruct overflowMulL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2, rRegL tmp)
%{
match(Set cr (OverflowMulL op1 op2));
effect(DEF cr, TEMP tmp, USE op1, USE op2);
format %{ "imulq $tmp, $op1, $op2\t# overflow check long" %}
ins_encode %{
__ imulq($tmp$$Register, $op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg_alu0);
%}
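// A minimal standalone sketch (not ADL; plain C++ for a GCC/Clang toolchain,
// names below are illustrative) of what the Overflow* rules above compute:
// perform the operation and report the CPU's signed-overflow condition,
// which the matcher then consumes with a branch on the overflow flag.
#include <cstdint>
#include <cstdio>

int main() {
  int32_t r;
  // Corresponds to "Set cr (OverflowAddI op1 op2)".
  bool of = __builtin_add_overflow(INT32_MAX, (int32_t)1, &r);
  std::printf("add overflow=%d\n", of);                  // prints 1
  // Corresponds to "Set cr (OverflowSubI zero op2)": negating INT32_MIN overflows.
  of = __builtin_sub_overflow((int32_t)0, INT32_MIN, &r);
  std::printf("neg overflow=%d\n", of);                  // prints 1
  return 0;
}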
//----------Control Flow Instructions------------------------------------------
// Signed compare Instructions
@ -11453,27 +11129,25 @@ instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
// ============================================================================
// inlined locking and unlocking
instruct cmpFastLock(rFlagsReg cr,
rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr)
%{
instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr) %{
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, USE_KILL box);
ins_cost(300);
format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %}
ins_encode(Fast_Lock(object, box, tmp, scr));
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
%}
ins_pipe(pipe_slow);
%}
instruct cmpFastUnlock(rFlagsReg cr,
rRegP object, rax_RegP box, rRegP tmp)
%{
instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{
match(Set cr (FastUnlock object box));
effect(TEMP tmp, USE_KILL box);
ins_cost(300);
format %{ "fastunlock $object,$box\t! kills $box,$tmp" %}
ins_encode(Fast_Unlock(object, box, tmp));
ins_encode %{
__ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
%}
ins_pipe(pipe_slow);
%}


@ -1496,6 +1496,10 @@ void* os::dll_lookup(void* handle, const char* name) {
return res;
}
void* os::get_default_process_handle() {
return (void*)::dlopen(NULL, RTLD_LAZY);
}
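// A minimal standalone sketch of the dlopen(NULL, RTLD_LAZY) idiom above,
// assuming a POSIX toolchain (older glibc needs -ldl at link time):
#include <dlfcn.h>
#include <cstdio>

int main() {
  void* self = dlopen(NULL, RTLD_LAZY);   // handle for the whole process image
  if (self != NULL) {
    // dlsym() on this handle resolves symbols already linked into the process.
    void* f = dlsym(self, "printf");
    std::printf("printf resolved at %p\n", f);
    dlclose(self);
  }
  return 0;
}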
void os::print_dll_info(outputStream *st) {
st->print_cr("Dynamic libraries:");
LoadedLibraries::print(st);


@ -1788,7 +1788,7 @@ void os::jvm_path(char *buf, jint buflen) {
jrelib_p = buf + len;
snprintf(jrelib_p, buflen-len, "/%s", COMPILER_VARIANT);
if (0 != access(buf, F_OK)) {
snprintf(jrelib_p, buflen-len, "");
snprintf(jrelib_p, buflen-len, "%s", "");
}
// If the path exists within JAVA_HOME, add the JVM library name
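// The replacement above writes the same empty string but sidesteps GCC's
// -Wformat-zero-length warning on empty format literals. A minimal sketch:
#include <cstdio>

int main() {
  char buf[16];
  // snprintf(buf, sizeof(buf), "");      // would warn: zero-length format string
  snprintf(buf, sizeof(buf), "%s", "");   // same effect, warning-free
  std::printf("[%s]\n", buf);             // prints []
  return 0;
}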


@ -1167,15 +1167,12 @@ void ArchDesc::buildMustCloneMap(FILE *fp_hpp, FILE *fp_cpp) {
|| strcmp(idealName,"CmpF") == 0
|| strcmp(idealName,"FastLock") == 0
|| strcmp(idealName,"FastUnlock") == 0
|| strcmp(idealName,"AddExactI") == 0
|| strcmp(idealName,"AddExactL") == 0
|| strcmp(idealName,"SubExactI") == 0
|| strcmp(idealName,"SubExactL") == 0
|| strcmp(idealName,"MulExactI") == 0
|| strcmp(idealName,"MulExactL") == 0
|| strcmp(idealName,"NegExactI") == 0
|| strcmp(idealName,"NegExactL") == 0
|| strcmp(idealName,"FlagsProj") == 0
|| strcmp(idealName,"OverflowAddI") == 0
|| strcmp(idealName,"OverflowAddL") == 0
|| strcmp(idealName,"OverflowSubI") == 0
|| strcmp(idealName,"OverflowSubL") == 0
|| strcmp(idealName,"OverflowMulI") == 0
|| strcmp(idealName,"OverflowMulL") == 0
|| strcmp(idealName,"Bool") == 0
|| strcmp(idealName,"Binary") == 0 ) {
// Removed ConI from the must_clone list. CPUs that cannot use


@ -103,6 +103,7 @@ friend class ciMethodHandle; \
friend class ciMethodType; \
friend class ciReceiverTypeData; \
friend class ciTypeEntries; \
friend class ciSpeculativeTrapData; \
friend class ciSymbol; \
friend class ciArray; \
friend class ciObjArray; \


@ -78,6 +78,35 @@ ciMethodData::ciMethodData() : ciMetadata(NULL) {
_parameters = NULL;
}
void ciMethodData::load_extra_data() {
MethodData* mdo = get_MethodData();
// Speculative trap entries also hold a pointer to a Method, so they need to be translated.
DataLayout* dp_src = mdo->extra_data_base();
DataLayout* end_src = mdo->extra_data_limit();
DataLayout* dp_dst = extra_data_base();
for (;; dp_src = MethodData::next_extra(dp_src), dp_dst = MethodData::next_extra(dp_dst)) {
assert(dp_src < end_src, "moved past end of extra data");
assert(dp_src->tag() == dp_dst->tag(), err_msg("should be same tags %d != %d", dp_src->tag(), dp_dst->tag()));
switch(dp_src->tag()) {
case DataLayout::speculative_trap_data_tag: {
ciSpeculativeTrapData* data_dst = new ciSpeculativeTrapData(dp_dst);
SpeculativeTrapData* data_src = new SpeculativeTrapData(dp_src);
data_dst->translate_from(data_src);
break;
}
case DataLayout::bit_data_tag:
break;
case DataLayout::no_tag:
case DataLayout::arg_info_data_tag:
// An empty slot or ArgInfoData entry marks the end of the trap data
return;
default:
fatal(err_msg("bad tag = %d", dp_src->tag()));
}
}
}
void ciMethodData::load_data() {
MethodData* mdo = get_MethodData();
if (mdo == NULL) {
@ -116,6 +145,8 @@ void ciMethodData::load_data() {
parameters->translate_from(mdo->parameters_type_data());
}
load_extra_data();
// Note: Extra data are all BitData, and do not need translation.
_current_mileage = MethodData::mileage_of(mdo->method());
_invocation_counter = mdo->invocation_count();
@ -156,6 +187,12 @@ void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
set_type(translate_klass(k));
}
void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
Method* m = data->as_SpeculativeTrapData()->method();
ciMethod* ci_m = CURRENT_ENV->get_method(m);
set_method(ci_m);
}
// Get the data at an arbitrary (sort of) data index.
ciProfileData* ciMethodData::data_at(int data_index) {
if (out_of_bounds(data_index)) {
@ -203,33 +240,65 @@ ciProfileData* ciMethodData::next_data(ciProfileData* current) {
return next;
}
// Translate a bci to its corresponding data, or NULL.
ciProfileData* ciMethodData::bci_to_data(int bci) {
ciProfileData* data = data_before(bci);
for ( ; is_valid(data); data = next_data(data)) {
if (data->bci() == bci) {
set_hint_di(dp_to_di(data->dp()));
return data;
} else if (data->bci() > bci) {
break;
}
}
ciProfileData* ciMethodData::bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots) {
// bci_to_extra_data(bci) ...
DataLayout* dp = data_layout_at(data_size());
DataLayout* end = data_layout_at(data_size() + extra_data_size());
for (; dp < end; dp = MethodData::next_extra(dp)) {
if (dp->tag() == DataLayout::no_tag) {
two_free_slots = false;
  for (; dp < end; dp = MethodData::next_extra(dp)) {
switch(dp->tag()) {
case DataLayout::no_tag:
_saw_free_extra_data = true; // observed an empty slot (common case)
two_free_slots = (MethodData::next_extra(dp)->tag() == DataLayout::no_tag);
return NULL;
case DataLayout::arg_info_data_tag:
return NULL; // ArgInfoData is at the end of extra data section.
case DataLayout::bit_data_tag:
if (m == NULL && dp->bci() == bci) {
return new ciBitData(dp);
}
break;
case DataLayout::speculative_trap_data_tag: {
ciSpeculativeTrapData* data = new ciSpeculativeTrapData(dp);
// data->method() might be null if the MDO is snapshotted
// concurrently with a trap
if (m != NULL && data->method() == m && dp->bci() == bci) {
return data;
}
break;
}
if (dp->tag() == DataLayout::arg_info_data_tag) {
break; // ArgInfoData is at the end of extra data section.
default:
fatal(err_msg("bad tag = %d", dp->tag()));
}
if (dp->bci() == bci) {
assert(dp->tag() == DataLayout::bit_data_tag, "sane");
return new ciBitData(dp);
}
return NULL;
}
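// A minimal standalone sketch of the tagged-slot scan used above; Slot and
// Tag are hypothetical stand-ins for DataLayout and its tag values:
#include <cstddef>

enum Tag { NO_TAG, BIT_DATA, ARG_INFO, SPEC_TRAP };
struct Slot { Tag tag; int bci; };

// Walk fixed-size slots; a free slot or the trailing ArgInfo record ends the
// section, and only slots whose bci matches are returned.
const Slot* find_slot(const Slot* dp, const Slot* end, int bci) {
  for (; dp < end; ++dp) {
    switch (dp->tag) {
      case NO_TAG:    return NULL;  // free slot: nothing follows it
      case ARG_INFO:  return NULL;  // ArgInfo sits at the very end
      case BIT_DATA:  if (dp->bci == bci) return dp; break;
      case SPEC_TRAP: break;        // the real code also matches on the Method
    }
  }
  return NULL;
}

int main() {
  Slot slots[] = { { BIT_DATA, 7 }, { NO_TAG, 0 } };
  return find_slot(slots, slots + 2, 7) != NULL ? 0 : 1;
}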
// Translate a bci to its corresponding data, or NULL.
ciProfileData* ciMethodData::bci_to_data(int bci, ciMethod* m) {
// If m is not NULL we look for a SpeculativeTrapData entry
if (m == NULL) {
ciProfileData* data = data_before(bci);
for ( ; is_valid(data); data = next_data(data)) {
if (data->bci() == bci) {
set_hint_di(dp_to_di(data->dp()));
return data;
} else if (data->bci() > bci) {
break;
}
}
}
bool two_free_slots = false;
ciProfileData* result = bci_to_extra_data(bci, m, two_free_slots);
if (result != NULL) {
return result;
}
if (m != NULL && !two_free_slots) {
// We were looking for a SpeculativeTrapData entry and didn't
// find one. There is no room left for more SpeculativeTrapData
// entries, so fall back to the non-SpeculativeTrapData entries.
return bci_to_data(bci, NULL);
}
return NULL;
}
@ -525,18 +594,25 @@ void ciMethodData::print_data_on(outputStream* st) {
st->print_cr("--- Extra data:");
DataLayout* dp = data_layout_at(data_size());
DataLayout* end = data_layout_at(data_size() + extra_data_size());
for (; dp < end; dp = MethodData::next_extra(dp)) {
if (dp->tag() == DataLayout::no_tag) continue;
if (dp->tag() == DataLayout::bit_data_tag) {
for (;; dp = MethodData::next_extra(dp)) {
assert(dp < end, "moved past end of extra data");
switch (dp->tag()) {
case DataLayout::no_tag:
continue;
case DataLayout::bit_data_tag:
data = new BitData(dp);
} else {
assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo");
break;
case DataLayout::arg_info_data_tag:
data = new ciArgInfoData(dp);
dp = end; // ArgInfoData is at the end of extra data section.
break;
default:
fatal(err_msg("unexpected tag %d", dp->tag()));
}
st->print("%d", dp_to_di(data->dp()));
st->fill_to(6);
data->print_data_on(st);
if (dp >= end) return;
}
}
@ -569,8 +645,8 @@ void ciReturnTypeEntry::print_data_on(outputStream* st) const {
st->cr();
}
void ciCallTypeData::print_data_on(outputStream* st) const {
print_shared(st, "ciCallTypeData");
void ciCallTypeData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "ciCallTypeData", extra);
if (has_arguments()) {
tab(st, true);
st->print("argument types");
@ -599,18 +675,18 @@ void ciReceiverTypeData::print_receiver_data_on(outputStream* st) const {
}
}
void ciReceiverTypeData::print_data_on(outputStream* st) const {
print_shared(st, "ciReceiverTypeData");
void ciReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "ciReceiverTypeData", extra);
print_receiver_data_on(st);
}
void ciVirtualCallData::print_data_on(outputStream* st) const {
print_shared(st, "ciVirtualCallData");
void ciVirtualCallData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "ciVirtualCallData", extra);
rtd_super()->print_receiver_data_on(st);
}
void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
print_shared(st, "ciVirtualCallTypeData");
void ciVirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "ciVirtualCallTypeData", extra);
rtd_super()->print_receiver_data_on(st);
if (has_arguments()) {
tab(st, true);
@ -624,8 +700,15 @@ void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
}
}
void ciParametersTypeData::print_data_on(outputStream* st) const {
st->print_cr("Parametertypes");
void ciParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
st->print_cr("ciParametersTypeData");
parameters()->print_data_on(st);
}
void ciSpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
st->print_cr("ciSpeculativeTrapData");
tab(st);
method()->print_short_name(st);
st->cr();
}
#endif


@ -31,6 +31,7 @@
#include "ci/ciUtilities.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/deoptimization.hpp"
class ciBitData;
class ciCounterData;
@ -44,6 +45,7 @@ class ciArgInfoData;
class ciCallTypeData;
class ciVirtualCallTypeData;
class ciParametersTypeData;
class ciSpeculativeTrapData;
typedef ProfileData ciProfileData;
@ -173,7 +175,7 @@ public:
}
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra) const;
#endif
};
@ -200,7 +202,7 @@ public:
}
void translate_receiver_data_from(const ProfileData* data);
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra) const;
void print_receiver_data_on(outputStream* st) const;
#endif
};
@ -225,7 +227,7 @@ public:
rtd_super()->translate_receiver_data_from(data);
}
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra) const;
#endif
};
@ -287,7 +289,7 @@ public:
}
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra) const;
#endif
};
@ -336,7 +338,26 @@ public:
}
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra) const;
#endif
};
class ciSpeculativeTrapData : public SpeculativeTrapData {
public:
ciSpeculativeTrapData(DataLayout* layout) : SpeculativeTrapData(layout) {}
virtual void translate_from(const ProfileData* data);
ciMethod* method() const {
return (ciMethod*)intptr_at(method_offset);
}
void set_method(ciMethod* m) {
set_intptr_at(method_offset, (intptr_t)m);
}
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra) const;
#endif
};
@ -436,6 +457,16 @@ private:
ciArgInfoData *arg_info() const;
address data_base() const {
return (address) _data;
}
DataLayout* limit_data_position() const {
return (DataLayout*)((address)data_base() + _data_size);
}
void load_extra_data();
ciProfileData* bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots);
public:
bool is_method_data() const { return true; }
@ -475,9 +506,11 @@ public:
ciProfileData* next_data(ciProfileData* current);
bool is_valid(ciProfileData* current) { return current != NULL; }
// Get the data at an arbitrary bci, or NULL if there is none.
ciProfileData* bci_to_data(int bci);
ciProfileData* bci_to_extra_data(int bci, bool create_if_missing);
DataLayout* extra_data_base() const { return limit_data_position(); }
// Get the data at an arbitrary bci, or NULL if there is none. If m
// is not NULL look for a SpeculativeTrapData if any first.
ciProfileData* bci_to_data(int bci, ciMethod* m = NULL);
uint overflow_trap_count() const {
return _orig.overflow_trap_count();
@ -496,12 +529,13 @@ public:
// Helpful query functions that decode trap_state.
int has_trap_at(ciProfileData* data, int reason);
int has_trap_at(int bci, int reason) {
return has_trap_at(bci_to_data(bci), reason);
int has_trap_at(int bci, ciMethod* m, int reason) {
assert((m != NULL) == Deoptimization::reason_is_speculate(reason), "inconsistent method/reason");
return has_trap_at(bci_to_data(bci, m), reason);
}
int trap_recompiled_at(ciProfileData* data);
int trap_recompiled_at(int bci) {
return trap_recompiled_at(bci_to_data(bci));
int trap_recompiled_at(int bci, ciMethod* m) {
return trap_recompiled_at(bci_to_data(bci, m));
}
void clear_escape_info();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -520,6 +520,13 @@ void ClassLoaderData::verify() {
}
}
bool ClassLoaderData::contains_klass(Klass* klass) {
for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
if (k == klass) return true;
}
return false;
}
// GC root of class loader data created.
ClassLoaderData* ClassLoaderDataGraph::_head = NULL;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -260,6 +260,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
jobject add_handle(Handle h);
void add_class(Klass* k);
void remove_class(Klass* k);
bool contains_klass(Klass* k);
void record_dependency(Klass* to, TRAPS);
void init_dependencies(TRAPS);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -707,7 +707,7 @@ void Dictionary::verify() {
loader_data->class_loader() == NULL ||
loader_data->class_loader()->is_instance(),
"checking type of class_loader");
e->verify(/*check_dictionary*/false);
e->verify();
probe->verify_protection_domain_set();
element_count++;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2650,23 +2650,6 @@ void SystemDictionary::verify() {
constraints()->verify(dictionary(), placeholders());
}
void SystemDictionary::verify_obj_klass_present(Symbol* class_name,
ClassLoaderData* loader_data) {
GCMutexLocker mu(SystemDictionary_lock);
Symbol* name;
Klass* probe = find_class(class_name, loader_data);
if (probe == NULL) {
probe = SystemDictionary::find_shared_class(class_name);
if (probe == NULL) {
name = find_placeholder(class_name, loader_data);
}
}
guarantee(probe != NULL || name != NULL,
"Loaded klasses should be in SystemDictionary");
}
// utility function for class load event
void SystemDictionary::post_class_load_event(const Ticks& start_time,
instanceKlassHandle k,


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -375,10 +375,6 @@ public:
static bool is_internal_format(Symbol* class_name);
#endif
// Verify class is in dictionary
static void verify_obj_klass_present(Symbol* class_name,
ClassLoaderData* loader_data);
// Initialization
static void initialize(TRAPS);


@ -50,27 +50,6 @@
// Only bother with this argument setup if dtrace is available
#ifndef USDT2
HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
const char*, int, const char*, int, const char*, int, void*, size_t);
HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
char*, int, char*, int, char*, int);
#define DTRACE_METHOD_UNLOAD_PROBE(method) \
{ \
Method* m = (method); \
if (m != NULL) { \
Symbol* klass_name = m->klass_name(); \
Symbol* name = m->name(); \
Symbol* signature = m->signature(); \
HS_DTRACE_PROBE6(hotspot, compiled__method__unload, \
klass_name->bytes(), klass_name->utf8_length(), \
name->bytes(), name->utf8_length(), \
signature->bytes(), signature->utf8_length()); \
} \
}
#else /* USDT2 */
#define DTRACE_METHOD_UNLOAD_PROBE(method) \
{ \
Method* m = (method); \
@ -84,7 +63,6 @@ HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
(char *) signature->bytes(), signature->utf8_length()); \
} \
}
#endif /* USDT2 */
#else // ndef DTRACE_ENABLED
@ -1520,16 +1498,6 @@ bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_
void nmethod::post_compiled_method_load_event() {
Method* moop = method();
#ifndef USDT2
HS_DTRACE_PROBE8(hotspot, compiled__method__load,
moop->klass_name()->bytes(),
moop->klass_name()->utf8_length(),
moop->name()->bytes(),
moop->name()->utf8_length(),
moop->signature()->bytes(),
moop->signature()->utf8_length(),
insts_begin(), insts_size());
#else /* USDT2 */
HOTSPOT_COMPILED_METHOD_LOAD(
(char *) moop->klass_name()->bytes(),
moop->klass_name()->utf8_length(),
@ -1538,7 +1506,6 @@ void nmethod::post_compiled_method_load_event() {
(char *) moop->signature()->bytes(),
moop->signature()->utf8_length(),
insts_begin(), insts_size());
#endif /* USDT2 */
if (JvmtiExport::should_post_compiled_method_load() ||
JvmtiExport::should_post_compiled_method_unload()) {


@ -60,38 +60,6 @@
// Only bother with this argument setup if dtrace is available
#ifndef USDT2
HS_DTRACE_PROBE_DECL8(hotspot, method__compile__begin,
char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t);
HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t, bool);
#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \
{ \
Symbol* klass_name = (method)->klass_name(); \
Symbol* name = (method)->name(); \
Symbol* signature = (method)->signature(); \
HS_DTRACE_PROBE8(hotspot, method__compile__begin, \
comp_name, strlen(comp_name), \
klass_name->bytes(), klass_name->utf8_length(), \
name->bytes(), name->utf8_length(), \
signature->bytes(), signature->utf8_length()); \
}
#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \
{ \
Symbol* klass_name = (method)->klass_name(); \
Symbol* name = (method)->name(); \
Symbol* signature = (method)->signature(); \
HS_DTRACE_PROBE9(hotspot, method__compile__end, \
comp_name, strlen(comp_name), \
klass_name->bytes(), klass_name->utf8_length(), \
name->bytes(), name->utf8_length(), \
signature->bytes(), signature->utf8_length(), (success)); \
}
#else /* USDT2 */
#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \
{ \
Symbol* klass_name = (method)->klass_name(); \
@ -115,7 +83,6 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
(char *) name->bytes(), name->utf8_length(), \
(char *) signature->bytes(), signature->utf8_length(), (success)); \
}
#endif /* USDT2 */
#else // ndef DTRACE_ENABLED


@ -1730,8 +1730,8 @@ CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
_dictionary->return_chunk(chunk);
#ifndef PRODUCT
if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
tl->verify_stats();
}
#endif // PRODUCT
@ -2541,10 +2541,10 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
#ifndef PRODUCT
void CompactibleFreeListSpace::check_free_list_consistency() const {
assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
"Some sizes can't be allocated without recourse to"
" linear allocation buffers");
assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
"else MIN_TREE_CHUNK_SIZE is wrong");
assert(IndexSetStart != 0, "IndexSetStart not initialized");
assert(IndexSetStride != 0, "IndexSetStride not initialized");


@ -3035,7 +3035,6 @@ void CMSCollector::verify_after_remark_work_1() {
true, // activate StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
&notOlder,
true, // walk code active on stacks
NULL,
NULL); // SSS: Provide correct closure
@ -3102,7 +3101,6 @@ void CMSCollector::verify_after_remark_work_2() {
true, // activate StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
&notOlder,
true, // walk code active on stacks
NULL,
&klass_closure);
@ -3680,12 +3678,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
ResourceMark rm;
HandleMark hm;
FalseClosure falseClosure;
// In the case of a synchronous collection, we will elide the
// remark step, so it's important to catch all the nmethod oops
// in this step.
// The final 'true' flag to gen_process_strong_roots will ensure this.
// If 'async' is true, we can relax the nmethod tracing.
MarkRefsIntoClosure notOlder(_span, &_markBitMap);
GenCollectedHeap* gch = GenCollectedHeap::heap();
@ -3738,7 +3730,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
true, // activate StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
&notOlder,
true, // walk all of code cache if (so & SO_AllCodeCache)
NULL,
&klass_closure);
}
@ -5237,7 +5228,6 @@ void CMSParInitialMarkTask::work(uint worker_id) {
false, // this is parallel code
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
&par_mri_cl,
true, // walk all of code cache if (so & SO_AllCodeCache)
NULL,
&klass_closure);
assert(_collector->should_unload_classes()
@ -5373,7 +5363,6 @@ void CMSParRemarkTask::work(uint worker_id) {
false, // this is parallel code
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
&par_mrias_cl,
true, // walk all of code cache if (so & SO_AllCodeCache)
NULL,
NULL); // The dirty klasses will be handled below
assert(_collector->should_unload_classes()
@ -5963,7 +5952,6 @@ void CMSCollector::do_remark_non_parallel() {
false, // use the local StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
&mrias_cl,
true, // walk code active on stacks
NULL,
NULL); // The dirty klasses will be handled below


@ -1383,13 +1383,6 @@ class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
// Closures of various sorts used by CMS to accomplish its work
//
// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
public:
void do_oop(oop* p) { guarantee(false, "Should be an empty set"); }
void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};
// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {


@ -35,14 +35,6 @@
#include "utilities/dtrace.hpp"
#ifndef USDT2
HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__begin);
HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__end);
HS_DTRACE_PROBE_DECL(hs_private, cms__remark__begin);
HS_DTRACE_PROBE_DECL(hs_private, cms__remark__end);
#endif /* !USDT2 */
//////////////////////////////////////////////////////////
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
@ -138,11 +130,7 @@ void VM_CMS_Initial_Mark::doit() {
// Nothing to do.
return;
}
#ifndef USDT2
HS_DTRACE_PROBE(hs_private, cms__initmark__begin);
#else /* USDT2 */
HS_PRIVATE_CMS_INITMARK_BEGIN();
#endif /* USDT2 */
_collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
@ -158,11 +146,7 @@ void VM_CMS_Initial_Mark::doit() {
_collector->_gc_timer_cm->register_gc_pause_end();
#ifndef USDT2
HS_DTRACE_PROBE(hs_private, cms__initmark__end);
#else /* USDT2 */
HS_PRIVATE_CMS_INITMARK_END();
#endif /* USDT2 */
}
//////////////////////////////////////////////////////////
@ -173,11 +157,7 @@ void VM_CMS_Final_Remark::doit() {
// Nothing to do.
return;
}
#ifndef USDT2
HS_DTRACE_PROBE(hs_private, cms__remark__begin);
#else /* USDT2 */
HS_PRIVATE_CMS_REMARK_BEGIN();
#endif /* USDT2 */
_collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
@ -194,11 +174,7 @@ void VM_CMS_Final_Remark::doit() {
_collector->save_heap_summary();
_collector->_gc_timer_cm->register_gc_pause_end();
#ifndef USDT2
HS_DTRACE_PROBE(hs_private, cms__remark__end);
#else /* USDT2 */
HS_PRIVATE_CMS_REMARK_END();
#endif /* USDT2 */
}
// VM operation to invoke a concurrent collection of a


@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@
nonstatic_field(LinearAllocBlock, _word_size, size_t) \
nonstatic_field(AFLBinaryTreeDictionary, _total_size, size_t) \
nonstatic_field(CompactibleFreeListSpace, _dictionary, AFLBinaryTreeDictionary*) \
nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList<FreeChunk>) \
nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], AdaptiveFreeList<FreeChunk>) \
nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock)


@ -3394,13 +3394,12 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
if (!silent) { gclog_or_tty->print("Roots "); }
VerifyRootsClosure rootsCl(vo);
G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
VerifyKlassClosure klassCl(this, &rootsCl);
// We apply the relevant closures to all the oops in the
// system dictionary, the string table and the code cache.
const int so = SO_AllClasses | SO_Strings | SO_AllCodeCache;
// system dictionary, class loader data graph and the string table.
// Don't verify the code cache here, since it's verified below.
const int so = SO_AllClasses | SO_Strings;
// Need cleared claim bits for the strong roots processing
ClassLoaderDataGraph::clear_claimed_marks();
@ -3408,10 +3407,14 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
process_strong_roots(true, // activate StrongRootsScope
ScanningOption(so), // roots scanning options
&rootsCl,
&blobsCl,
&klassCl
);
// Verify the nmethods in the code cache.
G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
CodeCache::blobs_do(&blobsCl);
bool failures = rootsCl.failures() || codeRootsCl.failures();
if (vo != VerifyOption_G1UseMarkWord) {
@ -5115,12 +5118,9 @@ g1_process_strong_roots(bool is_scavenging,
BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
CodeBlobToOopClosure scan_code_roots(&buf_scan_non_heap_roots, true /* do_marking */);
process_strong_roots(false, // no scoping; this is parallel code
so,
&buf_scan_non_heap_roots,
&scan_code_roots,
scan_klasses
);
@ -5180,12 +5180,6 @@ g1_process_strong_roots(bool is_scavenging,
_process_strong_tasks->all_tasks_completed();
}
void
G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
}
class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
private:
BoolObjectClosure* _is_alive;


@ -833,11 +833,6 @@ protected:
G1KlassScanClosure* scan_klasses,
int worker_i);
// Apply "blk" to all the weak roots of the system. These include
// JNI weak roots, the code cache, system dictionary, symbol table,
// string table, and referents of reachable weak refs.
void g1_process_weak_roots(OopClosure* root_closure);
// Frees a non-humongous region by initializing its contents and
// adding it to the free list that's passed as a parameter (this is
// usually a local list which will be appended to the master free


@ -133,7 +133,6 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
sh->process_strong_roots(true, // activate StrongRootsScope
SharedHeap::SO_SystemClasses,
&GenMarkSweep::follow_root_closure,
&GenMarkSweep::follow_code_root_closure,
&GenMarkSweep::follow_klass_closure);
// Process reference objects found during marking
@ -307,9 +306,8 @@ void G1MarkSweep::mark_sweep_phase3() {
ClassLoaderDataGraph::clear_claimed_marks();
sh->process_strong_roots(true, // activate StrongRootsScope
SharedHeap::SO_AllClasses,
SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
&GenMarkSweep::adjust_pointer_closure,
NULL, // do not touch code cache here
&GenMarkSweep::adjust_klass_closure);
assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
@ -317,7 +315,7 @@ void G1MarkSweep::mark_sweep_phase3() {
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
sh->process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
GenMarkSweep::adjust_marks();


@ -621,7 +621,6 @@ void ParNewGenTask::work(uint worker_id) {
false, // no scope; this is parallel code
SharedHeap::ScanningOption(so),
&par_scan_state.to_space_root_closure(),
true, // walk *all* scavengable nmethods
&par_scan_state.older_gen_closure(),
&klass_scan_closure);
par_scan_state.end_strong_roots();


@ -47,7 +47,6 @@ STWGCTimer* MarkSweep::_gc_timer = NULL;
SerialOldTracer* MarkSweep::_gc_tracer = NULL;
MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true);
void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }


@ -143,7 +143,6 @@ class MarkSweep : AllStatic {
// Public closures
static IsAliveClosure is_alive;
static FollowRootClosure follow_root_closure;
static CodeBlobToOopClosure follow_code_root_closure; // => follow_root_closure
static MarkAndPushClosure mark_and_push_closure;
static FollowKlassClosure follow_klass_closure;
static FollowStackClosure follow_stack_closure;


@ -41,33 +41,18 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#endif // INCLUDE_ALL_GCS
#ifndef USDT2
HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
HS_DTRACE_PROBE_DECL(hotspot, gc__end);
#endif /* !USDT2 */
// The same dtrace probe can't be inserted in two different files, so we
// have to call it here so that it lives in only one file. New probes can't
// be created for the other file anymore; the existing probes must remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
#ifndef USDT2
HS_DTRACE_PROBE1(hotspot, gc__begin, full);
HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#else /* USDT2 */
HOTSPOT_GC_BEGIN(
full);
HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#endif /* USDT2 */
}
void VM_GC_Operation::notify_gc_end() {
#ifndef USDT2
HS_DTRACE_PROBE(hotspot, gc__end);
HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#else /* USDT2 */
HOTSPOT_GC_END();
HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#endif /* USDT2 */
}
void VM_GC_Operation::acquire_pending_list_lock() {


@ -415,10 +415,10 @@
* On some architectures/platforms it should be possible to do this implicitly
*/
#undef CHECK_NULL
#define CHECK_NULL(obj_) \
if ((obj_) == NULL) { \
VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); \
} \
#define CHECK_NULL(obj_) \
if ((obj_) == NULL) { \
VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
} \
VERIFY_OOP(obj_)
#define VMdoubleConstZero() 0.0


@ -596,7 +596,7 @@ void BytecodePrinter::bytecode_epilog(int bci, outputStream* st) {
if (data != NULL) {
st->print(" %d", mdo->dp_to_di(data->dp()));
st->fill_to(6);
data->print_data_on(st);
data->print_data_on(st, mdo);
}
}
}


@ -44,16 +44,16 @@
// This is currently used in the Concurrent Mark&Sweep implementation.
////////////////////////////////////////////////////////////////////////////////
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t TreeChunk<Chunk_t, FreeList_t>::_min_tree_chunk_size = sizeof(TreeChunk<Chunk_t, FreeList_t>)/HeapWordSize;
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>* TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(Chunk_t* fc) {
// Do some assertion checking here.
return (TreeChunk<Chunk_t, FreeList_t>*) fc;
}
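// The recurring signature change in this file swaps a template template
// parameter for a plain type parameter. A minimal sketch with hypothetical
// toy names (ToyList, DictBefore, DictAfter):
template <class T> struct ToyList { int count; };

// Before: the container applies the template to Chunk itself.
template <class Chunk, template <class> class FL>
struct DictBefore { FL<Chunk> list; };

// After: callers pass the already-instantiated list type.
template <class Chunk, class FL>
struct DictAfter { FL list; };

int main() {
  DictBefore<int, ToyList> before;       // pass the template
  DictAfter<int, ToyList<int> > after;   // pass the instantiation
  before.list.count = after.list.count = 0;
  return before.list.count + after.list.count;
}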
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void TreeChunk<Chunk_t, FreeList_t>::verify_tree_chunk_list() const {
TreeChunk<Chunk_t, FreeList_t>* nextTC = (TreeChunk<Chunk_t, FreeList_t>*)next();
if (prev() != NULL) { // interior list node shouldn't have tree fields
@ -67,11 +67,11 @@ void TreeChunk<Chunk_t, FreeList_t>::verify_tree_chunk_list() const {
}
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeList<Chunk_t, FreeList_t>::TreeList() : _parent(NULL),
_left(NULL), _right(NULL) {}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeList<Chunk_t, FreeList_t>*
TreeList<Chunk_t, FreeList_t>::as_TreeList(TreeChunk<Chunk_t,FreeList_t>* tc) {
// This first free chunk in the list will be the tree list.
@ -88,20 +88,7 @@ TreeList<Chunk_t, FreeList_t>::as_TreeList(TreeChunk<Chunk_t,FreeList_t>* tc) {
return tl;
}
template <class Chunk_t, template <class> class FreeList_t>
TreeList<Chunk_t, FreeList_t>*
get_chunk(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither) {
FreeBlockDictionary<Chunk_t>::verify_par_locked();
Chunk_t* res = get_chunk_from_tree(size, dither);
assert(res == NULL || res->is_free(),
"Should be returning a free chunk");
assert(dither != FreeBlockDictionary<Chunk_t>::exactly ||
res->size() == size, "Not correct size");
return res;
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeList<Chunk_t, FreeList_t>*
TreeList<Chunk_t, FreeList_t>::as_TreeList(HeapWord* addr, size_t size) {
TreeChunk<Chunk_t, FreeList_t>* tc = (TreeChunk<Chunk_t, FreeList_t>*) addr;
@ -125,17 +112,17 @@ TreeList<Chunk_t, FreeList_t>::as_TreeList(HeapWord* addr, size_t size) {
// an over populated size. The general get_better_list() just returns
// the current list.
template <>
TreeList<FreeChunk, AdaptiveFreeList>*
TreeList<FreeChunk, AdaptiveFreeList>::get_better_list(
BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList>* dictionary) {
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
// A candidate chunk has been found. If it is already under
// populated, get a chunk associated with the hint for this
// chunk.
TreeList<FreeChunk, ::AdaptiveFreeList>* curTL = this;
TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
if (surplus() <= 0) {
/* Use the hint to find a size with a surplus, and reset the hint. */
TreeList<FreeChunk, ::AdaptiveFreeList>* hintTL = this;
TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
while (hintTL->hint() != 0) {
assert(hintTL->hint() > hintTL->size(),
"hint points in the wrong direction");
@ -163,14 +150,14 @@ TreeList<FreeChunk, AdaptiveFreeList>::get_better_list(
}
#endif // INCLUDE_ALL_GCS
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeList<Chunk_t, FreeList_t>*
TreeList<Chunk_t, FreeList_t>::get_better_list(
BinaryTreeDictionary<Chunk_t, FreeList_t>* dictionary) {
return this;
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeList<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc) {
TreeList<Chunk_t, FreeList_t>* retTL = this;
@ -286,7 +273,7 @@ TreeList<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::remove_chunk_repla
return retTL;
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void TreeList<Chunk_t, FreeList_t>::return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* chunk) {
assert(chunk != NULL, "returning NULL chunk");
assert(chunk->list() == this, "list should be set for chunk");
@ -301,7 +288,7 @@ void TreeList<Chunk_t, FreeList_t>::return_chunk_at_tail(TreeChunk<Chunk_t, Free
this->link_tail(chunk);
assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
FreeList_t<Chunk_t>::increment_count();
FreeList_t::increment_count();
debug_only(this->increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@ -311,7 +298,7 @@ void TreeList<Chunk_t, FreeList_t>::return_chunk_at_tail(TreeChunk<Chunk_t, Free
// is defined to be after the chunk pointer to by head(). This is
// because the TreeList<Chunk_t, FreeList_t> is embedded in the first TreeChunk<Chunk_t, FreeList_t> in the
// list. See the definition of TreeChunk<Chunk_t, FreeList_t>.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void TreeList<Chunk_t, FreeList_t>::return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* chunk) {
assert(chunk->list() == this, "list should be set for chunk");
assert(head() != NULL, "The tree list is embedded in the first chunk");
@ -329,13 +316,13 @@ void TreeList<Chunk_t, FreeList_t>::return_chunk_at_head(TreeChunk<Chunk_t, Free
}
head()->link_after(chunk);
assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
FreeList_t<Chunk_t>::increment_count();
FreeList_t::increment_count();
debug_only(this->increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void TreeChunk<Chunk_t, FreeList_t>::assert_is_mangled() const {
assert((ZapUnusedHeapArea &&
SpaceMangler::is_mangled((HeapWord*) Chunk_t::size_addr()) &&
@ -345,14 +332,14 @@ void TreeChunk<Chunk_t, FreeList_t>::assert_is_mangled() const {
"Space should be clear or mangled");
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::head_as_TreeChunk() {
assert(head() == NULL || (TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head())->list() == this),
"Wrong type of chunk?");
return TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head());
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::first_available() {
assert(head() != NULL, "The head of the list cannot be NULL");
Chunk_t* fc = head()->next();
@ -369,7 +356,7 @@ TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::first_available()
// Returns the block with the largest heap address amongst
// those in the list for this size; potentially slow and expensive,
// use with caution!
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::largest_address() {
assert(head() != NULL, "The head of the list cannot be NULL");
Chunk_t* fc = head()->next();
@ -392,7 +379,7 @@ TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::largest_address()
return retTC;
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
BinaryTreeDictionary<Chunk_t, FreeList_t>::BinaryTreeDictionary(MemRegion mr) {
assert((mr.byte_size() > min_size()), "minimum chunk size");
@ -405,17 +392,17 @@ BinaryTreeDictionary<Chunk_t, FreeList_t>::BinaryTreeDictionary(MemRegion mr) {
assert(total_free_blocks() == 1, "reset check failed");
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::inc_total_size(size_t inc) {
_total_size = _total_size + inc;
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::dec_total_size(size_t dec) {
_total_size = _total_size - dec;
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(MemRegion mr) {
assert((mr.byte_size() > min_size()), "minimum chunk size");
set_root(TreeList<Chunk_t, FreeList_t>::as_TreeList(mr.start(), mr.word_size()));
@ -423,13 +410,13 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(MemRegion mr) {
set_total_free_blocks(1);
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(HeapWord* addr, size_t byte_size) {
MemRegion mr(addr, heap_word_size(byte_size));
reset(mr);
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset() {
set_root(NULL);
set_total_size(0);
@ -437,7 +424,7 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset() {
}
// Get a free block of size at least size from tree, or NULL.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>*
BinaryTreeDictionary<Chunk_t, FreeList_t>::get_chunk_from_tree(
size_t size,
@ -496,7 +483,7 @@ BinaryTreeDictionary<Chunk_t, FreeList_t>::get_chunk_from_tree(
return retTC;
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_list(size_t size) const {
TreeList<Chunk_t, FreeList_t>* curTL;
for (curTL = root(); curTL != NULL;) {
@ -515,7 +502,7 @@ TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_l
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
bool BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_chunk_in_free_list(Chunk_t* tc) const {
size_t size = tc->size();
TreeList<Chunk_t, FreeList_t>* tl = find_list(size);
@ -526,7 +513,7 @@ bool BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_chunk_in_free_list(Chunk_
}
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_largest_dict() const {
TreeList<Chunk_t, FreeList_t> *curTL = root();
if (curTL != NULL) {
@ -541,7 +528,7 @@ Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_largest_dict() const {
// chunk in a list on a tree node, just unlink it.
// If it is the last chunk in the list (the next link is NULL),
// remove the node and repair the tree.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>*
BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_chunk_from_tree(TreeChunk<Chunk_t, FreeList_t>* tc) {
assert(tc != NULL, "Should not call with a NULL chunk");
@ -682,7 +669,7 @@ BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_chunk_from_tree(TreeChunk<Chun
// Remove the leftmost node (lm) in the tree and return it.
// If lm has a right child, link it to the left node of
// the parent of lm.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_tree_minimum(TreeList<Chunk_t, FreeList_t>* tl) {
assert(tl != NULL && tl->parent() != NULL, "really need a proper sub-tree");
// locate the subtree minimum by walking down left branches
@ -717,7 +704,7 @@ TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::remove
return curTL;
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::insert_chunk_in_tree(Chunk_t* fc) {
TreeList<Chunk_t, FreeList_t> *curTL, *prevTL;
size_t size = fc->size();
@ -783,7 +770,7 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::insert_chunk_in_tree(Chunk_t* fc
}
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::max_chunk_size() const {
FreeBlockDictionary<Chunk_t>::verify_par_locked();
TreeList<Chunk_t, FreeList_t>* tc = root();
@ -792,7 +779,7 @@ size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::max_chunk_size() const {
return tc->size();
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_list_length(TreeList<Chunk_t, FreeList_t>* tl) const {
size_t res;
res = tl->count();
@ -805,7 +792,7 @@ size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_list_length(TreeList<Chu
return res;
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_size_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
if (tl == NULL)
return 0;
@ -814,7 +801,7 @@ size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_size_in_tree(TreeList<Ch
total_size_in_tree(tl->right());
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
double BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_of_squared_block_sizes(TreeList<Chunk_t, FreeList_t>* const tl) const {
if (tl == NULL) {
return 0.0;
@ -826,7 +813,7 @@ double BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_of_squared_block_sizes(Tre
return curr;
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_free_blocks_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
if (tl == NULL)
return 0;
@ -835,14 +822,14 @@ size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_free_blocks_in_tree(Tree
total_free_blocks_in_tree(tl->right());
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::num_free_blocks() const {
assert(total_free_blocks_in_tree(root()) == total_free_blocks(),
"_total_free_blocks inconsistency");
return total_free_blocks();
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
if (tl == NULL)
return 0;
@ -850,12 +837,12 @@ size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height_helper(TreeList<Ch
tree_height_helper(tl->right()));
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height() const {
return tree_height_helper(root());
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
if (tl == NULL) {
return 0;
@ -864,18 +851,18 @@ size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_helper(TreeList<Ch
total_nodes_helper(tl->right());
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
return total_nodes_helper(root());
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::dict_census_update(size_t size, bool split, bool birth){}
#if INCLUDE_ALL_GCS
template <>
void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth){
TreeList<FreeChunk, AdaptiveFreeList>* nd = find_list(size);
void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
if (nd) {
if (split) {
if (birth) {
@ -903,7 +890,7 @@ void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool b
}
#endif // INCLUDE_ALL_GCS
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
bool BinaryTreeDictionary<Chunk_t, FreeList_t>::coal_dict_over_populated(size_t size) {
// For the general type of freelists, encourage coalescing by
// returning true.
@ -915,7 +902,7 @@ template <>
bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
if (FLSAlwaysCoalesceLarge) return true;
TreeList<FreeChunk, AdaptiveFreeList>* list_of_size = find_list(size);
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
// No list of the requested size implies overpopulated.
return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
list_of_size->count() > list_of_size->coal_desired();
@ -928,15 +915,15 @@ bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
// do_tree() walks the nodes in the binary tree applying do_list()
// to each list at each node.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class TreeCensusClosure : public StackObj {
protected:
virtual void do_list(FreeList_t<Chunk_t>* fl) = 0;
virtual void do_list(FreeList_t* fl) = 0;
public:
virtual void do_tree(TreeList<Chunk_t, FreeList_t>* tl) = 0;
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class AscendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
public:
void do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@ -948,7 +935,7 @@ class AscendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
}
};
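
The do_tree()/do_list() protocol above is a plain recursive visitor: do_tree() fixes the traversal order and a subclass's do_list() supplies the per-node action. A standalone sketch under simplified, hypothetical types (SimpleNode and SimpleList are not the HotSpot classes):

#include <cstddef>

struct SimpleList { int count; };
struct SimpleNode {
  SimpleNode* left;
  SimpleNode* right;
  SimpleList  list;
};

// Analogue of AscendTreeCensusClosure: visit the left subtree, apply
// the per-list action, then visit the right subtree.
class SimpleCensusClosure {
 protected:
  virtual void do_list(SimpleList* fl) = 0;
 public:
  virtual ~SimpleCensusClosure() {}
  void do_tree(SimpleNode* tl) {
    if (tl != NULL) {
      do_tree(tl->left);
      do_list(&tl->list);
      do_tree(tl->right);
    }
  }
};

// Example subclass: sum the counts of every list in the tree.
class TotalCountClosure : public SimpleCensusClosure {
 public:
  int total;
  TotalCountClosure() : total(0) {}
 protected:
  void do_list(SimpleList* fl) { total += fl->count; }
};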
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class DescendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
public:
void do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@ -962,7 +949,7 @@ class DescendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
// For each list in the tree, calculate the desired, desired
// coalesce, count before sweep, and surplus before sweep.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class BeginSweepClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
double _percentage;
float _inter_sweep_current;
@ -995,16 +982,16 @@ class BeginSweepClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
// Similar to TreeCensusClosure but searches the
// tree and returns promptly when found.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class TreeSearchClosure : public StackObj {
protected:
virtual bool do_list(FreeList_t<Chunk_t>* fl) = 0;
virtual bool do_list(FreeList_t* fl) = 0;
public:
virtual bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) = 0;
};
#if 0 // Don't need this yet but here for symmetry.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class AscendTreeSearchClosure : public TreeSearchClosure<Chunk_t> {
public:
bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@ -1018,7 +1005,7 @@ class AscendTreeSearchClosure : public TreeSearchClosure<Chunk_t> {
};
#endif
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class DescendTreeSearchClosure : public TreeSearchClosure<Chunk_t, FreeList_t> {
public:
bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@ -1033,14 +1020,14 @@ class DescendTreeSearchClosure : public TreeSearchClosure<Chunk_t, FreeList_t> {
// Searches the tree for a chunk that ends at the
// specified address.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class EndTreeSearchClosure : public DescendTreeSearchClosure<Chunk_t, FreeList_t> {
HeapWord* _target;
Chunk_t* _found;
public:
EndTreeSearchClosure(HeapWord* target) : _target(target), _found(NULL) {}
bool do_list(FreeList_t<Chunk_t>* fl) {
bool do_list(FreeList_t* fl) {
Chunk_t* item = fl->head();
while (item != NULL) {
if (item->end() == (uintptr_t*) _target) {
@ -1054,7 +1041,7 @@ class EndTreeSearchClosure : public DescendTreeSearchClosure<Chunk_t, FreeList_t
Chunk_t* found() { return _found; }
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_chunk_ends_at(HeapWord* target) const {
EndTreeSearchClosure<Chunk_t, FreeList_t> etsc(target);
bool found_target = etsc.do_tree(root());
@ -1063,7 +1050,7 @@ Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_chunk_ends_at(HeapWord*
return etsc.found();
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::begin_sweep_dict_census(double coalSurplusPercent,
float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
BeginSweepClosure<Chunk_t, FreeList_t> bsc(coalSurplusPercent, inter_sweep_current,
@ -1075,32 +1062,32 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::begin_sweep_dict_census(double c
// Closures and methods for calculating total bytes returned to the
// free lists in the tree.
#ifndef PRODUCT
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class InitializeDictReturnedBytesClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
public:
void do_list(FreeList_t<Chunk_t>* fl) {
void do_list(FreeList_t* fl) {
fl->set_returned_bytes(0);
}
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::initialize_dict_returned_bytes() {
InitializeDictReturnedBytesClosure<Chunk_t, FreeList_t> idrb;
idrb.do_tree(root());
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class ReturnedBytesClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
size_t _dict_returned_bytes;
public:
ReturnedBytesClosure() { _dict_returned_bytes = 0; }
void do_list(FreeList_t<Chunk_t>* fl) {
void do_list(FreeList_t* fl) {
_dict_returned_bytes += fl->returned_bytes();
}
size_t dict_returned_bytes() { return _dict_returned_bytes; }
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_dict_returned_bytes() {
ReturnedBytesClosure<Chunk_t, FreeList_t> rbc;
rbc.do_tree(root());
@ -1109,17 +1096,17 @@ size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_dict_returned_bytes() {
}
// Count the number of entries in the tree.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class treeCountClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t> {
public:
uint count;
treeCountClosure(uint c) { count = c; }
void do_list(FreeList_t<Chunk_t>* fl) {
void do_list(FreeList_t* fl) {
count++;
}
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_count() {
treeCountClosure<Chunk_t, FreeList_t> ctc(0);
ctc.do_tree(root());
@ -1128,7 +1115,7 @@ size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_count() {
#endif // PRODUCT
// Calculate surpluses for the lists in the tree.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class setTreeSurplusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
double percentage;
public:
@ -1144,14 +1131,14 @@ class setTreeSurplusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t
#endif // INCLUDE_ALL_GCS
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::set_tree_surplus(double splitSurplusPercent) {
setTreeSurplusClosure<Chunk_t, FreeList_t> sts(splitSurplusPercent);
sts.do_tree(root());
}
// Set hints for the lists in the tree.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class setTreeHintsClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t> {
size_t hint;
public:
@ -1170,14 +1157,14 @@ class setTreeHintsClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t>
#endif // INCLUDE_ALL_GCS
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::set_tree_hints(void) {
setTreeHintsClosure<Chunk_t, FreeList_t> sth(0);
sth.do_tree(root());
}
// Save count before previous sweep and splits and coalesces.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class clearTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
void do_list(FreeList<Chunk_t>* fl) {}
@ -1192,14 +1179,14 @@ class clearTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_
#endif // INCLUDE_ALL_GCS
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::clear_tree_census(void) {
clearTreeCensusClosure<Chunk_t, FreeList_t> ctc;
ctc.do_tree(root());
}
// Do reporting and post sweep clean up.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::end_sweep_dict_census(double splitSurplusPercent) {
// Does walking the tree 3 times hurt?
set_tree_surplus(splitSurplusPercent);
@ -1211,7 +1198,7 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::end_sweep_dict_census(double spl
}
// Print summary statistics
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::report_statistics() const {
FreeBlockDictionary<Chunk_t>::verify_par_locked();
gclog_or_tty->print("Statistics for BinaryTreeDictionary:\n"
@ -1230,22 +1217,22 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::report_statistics() const {
// Print census information - counts, births, deaths, etc.
// for each list in the tree. Also print some summary
// information.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class PrintTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
int _print_line;
size_t _total_free;
FreeList_t<Chunk_t> _total;
FreeList_t _total;
public:
PrintTreeCensusClosure() {
_print_line = 0;
_total_free = 0;
}
FreeList_t<Chunk_t>* total() { return &_total; }
FreeList_t* total() { return &_total; }
size_t total_free() { return _total_free; }
void do_list(FreeList<Chunk_t>* fl) {
if (++_print_line >= 40) {
FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
FreeList_t::print_labels_on(gclog_or_tty, "size");
_print_line = 0;
}
fl->print_on(gclog_or_tty);
@ -1256,7 +1243,7 @@ class PrintTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_
#if INCLUDE_ALL_GCS
void do_list(AdaptiveFreeList<Chunk_t>* fl) {
if (++_print_line >= 40) {
FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
FreeList_t::print_labels_on(gclog_or_tty, "size");
_print_line = 0;
}
fl->print_on(gclog_or_tty);
@ -1275,16 +1262,16 @@ class PrintTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_
#endif // INCLUDE_ALL_GCS
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_dict_census(void) const {
gclog_or_tty->print("\nBinaryTree\n");
FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
FreeList_t::print_labels_on(gclog_or_tty, "size");
PrintTreeCensusClosure<Chunk_t, FreeList_t> ptc;
ptc.do_tree(root());
FreeList_t<Chunk_t>* total = ptc.total();
FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, " ");
FreeList_t* total = ptc.total();
FreeList_t::print_labels_on(gclog_or_tty, " ");
}
#if INCLUDE_ALL_GCS
@ -1293,7 +1280,7 @@ void AFLBinaryTreeDictionary::print_dict_census(void) const {
gclog_or_tty->print("\nBinaryTree\n");
AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
PrintTreeCensusClosure<FreeChunk, AdaptiveFreeList> ptc;
PrintTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > ptc;
ptc.do_tree(root());
AdaptiveFreeList<FreeChunk>* total = ptc.total();
@ -1311,7 +1298,7 @@ void AFLBinaryTreeDictionary::print_dict_census(void) const {
}
#endif // INCLUDE_ALL_GCS
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
outputStream* _st;
int _print_line;
@ -1321,9 +1308,9 @@ class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t
_st = st;
_print_line = 0;
}
void do_list(FreeList_t<Chunk_t>* fl) {
void do_list(FreeList_t* fl) {
if (++_print_line >= 40) {
FreeList_t<Chunk_t>::print_labels_on(_st, "size");
FreeList_t::print_labels_on(_st, "size");
_print_line = 0;
}
fl->print_on(gclog_or_tty);
@ -1337,10 +1324,10 @@ class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t
}
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_free_lists(outputStream* st) const {
FreeList_t<Chunk_t>::print_labels_on(st, "size");
FreeList_t::print_labels_on(st, "size");
PrintFreeListsClosure<Chunk_t, FreeList_t> pflc(st);
pflc.do_tree(root());
}
@ -1349,7 +1336,7 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_free_lists(outputStream* s
// . _root has no parent
// . parent and child point to each other
// . each node's key correctly related to that of its child(ren)
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree() const {
guarantee(root() == NULL || total_free_blocks() == 0 ||
total_size() != 0, "_total_size shouldn't be 0?");
@ -1357,7 +1344,7 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree() const {
verify_tree_helper(root());
}
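
The invariants listed above can each be checked cheaply per node. A standalone sketch over a hypothetical VNode type (HotSpot's verify_tree_helper additionally validates list counts and chunk sizes):

#include <cstddef>

struct VNode {
  VNode* left;
  VNode* right;
  VNode* parent;
  long   key;
};

// Recursively check the structural invariants named above.
// _root has no parent: call as verify_node(root, NULL).
bool verify_node(const VNode* n, const VNode* expected_parent) {
  if (n == NULL) return true;
  // parent and child point to each other
  if (n->parent != expected_parent) return false;
  // each node's key correctly related to that of its child(ren)
  if (n->left  != NULL && n->left->key  >= n->key) return false;
  if (n->right != NULL && n->right->key <= n->key) return false;
  return verify_node(n->left, n) && verify_node(n->right, n);
}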
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_prev_free_ptrs(TreeList<Chunk_t, FreeList_t>* tl) {
size_t ct = 0;
for (Chunk_t* curFC = tl->head(); curFC != NULL; curFC = curFC->next()) {
@ -1371,7 +1358,7 @@ size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_prev_free_ptrs(TreeList
// Note: this helper is recursive rather than iterative, so use with
// caution on very deep trees; and watch out for stack overflow errors;
// In general, to be used only for debugging.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
if (tl == NULL)
return;
@ -1400,25 +1387,25 @@ void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree_helper(TreeList<Chun
verify_tree_helper(tl->right());
}
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify() const {
verify_tree();
guarantee(total_size() == total_size_in_tree(root()), "Total Size inconsistency");
}
template class TreeList<Metablock, FreeList>;
template class BinaryTreeDictionary<Metablock, FreeList>;
template class TreeChunk<Metablock, FreeList>;
template class TreeList<Metablock, FreeList<Metablock> >;
template class BinaryTreeDictionary<Metablock, FreeList<Metablock> >;
template class TreeChunk<Metablock, FreeList<Metablock> >;
template class TreeList<Metachunk, FreeList>;
template class BinaryTreeDictionary<Metachunk, FreeList>;
template class TreeChunk<Metachunk, FreeList>;
template class TreeList<Metachunk, FreeList<Metachunk> >;
template class BinaryTreeDictionary<Metachunk, FreeList<Metachunk> >;
template class TreeChunk<Metachunk, FreeList<Metachunk> >;
#if INCLUDE_ALL_GCS
// Explicitly instantiate these types for FreeChunk.
template class TreeList<FreeChunk, AdaptiveFreeList>;
template class BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>;
template class TreeChunk<FreeChunk, AdaptiveFreeList>;
template class TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >;
template class BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> >;
template class TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >;
#endif // INCLUDE_ALL_GCS
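
The recurring old/new pairs in this file all make one change: BinaryTreeDictionary and its helpers used to take the free-list type as a template template parameter and instantiate it themselves; they now take an already-instantiated class, which is why AdaptiveFreeList becomes AdaptiveFreeList<FreeChunk> at every use site. A minimal sketch of the two spellings (the Dict names are hypothetical):

template <class Chunk> struct MyFreeList { Chunk* head; };

// Before: template template parameter; the dictionary applies
// FreeList_t to Chunk_t itself.
template <class Chunk_t, template <class> class FreeList_t>
struct DictBefore : public FreeList_t<Chunk_t> {};

// After: ordinary type parameter; callers pass the instantiation.
template <class Chunk_t, class FreeList_t>
struct DictAfter : public FreeList_t {};

struct Chunk {};
DictBefore<Chunk, MyFreeList>        before_style;
DictAfter<Chunk, MyFreeList<Chunk> > after_style;  // note the new spelling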

View File

@ -37,18 +37,18 @@
// A TreeList is a FreeList which can be used to maintain a
// binary tree of free lists.
template <class Chunk_t, template <class> class FreeList_t> class TreeChunk;
template <class Chunk_t, template <class> class FreeList_t> class BinaryTreeDictionary;
template <class Chunk_t, template <class> class FreeList_t> class AscendTreeCensusClosure;
template <class Chunk_t, template <class> class FreeList_t> class DescendTreeCensusClosure;
template <class Chunk_t, template <class> class FreeList_t> class DescendTreeSearchClosure;
template <class Chunk_t, class FreeList_t> class TreeChunk;
template <class Chunk_t, class FreeList_t> class BinaryTreeDictionary;
template <class Chunk_t, class FreeList_t> class AscendTreeCensusClosure;
template <class Chunk_t, class FreeList_t> class DescendTreeCensusClosure;
template <class Chunk_t, class FreeList_t> class DescendTreeSearchClosure;
class FreeChunk;
template <class> class AdaptiveFreeList;
typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;
typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> > AFLBinaryTreeDictionary;
template <class Chunk_t, template <class> class FreeList_t>
class TreeList : public FreeList_t<Chunk_t> {
template <class Chunk_t, class FreeList_t>
class TreeList : public FreeList_t {
friend class TreeChunk<Chunk_t, FreeList_t>;
friend class BinaryTreeDictionary<Chunk_t, FreeList_t>;
friend class AscendTreeCensusClosure<Chunk_t, FreeList_t>;
@ -66,12 +66,12 @@ class TreeList : public FreeList_t<Chunk_t> {
TreeList<Chunk_t, FreeList_t>* right() const { return _right; }
// Wrapper on call to base class, to get the template to compile.
Chunk_t* head() const { return FreeList_t<Chunk_t>::head(); }
Chunk_t* tail() const { return FreeList_t<Chunk_t>::tail(); }
void set_head(Chunk_t* head) { FreeList_t<Chunk_t>::set_head(head); }
void set_tail(Chunk_t* tail) { FreeList_t<Chunk_t>::set_tail(tail); }
Chunk_t* head() const { return FreeList_t::head(); }
Chunk_t* tail() const { return FreeList_t::tail(); }
void set_head(Chunk_t* head) { FreeList_t::set_head(head); }
void set_tail(Chunk_t* tail) { FreeList_t::set_tail(tail); }
size_t size() const { return FreeList_t<Chunk_t>::size(); }
size_t size() const { return FreeList_t::size(); }
// Accessors for links in tree.
@ -90,7 +90,7 @@ class TreeList : public FreeList_t<Chunk_t> {
void clear_left() { _left = NULL; }
void clear_right() { _right = NULL; }
void clear_parent() { _parent = NULL; }
void initialize() { clear_left(); clear_right(), clear_parent(); FreeList_t<Chunk_t>::initialize(); }
void initialize() { clear_left(); clear_right(), clear_parent(); FreeList_t::initialize(); }
// For constructing a TreeList from a Tree chunk or
// address and size.
@ -139,7 +139,7 @@ class TreeList : public FreeList_t<Chunk_t> {
// on the free list for a node in the tree and is only removed if
// it is the last chunk on the free list.
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class TreeChunk : public Chunk_t {
friend class TreeList<Chunk_t, FreeList_t>;
TreeList<Chunk_t, FreeList_t>* _list;
@ -173,7 +173,7 @@ class TreeChunk : public Chunk_t {
};
template <class Chunk_t, template <class> class FreeList_t>
template <class Chunk_t, class FreeList_t>
class BinaryTreeDictionary: public FreeBlockDictionary<Chunk_t> {
friend class VMStructs;
size_t _total_size;

View File

@ -626,7 +626,6 @@ void DefNewGeneration::collect(bool full,
true, // activate StrongRootsScope
SharedHeap::ScanningOption(so),
&fsc_with_no_gc_barrier,
true, // walk *all* scavengable nmethods
&fsc_with_gc_barrier,
&klass_scan_closure);

View File

@ -594,20 +594,12 @@ gen_process_strong_roots(int level,
bool activate_scope,
SharedHeap::ScanningOption so,
OopsInGenClosure* not_older_gens,
bool do_code_roots,
OopsInGenClosure* older_gens,
KlassClosure* klass_closure) {
// General strong roots.
if (!do_code_roots) {
SharedHeap::process_strong_roots(activate_scope, so,
not_older_gens, NULL, klass_closure);
} else {
bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
SharedHeap::process_strong_roots(activate_scope, so,
not_older_gens, &code_roots, klass_closure);
}
SharedHeap::process_strong_roots(activate_scope, so,
not_older_gens, klass_closure);
if (younger_gens_as_roots) {
if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
@ -629,9 +621,8 @@ gen_process_strong_roots(int level,
_gen_process_strong_tasks->all_tasks_completed();
}
void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
CodeBlobClosure* code_roots) {
SharedHeap::process_weak_roots(root_closure, code_roots);
void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
SharedHeap::process_weak_roots(root_closure);
// "Local" "weak" refs
for (int i = 0; i < _n_gens; i++) {
_gens[i]->ref_processor()->weak_oops_do(root_closure);

View File

@ -414,15 +414,13 @@ public:
bool activate_scope,
SharedHeap::ScanningOption so,
OopsInGenClosure* not_older_gens,
bool do_code_roots,
OopsInGenClosure* older_gens,
KlassClosure* klass_closure);
// Apply "blk" to all the weak roots of the system. These include
// JNI weak roots, the code cache, system dictionary, symbol table,
// string table, and referents of reachable weak refs.
void gen_process_weak_roots(OopClosure* root_closure,
CodeBlobClosure* code_roots);
// Apply "root_closure" to all the weak roots of the system.
// These include JNI weak roots, string table,
// and referents of reachable weak refs.
void gen_process_weak_roots(OopClosure* root_closure);
// Set the saved marks of generations, if that makes sense.
// In particular, if any generation might iterate over the oops

View File

@ -212,7 +212,6 @@ void GenMarkSweep::mark_sweep_phase1(int level,
true, // activate StrongRootsScope
SharedHeap::SO_SystemClasses,
&follow_root_closure,
true, // walk code active on stacks
&follow_root_closure,
&follow_klass_closure);
@ -295,18 +294,12 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
gch->gen_process_strong_roots(level,
false, // Younger gens are not roots.
true, // activate StrongRootsScope
SharedHeap::SO_AllClasses,
SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
&adjust_pointer_closure,
false, // do not walk code
&adjust_pointer_closure,
&adjust_klass_closure);
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
/*do_marking=*/ false);
gch->gen_process_weak_roots(&adjust_pointer_closure,
&adjust_code_pointer_closure);
gch->gen_process_weak_roots(&adjust_pointer_closure);
adjust_marks();
GenAdjustPointersClosure blk;

View File

@ -46,8 +46,8 @@
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;
@ -790,7 +790,7 @@ MetaWord* BlockFreelist::get_block(size_t word_size) {
return NULL;
}
if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
// Dark matter. Too small for dictionary.
return NULL;
}
@ -810,7 +810,7 @@ MetaWord* BlockFreelist::get_block(size_t word_size) {
MetaWord* new_block = (MetaWord*)free_block;
assert(block_size >= word_size, "Incorrect size of block from freelist");
const size_t unused = block_size - word_size;
if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
return_block(new_block + word_size, unused);
}
@ -2240,7 +2240,7 @@ ChunkIndex ChunkManager::list_index(size_t size) {
void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
assert_lock_strong(_lock);
size_t raw_word_size = get_raw_word_size(word_size);
size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
assert(raw_word_size >= min_size,
err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
block_freelists()->return_block(p, raw_word_size);
@ -2296,7 +2296,7 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
void SpaceManager::retire_current_chunk() {
if (current_chunk() != NULL) {
size_t remaining_words = current_chunk()->free_word_size();
if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
inc_used_metrics(remaining_words);
}
@ -3279,7 +3279,7 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
assert(Thread::current()->is_VM_thread(), "should be the VM thread");
// Don't take Heap_lock
MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
// Dark matter. Too small for dictionary.
#ifdef ASSERT
Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
@ -3294,7 +3294,7 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
} else {
MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
// Dark matter. Too small for dictionary.
#ifdef ASSERT
Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);

View File

@ -139,7 +139,6 @@ SharedHeap::StrongRootsScope::~StrongRootsScope() {
void SharedHeap::process_strong_roots(bool activate_scope,
ScanningOption so,
OopClosure* roots,
CodeBlobClosure* code_roots,
KlassClosure* klass_closure) {
StrongRootsScope srs(this, activate_scope);
@ -156,15 +155,17 @@ void SharedHeap::process_strong_roots(bool activate_scope,
if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
JNIHandles::oops_do(roots);
CodeBlobToOopClosure code_roots(roots, true);
CLDToOopClosure roots_from_clds(roots);
// If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
// CLDs which are strongly reachable from the thread stacks.
CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
// All threads execute this; the individual threads are task groups.
if (CollectedHeap::use_parallel_gc_threads()) {
Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, &code_roots);
} else {
Threads::oops_do(roots, roots_from_clds_p, code_roots);
Threads::oops_do(roots, roots_from_clds_p, &code_roots);
}
if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
@ -206,17 +207,17 @@ void SharedHeap::process_strong_roots(bool activate_scope,
if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
if (so & SO_ScavengeCodeCache) {
assert(code_roots != NULL, "must supply closure for code cache");
assert(&code_roots != NULL, "must supply closure for code cache");
// We only visit parts of the CodeCache when scavenging.
CodeCache::scavenge_root_nmethods_do(code_roots);
CodeCache::scavenge_root_nmethods_do(&code_roots);
}
if (so & SO_AllCodeCache) {
assert(code_roots != NULL, "must supply closure for code cache");
assert(&code_roots != NULL, "must supply closure for code cache");
// CMSCollector uses this to do intermediate-strength collections.
// We scan the entire code cache, since CodeCache::do_unloading is not called.
CodeCache::blobs_do(code_roots);
CodeCache::blobs_do(&code_roots);
}
// Verify that the code cache contents are not subject to
// movement by a scavenging collection.
@ -233,13 +234,9 @@ public:
};
static AlwaysTrueClosure always_true;
void SharedHeap::process_weak_roots(OopClosure* root_closure,
CodeBlobClosure* code_roots) {
void SharedHeap::process_weak_roots(OopClosure* root_closure) {
// Global (weak) JNI handles
JNIHandles::weak_oops_do(&always_true, root_closure);
CodeCache::blobs_do(code_roots);
StringTable::oops_do(root_closure);
}
void SharedHeap::set_barrier_set(BarrierSet* bs) {

View File

@ -238,14 +238,10 @@ public:
void process_strong_roots(bool activate_scope,
ScanningOption so,
OopClosure* roots,
CodeBlobClosure* code_roots,
KlassClosure* klass_closure);
// Apply "blk" to all the weak roots of the system. These include
// JNI weak roots, the code cache, system dictionary, symbol table,
// string table.
void process_weak_roots(OopClosure* root_closure,
CodeBlobClosure* code_roots);
// Apply "root_closure" to the JNI weak roots..
void process_weak_roots(OopClosure* root_closure);
// The functions below are helper functions that a subclass of
// "SharedHeap" can use in the implementation of its virtual
@ -275,4 +271,8 @@ public:
size_t capacity);
};
inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) {
return static_cast<SharedHeap::ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
}
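
The operator above is what lets call sites such as SO_AllClasses | SO_Strings | SO_AllCodeCache (used in the GenMarkSweep hunk earlier) stay typed as ScanningOption instead of decaying to int. The same idiom in isolation (the Scanner type and flag values here are hypothetical):

struct Scanner {
  enum ScanningOption {
    SO_None    = 0x0,
    SO_Classes = 0x1,
    SO_Strings = 0x2,
    SO_Code    = 0x4
  };
};

// Without this overload, a | b yields int and will not bind to a
// ScanningOption parameter without an explicit cast.
inline Scanner::ScanningOption operator|(Scanner::ScanningOption so0,
                                         Scanner::ScanningOption so1) {
  return static_cast<Scanner::ScanningOption>(
      static_cast<int>(so0) | static_cast<int>(so1));
}

// Usage: the combined value keeps the enum type.
Scanner::ScanningOption so = Scanner::SO_Classes | Scanner::SO_Strings;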
#endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -214,8 +214,8 @@ void ArrayKlass::oop_print_on(oop obj, outputStream* st) {
// Verification
void ArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
Klass::verify_on(st, check_dictionary);
void ArrayKlass::verify_on(outputStream* st) {
Klass::verify_on(st);
if (component_mirror() != NULL) {
guarantee(component_mirror()->klass() != NULL, "should have a class");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -146,7 +146,7 @@ class ArrayKlass: public Klass {
void oop_print_on(oop obj, outputStream* st);
// Verification
void verify_on(outputStream* st, bool check_dictionary);
void verify_on(outputStream* st);
void oop_verify_on(oop obj, outputStream* st);
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,51 +77,6 @@
#ifdef DTRACE_ENABLED
#ifndef USDT2
HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
char*, intptr_t, oop, intptr_t);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
char*, intptr_t, oop, intptr_t, int);
#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type) \
{ \
char* data = NULL; \
int len = 0; \
Symbol* name = (clss)->name(); \
if (name != NULL) { \
data = (char*)name->bytes(); \
len = name->utf8_length(); \
} \
HS_DTRACE_PROBE4(hotspot, class__initialization__##type, \
data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type); \
}
#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
{ \
char* data = NULL; \
int len = 0; \
Symbol* name = (clss)->name(); \
if (name != NULL) { \
data = (char*)name->bytes(); \
len = name->utf8_length(); \
} \
HS_DTRACE_PROBE5(hotspot, class__initialization__##type, \
data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type, wait); \
}
#else /* USDT2 */
#define HOTSPOT_CLASS_INITIALIZATION_required HOTSPOT_CLASS_INITIALIZATION_REQUIRED
#define HOTSPOT_CLASS_INITIALIZATION_recursive HOTSPOT_CLASS_INITIALIZATION_RECURSIVE
@ -156,7 +111,6 @@ HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
HOTSPOT_CLASS_INITIALIZATION_##type( \
data, len, (clss)->class_loader(), thread_type, wait); \
}
#endif /* USDT2 */
#else // ndef DTRACE_ENABLED
@ -2238,15 +2192,7 @@ void InstanceKlass::clean_method_data(BoolObjectClosure* is_alive) {
for (int m = 0; m < methods()->length(); m++) {
MethodData* mdo = methods()->at(m)->method_data();
if (mdo != NULL) {
for (ProfileData* data = mdo->first_data();
mdo->is_valid(data);
data = mdo->next_data(data)) {
data->clean_weak_klass_links(is_alive);
}
ParametersTypeData* parameters = mdo->parameters_type_data();
if (parameters != NULL) {
parameters->clean_weak_klass_links(is_alive);
}
mdo->clean_method_data(is_alive);
}
}
}
@ -3184,7 +3130,7 @@ class VerifyFieldClosure: public OopClosure {
virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};
void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
void InstanceKlass::verify_on(outputStream* st) {
#ifndef PRODUCT
// Avoid redundant verifies, this really should be in product.
if (_verify_count == Universe::verify_count()) return;
@ -3192,14 +3138,11 @@ void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
#endif
// Verify Klass
Klass::verify_on(st, check_dictionary);
Klass::verify_on(st);
// Verify that klass is present in SystemDictionary if not already
// verifying the SystemDictionary.
if (is_loaded() && !is_anonymous() && check_dictionary) {
Symbol* h_name = name();
SystemDictionary::verify_obj_klass_present(h_name, class_loader_data());
}
// Verify that klass is present in ClassLoaderData
guarantee(class_loader_data()->contains_klass(this),
"this class isn't found in class loader data");
// Verify vtables
if (is_linked()) {

View File

@ -306,7 +306,7 @@ class InstanceKlass: public Klass {
// three cases:
// NULL: no implementor.
// A Klass* that's not itself: one implementor.
// Itsef: more than one implementors.
// Itself: more than one implementor.
// embedded host klass follows here
// The embedded host klass only exists in an anonymous class for
// dynamic language support (JSR 292 enabled). The host class grants
@ -1087,7 +1087,7 @@ public:
const char* internal_name() const;
// Verification
void verify_on(outputStream* st, bool check_dictionary);
void verify_on(outputStream* st);
void oop_verify_on(oop obj, outputStream* st);
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -638,7 +638,7 @@ void Klass::collect_statistics(KlassSizeStats *sz) const {
// Verification
void Klass::verify_on(outputStream* st, bool check_dictionary) {
void Klass::verify_on(outputStream* st) {
// This can be expensive, but it is worth checking that this klass is actually
// in the CLD graph but not in production.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -695,8 +695,8 @@ class Klass : public Metadata {
virtual const char* internal_name() const = 0;
// Verification
virtual void verify_on(outputStream* st, bool check_dictionary);
void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
virtual void verify_on(outputStream* st);
void verify() { verify_on(tty); }
#ifndef PRODUCT
bool verify_vtable_index(int index);

View File

@ -80,8 +80,42 @@ ProfileData::ProfileData() {
_data = NULL;
}
char* ProfileData::print_data_on_helper(const MethodData* md) const {
DataLayout* dp = md->extra_data_base();
DataLayout* end = md->extra_data_limit();
stringStream ss;
for (;; dp = MethodData::next_extra(dp)) {
assert(dp < end, "moved past end of extra data");
switch(dp->tag()) {
case DataLayout::speculative_trap_data_tag:
if (dp->bci() == bci()) {
SpeculativeTrapData* data = new SpeculativeTrapData(dp);
int trap = data->trap_state();
char buf[100];
ss.print("trap/");
data->method()->print_short_name(&ss);
ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
}
break;
case DataLayout::bit_data_tag:
break;
case DataLayout::no_tag:
case DataLayout::arg_info_data_tag:
return ss.as_string();
break;
default:
fatal(err_msg("unexpected tag %d", dp->tag()));
}
}
return NULL;
}
void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
print_data_on(st, print_data_on_helper(md));
}
#ifndef PRODUCT
void ProfileData::print_shared(outputStream* st, const char* name) const {
void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
st->print("bci: %d", bci());
st->fill_to(tab_width_one);
st->print("%s", name);
@ -91,9 +125,13 @@ void ProfileData::print_shared(outputStream* st, const char* name) const {
char buf[100];
st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
}
if (extra != NULL) {
st->print(extra);
}
int flags = data()->flags();
if (flags != 0)
if (flags != 0) {
st->print("flags(%d) ", flags);
}
}
void ProfileData::tab(outputStream* st, bool first) const {
@ -109,8 +147,8 @@ void ProfileData::tab(outputStream* st, bool first) const {
#ifndef PRODUCT
void BitData::print_data_on(outputStream* st) const {
print_shared(st, "BitData");
void BitData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "BitData", extra);
}
#endif // !PRODUCT
@ -120,8 +158,8 @@ void BitData::print_data_on(outputStream* st) const {
// A CounterData corresponds to a simple counter.
#ifndef PRODUCT
void CounterData::print_data_on(outputStream* st) const {
print_shared(st, "CounterData");
void CounterData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "CounterData", extra);
st->print_cr("count(%u)", count());
}
#endif // !PRODUCT
@ -150,8 +188,8 @@ void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
}
#ifndef PRODUCT
void JumpData::print_data_on(outputStream* st) const {
print_shared(st, "JumpData");
void JumpData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "JumpData", extra);
st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}
#endif // !PRODUCT
@ -332,8 +370,8 @@ void ReturnTypeEntry::print_data_on(outputStream* st) const {
st->cr();
}
void CallTypeData::print_data_on(outputStream* st) const {
CounterData::print_data_on(st);
void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
CounterData::print_data_on(st, extra);
if (has_arguments()) {
tab(st, true);
st->print("argument types");
@ -346,8 +384,8 @@ void CallTypeData::print_data_on(outputStream* st) const {
}
}
void VirtualCallTypeData::print_data_on(outputStream* st) const {
VirtualCallData::print_data_on(st);
void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
VirtualCallData::print_data_on(st, extra);
if (has_arguments()) {
tab(st, true);
st->print("argument types");
@ -400,12 +438,12 @@ void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
}
}
}
void ReceiverTypeData::print_data_on(outputStream* st) const {
print_shared(st, "ReceiverTypeData");
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "ReceiverTypeData", extra);
print_receiver_data_on(st);
}
void VirtualCallData::print_data_on(outputStream* st) const {
print_shared(st, "VirtualCallData");
void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "VirtualCallData", extra);
print_receiver_data_on(st);
}
#endif // !PRODUCT
@ -461,8 +499,8 @@ DataLayout* RetData::advance(MethodData *md, int bci) {
#endif // CC_INTERP
#ifndef PRODUCT
void RetData::print_data_on(outputStream* st) const {
print_shared(st, "RetData");
void RetData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "RetData", extra);
uint row;
int entries = 0;
for (row = 0; row < row_limit(); row++) {
@ -496,8 +534,8 @@ void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
}
#ifndef PRODUCT
void BranchData::print_data_on(outputStream* st) const {
print_shared(st, "BranchData");
void BranchData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "BranchData", extra);
st->print_cr("taken(%u) displacement(%d)",
taken(), displacement());
tab(st);
@ -570,8 +608,8 @@ void MultiBranchData::post_initialize(BytecodeStream* stream,
}
#ifndef PRODUCT
void MultiBranchData::print_data_on(outputStream* st) const {
print_shared(st, "MultiBranchData");
void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "MultiBranchData", extra);
st->print_cr("default_count(%u) displacement(%d)",
default_count(), default_displacement());
int cases = number_of_cases();
@ -584,8 +622,8 @@ void MultiBranchData::print_data_on(outputStream* st) const {
#endif
#ifndef PRODUCT
void ArgInfoData::print_data_on(outputStream* st) const {
print_shared(st, "ArgInfoData");
void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "ArgInfoData", extra);
int nargs = number_of_args();
for (int i = 0; i < nargs; i++) {
st->print(" 0x%x", arg_modified(i));
@ -616,10 +654,17 @@ bool ParametersTypeData::profiling_enabled() {
}
#ifndef PRODUCT
void ParametersTypeData::print_data_on(outputStream* st) const {
st->print("parameter types");
void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
st->print("parameter types", extra);
_parameters.print_data_on(st);
}
void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "SpeculativeTrapData", extra);
tab(st);
method()->print_short_name(st);
st->cr();
}
#endif
// ==================================================================
@ -745,7 +790,27 @@ int MethodData::compute_data_size(BytecodeStream* stream) {
return DataLayout::compute_size_in_bytes(cell_count);
}
int MethodData::compute_extra_data_count(int data_size, int empty_bc_count) {
bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
// Bytecodes for which we may use speculation
switch (code) {
case Bytecodes::_checkcast:
case Bytecodes::_instanceof:
case Bytecodes::_aastore:
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
case Bytecodes::_if_acmpeq:
case Bytecodes::_if_acmpne:
case Bytecodes::_invokestatic:
#ifdef COMPILER2
return UseTypeSpeculation;
#endif
default:
return false;
}
return false;
}
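
One subtlety in the switch above: without COMPILER2 defined, the listed bytecode cases fall through into the default and the function always answers false; with it, they return UseTypeSpeculation. A reduced illustration of the construct (may_speculate and the case values are hypothetical):

// Reduced model of the conditional-compilation fallthrough above.
bool may_speculate(int code) {
  switch (code) {
    case 1:  // stands in for _checkcast, _instanceof, ...
    case 2:  // ... _invokevirtual, _if_acmpeq, and so on
#ifdef COMPILER2
      return true;  // in HotSpot: return UseTypeSpeculation;
#endif
    default:
      return false;
  }
}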
int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
if (ProfileTraps) {
// Assume that up to 3% of BCIs with no MDP will need to allocate one.
int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
@ -756,7 +821,18 @@ int MethodData::compute_extra_data_count(int data_size, int empty_bc_count) {
extra_data_count = one_percent_of_data;
if (extra_data_count > empty_bc_count)
extra_data_count = empty_bc_count; // no need for more
return extra_data_count;
// Make sure we have a minimum number of extra data slots to
// allocate SpeculativeTrapData entries. We would want to have one
// entry per compilation that inlines this method and for which
// some type speculation assumption fails. So the room we need for
// the SpeculativeTrapData entries doesn't directly depend on the
// size of the method. Because it's hard to estimate, we reserve
// space for an arbitrary number of entries.
int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
(SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
return MAX2(extra_data_count, spec_data_count);
} else {
return 0;
}
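
As a worked example of the sizing rule above, with illustrative numbers only (the real values come from the SpecTrapLimitExtraEntries flag and the DataLayout/SpeculativeTrapData cell counts): if the limit is 3 entries and each entry is 2 cells plus a 1-cell header, then spec_data_count = 3 * (2 + 1) = 9, so a method that needs speculative traps reserves at least 9 extra slots even when the 3%-of-empty-BCIs heuristic yields fewer. In sketch form:

// Illustrative restatement with assumed constants; MAX2 in HotSpot
// returns the larger of its two arguments.
int sketch_extra_data_count(int heuristic_count, bool needs_spec_traps) {
  const int kLimitExtraEntries = 3;  // assumed SpecTrapLimitExtraEntries
  const int kTrapCells         = 2;  // assumed static_cell_count()
  const int kHeaderCells       = 1;  // assumed header_size_in_cells()
  int spec_data_count = (needs_spec_traps ? kLimitExtraEntries : 0) *
                        (kTrapCells + kHeaderCells);
  return (heuristic_count > spec_data_count) ? heuristic_count
                                             : spec_data_count;
}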
@ -769,15 +845,17 @@ int MethodData::compute_allocation_size_in_bytes(methodHandle method) {
BytecodeStream stream(method);
Bytecodes::Code c;
int empty_bc_count = 0; // number of bytecodes lacking data
bool needs_speculative_traps = false;
while ((c = stream.next()) >= 0) {
int size_in_bytes = compute_data_size(&stream);
data_size += size_in_bytes;
if (size_in_bytes == 0) empty_bc_count += 1;
needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
}
int object_size = in_bytes(data_offset()) + data_size;
// Add some extra DataLayout cells (at least one) to track stray traps.
int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);
// Add a cell to record information about modified arguments.
@ -1009,18 +1087,23 @@ MethodData::MethodData(methodHandle method, int size, TRAPS) {
_data[0] = 0; // apparently not set below.
BytecodeStream stream(method);
Bytecodes::Code c;
bool needs_speculative_traps = false;
while ((c = stream.next()) >= 0) {
int size_in_bytes = initialize_data(&stream, data_size);
data_size += size_in_bytes;
if (size_in_bytes == 0) empty_bc_count += 1;
needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
}
_data_size = data_size;
int object_size = in_bytes(data_offset()) + data_size;
// Add some extra DataLayout cells (at least one) to track stray traps.
int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);
// Let's zero the space for the extra data
Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
// Add a cell to record information about modified arguments.
// Set up _args_modified array after traps cells so that
// the code for traps cells works.
@ -1032,17 +1115,17 @@ MethodData::MethodData(methodHandle method, int size, TRAPS) {
int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
object_size += extra_size + arg_data_size;
int args_cell = ParametersTypeData::compute_cell_count(method());
int parms_cell = ParametersTypeData::compute_cell_count(method());
// If we are profiling parameters, we reserve an area near the end
// of the MDO after the slots for bytecodes (because there's no bci
// for method entry so they don't fit with the framework for the
// profiling of bytecodes). We store the offset within the MDO of
// this area (or -1 if no parameter is profiled)
if (args_cell > 0) {
object_size += DataLayout::compute_size_in_bytes(args_cell);
if (parms_cell > 0) {
object_size += DataLayout::compute_size_in_bytes(parms_cell);
_parameters_type_data_di = data_size + extra_size + arg_data_size;
DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
dp->initialize(DataLayout::parameters_type_data_tag, 0, args_cell);
dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
} else {
_parameters_type_data_di = -1;
}
@ -1133,39 +1216,113 @@ ProfileData* MethodData::bci_to_data(int bci) {
break;
}
}
return bci_to_extra_data(bci, false);
return bci_to_extra_data(bci, NULL, false);
}
// Translate a bci to its corresponding extra data, or NULL.
ProfileData* MethodData::bci_to_extra_data(int bci, bool create_if_missing) {
DataLayout* dp = extra_data_base();
DataLayout* end = extra_data_limit();
DataLayout* avail = NULL;
for (; dp < end; dp = next_extra(dp)) {
DataLayout* MethodData::next_extra(DataLayout* dp) {
int nb_cells = 0;
switch(dp->tag()) {
case DataLayout::bit_data_tag:
case DataLayout::no_tag:
nb_cells = BitData::static_cell_count();
break;
case DataLayout::speculative_trap_data_tag:
nb_cells = SpeculativeTrapData::static_cell_count();
break;
default:
fatal(err_msg("unexpected tag %d", dp->tag()));
}
return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
}
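
next_extra() above steps over entries whose size depends on their tag. The pointer arithmetic in isolation, over a hypothetical two-tag layout (Entry, CELL_SIZE and the per-tag cell counts are assumptions for the example):

#include <stdint.h>

struct Entry { uint8_t tag; };          // stand-in for DataLayout
const int CELL_SIZE = sizeof(intptr_t); // one profiling cell

// Advance to the next entry: 1 payload cell for "small" entries
// (tag 1), 2 for "large" ones (tag 2), plus a 1-cell header.
Entry* next_entry(Entry* dp) {
  int nb_cells = (dp->tag == 2) ? 2 : 1;
  int size_in_bytes = (1 + nb_cells) * CELL_SIZE;  // header + payload
  return (Entry*)((char*)dp + size_in_bytes);
}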
ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp) {
DataLayout* end = extra_data_limit();
for (;; dp = next_extra(dp)) {
assert(dp < end, "moved past end of extra data");
// No need for "OrderAccess::load_acquire" ops,
// since the data structure is monotonic.
if (dp->tag() == DataLayout::no_tag) break;
if (dp->tag() == DataLayout::arg_info_data_tag) {
dp = end; // ArgInfoData is at the end of extra data section.
switch(dp->tag()) {
case DataLayout::no_tag:
return NULL;
case DataLayout::arg_info_data_tag:
dp = end;
return NULL; // ArgInfoData is at the end of extra data section.
case DataLayout::bit_data_tag:
if (m == NULL && dp->bci() == bci) {
return new BitData(dp);
}
break;
}
if (dp->bci() == bci) {
assert(dp->tag() == DataLayout::bit_data_tag, "sane");
return new BitData(dp);
case DataLayout::speculative_trap_data_tag:
if (m != NULL) {
SpeculativeTrapData* data = new SpeculativeTrapData(dp);
// data->method() may be null in case of a concurrent
// allocation. Assume it's for the same method and use that
// entry in that case.
if (dp->bci() == bci) {
if (data->method() == NULL) {
return NULL;
} else if (data->method() == m) {
return data;
}
}
}
break;
default:
fatal(err_msg("unexpected tag %d", dp->tag()));
}
}
if (create_if_missing && dp < end) {
// Allocate this one. There is no mutual exclusion,
// so two threads could allocate different BCIs to the
// same data layout. This means these extra data
// records, like most other MDO contents, must not be
// trusted too much.
DataLayout temp;
temp.initialize(DataLayout::bit_data_tag, bci, 0);
dp->release_set_header(temp.header());
assert(dp->tag() == DataLayout::bit_data_tag, "sane");
//NO: assert(dp->bci() == bci, "no concurrent allocation");
return new BitData(dp);
return NULL;
}
// Translate a bci to its corresponding extra data, or NULL.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
// This code assumes an entry for a SpeculativeTrapData is 2 cells
assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
"code needs to be adjusted");
DataLayout* dp = extra_data_base();
DataLayout* end = extra_data_limit();
// Allocation in the extra data space has to be atomic because not
// all entries have the same size and non-atomic concurrent
// allocation would result in a corrupted extra data space.
while (true) {
ProfileData* result = bci_to_extra_data_helper(bci, m, dp);
if (result != NULL) {
return result;
}
if (create_if_missing && dp < end) {
assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
// SpeculativeTrapData is 2 slots. Make sure we have room.
if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
return NULL;
}
DataLayout temp;
temp.initialize(tag, bci, 0);
// May have been set concurrently
if (dp->header() != temp.header() && !dp->atomic_set_header(temp.header())) {
// Allocation failure because of concurrent allocation. Try
// again.
continue;
}
assert(dp->tag() == tag, "sane");
assert(dp->bci() == bci, "no concurrent allocation");
if (tag == DataLayout::bit_data_tag) {
return new BitData(dp);
} else {
// If being allocated concurrently, one trap may be lost
SpeculativeTrapData* data = new SpeculativeTrapData(dp);
data->set_method(m);
return data;
}
}
return NULL;
}
return NULL;
}
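
The retry loop above relies on the CAS-based atomic_set_header (shown in the dataLayout.hpp hunk further down) so that two threads racing to allocate differently sized entries cannot interleave and corrupt the space. The claim-and-retry idiom in isolation, using std::atomic in place of HotSpot's Atomic::cmpxchg_ptr (Slot and claim_slot are hypothetical):

#include <atomic>
#include <cstdint>

struct Slot {
  std::atomic<intptr_t> header;  // 0 plays the role of no_tag (free)
};

// Claim the first free slot by installing a nonzero header with a
// compare-and-swap; exactly one thread can win each slot, so a
// failed CAS means "taken concurrently, move on and retry".
Slot* claim_slot(Slot* slots, int n, intptr_t new_header) {
  for (int i = 0; i < n; i++) {
    intptr_t expected = 0;
    if (slots[i].header.compare_exchange_strong(expected, new_header)) {
      return &slots[i];  // we won the race for this slot
    }
    // expected now holds the concurrently installed header; keep scanning.
  }
  return nullptr;  // extra data space exhausted
}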
@ -1210,25 +1367,35 @@ void MethodData::print_data_on(outputStream* st) const {
for ( ; is_valid(data); data = next_data(data)) {
st->print("%d", dp_to_di(data->dp()));
st->fill_to(6);
data->print_data_on(st);
data->print_data_on(st, this);
}
st->print_cr("--- Extra data:");
DataLayout* dp = extra_data_base();
DataLayout* end = extra_data_limit();
for (; dp < end; dp = next_extra(dp)) {
for (;; dp = next_extra(dp)) {
assert(dp < end, "moved past end of extra data");
// No need for "OrderAccess::load_acquire" ops,
// since the data structure is monotonic.
if (dp->tag() == DataLayout::no_tag) continue;
if (dp->tag() == DataLayout::bit_data_tag) {
switch(dp->tag()) {
case DataLayout::no_tag:
continue;
case DataLayout::bit_data_tag:
data = new BitData(dp);
} else {
assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo");
break;
case DataLayout::speculative_trap_data_tag:
data = new SpeculativeTrapData(dp);
break;
case DataLayout::arg_info_data_tag:
data = new ArgInfoData(dp);
dp = end; // ArgInfoData is at the end of extra data section.
break;
default:
fatal(err_msg("unexpected tag %d", dp->tag()));
}
st->print("%d", dp_to_di(data->dp()));
st->fill_to(6);
data->print_data_on(st);
if (dp >= end) return;
}
}
#endif
@ -1351,3 +1518,110 @@ bool MethodData::profile_parameters_for_method(methodHandle m) {
assert(profile_parameters_jsr292_only(), "inconsistent");
return m->is_compiled_lambda_form();
}
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
if (shift == 0) {
return;
}
if (!reset) {
// Move all cells of trap entry at dp left by "shift" cells
intptr_t* start = (intptr_t*)dp;
intptr_t* end = (intptr_t*)next_extra(dp);
for (intptr_t* ptr = start; ptr < end; ptr++) {
*(ptr-shift) = *ptr;
}
} else {
// Reset "shift" cells stopping at dp
intptr_t* start = ((intptr_t*)dp) - shift;
intptr_t* end = (intptr_t*)dp;
for (intptr_t* ptr = start; ptr < end; ptr++) {
*ptr = 0;
}
}
}
// Remove SpeculativeTrapData entries that reference an unloaded
// method
void MethodData::clean_extra_data(BoolObjectClosure* is_alive) {
DataLayout* dp = extra_data_base();
DataLayout* end = extra_data_limit();
int shift = 0;
for (; dp < end; dp = next_extra(dp)) {
switch(dp->tag()) {
case DataLayout::speculative_trap_data_tag: {
SpeculativeTrapData* data = new SpeculativeTrapData(dp);
Method* m = data->method();
assert(m != NULL, "should have a method");
if (!m->method_holder()->is_loader_alive(is_alive)) {
// "shift" accumulates the number of cells for dead
// SpeculativeTrapData entries that have been seen so
// far. Following entries must be shifted left by that many
// cells to remove the dead SpeculativeTrapData entries.
shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
} else {
// Shift this entry left if it follows dead
// SpeculativeTrapData entries
clean_extra_data_helper(dp, shift);
}
break;
}
case DataLayout::bit_data_tag:
// Shift this entry left if it follows dead SpeculativeTrapData
// entries
clean_extra_data_helper(dp, shift);
continue;
case DataLayout::no_tag:
case DataLayout::arg_info_data_tag:
// We are at the end of the live trap entries. The previous "shift"
// cells contain entries that are either dead or were shifted
// left. They need to be reset to no_tag
clean_extra_data_helper(dp, shift, true);
return;
default:
fatal(err_msg("unexpected tag %d", dp->tag()));
}
}
}
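// Editor's note: a toy model of the compaction that clean_extra_data() and
// clean_extra_data_helper() perform together. Cells of dead entries (0 here)
// accumulate into "shift"; live cells slide left by that amount; the freed
// tail is zeroed. This is a flat-array simplification, not the MDO layout.
#include <cstdio>

int main() {
  int cells[8] = {1, 0, 0, 2, 3, 0, 4, 5};  // 0 marks a dead entry's cell
  int shift = 0;
  for (int i = 0; i < 8; i++) {
    if (cells[i] == 0) {
      shift++;                      // count dead cells seen so far
    } else {
      cells[i - shift] = cells[i];  // shift live cell left past the dead ones
    }
  }
  for (int i = 8 - shift; i < 8; i++) {
    cells[i] = 0;                   // reset the now-unused tail, as with no_tag
  }
  for (int i = 0; i < 8; i++) std::printf("%d ", cells[i]);
  std::printf("\n");                // prints: 1 2 3 4 5 0 0 0
}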
// Verify there's no unloaded method referenced by a
// SpeculativeTrapData entry
void MethodData::verify_extra_data_clean(BoolObjectClosure* is_alive) {
#ifdef ASSERT
DataLayout* dp = extra_data_base();
DataLayout* end = extra_data_limit();
for (; dp < end; dp = next_extra(dp)) {
switch(dp->tag()) {
case DataLayout::speculative_trap_data_tag: {
SpeculativeTrapData* data = new SpeculativeTrapData(dp);
Method* m = data->method();
assert(m != NULL && m->method_holder()->is_loader_alive(is_alive), "Method should exist");
break;
}
case DataLayout::bit_data_tag:
continue;
case DataLayout::no_tag:
case DataLayout::arg_info_data_tag:
return;
default:
fatal(err_msg("unexpected tag %d", dp->tag()));
}
}
#endif
}
void MethodData::clean_method_data(BoolObjectClosure* is_alive) {
for (ProfileData* data = first_data();
is_valid(data);
data = next_data(data)) {
data->clean_weak_klass_links(is_alive);
}
ParametersTypeData* parameters = parameters_type_data();
if (parameters != NULL) {
parameters->clean_weak_klass_links(is_alive);
}
clean_extra_data(is_alive);
verify_extra_data_clean(is_alive);
}

View File

@ -120,7 +120,8 @@ public:
arg_info_data_tag,
call_type_data_tag,
virtual_call_type_data_tag,
parameters_type_data_tag
parameters_type_data_tag,
speculative_trap_data_tag
};
enum {
@ -189,8 +190,11 @@ public:
void set_header(intptr_t value) {
_header._bits = value;
}
void release_set_header(intptr_t value) {
OrderAccess::release_store_ptr(&_header._bits, value);
bool atomic_set_header(intptr_t value) {
if (Atomic::cmpxchg_ptr(value, (volatile intptr_t*)&_header._bits, 0) == 0) {
return true;
}
return false;
}
intptr_t header() {
return _header._bits;
@ -271,6 +275,7 @@ class ArrayData;
class MultiBranchData;
class ArgInfoData;
class ParametersTypeData;
class SpeculativeTrapData;
// ProfileData
//
@ -291,6 +296,8 @@ private:
// This is a pointer to a section of profiling data.
DataLayout* _data;
char* print_data_on_helper(const MethodData* md) const;
protected:
DataLayout* data() { return _data; }
const DataLayout* data() const { return _data; }
@ -440,6 +447,7 @@ public:
virtual bool is_CallTypeData() const { return false; }
virtual bool is_VirtualCallTypeData()const { return false; }
virtual bool is_ParametersTypeData() const { return false; }
virtual bool is_SpeculativeTrapData()const { return false; }
BitData* as_BitData() const {
@ -494,6 +502,10 @@ public:
assert(is_ParametersTypeData(), "wrong type");
return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL;
}
SpeculativeTrapData* as_SpeculativeTrapData() const {
assert(is_SpeculativeTrapData(), "wrong type");
return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : NULL;
}
// Subclass specific initialization
@ -509,12 +521,14 @@ public:
// translation here, and the required translators are in the ci subclasses.
virtual void translate_from(const ProfileData* data) {}
virtual void print_data_on(outputStream* st) const {
virtual void print_data_on(outputStream* st, const char* extra = NULL) const {
ShouldNotReachHere();
}
void print_data_on(outputStream* st, const MethodData* md) const;
#ifndef PRODUCT
void print_shared(outputStream* st, const char* name) const;
void print_shared(outputStream* st, const char* name, const char* extra) const;
void tab(outputStream* st, bool first = false) const;
#endif
};
@ -576,7 +590,7 @@ public:
#endif // CC_INTERP
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -639,7 +653,7 @@ public:
#endif // CC_INTERP
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -726,7 +740,7 @@ public:
void post_initialize(BytecodeStream* stream, MethodData* mdo);
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -1137,7 +1151,7 @@ public:
}
#ifndef PRODUCT
virtual void print_data_on(outputStream* st) const;
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -1282,7 +1296,7 @@ public:
#ifndef PRODUCT
void print_receiver_data_on(outputStream* st) const;
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -1325,7 +1339,7 @@ public:
#endif // CC_INTERP
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -1451,7 +1465,7 @@ public:
}
#ifndef PRODUCT
virtual void print_data_on(outputStream* st) const;
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -1554,7 +1568,7 @@ public:
void post_initialize(BytecodeStream* stream, MethodData* mdo);
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -1632,7 +1646,7 @@ public:
void post_initialize(BytecodeStream* stream, MethodData* mdo);
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -1825,7 +1839,7 @@ public:
void post_initialize(BytecodeStream* stream, MethodData* mdo);
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -1852,7 +1866,7 @@ public:
}
#ifndef PRODUCT
void print_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -1913,7 +1927,7 @@ public:
}
#ifndef PRODUCT
virtual void print_data_on(outputStream* st) const;
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
static ByteSize stack_slot_offset(int i) {
@ -1925,6 +1939,54 @@ public:
}
};
// SpeculativeTrapData
//
// A SpeculativeTrapData is used to record traps due to type
// speculation. It records the root method of the compilation: the
// fact that type speculation is wrong in the context of one
// compilation (for method1) doesn't mean it's wrong in the context
// of another one (for method2). Type speculation could have
// more/different data in the context of the compilation of method2,
// so it's worthwhile to retry an optimization that failed for the
// compilation of method1 in the context of the compilation of
// method2.
// Space for SpeculativeTrapData entries is allocated from the extra
// data space in the MDO. If we run out of space, the trap data for
// the ProfileData at that bci is updated.
class SpeculativeTrapData : public ProfileData {
protected:
enum {
method_offset,
speculative_trap_cell_count
};
public:
SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
}
virtual bool is_SpeculativeTrapData() const { return true; }
static int static_cell_count() {
return speculative_trap_cell_count;
}
virtual int cell_count() const {
return static_cell_count();
}
// Direct accessor
Method* method() const {
return (Method*)intptr_at(method_offset);
}
void set_method(Method* m) {
set_intptr_at(method_offset, (intptr_t)m);
}
#ifndef PRODUCT
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
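// Editor's note: the 2-cell size relationship that bci_to_extra_data()
// asserts, restated as a compile-time check. This assumes the DataLayout
// header occupies exactly one cell, which is what makes a header-only
// BitData entry half the size of a header-plus-method SpeculativeTrapData
// entry. The constants below are hypothetical stand-ins for
// DataLayout::compute_size_in_bytes().
#include <cstddef>

constexpr std::size_t header_cells    = 1;  // assumption: one-cell header
constexpr std::size_t bit_data_cells  = header_cells + 0;  // no payload cells
constexpr std::size_t spec_trap_cells = header_cells + 1;  // + method cell
static_assert(2 * bit_data_cells == spec_trap_cells,
              "a SpeculativeTrapData entry is exactly two BitData entries");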
// MethodData*
//
// A MethodData* holds information which has been collected about
@ -1994,7 +2056,7 @@ public:
// Whole-method sticky bits and flags
enum {
_trap_hist_limit = 17, // decoupled from Deoptimization::Reason_LIMIT
_trap_hist_limit = 18, // decoupled from Deoptimization::Reason_LIMIT
_trap_hist_mask = max_jubyte,
_extra_data_count = 4 // extra DataLayout headers, for trap history
}; // Public flag values
@ -2049,6 +2111,7 @@ private:
// Helper for size computation
static int compute_data_size(BytecodeStream* stream);
static int bytecode_cell_count(Bytecodes::Code code);
static bool is_speculative_trap_bytecode(Bytecodes::Code code);
enum { no_profile_data = -1, variable_cell_count = -2 };
// Helper for initialization
@ -2092,8 +2155,9 @@ private:
// What is the index of the first data entry?
int first_di() const { return 0; }
ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp);
// Find or create an extra ProfileData:
ProfileData* bci_to_extra_data(int bci, bool create_if_missing);
ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
// return the argument info cell
ArgInfoData *arg_info();
@ -2116,6 +2180,10 @@ private:
static bool profile_parameters_jsr292_only();
static bool profile_all_parameters();
void clean_extra_data(BoolObjectClosure* is_alive);
void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
void verify_extra_data_clean(BoolObjectClosure* is_alive);
public:
static int header_size() {
return sizeof(MethodData)/wordSize;
@ -2124,7 +2192,7 @@ public:
// Compute the size of a MethodData* before it is created.
static int compute_allocation_size_in_bytes(methodHandle method);
static int compute_allocation_size_in_words(methodHandle method);
static int compute_extra_data_count(int data_size, int empty_bc_count);
static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);
// Determine if a given bytecode can have profile information.
static bool bytecode_has_profile(Bytecodes::Code code) {
@ -2265,9 +2333,26 @@ public:
ProfileData* bci_to_data(int bci);
// Same, but try to create an extra_data record if one is needed:
ProfileData* allocate_bci_to_data(int bci) {
ProfileData* data = bci_to_data(bci);
return (data != NULL) ? data : bci_to_extra_data(bci, true);
ProfileData* allocate_bci_to_data(int bci, Method* m) {
ProfileData* data = NULL;
// If m is not NULL, try to allocate a SpeculativeTrapData entry
if (m == NULL) {
data = bci_to_data(bci);
}
if (data != NULL) {
return data;
}
data = bci_to_extra_data(bci, m, true);
if (data != NULL) {
return data;
}
// If SpeculativeTrapData allocation fails try to allocate a
// regular entry
data = bci_to_data(bci);
if (data != NULL) {
return data;
}
return bci_to_extra_data(bci, NULL, true);
}
// Add a handful of extra data records, for trap tracking.
@ -2275,7 +2360,7 @@ public:
DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
int extra_data_size() const { return (address)extra_data_limit()
- (address)extra_data_base(); }
static DataLayout* next_extra(DataLayout* dp) { return (DataLayout*)((address)dp + in_bytes(DataLayout::cell_offset(0))); }
static DataLayout* next_extra(DataLayout* dp);
// Return (uint)-1 for overflow.
uint trap_count(int reason) const {
@ -2375,6 +2460,8 @@ public:
static bool profile_return();
static bool profile_parameters();
static bool profile_return_jsr292_only();
void clean_method_data(BoolObjectClosure* is_alive);
};
#endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
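// Editor's note on the next_extra() change above: the inline body computed a
// fixed one-cell step, which no longer works now that extra-data entries can
// be either one cell (BitData) or two (SpeculativeTrapData); the out-of-line
// replacement must step by the size of the current entry. A toy iterator over
// such a variable-size record stream, with hypothetical tags and sizes:
#include <cstdio>

enum Tag { kNoTag = 0, kBitData = 1, kSpecTrap = 2 };

// Cells occupied by the entry starting at a given tag cell.
int entry_cells(int tag) { return tag == kSpecTrap ? 2 : 1; }

int main() {
  // Tag cells, with kSpecTrap followed by its one payload (method) cell.
  int cells[] = {kBitData, kSpecTrap, /* method cell */ 0, kBitData, kNoTag};
  for (int i = 0; cells[i] != kNoTag; i += entry_cells(cells[i])) {
    std::printf("entry at cell %d, tag %d\n", i, cells[i]);
  }
}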

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -674,8 +674,8 @@ const char* ObjArrayKlass::internal_name() const {
// Verification
void ObjArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
ArrayKlass::verify_on(st, check_dictionary);
void ObjArrayKlass::verify_on(outputStream* st) {
ArrayKlass::verify_on(st);
guarantee(element_klass()->is_klass(), "should be klass");
guarantee(bottom_klass()->is_klass(), "should be klass");
Klass* bk = bottom_klass();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -151,7 +151,7 @@ class ObjArrayKlass : public ArrayKlass {
const char* internal_name() const;
// Verification
void verify_on(outputStream* st, bool check_dictionary);
void verify_on(outputStream* st);
void oop_verify_on(oop obj, outputStream* st);
};

View File

@ -90,9 +90,9 @@ public:
class CFGElement : public ResourceObj {
friend class VMStructs;
public:
float _freq; // Execution frequency (estimate)
double _freq; // Execution frequency (estimate)
CFGElement() : _freq(0.0f) {}
CFGElement() : _freq(0.0) {}
virtual bool is_block() { return false; }
virtual bool is_loop() { return false; }
Block* as_Block() { assert(is_block(), "must be block"); return (Block*)this; }
@ -202,7 +202,7 @@ public:
// BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
// It is currently also used to scale such frequencies relative to
// FreqCountInvocations, using the old default of 1500 as the baseline.
#define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)
#define BLOCK_FREQUENCY(f) ((f * (double) 1500) / FreqCountInvocations)
// Register Pressure (estimate) for Splitting heuristic
uint _reg_pressure;
@ -393,7 +393,7 @@ class PhaseCFG : public Phase {
CFGLoop* _root_loop;
// Outermost loop frequency
float _outer_loop_frequency;
double _outer_loop_frequency;
// Per node latency estimation, valid only during GCM
GrowableArray<uint>* _node_latency;
@ -508,7 +508,7 @@ class PhaseCFG : public Phase {
}
// Get the outermost loop frequency
float get_outer_loop_frequency() const {
double get_outer_loop_frequency() const {
return _outer_loop_frequency;
}
@ -656,13 +656,13 @@ public:
class BlockProbPair VALUE_OBJ_CLASS_SPEC {
protected:
Block* _target; // block target
float _prob; // probability of edge to block
double _prob; // probability of edge to block
public:
BlockProbPair() : _target(NULL), _prob(0.0) {}
BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}
BlockProbPair(Block* b, double p) : _target(b), _prob(p) {}
Block* get_target() const { return _target; }
float get_prob() const { return _prob; }
double get_prob() const { return _prob; }
};
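// Editor's note: the float -> double migration throughout this file suggests
// that accumulated frequencies were losing precision in single precision. A
// generic demonstration of that failure mode, unrelated to the actual block
// frequency computation:
#include <cstdio>

int main() {
  float f = 0.0f;
  double d = 0.0;
  for (int i = 0; i < 10000000; i++) {
    f += 1.0e-7f;  // float: rounding error accumulates over many tiny adds
    d += 1.0e-7;   // double: stays essentially exact at this scale
  }
  std::printf("float sum = %.7f, double sum = %.7f\n", f, d);  // both should be 1
}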
//------------------------------CFGLoop-------------------------------------------
@ -675,8 +675,8 @@ class CFGLoop : public CFGElement {
CFGLoop *_child; // first child, use child's sibling to visit all immediately nested loops
GrowableArray<CFGElement*> _members; // list of members of loop
GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
float _exit_prob; // probability any loop exit is taken on a single loop iteration
void update_succ_freq(Block* b, float freq);
double _exit_prob; // probability any loop exit is taken on a single loop iteration
void update_succ_freq(Block* b, double freq);
public:
CFGLoop(int id) :
@ -702,9 +702,9 @@ class CFGLoop : public CFGElement {
void compute_loop_depth(int depth);
void compute_freq(); // compute frequency with loop assuming head freq 1.0f
void scale_freq(); // scale frequency by loop trip count (including outer loops)
float outer_loop_freq() const; // frequency of outer loop
double outer_loop_freq() const; // frequency of outer loop
bool in_loop_nest(Block* b);
float trip_count() const { return 1.0f / _exit_prob; }
double trip_count() const { return 1.0 / _exit_prob; }
virtual bool is_loop() { return true; }
int id() { return _id; }
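// Editor's note: trip_count() above is the mean of a geometric distribution:
// if every iteration exits with probability p, the expected iteration count
// is 1/p. A quick numeric check of that identity, with p chosen arbitrarily:
#include <cstdio>

int main() {
  double p = 0.1;       // probability any loop exit is taken on one iteration
  double survive = 1.0; // probability of reaching iteration k at all
  double expected = 0.0;
  for (int k = 1; k < 1000; k++) {
    expected += k * survive * p;  // P(exactly k iterations) = (1-p)^(k-1) * p
    survive *= 1.0 - p;
  }
  std::printf("sum k*P(k) = %.4f vs 1/p = %.1f\n", expected, 1.0 / p);
}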
@ -723,7 +723,7 @@ class CFGEdge : public ResourceObj {
private:
Block * _from; // Source basic block
Block * _to; // Destination basic block
float _freq; // Execution frequency (estimate)
double _freq; // Execution frequency (estimate)
int _state;
bool _infrequent;
int _from_pct;
@ -742,13 +742,13 @@ class CFGEdge : public ResourceObj {
interior // edge is interior to trace (could be backedge)
};
CFGEdge(Block *from, Block *to, float freq, int from_pct, int to_pct) :
CFGEdge(Block *from, Block *to, double freq, int from_pct, int to_pct) :
_from(from), _to(to), _freq(freq),
_from_pct(from_pct), _to_pct(to_pct), _state(open) {
_infrequent = from_infrequent() || to_infrequent();
}
float freq() const { return _freq; }
double freq() const { return _freq; }
Block* from() const { return _from; }
Block* to () const { return _to; }
int infrequent() const { return _infrequent; }

View File

@ -644,7 +644,7 @@
diagnostic(bool, OptimizeExpensiveOps, true, \
"Find best control for expensive operations") \
\
experimental(bool, UseMathExactIntrinsics, false, \
product(bool, UseMathExactIntrinsics, true, \
"Enables intrinsification of various java.lang.Math functions") \
\
experimental(bool, ReplaceInParentMaps, false, \

View File

@ -210,7 +210,7 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
{
NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )
_high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
_high_frequency_lrg = MIN2(double(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
// Build a list of basic blocks, sorted by frequency
_blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
@ -1799,7 +1799,7 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
Block *phi_block = _cfg.get_block_for_node(phi);
if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
Node *spill = new (C) MachSpillCopyNode(MachSpillCopyNode::LoopPhiInput, phi, *mask, *mask);
insert_proj( phi_block, 1, spill, maxlrg++ );
n->set_req(1,spill);
must_recompute_live = true;

View File

@ -34,10 +34,9 @@
#include "opto/phase.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/machnode.hpp"
class LoopTree;
class MachCallNode;
class MachSafePointNode;
class Matcher;
class PhaseCFG;
class PhaseLive;
@ -424,8 +423,8 @@ class PhaseChaitin : public PhaseRegAlloc {
uint _simplified; // Linked list head of simplified LRGs
// Helper functions for Split()
uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
uint split_DEF(Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
uint split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
//------------------------------clone_projs------------------------------------
// After cloning some rematerialized instruction, clone any MachProj's that
@ -447,7 +446,7 @@ class PhaseChaitin : public PhaseRegAlloc {
int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
// True if lidx is used before any real register is def'd in the block
bool prompt_use( Block *b, uint lidx );
Node *get_spillcopy_wide( Node *def, Node *use, uint uidx );
Node *get_spillcopy_wide(MachSpillCopyNode::SpillType spill_type, Node *def, Node *use, uint uidx );
// Insert the spill at chosen location. Skip over any intervening Proj's or
// Phis. Skip over a CatchNode and projs, inserting in the fall-through block
// instead. Update high-pressure indices. Create a new live range.
@ -501,8 +500,9 @@ private:
// Used for aggressive coalescing.
void build_ifg_virtual( );
// Used when computing the register pressure for each block in the CFG. This
// is done during IFG creation.
class Pressure {
public:
// keeps track of the register pressure at the current
// instruction (used when stepping backwards in the block)
uint _current_pressure;
@ -518,6 +518,7 @@ private:
// number of live ranges that constitute high register pressure
const uint _high_pressure_limit;
public:
// lower the register pressure and look for a low to high pressure
// transition
@ -525,9 +526,6 @@ private:
_current_pressure -= lrg.reg_pressure();
if (_current_pressure == _high_pressure_limit) {
_high_pressure_index = location;
if (_current_pressure > _final_pressure) {
_final_pressure = _current_pressure + 1;
}
}
}
@ -540,6 +538,45 @@ private:
}
}
uint high_pressure_index() const {
return _high_pressure_index;
}
uint final_pressure() const {
return _final_pressure;
}
uint current_pressure() const {
return _current_pressure;
}
uint high_pressure_limit() const {
return _high_pressure_limit;
}
void lower_high_pressure_index() {
_high_pressure_index--;
}
void set_high_pressure_index_to_block_start() {
_high_pressure_index = 0;
}
void check_pressure_at_fatproj(uint fatproj_location, RegMask& fatproj_mask) {
// this pressure is only valid at this instruction, i.e. we don't need to lower
// the register pressure since the fat proj was never live before (going backwards)
uint new_pressure = current_pressure() + fatproj_mask.Size();
if (new_pressure > final_pressure()) {
_final_pressure = new_pressure;
}
// if we were at a low pressure and the fat proj now puts us at high pressure, record the fat proj location
// as coming from a low to high (to low again) transition
if (current_pressure() <= high_pressure_limit() && new_pressure > high_pressure_limit()) {
_high_pressure_index = fatproj_location;
}
}
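// Editor's note: a stripped-down model of the bookkeeping this Pressure class
// performs while the register allocator walks a block backwards. Names are
// hypothetical; it shows only the two rules visible above: track the maximum
// ("final") pressure, and remember the location where pressure first crossed
// the high-pressure limit.
#include <cstdio>

struct ToyPressure {
  unsigned current = 0, final_p = 0, high_index = 0;
  const unsigned limit;
  explicit ToyPressure(unsigned lim) : limit(lim) {}
  void raise(unsigned location, unsigned amount) {
    unsigned before = current;
    current += amount;
    if (current > final_p) final_p = current;
    // a low-to-high transition records where the high-pressure region begins
    if (before <= limit && current > limit) high_index = location;
  }
};

int main() {
  ToyPressure p(4);
  p.raise(10, 3);  // pressure 3: still at or below the limit
  p.raise(7, 3);   // pressure 6: crossed the limit, high_index becomes 7
  std::printf("final=%u high_index=%u\n", p.final_p, p.high_index);  // 6, 7
}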
Pressure(uint high_pressure_index, uint high_pressure_limit)
: _current_pressure(0)
, _high_pressure_index(high_pressure_index)

View File

@ -29,8 +29,6 @@ macro(AbsD)
macro(AbsF)
macro(AbsI)
macro(AddD)
macro(AddExactI)
macro(AddExactL)
macro(AddF)
macro(AddI)
macro(AddL)
@ -135,7 +133,6 @@ macro(EncodePKlass)
macro(ExpD)
macro(FastLock)
macro(FastUnlock)
macro(FlagsProj)
macro(Goto)
macro(Halt)
macro(If)
@ -170,9 +167,6 @@ macro(Loop)
macro(LoopLimit)
macro(Mach)
macro(MachProj)
macro(MathExact)
macro(MathExactI)
macro(MathExactL)
macro(MaxI)
macro(MemBarAcquire)
macro(LoadFence)
@ -194,22 +188,24 @@ macro(MoveF2I)
macro(MoveL2D)
macro(MoveD2L)
macro(MulD)
macro(MulExactI)
macro(MulExactL)
macro(MulF)
macro(MulHiL)
macro(MulI)
macro(MulL)
macro(Multi)
macro(NegD)
macro(NegExactI)
macro(NegExactL)
macro(NegF)
macro(NeverBranch)
macro(Opaque1)
macro(Opaque2)
macro(OrI)
macro(OrL)
macro(OverflowAddI)
macro(OverflowSubI)
macro(OverflowMulI)
macro(OverflowAddL)
macro(OverflowSubL)
macro(OverflowMulL)
macro(PCTable)
macro(Parm)
macro(PartialSubtypeCheck)
@ -253,8 +249,6 @@ macro(StrComp)
macro(StrEquals)
macro(StrIndexOf)
macro(SubD)
macro(SubExactI)
macro(SubExactL)
macro(SubF)
macro(SubI)
macro(SubL)

View File

@ -291,7 +291,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
_phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map);
} else {
const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
copy = new (C) MachSpillCopyNode(m, *rm, *rm);
copy = new (C) MachSpillCopyNode(MachSpillCopyNode::PhiInput, m, *rm, *rm);
// Find a good place to insert. Kinda tricky, use a subroutine
insert_copy_with_overlap(pred,copy,phi_name,src_name);
}
@ -325,7 +325,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
} else {
const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
copy = new (C) MachSpillCopyNode(m, *rm, *rm);
copy = new (C) MachSpillCopyNode(MachSpillCopyNode::TwoAddress, m, *rm, *rm);
// Insert the copy in the basic block, just before us
b->insert_node(copy, l++);
}
@ -372,7 +372,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
continue; // Live out; do not pre-split
// Split the lrg at this use
const RegMask *rm = C->matcher()->idealreg2spillmask[inp->ideal_reg()];
Node *copy = new (C) MachSpillCopyNode( inp, *rm, *rm );
Node* copy = new (C) MachSpillCopyNode(MachSpillCopyNode::DebugUse, inp, *rm, *rm);
// Insert the copy in the use-def chain
n->set_req(inpidx, copy );
// Insert the copy in the basic block, just before us

View File

@ -3028,42 +3028,6 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
n->set_req(MemBarNode::Precedent, top());
}
break;
// Must set a control edge on all nodes that produce a FlagsProj
// so they can't escape the block that consumes the flags.
// Must also set the non-throwing branch as the control
// for all nodes that depend on the result, unless the node
// already has a control that isn't the control of the
// flag producer
case Op_FlagsProj:
{
MathExactNode* math = (MathExactNode*) n->in(0);
Node* ctrl = math->control_node();
Node* non_throwing = math->non_throwing_branch();
math->set_req(0, ctrl);
Node* result = math->result_node();
if (result != NULL) {
for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
Node* out = result->fast_out(j);
// Phi nodes shouldn't be moved. They would only match below if they
// had the same control as the MathExactNode. The only time that
// would happen is if the Phi is also an input to the MathExact
//
// Cmp nodes shouldn't have control set at all.
if (out->is_Phi() ||
out->is_Cmp()) {
continue;
}
if (out->in(0) == NULL) {
out->set_req(0, non_throwing);
} else if (out->in(0) == ctrl) {
out->set_req(0, non_throwing);
}
}
}
}
break;
default:
assert( !n->is_Call(), "" );
assert( !n->is_Mem(), "" );
@ -3285,7 +3249,8 @@ bool Compile::too_many_traps(ciMethod* method,
// because of a transient condition during start-up in the interpreter.
return false;
}
if (md->has_trap_at(bci, reason) != 0) {
ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
if (md->has_trap_at(bci, m, reason) != 0) {
// Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
// Also, if there are multiple reasons, or if there is no per-BCI record,
// assume the worst.
@ -3303,7 +3268,7 @@ bool Compile::too_many_traps(ciMethod* method,
// Less-accurate variant which does not require a method and bci.
bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
ciMethodData* logmd) {
if (trap_count(reason) >= (uint)PerMethodTrapLimit) {
if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
// Too many traps globally.
// Note that we use cumulative trap_count, not just md->trap_count.
if (log()) {
@ -3338,10 +3303,11 @@ bool Compile::too_many_recompiles(ciMethod* method,
uint m_cutoff = (uint) PerMethodRecompilationCutoff / 2 + 1; // not zero
Deoptimization::DeoptReason per_bc_reason
= Deoptimization::reason_recorded_per_bytecode_if_any(reason);
ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
if ((per_bc_reason == Deoptimization::Reason_none
|| md->has_trap_at(bci, reason) != 0)
|| md->has_trap_at(bci, m, reason) != 0)
// The trap frequency measure we care about is the recompile count:
&& md->trap_recompiled_at(bci)
&& md->trap_recompiled_at(bci, m)
&& md->overflow_recompile_count() >= bc_cutoff) {
// Do not emit a trap here if it has already caused recompilations.
// Also, if there are multiple reasons, or if there is no per-BCI record,

View File

@ -250,7 +250,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
CallGenerator* miss_cg;
Deoptimization::DeoptReason reason = morphism == 2 ?
Deoptimization::Reason_bimorphic :
Deoptimization::Reason_class_check;
(speculative_receiver_type == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check);
if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
!too_many_traps(jvms->method(), jvms->bci(), reason)
) {

View File

@ -1661,10 +1661,10 @@ void CFGLoop::compute_freq() {
}
assert (_members.length() > 0, "no empty loops");
Block* hd = head();
hd->_freq = 1.0f;
hd->_freq = 1.0;
for (int i = 0; i < _members.length(); i++) {
CFGElement* s = _members.at(i);
float freq = s->_freq;
double freq = s->_freq;
if (s->is_block()) {
Block* b = s->as_Block();
for (uint j = 0; j < b->_num_succs; j++) {
@ -1676,7 +1676,7 @@ void CFGLoop::compute_freq() {
assert(lp->_parent == this, "immediate child");
for (int k = 0; k < lp->_exits.length(); k++) {
Block* eb = lp->_exits.at(k).get_target();
float prob = lp->_exits.at(k).get_prob();
double prob = lp->_exits.at(k).get_prob();
update_succ_freq(eb, freq * prob);
}
}
@ -1688,7 +1688,7 @@ void CFGLoop::compute_freq() {
// inner blocks do not get erroneously scaled.
if (_depth != 0) {
// Total the exit probabilities for this loop.
float exits_sum = 0.0f;
double exits_sum = 0.0f;
for (int i = 0; i < _exits.length(); i++) {
exits_sum += _exits.at(i).get_prob();
}
@ -1935,7 +1935,7 @@ void Block::update_uncommon_branch(Block* ub) {
//------------------------------update_succ_freq-------------------------------
// Update the appropriate frequency associated with block 'b', a successor of
// a block in this loop.
void CFGLoop::update_succ_freq(Block* b, float freq) {
void CFGLoop::update_succ_freq(Block* b, double freq) {
if (b->_loop == this) {
if (b == head()) {
// back branch within the loop
@ -1976,11 +1976,11 @@ bool CFGLoop::in_loop_nest(Block* b) {
// Scale frequency of loops and blocks by trip counts from outer loops
// Do a top down traversal of loop tree (visit outer loops first.)
void CFGLoop::scale_freq() {
float loop_freq = _freq * trip_count();
double loop_freq = _freq * trip_count();
_freq = loop_freq;
for (int i = 0; i < _members.length(); i++) {
CFGElement* s = _members.at(i);
float block_freq = s->_freq * loop_freq;
double block_freq = s->_freq * loop_freq;
if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
block_freq = MIN_BLOCK_FREQUENCY;
s->_freq = block_freq;
@ -1993,7 +1993,7 @@ void CFGLoop::scale_freq() {
}
// Frequency of outer loop
float CFGLoop::outer_loop_freq() const {
double CFGLoop::outer_loop_freq() const {
if (_child != NULL) {
return _child->_freq;
}
@ -2042,7 +2042,7 @@ void CFGLoop::dump() const {
k = 0;
}
Block *blk = _exits.at(i).get_target();
float prob = _exits.at(i).get_prob();
double prob = _exits.at(i).get_prob();
tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
}
tty->print("\n");

View File

@ -612,9 +612,10 @@ void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
// Usual case: Bail to interpreter.
// Reserve the right to recompile if we haven't seen anything yet.
assert(!Deoptimization::reason_is_speculate(reason), "unsupported");
Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
if (treat_throw_as_hot
&& (method()->method_data()->trap_recompiled_at(bci())
&& (method()->method_data()->trap_recompiled_at(bci(), NULL)
|| C->too_many_traps(reason))) {
// We cannot afford to take more traps here. Suffer in the interpreter.
if (C->log() != NULL)
@ -2145,7 +2146,7 @@ Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
*
* @param n receiver node
*
* @return node with improved type
* @return node with improved type
*/
Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
if (!UseTypeSpeculation) {
@ -2739,12 +2740,14 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
// Subsequent type checks will always fold up.
Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
ciKlass* require_klass,
ciKlass* spec_klass,
ciKlass* spec_klass,
bool safe_for_replace) {
if (!UseTypeProfile || !TypeProfileCasts) return NULL;
Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;
// Make sure we haven't already deoptimized from this tactic.
if (too_many_traps(Deoptimization::Reason_class_check))
if (too_many_traps(reason))
return NULL;
// (No, this isn't a call, but it's enough like a virtual call
@ -2766,7 +2769,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
&exact_obj);
{ PreserveJVMState pjvms(this);
set_control(slow_ctl);
uncommon_trap(Deoptimization::Reason_class_check,
uncommon_trap(reason,
Deoptimization::Action_maybe_recompile);
}
if (safe_for_replace) {
@ -2793,8 +2796,10 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
bool not_null) {
// type == NULL if profiling tells us this object is always null
if (type != NULL) {
if (!too_many_traps(Deoptimization::Reason_null_check) &&
!too_many_traps(Deoptimization::Reason_class_check)) {
Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check;
if (!too_many_traps(null_reason) &&
!too_many_traps(class_reason)) {
Node* not_null_obj = NULL;
// not_null is true if we know the object is not null and
// there's no need for a null check
@ -2813,7 +2818,7 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
{
PreserveJVMState pjvms(this);
set_control(slow_ctl);
uncommon_trap(Deoptimization::Reason_class_check,
uncommon_trap(class_reason,
Deoptimization::Action_maybe_recompile);
}
replace_in_map(not_null_obj, exact_obj);
@ -2882,7 +2887,7 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replac
}
if (known_statically && UseTypeSpeculation) {
// If we know the type check always succeed then we don't use the
// If we know the type check always succeeds then we don't use the
// profiling data at this bytecode. Don't lose it, feed it to the
// type system as a speculative type.
not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);

View File

@ -406,7 +406,7 @@ class GraphKit : public Phase {
// Use the type profile to narrow an object type.
Node* maybe_cast_profiled_receiver(Node* not_null_obj,
ciKlass* require_klass,
ciKlass* spec,
ciKlass* spec,
bool safe_for_replace);
// Cast obj to type and emit guard unless we had too many traps here already

View File

@ -439,8 +439,8 @@ void PhaseChaitin::lower_pressure(Block* b, uint location, LRG& lrg, IndexSet* l
}
}
}
assert(int_pressure._current_pressure == count_int_pressure(liveout), "the int pressure is incorrect");
assert(float_pressure._current_pressure == count_float_pressure(liveout), "the float pressure is incorrect");
assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
}
/* Go to the first non-phi index in a block */
@ -513,8 +513,8 @@ void PhaseChaitin::compute_initial_block_pressure(Block* b, IndexSet* liveout, P
raise_pressure(b, lrg, int_pressure, float_pressure);
lid = elements.next();
}
assert(int_pressure._current_pressure == count_int_pressure(liveout), "the int pressure is incorrect");
assert(float_pressure._current_pressure == count_float_pressure(liveout), "the float pressure is incorrect");
assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
}
/*
@ -548,17 +548,7 @@ bool PhaseChaitin::remove_node_if_not_used(Block* b, uint location, Node* n, uin
void PhaseChaitin::check_for_high_pressure_transition_at_fatproj(uint& block_reg_pressure, uint location, LRG& lrg, Pressure& pressure, const int op_regtype) {
RegMask mask_tmp = lrg.mask();
mask_tmp.AND(*Matcher::idealreg2regmask[op_regtype]);
// this pressure is only valid at this instruction, i.e. we don't need to lower
// the register pressure since the fat proj was never live before (going backwards)
uint new_pressure = pressure._current_pressure + mask_tmp.Size();
if (new_pressure > pressure._final_pressure) {
pressure._final_pressure = new_pressure;
}
// if we were at a low pressure and now the fat proj is at high pressure, record the fat proj location
// as coming from a low to high (to low again)
if (pressure._current_pressure <= pressure._high_pressure_limit && new_pressure > pressure._high_pressure_limit) {
pressure._high_pressure_index = location;
}
pressure.check_pressure_at_fatproj(location, mask_tmp);
}
/*
@ -700,23 +690,23 @@ void PhaseChaitin::add_input_to_liveout(Block* b, Node* n, IndexSet* liveout, do
// Newly live things assumed live from here to top of block
lrg._area += cost;
raise_pressure(b, lrg, int_pressure, float_pressure);
assert(int_pressure._current_pressure == count_int_pressure(liveout), "the int pressure is incorrect");
assert(float_pressure._current_pressure == count_float_pressure(liveout), "the float pressure is incorrect");
assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
}
assert(!(lrg._area < 0.0), "negative spill area" );
assert(lrg._area >= 0.0, "negative spill area" );
}
}
/*
* If we run off the top of the block with high pressure just record that the
* whole block is high pressure. (Even though we might have a transition
* lower down in the block)
* later down in the block)
*/
void PhaseChaitin::check_for_high_pressure_block(Pressure& pressure) {
// current pressure now means the pressure before the first instruction in the block
// (since we have stepped through all instructions backwards)
if (pressure._current_pressure > pressure._high_pressure_limit) {
pressure._high_pressure_index = 0;
if (pressure.current_pressure() > pressure.high_pressure_limit()) {
pressure.set_high_pressure_index_to_block_start();
}
}
@ -725,7 +715,7 @@ void PhaseChaitin::check_for_high_pressure_block(Pressure& pressure) {
* and set the high pressure index for the block
*/
void PhaseChaitin::adjust_high_pressure_index(Block* b, uint& block_hrp_index, Pressure& pressure) {
uint i = pressure._high_pressure_index;
uint i = pressure.high_pressure_index();
if (i < b->number_of_nodes() && i < b->end_idx() + 1) {
Node* cur = b->get_node(i);
while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
@ -772,7 +762,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
int inst_count = last_inst - first_inst;
double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
assert(!(cost < 0.0), "negative spill cost" );
assert(cost >= 0.0, "negative spill cost" );
compute_initial_block_pressure(block, &liveout, int_pressure, float_pressure, cost);
@ -789,8 +779,8 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if (!liveout.member(lid) && n->Opcode() != Op_SafePoint) {
if (remove_node_if_not_used(block, location, n, lid, &liveout)) {
float_pressure._high_pressure_index--;
int_pressure._high_pressure_index--;
float_pressure.lower_high_pressure_index();
int_pressure.lower_high_pressure_index();
continue;
}
if (lrg._fat_proj) {
@ -799,7 +789,11 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
}
} else {
// A live range ends at its definition, remove the remaining area.
lrg._area -= cost;
// If the cost is +Inf (which might happen in extreme cases), the lrg area will also be +Inf,
// and +Inf - +Inf = NaN. So let's not do that subtraction.
if (g_isfinite(cost)) {
lrg._area -= cost;
}
assert(lrg._area >= 0.0, "negative spill area" );
assign_high_score_to_immediate_copies(block, n, lrg, location + 1, last_inst);
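// Editor's note: a two-line demonstration of the NaN hazard the g_isfinite()
// guard above avoids. Subtracting infinity from infinity yields NaN, and NaN
// fails every comparison, including the non-negative spill-area assert.
#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  double area = std::numeric_limits<double>::infinity();
  double cost = std::numeric_limits<double>::infinity();
  double bad = area - cost;  // inf - inf == NaN
  std::printf("isnan=%d, (bad >= 0.0)=%d\n", std::isnan(bad), bad >= 0.0);
  if (std::isfinite(cost)) {
    area -= cost;            // the guarded subtraction: skipped here
  }
  std::printf("area is still +inf: %d\n", std::isinf(area));
}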
@ -837,13 +831,13 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
adjust_high_pressure_index(block, block->_ihrp_index, int_pressure);
adjust_high_pressure_index(block, block->_fhrp_index, float_pressure);
// set the final_pressure as the register pressure for the block
block->_reg_pressure = int_pressure._final_pressure;
block->_freg_pressure = float_pressure._final_pressure;
block->_reg_pressure = int_pressure.final_pressure();
block->_freg_pressure = float_pressure.final_pressure();
#ifndef PRODUCT
// Gather Register Pressure Statistics
if (PrintOptoStatistics) {
if (block->_reg_pressure > int_pressure._high_pressure_limit || block->_freg_pressure > float_pressure._high_pressure_limit) {
if (block->_reg_pressure > int_pressure.high_pressure_limit() || block->_freg_pressure > float_pressure.high_pressure_limit()) {
_high_pressure++;
} else {
_low_pressure++;

View File

@ -76,7 +76,6 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
if( !i1->is_Bool() ) return NULL;
BoolNode *b = i1->as_Bool();
Node *cmp = b->in(1);
if( cmp->is_FlagsProj() ) return NULL;
if( !cmp->is_Cmp() ) return NULL;
i1 = cmp->in(1);
if( i1 == NULL || !i1->is_Phi() ) return NULL;

View File

@ -520,13 +520,6 @@ Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &re
break;
}
// For nodes that produce a FlagsProj, make the node adjacent to the
// use of the FlagsProj
if (use->is_FlagsProj() && get_block_for_node(use) == block) {
found_machif = true;
break;
}
// More than this instruction pending for successor to be ready,
// don't choose this if other opportunities are ready
if (ready_cnt.at(use->_idx) > 1)

View File

@ -203,7 +203,9 @@ class LibraryCallKit : public GraphKit {
bool inline_math_native(vmIntrinsics::ID id);
bool inline_trig(vmIntrinsics::ID id);
bool inline_math(vmIntrinsics::ID id);
void inline_math_mathExact(Node* math);
template <typename OverflowOp>
bool inline_math_overflow(Node* arg1, Node* arg2);
void inline_math_mathExact(Node* math, Node* test);
bool inline_math_addExactI(bool is_increment);
bool inline_math_addExactL(bool is_increment);
bool inline_math_multiplyExactI();
@ -517,31 +519,31 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
case vmIntrinsics::_incrementExactI:
case vmIntrinsics::_addExactI:
if (!Matcher::match_rule_supported(Op_AddExactI) || !UseMathExactIntrinsics) return NULL;
if (!Matcher::match_rule_supported(Op_OverflowAddI) || !UseMathExactIntrinsics) return NULL;
break;
case vmIntrinsics::_incrementExactL:
case vmIntrinsics::_addExactL:
if (!Matcher::match_rule_supported(Op_AddExactL) || !UseMathExactIntrinsics) return NULL;
if (!Matcher::match_rule_supported(Op_OverflowAddL) || !UseMathExactIntrinsics) return NULL;
break;
case vmIntrinsics::_decrementExactI:
case vmIntrinsics::_subtractExactI:
if (!Matcher::match_rule_supported(Op_SubExactI) || !UseMathExactIntrinsics) return NULL;
if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
break;
case vmIntrinsics::_decrementExactL:
case vmIntrinsics::_subtractExactL:
if (!Matcher::match_rule_supported(Op_SubExactL) || !UseMathExactIntrinsics) return NULL;
if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
break;
case vmIntrinsics::_negateExactI:
if (!Matcher::match_rule_supported(Op_NegExactI) || !UseMathExactIntrinsics) return NULL;
if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
break;
case vmIntrinsics::_negateExactL:
if (!Matcher::match_rule_supported(Op_NegExactL) || !UseMathExactIntrinsics) return NULL;
if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
break;
case vmIntrinsics::_multiplyExactI:
if (!Matcher::match_rule_supported(Op_MulExactI) || !UseMathExactIntrinsics) return NULL;
if (!Matcher::match_rule_supported(Op_OverflowMulI) || !UseMathExactIntrinsics) return NULL;
break;
case vmIntrinsics::_multiplyExactL:
if (!Matcher::match_rule_supported(Op_MulExactL) || !UseMathExactIntrinsics) return NULL;
if (!Matcher::match_rule_supported(Op_OverflowMulL) || !UseMathExactIntrinsics) return NULL;
break;
default:
@ -1937,7 +1939,7 @@ bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
// These intrinsics are supported on all hardware
case vmIntrinsics::_dsqrt: return Matcher::has_match_rule(Op_SqrtD) ? inline_math(id) : false;
case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_math(id) : false;
case vmIntrinsics::_dexp: return Matcher::has_match_rule(Op_ExpD) ? inline_exp() :
@ -1970,18 +1972,8 @@ bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
return true;
}
void LibraryCallKit::inline_math_mathExact(Node* math) {
// If we didn't get the expected opcode it means we have optimized
// the node to something else and don't need the exception edge.
if (!math->is_MathExact()) {
set_result(math);
return;
}
Node* result = _gvn.transform( new(C) ProjNode(math, MathExactNode::result_proj_node));
Node* flags = _gvn.transform( new(C) FlagsProjNode(math, MathExactNode::flags_proj_node));
Node* bol = _gvn.transform( new (C) BoolNode(flags, BoolTest::overflow) );
void LibraryCallKit::inline_math_mathExact(Node* math, Node *test) {
Node* bol = _gvn.transform( new (C) BoolNode(test, BoolTest::overflow) );
IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
Node* fast_path = _gvn.transform( new (C) IfFalseNode(check));
Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) );
@ -1999,108 +1991,50 @@ void LibraryCallKit::inline_math_mathExact(Node* math) {
}
set_control(fast_path);
set_result(result);
set_result(math);
}
template <typename OverflowOp>
bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
typedef typename OverflowOp::MathOp MathOp;
MathOp* mathOp = new(C) MathOp(arg1, arg2);
Node* operation = _gvn.transform( mathOp );
Node* ofcheck = _gvn.transform( new(C) OverflowOp(arg1, arg2) );
inline_math_mathExact(operation, ofcheck);
return true;
}
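// Editor's note: a self-contained model of the template pattern above, where
// each overflow-check node carries its math node as a nested MathOp typedef.
// AddI/OverflowAddI/exact are hypothetical; only the pairing mechanism and
// the fast-path/overflow-path split mirror inline_math_overflow().
#include <climits>
#include <cstdio>

struct AddI {
  static int apply(int a, int b) { return a + b; }
};

struct OverflowAddI {
  using MathOp = AddI;  // mirrors typename OverflowOp::MathOp
  static bool check(int a, int b) {
    // would a + b overflow 32-bit signed arithmetic?
    return (b > 0 && a > INT_MAX - b) || (b < 0 && a < INT_MIN - b);
  }
};

template <typename OverflowOp>
int exact(int a, int b, bool& overflowed) {
  overflowed = OverflowOp::check(a, b);  // the uncommon-trap (slow) path test
  return overflowed ? 0 : OverflowOp::MathOp::apply(a, b);  // the fast path
}

int main() {
  bool ov = false;
  int r = exact<OverflowAddI>(INT_MAX, 1, ov);
  std::printf("overflowed=%d result=%d\n", ov, r);  // overflowed=1
}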
bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
Node* arg1 = argument(0);
Node* arg2 = NULL;
if (is_increment) {
arg2 = intcon(1);
} else {
arg2 = argument(1);
}
Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) );
inline_math_mathExact(add);
return true;
return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
}
bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
Node* arg1 = argument(0); // type long
// argument(1) == TOP
Node* arg2 = NULL;
if (is_increment) {
arg2 = longcon(1);
} else {
arg2 = argument(2); // type long
// argument(3) == TOP
}
Node* add = _gvn.transform(new(C) AddExactLNode(NULL, arg1, arg2));
inline_math_mathExact(add);
return true;
return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
}
bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
Node* arg1 = argument(0);
Node* arg2 = NULL;
if (is_decrement) {
arg2 = intcon(1);
} else {
arg2 = argument(1);
}
Node* sub = _gvn.transform(new(C) SubExactINode(NULL, arg1, arg2));
inline_math_mathExact(sub);
return true;
return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
}
bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
Node* arg1 = argument(0); // type long
// argument(1) == TOP
Node* arg2 = NULL;
if (is_decrement) {
arg2 = longcon(1);
} else {
arg2 = argument(2); // type long
// argument(3) == TOP
}
Node* sub = _gvn.transform(new(C) SubExactLNode(NULL, arg1, arg2));
inline_math_mathExact(sub);
return true;
return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
}
bool LibraryCallKit::inline_math_negateExactI() {
Node* arg1 = argument(0);
Node* neg = _gvn.transform(new(C) NegExactINode(NULL, arg1));
inline_math_mathExact(neg);
return true;
return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
}
bool LibraryCallKit::inline_math_negateExactL() {
Node* arg1 = argument(0);
// argument(1) == TOP
Node* neg = _gvn.transform(new(C) NegExactLNode(NULL, arg1));
inline_math_mathExact(neg);
return true;
return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
}
bool LibraryCallKit::inline_math_multiplyExactI() {
Node* arg1 = argument(0);
Node* arg2 = argument(1);
Node* mul = _gvn.transform(new(C) MulExactINode(NULL, arg1, arg2));
inline_math_mathExact(mul);
return true;
return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
}
bool LibraryCallKit::inline_math_multiplyExactL() {
Node* arg1 = argument(0);
// argument(1) == TOP
Node* arg2 = argument(2);
// argument(3) == TOP
Node* mul = _gvn.transform(new(C) MulExactLNode(NULL, arg1, arg2));
inline_math_mathExact(mul);
return true;
return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
}
Node*

Some files were not shown because too many files have changed in this diff