Merge
commit abfbf29b4d
@@ -252,3 +252,4 @@ d0b525cd31b87abeb6d5b7e3516953eeb13b323c jdk9-b06
0ea015c298b201c07fa33990f2445b6d0ef3566d jdk9-b07
db045d8faa0924b7378102d24a1a0d850c1e3834 jdk9-b08
4a21dc7d57d1069a01f68e7182c074cb37349dfb jdk9-b09
fa13f2b926f8426876ec03e7903f3ee0ee150f2e jdk9-b10
@@ -145,7 +145,7 @@
root repository:
<blockquote>
<code>
hg clone http://hg.openjdk.java.net/jdk8/jdk8
hg clone http://hg.openjdk.java.net/jdk9/jdk9
<i>YourOpenJDK</i>
<br>
cd <i>YourOpenJDK</i>
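For orientation, the clone command being updated above fetches only the forest root; the nested repositories are then pulled in with the helper scripts this commit also touches. A minimal sketch, assuming the stock get_source.sh wrapper around common/bin/hgforest.sh (paths illustrative):

    hg clone http://hg.openjdk.java.net/jdk9/jdk9 YourOpenJDK
    cd YourOpenJDK
    bash ./get_source.sh   # clones the nested repos (corba, hotspot, jdk, ...) in parallel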
@@ -373,18 +373,17 @@
particular update level.
<br> <br>

<b><i>Building JDK 8 requires use of a version
of JDK 7 that is at Update 7 or newer. JDK 8
developers should not use JDK 8 as the boot
JDK, to ensure that JDK 8 dependencies are
<b><i>Building JDK 9 requires JDK 8. JDK 9
developers should not use JDK 9 as the boot
JDK, to ensure that JDK 9 dependencies are
not introduced into the parts of the system
that are built with JDK 7.</i></b>
that are built with JDK 8.</i></b>

<br> <br>
The JDK 7 binaries can be downloaded from Oracle's
The JDK 8 binaries can be downloaded from Oracle's
<a href="http://www.oracle.com/technetwork/java/javase/downloads/index.html"
target="_blank">JDK 7 download site</a>.
For build performance reasons
target="_blank">JDK 8 download site</a>.
For build performance reasons it
is very important that this bootstrap JDK be made available
on the local disk of the machine doing the build.
You should add its <code>bin</code> directory
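To make the new requirement concrete, a hedged sketch of pointing configure at a local JDK 8 boot JDK (the install path is illustrative; any locally installed JDK 8 works):

    bash ./configure --with-boot-jdk=/usr/lib/jvm/jdk1.8.0
    make images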
@@ -1454,9 +1453,7 @@
<p>One of the top goals of the new build system is to improve the
build performance and decrease the time needed to build. This will
soon also apply to the java compilation when the Smart Javac wrapper
is making its way into jdk8. It can be tried in the build-infra
repository already. You are likely to find that the new build system
is faster than the old one even without this feature.</p>
is fully supported.</p>

<p>At the end of a successful execution of <code>configure</code>,
you will get a performance summary,
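The Smart Javac wrapper mentioned above is the sjavac server that also shows up later in this changeset as ENABLE_SJAVAC in spec.gmk.in. A minimal sketch of switching it on at configure time (treat the flag as illustrative of the mechanism; availability depends on the release):

    bash ./configure --enable-sjavac
    make images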
@@ -82,10 +82,10 @@ AC_DEFUN([BOOTJDK_DO_CHECK],
BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" -version 2>&1 | head -n 1`

# Extra M4 quote needed to protect [] in grep expression.
[FOUND_CORRECT_VERSION=`echo $BOOT_JDK_VERSION | grep '\"1\.[789]\.'`]
[FOUND_CORRECT_VERSION=`echo $BOOT_JDK_VERSION | grep '\"1\.[89]\.'`]
if test "x$FOUND_CORRECT_VERSION" = x; then
AC_MSG_NOTICE([Potential Boot JDK found at $BOOT_JDK is incorrect JDK version ($BOOT_JDK_VERSION); ignoring])
AC_MSG_NOTICE([(Your Boot JDK must be version 7, 8 or 9)])
AC_MSG_NOTICE([(Your Boot JDK must be version 8 or 9)])
BOOT_JDK_FOUND=no
else
# We're done! :-)
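For context, the check above only inspects the first line of `java -version` output. A stand-alone sketch of what the tightened pattern accepts, assuming BOOT_JDK points at a candidate JDK home (version strings are illustrative):

    # java version "1.8.0_05" and "1.9.0-ea" style strings match; "1.7.0_55" no longer does.
    BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" -version 2>&1 | head -n 1`
    if echo "$BOOT_JDK_VERSION" | grep -q '"1\.[89]\.'; then
      echo "acceptable boot JDK: $BOOT_JDK_VERSION"
    else
      echo "Boot JDK must be version 8 or 9, got: $BOOT_JDK_VERSION"
    fi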
@@ -89,7 +89,7 @@ AC_DEFUN([BPERF_CHECK_MEMORY_SIZE],
if test "x$FOUND_MEM" = xyes; then
AC_MSG_RESULT([$MEMORY_SIZE MB])
else
AC_MSG_RESULT([could not detect memory size, defaulting to 1024 MB])
AC_MSG_RESULT([could not detect memory size, defaulting to $MEMORY_SIZE MB])
AC_MSG_WARN([This might seriously impact build performance!])
fi
])
@@ -473,7 +473,8 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
;;
ppc )
# on ppc we don't prevent gcc to omit frame pointer nor strict-aliasing
# on ppc we don't prevent gcc to omit frame pointer but do prevent strict aliasing
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
;;
* )
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -fno-omit-frame-pointer"
@@ -4243,7 +4243,7 @@ TOOLCHAIN_DESCRIPTION_xlc="IBM XL C/C++"
#CUSTOM_AUTOCONF_INCLUDE

# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1396624161
DATE_WHEN_GENERATED=1398196583

###############################################################################
#
@@ -19896,12 +19896,12 @@ $as_echo "$as_me: Potential Boot JDK found at $BOOT_JDK did not contain an rt.ja
BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" -version 2>&1 | head -n 1`

# Extra M4 quote needed to protect [] in grep expression.
FOUND_CORRECT_VERSION=`echo $BOOT_JDK_VERSION | grep '\"1\.[789]\.'`
FOUND_CORRECT_VERSION=`echo $BOOT_JDK_VERSION | grep '\"1\.[89]\.'`
if test "x$FOUND_CORRECT_VERSION" = x; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: Potential Boot JDK found at $BOOT_JDK is incorrect JDK version ($BOOT_JDK_VERSION); ignoring" >&5
$as_echo "$as_me: Potential Boot JDK found at $BOOT_JDK is incorrect JDK version ($BOOT_JDK_VERSION); ignoring" >&6;}
{ $as_echo "$as_me:${as_lineno-$LINENO}: (Your Boot JDK must be version 7, 8 or 9)" >&5
$as_echo "$as_me: (Your Boot JDK must be version 7, 8 or 9)" >&6;}
{ $as_echo "$as_me:${as_lineno-$LINENO}: (Your Boot JDK must be version 8 or 9)" >&5
$as_echo "$as_me: (Your Boot JDK must be version 8 or 9)" >&6;}
BOOT_JDK_FOUND=no
else
# We're done! :-)
The same two replacements (the grep pattern now accepting only 1.8 and 1.9 version strings, and the "version 8 or 9" notices) repeat verbatim in the generated script at each of the following hunks:
@@ -20228,12 +20228,12 @@
@@ -20422,12 +20422,12 @@
@@ -20609,12 +20609,12 @@
@@ -20795,12 +20795,12 @@
@@ -20981,12 +20981,12 @@
@@ -21158,12 +21158,12 @@
@@ -21476,12 +21476,12 @@
@@ -21804,12 +21804,12 @@
@@ -22019,12 +22019,12 @@
@@ -22199,12 +22199,12 @@
@@ -22407,12 +22407,12 @@
@@ -22587,12 +22587,12 @@
@@ -22795,12 +22795,12 @@
@@ -22975,12 +22975,12 @@
@@ -23183,12 +23183,12 @@
@@ -23363,12 +23363,12 @@
@@ -23558,12 +23558,12 @@
@@ -23736,12 +23736,12 @@
@@ -23932,12 +23932,12 @@
@@ -24110,12 +24110,12 @@
@@ -24305,12 +24305,12 @@
@@ -24483,12 +24483,12 @@
@@ -24679,12 +24679,12 @@
@@ -24857,12 +24857,12 @@
@@ -25034,12 +25034,12 @@
@@ -41662,7 +41662,8 @@ fi
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
;;
ppc )
# on ppc we don't prevent gcc to omit frame pointer nor strict-aliasing
# on ppc we don't prevent gcc to omit frame pointer but do prevent strict aliasing
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
;;
* )
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -fno-omit-frame-pointer"
@@ -48566,8 +48567,8 @@ $as_echo_n "checking for memory size... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MEMORY_SIZE MB" >&5
$as_echo "$MEMORY_SIZE MB" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: could not detect memory size, defaulting to 1024 MB" >&5
$as_echo "could not detect memory size, defaulting to 1024 MB" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: could not detect memory size, defaulting to $MEMORY_SIZE MB" >&5
$as_echo "could not detect memory size, defaulting to $MEMORY_SIZE MB" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: This might seriously impact build performance!" >&5
$as_echo "$as_me: WARNING: This might seriously impact build performance!" >&2;}
fi
@@ -261,6 +261,7 @@ BOOT_JDK_SOURCETARGET:=@BOOT_JDK_SOURCETARGET@

# Information about the build system
NUM_CORES:=@NUM_CORES@
MEMORY_SIZE:=@MEMORY_SIZE@
# Enable sjavac support = use a javac server,
# multi core javac compilation and dependency tracking.
ENABLE_SJAVAC:=@ENABLE_SJAVAC@
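NUM_CORES and MEMORY_SIZE above are normally filled in by the configure probes shown earlier; when detection falls back to the default they can be pinned by hand. A hedged sketch with illustrative values:

    bash ./configure --with-num-cores=8 --with-memory-size=16384   # memory size in MB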
@@ -72,12 +72,21 @@ usage() {
exit 1
}

if [ "x" = "x$command" ] ; then
echo "ERROR: No command to hg supplied!"
usage
fi

# Check if we can use fifos for monitoring sub-process completion.
on_windows=`uname -s | egrep -ic -e 'cygwin|msys'`
if [ ${on_windows} = "1" ]; then
# cygwin has (2014-04-18) broken (single writer only) FIFOs
# msys has (2014-04-18) no FIFOs.
have_fifos="false"
else
have_fifos="true"
fi

# Clean out the temporary directory that stores the pid files.
tmp=/tmp/forest.$$
rm -f -r ${tmp}
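The FIFO probe above feeds the job throttling added further down: each finished clone writes its repository name into a FIFO and the parent blocks on a read instead of polling for pid files. A minimal stand-alone sketch of that pattern, separate from the script itself (names illustrative):

    tmp=`mktemp -d`
    mkfifo ${tmp}/fifo
    exec 3<>${tmp}/fifo                 # keep the FIFO open read-write so writers never block
    ( sleep 2; echo "corba"   >&3 ) &   # stand-ins for background hg commands
    ( sleep 1; echo "hotspot" >&3 ) &
    read first_done <&3                 # blocks until the first job reports completion
    echo "first finished: ${first_done}"
    wait
    exec 3>&-
    rm -rf ${tmp}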
@@ -210,7 +219,19 @@ if [ "${command}" = "serve" ] ; then
) &
else
# Run the supplied command on all repos in parallel.

# n is the number of subprocess started or which might still be running.
n=0
if [ $have_fifos = "true" ]; then
# if we have fifos use them to detect command completion.
mkfifo ${tmp}/fifo
exec 3<>${tmp}/fifo
if [ "${sflag}" = "true" ] ; then
# force sequential
at_a_time=1
fi
fi

for i in ${repos} ${repos_extra} ; do
n=`expr ${n} '+' 1`
repopidfile=`echo ${i} | sed -e 's@./@@' -e 's@/@_@g'`
@@ -221,10 +242,11 @@ else
pull_base="${pull_extra}"
fi
done
pull_base="`echo ${pull_base} | sed -e 's@[/]*$@@'`"
(
(
if [ "${command}" = "clone" -o "${command}" = "fclone" -o "${command}" = "tclone" ] ; then
pull_newrepo="`echo ${pull_base}/${i} | sed -e 's@\([^:]/\)//*@\1@g'`"
pull_newrepo="${pull_base}/${i}"
path="`dirname ${i}`"
if [ "${path}" != "." ] ; then
times=0
@@ -237,7 +259,7 @@ else
sleep 5
done
fi
echo "hg clone ${pull_newrepo} ${i}" > ${status_output}
echo "hg${global_opts} clone ${pull_newrepo} ${i}" > ${status_output}
(PYTHONUNBUFFERED=true hg${global_opts} clone ${pull_newrepo} ${i}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
else
echo "cd ${i} && hg${global_opts} ${command} ${command_args}" > ${status_output}
@@ -246,21 +268,41 @@ else

echo $! > ${tmp}/${repopidfile}.pid
) 2>&1 | sed -e "s@^@${reponame}: @" > ${status_output}
if [ $have_fifos = "true" ]; then
echo "${reponame}" >&3
fi
) &

if [ `expr ${n} '%' ${at_a_time}` -eq 0 -a "${sflag}" = "false" ] ; then
sleep 2
echo "Waiting 5 secs before spawning next background command." > ${status_output}
sleep 3
fi

if [ "${sflag}" = "true" ] ; then
if [ $have_fifos = "true" ]; then
# check on count of running subprocesses and possibly wait for completion
if [ ${at_a_time} -lt ${n} ] ; then
# read will block until there are completed subprocesses
while read repo_done; do
n=`expr ${n} '-' 1`
if [ ${n} -lt ${at_a_time} ] ; then
# we should start more subprocesses
break;
fi
done <&3
fi
else
if [ "${sflag}" = "false" ] ; then
# Compare completions to starts
completed="`(ls -1 ${tmp}/*.pid.rc 2> /dev/null | wc -l) || echo 0`"
while [ ${at_a_time} -lt `expr ${n} '-' ${completed}` ] ; do
# sleep a short time to give time for something to complete
sleep 1
completed="`(ls -1 ${tmp}/*.pid.rc 2> /dev/null | wc -l) || echo 0`"
done
else
# complete this task before starting another.
wait
fi
fi
done
fi

# Wait for all hg commands to complete
# Wait for all subprocesses to complete
wait

# Terminate with exit 0 only if all subprocesses were successful
@@ -270,7 +312,7 @@ if [ -d ${tmp} ]; then
exit_code=`cat ${rc} | tr -d ' \n\r'`
if [ "${exit_code}" != "0" ] ; then
repo="`echo ${rc} | sed -e s@^${tmp}@@ -e 's@/*\([^/]*\)\.pid\.rc$@\1@' -e 's@_@/@g'`"
echo "WARNING: ${repo} exited abnormally." > ${status_output}
echo "WARNING: ${repo} exited abnormally ($exit_code)" > ${status_output}
ec=1
fi
done
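The final loop above derives the script's exit status from the per-repository *.pid.rc files written by each background command. A condensed sketch of the same aggregation, assuming each file holds a single exit code (illustrative, not the script itself):

    ec=0
    for rc in ${tmp}/*.pid.rc; do
      exit_code=`cat ${rc} | tr -d ' \n\r'`
      if [ "${exit_code}" != "0" ] ; then
        echo "WARNING: `basename ${rc} .pid.rc` exited abnormally (${exit_code})"
        ec=1
      fi
    done
    exit ${ec}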
@@ -252,3 +252,4 @@ a4bf701ac316946c2e5e83138ad8e687da6a4b30 jdk9-b06
6c8563600a71394c949405189ddd66267a88d8cd jdk9-b07
2da7fead826bc27f193c7d63048c2cf100a8809c jdk9-b08
1a3a4f48515dbf1cff37279691b2fb74f228298d jdk9-b09
3bd4039dfc632fd7fc8418a25a3dcc34d1cd4019 jdk9-b10

@@ -412,3 +412,4 @@ bdc5311e1db7598589b77015119b821bf8c828bd jdk9-b05
52f7edf2589d9f9d35db3008bc5377f279de9c18 jdk9-b07
4dedef5e51ed3a36677a8ba82949fc517ad64162 jdk9-b08
05e8f5242c26ba45d4fa947e4f4f54c058c9b522 jdk9-b09
ebc44d040cd149d2120d69fe183a3dae7840f4b4 jdk9-b10
@@ -65,9 +65,6 @@ jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
jprt.my.linux.ppcv2.jdk9=linux_ppcv2_2.6
jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}

jprt.my.linux.ppcsflt.jdk9=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}

jprt.my.linux.armvfpsflt.jdk9=linux_armvfpsflt_2.6
jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}}

@@ -113,7 +110,6 @@ jprt.build.targets.embedded= \
${jprt.my.linux.i586}-{productEmb|fastdebugEmb}, \
${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
${jprt.my.linux.ppcsflt}-{productEmb|fastdebugEmb}, \
${jprt.my.linux.armvfpsflt}-{productEmb|fastdebugEmb}, \
${jprt.my.linux.armvfphflt}-{productEmb|fastdebugEmb}, \
${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2947,17 +2947,60 @@ void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate,
|
||||
istate->_last_Java_fp = last_Java_fp;
|
||||
}
|
||||
|
||||
int AbstractInterpreter::layout_activation(Method* method,
|
||||
int temps, // Number of slots on java expression stack in use.
|
||||
int popframe_args,
|
||||
int monitors, // Number of active monitors.
|
||||
int caller_actual_parameters,
|
||||
int callee_params,// Number of slots for callee parameters.
|
||||
int callee_locals,// Number of slots for locals.
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
// Computes monitor_size and top_frame_size in bytes.
|
||||
static void frame_size_helper(int max_stack,
|
||||
int monitors,
|
||||
int& monitor_size,
|
||||
int& top_frame_size) {
|
||||
monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
|
||||
top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
|
||||
+ monitor_size
|
||||
+ max_stack * Interpreter::stackElementSize
|
||||
+ 2 * Interpreter::stackElementSize,
|
||||
frame::alignment_in_bytes)
|
||||
+ frame::top_ijava_frame_abi_size;
|
||||
}
|
||||
|
||||
// Returns number of stackElementWords needed for the interpreter frame with the
|
||||
// given sections.
|
||||
int AbstractInterpreter::size_activation(int max_stack,
|
||||
int temps,
|
||||
int extra_args,
|
||||
int monitors,
|
||||
int callee_params,
|
||||
int callee_locals,
|
||||
bool is_top_frame) {
|
||||
int monitor_size = 0;
|
||||
int top_frame_size = 0;
|
||||
frame_size_helper(max_stack, monitors, monitor_size, top_frame_size);
|
||||
|
||||
int frame_size;
|
||||
if (is_top_frame) {
|
||||
frame_size = top_frame_size;
|
||||
} else {
|
||||
frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
|
||||
+ monitor_size
|
||||
+ (temps - callee_params + callee_locals) * Interpreter::stackElementSize
|
||||
+ 2 * Interpreter::stackElementSize,
|
||||
frame::alignment_in_bytes)
|
||||
+ frame::parent_ijava_frame_abi_size;
|
||||
assert(extra_args == 0, "non-zero for top_frame only");
|
||||
}
|
||||
|
||||
return frame_size / Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
void AbstractInterpreter::layout_activation(Method* method,
|
||||
int temps, // Number of slots on java expression stack in use.
|
||||
int popframe_args,
|
||||
int monitors, // Number of active monitors.
|
||||
int caller_actual_parameters,
|
||||
int callee_params,// Number of slots for callee parameters.
|
||||
int callee_locals,// Number of slots for locals.
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
|
||||
// NOTE this code must exactly mimic what
|
||||
// InterpreterGenerator::generate_compute_interpreter_state() does
|
||||
@ -2967,86 +3010,64 @@ int AbstractInterpreter::layout_activation(Method* method,
|
||||
// both the abi scratch area and a place to hold a result from a
|
||||
// callee on its way to the callers stack.
|
||||
|
||||
int monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
|
||||
int frame_size;
|
||||
int top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
|
||||
+ monitor_size
|
||||
+ (method->max_stack() *Interpreter::stackElementWords * BytesPerWord)
|
||||
+ 2*BytesPerWord,
|
||||
frame::alignment_in_bytes)
|
||||
+ frame::top_ijava_frame_abi_size;
|
||||
if (is_top_frame) {
|
||||
frame_size = top_frame_size;
|
||||
} else {
|
||||
frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
|
||||
+ monitor_size
|
||||
+ ((temps - callee_params + callee_locals) *
|
||||
Interpreter::stackElementWords * BytesPerWord)
|
||||
+ 2*BytesPerWord,
|
||||
frame::alignment_in_bytes)
|
||||
+ frame::parent_ijava_frame_abi_size;
|
||||
assert(popframe_args==0, "non-zero for top_frame only");
|
||||
}
|
||||
int monitor_size = 0;
|
||||
int top_frame_size = 0;
|
||||
frame_size_helper(method->max_stack(), monitors, monitor_size, top_frame_size);
|
||||
|
||||
// If we actually have a frame to layout we must now fill in all the pieces.
|
||||
if (interpreter_frame != NULL) {
|
||||
intptr_t sp = (intptr_t)interpreter_frame->sp();
|
||||
intptr_t fp = *(intptr_t *)sp;
|
||||
assert(fp == (intptr_t)caller->sp(), "fp must match");
|
||||
interpreterState cur_state =
|
||||
(interpreterState)(fp - frame::interpreter_frame_cinterpreterstate_size_in_bytes());
|
||||
|
||||
intptr_t sp = (intptr_t)interpreter_frame->sp();
|
||||
intptr_t fp = *(intptr_t *)sp;
|
||||
assert(fp == (intptr_t)caller->sp(), "fp must match");
|
||||
interpreterState cur_state =
|
||||
(interpreterState)(fp - frame::interpreter_frame_cinterpreterstate_size_in_bytes());
|
||||
// Now fill in the interpreterState object.
|
||||
|
||||
// Now fill in the interpreterState object.
|
||||
|
||||
intptr_t* locals;
|
||||
if (caller->is_interpreted_frame()) {
|
||||
// Locals must agree with the caller because it will be used to set the
|
||||
// caller's tos when we return.
|
||||
interpreterState prev = caller->get_interpreterState();
|
||||
// Calculate start of "locals" for MH calls. For MH calls, the
|
||||
// current method() (= MH target) and prev->callee() (=
|
||||
// MH.invoke*()) are different and especially have different
|
||||
// signatures. To pop the argumentsof the caller, we must use
|
||||
// the prev->callee()->size_of_arguments() because that's what
|
||||
// the caller actually pushed. Currently, for synthetic MH
|
||||
// calls (deoptimized from inlined MH calls), detected by
|
||||
// is_method_handle_invoke(), we use the callee's arguments
|
||||
// because here, the caller's and callee's signature match.
|
||||
if (true /*!caller->is_at_mh_callsite()*/) {
|
||||
locals = prev->stack() + method->size_of_parameters();
|
||||
} else {
|
||||
// Normal MH call.
|
||||
locals = prev->stack() + prev->callee()->size_of_parameters();
|
||||
}
|
||||
intptr_t* locals;
|
||||
if (caller->is_interpreted_frame()) {
|
||||
// Locals must agree with the caller because it will be used to set the
|
||||
// caller's tos when we return.
|
||||
interpreterState prev = caller->get_interpreterState();
|
||||
// Calculate start of "locals" for MH calls. For MH calls, the
|
||||
// current method() (= MH target) and prev->callee() (=
|
||||
// MH.invoke*()) are different and especially have different
|
||||
// signatures. To pop the argumentsof the caller, we must use
|
||||
// the prev->callee()->size_of_arguments() because that's what
|
||||
// the caller actually pushed. Currently, for synthetic MH
|
||||
// calls (deoptimized from inlined MH calls), detected by
|
||||
// is_method_handle_invoke(), we use the callee's arguments
|
||||
// because here, the caller's and callee's signature match.
|
||||
if (true /*!caller->is_at_mh_callsite()*/) {
|
||||
locals = prev->stack() + method->size_of_parameters();
|
||||
} else {
|
||||
bool is_deopted;
|
||||
locals = (intptr_t*) (fp + ((method->max_locals() - 1) * BytesPerWord) +
|
||||
frame::parent_ijava_frame_abi_size);
|
||||
// Normal MH call.
|
||||
locals = prev->stack() + prev->callee()->size_of_parameters();
|
||||
}
|
||||
|
||||
intptr_t* monitor_base = (intptr_t*) cur_state;
|
||||
intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
|
||||
|
||||
// Provide pop_frame capability on PPC64, add popframe_args.
|
||||
// +1 because stack is always prepushed.
|
||||
intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (temps + popframe_args + 1) * BytesPerWord);
|
||||
|
||||
BytecodeInterpreter::layout_interpreterState(cur_state,
|
||||
caller,
|
||||
interpreter_frame,
|
||||
method,
|
||||
locals,
|
||||
stack,
|
||||
stack_base,
|
||||
monitor_base,
|
||||
(intptr_t*)(((intptr_t)fp)-top_frame_size),
|
||||
is_top_frame);
|
||||
|
||||
BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address,
|
||||
interpreter_frame->fp());
|
||||
} else {
|
||||
bool is_deopted;
|
||||
locals = (intptr_t*) (fp + ((method->max_locals() - 1) * BytesPerWord) +
|
||||
frame::parent_ijava_frame_abi_size);
|
||||
}
|
||||
return frame_size/BytesPerWord;
|
||||
|
||||
intptr_t* monitor_base = (intptr_t*) cur_state;
|
||||
intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
|
||||
|
||||
// Provide pop_frame capability on PPC64, add popframe_args.
|
||||
// +1 because stack is always prepushed.
|
||||
intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (temps + popframe_args + 1) * BytesPerWord);
|
||||
|
||||
BytecodeInterpreter::layout_interpreterState(cur_state,
|
||||
caller,
|
||||
interpreter_frame,
|
||||
method,
|
||||
locals,
|
||||
stack,
|
||||
stack_base,
|
||||
monitor_base,
|
||||
(intptr_t*)(((intptr_t)fp) - top_frame_size),
|
||||
is_top_frame);
|
||||
|
||||
BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address,
|
||||
interpreter_frame->fp());
|
||||
}
|
||||
|
||||
#endif // CC_INTERP
|
||||
|
@@ -1,6 +1,6 @@
//
// Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
// Copyright 2012, 2013 SAP AG. All rights reserved.
// Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
// Copyright 2012, 2014 SAP AG. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -1362,8 +1362,8 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
MacroAssembler _masm(&cbuf);

const long framesize = ((long)C->frame_slots()) << LogBytesPerInt;
assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
const long framesize = C->frame_size_in_bytes();
assert(framesize % (2 * wordSize) == 0, "must preserve 2*wordSize alignment");

const bool method_is_frameless = false /* TODO: PPC port C->is_frameless_method()*/;
@@ -1388,19 +1388,22 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// careful, because some VM calls (such as call site linkage) can
// use several kilobytes of stack. But the stack safety zone should
// account for that. See bugs 4446381, 4468289, 4497237.
if (C->need_stack_bang(framesize) && UseStackBanging) {

int bangsize = C->bang_size_in_bytes();
assert(bangsize >= framesize || bangsize <= 0, "stack bang size incorrect");
if (C->need_stack_bang(bangsize) && UseStackBanging) {
// Unfortunately we cannot use the function provided in
// assembler.cpp as we have to emulate the pipes. So I had to
// insert the code of generate_stack_overflow_check(), see
// assembler.cpp for some illuminative comments.
const int page_size = os::vm_page_size();
int bang_end = StackShadowPages*page_size;
int bang_end = StackShadowPages * page_size;

// This is how far the previous frame's stack banging extended.
const int bang_end_safe = bang_end;

if (framesize > page_size) {
bang_end += framesize;
if (bangsize > page_size) {
bang_end += bangsize;
}

int bang_offset = bang_end_safe;
@@ -1446,7 +1449,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {

unsigned int bytes = (unsigned int)framesize;
long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
ciMethod *currMethod = C -> method();
ciMethod *currMethod = C->method();

// Optimized version for most common case.
if (UsePower6SchedulerPPC64 &&
@ -1334,21 +1334,42 @@ bool AbstractInterpreter::can_be_compiled(methodHandle m) {
|
||||
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
|
||||
const int max_alignment_size = 2;
|
||||
const int abi_scratch = frame::abi_reg_args_size;
|
||||
return method->max_locals() + method->max_stack() + frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
|
||||
return method->max_locals() + method->max_stack() +
|
||||
frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
|
||||
}
|
||||
|
||||
// Fills a sceletal interpreter frame generated during deoptimizations
|
||||
// and returns the frame size in slots.
|
||||
// Returns number of stackElementWords needed for the interpreter frame with the
|
||||
// given sections.
|
||||
// This overestimates the stack by one slot in case of alignments.
|
||||
int AbstractInterpreter::size_activation(int max_stack,
|
||||
int temps,
|
||||
int extra_args,
|
||||
int monitors,
|
||||
int callee_params,
|
||||
int callee_locals,
|
||||
bool is_top_frame) {
|
||||
// Note: This calculation must exactly parallel the frame setup
|
||||
// in AbstractInterpreterGenerator::generate_method_entry.
|
||||
assert(Interpreter::stackElementWords == 1, "sanity");
|
||||
const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
|
||||
const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
|
||||
(frame::abi_minframe_size / Interpreter::stackElementSize);
|
||||
const int size =
|
||||
max_stack +
|
||||
(callee_locals - callee_params) +
|
||||
monitors * frame::interpreter_frame_monitor_size() +
|
||||
max_alignment_space +
|
||||
abi_scratch +
|
||||
frame::ijava_state_size / Interpreter::stackElementSize;
|
||||
|
||||
// Fixed size of an interpreter frame, align to 16-byte.
|
||||
return (size & -2);
|
||||
}
|
||||
|
||||
// Fills a sceletal interpreter frame generated during deoptimizations.
|
||||
//
|
||||
// Parameters:
|
||||
//
|
||||
// interpreter_frame == NULL:
|
||||
// Only calculate the size of an interpreter activation, no actual layout.
|
||||
// Note: This calculation must exactly parallel the frame setup
|
||||
// in TemplateInterpreter::generate_normal_entry. But it does not
|
||||
// account for the SP alignment, that might further enhance the
|
||||
// frame size, depending on FP.
|
||||
//
|
||||
// interpreter_frame != NULL:
|
||||
// set up the method, locals, and monitors.
|
||||
// The frame interpreter_frame, if not NULL, is guaranteed to be the
|
||||
@ -1365,59 +1386,41 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
|
||||
// the arguments off advance the esp by dummy popframe_extra_args slots.
|
||||
// Popping off those will establish the stack layout as it was before the call.
|
||||
//
|
||||
int AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount,
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
void AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount,
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_locals_count,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
|
||||
const int max_alignment_space = 2;
|
||||
const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
|
||||
(frame::abi_minframe_size / Interpreter::stackElementSize) ;
|
||||
const int conservative_framesize_in_slots =
|
||||
method->max_stack() + callee_locals - callee_param_count +
|
||||
(moncount * frame::interpreter_frame_monitor_size()) + max_alignment_space +
|
||||
abi_scratch + frame::ijava_state_size / Interpreter::stackElementSize;
|
||||
(frame::abi_minframe_size / Interpreter::stackElementSize);
|
||||
|
||||
assert(!is_top_frame || conservative_framesize_in_slots * 8 > frame::abi_reg_args_size + frame::ijava_state_size, "frame too small");
|
||||
intptr_t* locals_base = (caller->is_interpreted_frame()) ?
|
||||
caller->interpreter_frame_esp() + caller_actual_parameters :
|
||||
caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
|
||||
|
||||
if (interpreter_frame == NULL) {
|
||||
// Since we don't know the exact alignment, we return the conservative size.
|
||||
return (conservative_framesize_in_slots & -2);
|
||||
} else {
|
||||
// Now we know our caller, calc the exact frame layout and size.
|
||||
intptr_t* locals_base = (caller->is_interpreted_frame()) ?
|
||||
caller->interpreter_frame_esp() + caller_actual_parameters :
|
||||
caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
|
||||
intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ;
|
||||
intptr_t* monitor = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
|
||||
intptr_t* esp_base = monitor - 1;
|
||||
intptr_t* esp = esp_base - tempcount - popframe_extra_args;
|
||||
intptr_t* sp = (intptr_t *) (((intptr_t) (esp_base - callee_locals_count + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
|
||||
intptr_t* sender_sp = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
|
||||
intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
|
||||
|
||||
intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ;
|
||||
intptr_t* monitor = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
|
||||
intptr_t* esp_base = monitor - 1;
|
||||
intptr_t* esp = esp_base - tempcount - popframe_extra_args;
|
||||
intptr_t* sp = (intptr_t *) (((intptr_t) (esp_base- callee_locals + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
|
||||
intptr_t* sender_sp = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
|
||||
intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
|
||||
|
||||
interpreter_frame->interpreter_frame_set_method(method);
|
||||
interpreter_frame->interpreter_frame_set_locals(locals_base);
|
||||
interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
|
||||
interpreter_frame->interpreter_frame_set_esp(esp);
|
||||
interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
|
||||
interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
|
||||
if (!is_bottom_frame) {
|
||||
interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
|
||||
}
|
||||
|
||||
int framesize_in_slots = caller->sp() - sp;
|
||||
assert(!is_top_frame ||framesize_in_slots >= (frame::abi_reg_args_size / Interpreter::stackElementSize) + frame::ijava_state_size / Interpreter::stackElementSize, "frame too small");
|
||||
assert(framesize_in_slots <= conservative_framesize_in_slots, "exact frame size must be smaller than the convervative size!");
|
||||
return framesize_in_slots;
|
||||
interpreter_frame->interpreter_frame_set_method(method);
|
||||
interpreter_frame->interpreter_frame_set_locals(locals_base);
|
||||
interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
|
||||
interpreter_frame->interpreter_frame_set_esp(esp);
|
||||
interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
|
||||
interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
|
||||
if (!is_bottom_frame) {
|
||||
interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -630,11 +630,20 @@ class Assembler : public AbstractAssembler {
}

protected:
// Insert a nop if the previous is cbcond
void insert_nop_after_cbcond() {
if (UseCBCond && cbcond_before()) {
nop();
}
}
// Delay slot helpers
// cti is called when emitting control-transfer instruction,
// BEFORE doing the emitting.
// Only effective when assertion-checking is enabled.
void cti() {
// A cbcond instruction immediately followed by a CTI
// instruction introduces pipeline stalls, we need to avoid that.
no_cbcond_before();
#ifdef CHECK_DELAY
assert_not_delayed("cti should not be in delay slot");
#endif

@ -658,7 +667,6 @@ class Assembler : public AbstractAssembler {
void no_cbcond_before() {
assert(offset() == 0 || !cbcond_before(), "cbcond should not follow an other cbcond");
}
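Editor's note: the hunk above adds insert_nop_after_cbcond(), which every branch and call emitter calls first so that a SPARC cbcond is never followed directly by another control-transfer instruction. A minimal standalone sketch of that pattern, assuming a toy emitter; the names below are illustrative, not the HotSpot Assembler API:

  #include <vector>
  #include <cstdio>

  struct Emitter {
    std::vector<const char*> code;
    bool last_was_cbcond = false;

    void emit(const char* insn, bool is_cbcond) {
      code.push_back(insn);
      last_was_cbcond = is_cbcond;
    }
    // Called at the start of every branch/call emitter, mirroring the hunk above.
    void insert_nop_after_cbcond() {
      if (last_was_cbcond) emit("nop", false);
    }
    void cbcond(const char* what) { emit(what, true); }
    void call(const char* target) { insert_nop_after_cbcond(); emit(target, false); }
  };

  int main() {
    Emitter a;
    a.cbcond("cwbe %g1,%g2,done");
    a.call("call foo");             // a nop is padded in before the call
    for (const char* s : a.code) printf("%s\n", s);
    return 0;
  }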
|
||||
|
||||
public:
|
||||
|
||||
bool use_cbcond(Label& L) {
|
||||
|
@ -54,33 +54,33 @@ inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
|
||||
inline void Assembler::add(Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
|
||||
inline void Assembler::add(Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
|
||||
|
||||
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
|
||||
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
|
||||
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
|
||||
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { insert_nop_after_cbcond(); bpr( c, a, p, s1, target(L)); }
|
||||
|
||||
inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
|
||||
inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L)); }
|
||||
inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
|
||||
inline void Assembler::fb( Condition c, bool a, Label& L ) { insert_nop_after_cbcond(); fb(c, a, target(L)); }
|
||||
|
||||
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
|
||||
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
|
||||
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
|
||||
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { insert_nop_after_cbcond(); fbp(c, a, cc, p, target(L)); }
|
||||
|
||||
inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
|
||||
inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
|
||||
inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
|
||||
inline void Assembler::br( Condition c, bool a, Label& L ) { insert_nop_after_cbcond(); br(c, a, target(L)); }
|
||||
|
||||
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
|
||||
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { bp(c, a, cc, p, target(L)); }
|
||||
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
|
||||
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { insert_nop_after_cbcond(); bp(c, a, cc, p, target(L)); }
|
||||
|
||||
// compare and branch
|
||||
inline void Assembler::cbcond(Condition c, CC cc, Register s1, Register s2, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | rs2(s2)); }
|
||||
inline void Assembler::cbcond(Condition c, CC cc, Register s1, int simm5, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | immed(true) | simm(simm5, 5)); }
|
||||
|
||||
inline void Assembler::call( address d, relocInfo::relocType rt ) { cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
|
||||
inline void Assembler::call( Label& L, relocInfo::relocType rt ) { call( target(L), rt); }
|
||||
inline void Assembler::call( address d, relocInfo::relocType rt ) { insert_nop_after_cbcond(); cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
|
||||
inline void Assembler::call( Label& L, relocInfo::relocType rt ) { insert_nop_after_cbcond(); call( target(L), rt); }
|
||||
|
||||
inline void Assembler::flush( Register s1, Register s2) { emit_int32( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
|
||||
inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
|
||||
|
||||
inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_int32( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
|
||||
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
|
||||
inline void Assembler::jmpl( Register s1, Register s2, Register d ) { insert_nop_after_cbcond(); cti(); emit_int32( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
|
||||
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { insert_nop_after_cbcond(); cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
|
||||
|
||||
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
|
||||
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
|
||||
|
@ -152,7 +152,7 @@ LIR_Opr LIR_Assembler::osrBufferPointer() {
}

int LIR_Assembler::initial_frame_size_in_bytes() {
int LIR_Assembler::initial_frame_size_in_bytes() const {
return in_bytes(frame_map()->framesize_in_bytes());
}

@ -182,7 +182,7 @@ void LIR_Assembler::osr_entry() {
int number_of_locks = entry_state->locks_size();

// Create a frame for the compiled activation.
__ build_frame(initial_frame_size_in_bytes());
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

// OSR buffer is
//

@ -55,9 +55,9 @@ void C1_MacroAssembler::explicit_null_check(Register base) {
}

void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {

generate_stack_overflow_check(frame_size_in_bytes);
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
generate_stack_overflow_check(bang_size_in_bytes);
// Create the frame.
save_frame_c1(frame_size_in_bytes);
}
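Editor's note: build_frame() now takes the frame size and the bang size separately, because the region that must be probed for stack overflow can be larger than the frame actually pushed, while it can never be smaller. A small sketch of that contract under those assumptions; bang_pages() and push_frame() are placeholders, not HotSpot calls:

  #include <cassert>
  #include <cstdio>

  static void bang_pages(int bytes)  { printf("bang %d bytes below SP\n", bytes); }
  static void push_frame(int bytes)  { printf("allocate %d byte frame\n", bytes); }

  void build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
    assert(bang_size_in_bytes >= frame_size_in_bytes && "stack bang size incorrect");
    bang_pages(bang_size_in_bytes);    // probe the larger region first
    push_frame(frame_size_in_bytes);   // then allocate only the real frame
  }

  int main() { build_frame(96, 160); return 0; }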
|
||||
|
@ -2099,7 +2099,7 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
|
||||
int monitor_size = method->is_synchronized() ?
|
||||
1*frame::interpreter_frame_monitor_size() : 0;
|
||||
return size_activation_helper(method->max_locals(), method->max_stack(),
|
||||
monitor_size) + call_stub_size;
|
||||
monitor_size) + call_stub_size;
|
||||
}
|
||||
|
||||
void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
|
||||
@ -2183,31 +2183,31 @@ void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate, ad
|
||||
istate->_last_Java_pc = (intptr_t*) last_Java_pc;
|
||||
}
|
||||
|
||||
static int frame_size_helper(int max_stack,
|
||||
int moncount,
|
||||
int callee_param_size,
|
||||
int callee_locals_size,
|
||||
bool is_top_frame,
|
||||
int& monitor_size,
|
||||
int& full_frame_words) {
|
||||
int extra_locals_size = callee_locals_size - callee_param_size;
|
||||
monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
|
||||
full_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
|
||||
int short_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
|
||||
int frame_words = is_top_frame ? full_frame_words : short_frame_words;
|
||||
|
||||
int AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount, // Number of slots on java expression stack in use
|
||||
int popframe_extra_args,
|
||||
int moncount, // Number of active monitors
|
||||
int caller_actual_parameters,
|
||||
int callee_param_size,
|
||||
int callee_locals_size,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
return frame_words;
|
||||
}
|
||||
|
||||
assert(popframe_extra_args == 0, "NEED TO FIX");
|
||||
// NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
|
||||
// does as far as allocating an interpreter frame.
|
||||
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
|
||||
// The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
|
||||
// as determined by a previous call to this method.
|
||||
// It is also guaranteed to be walkable even though it is in a skeletal state
|
||||
int AbstractInterpreter::size_activation(int max_stack,
|
||||
int tempcount,
|
||||
int extra_args,
|
||||
int moncount,
|
||||
int callee_param_size,
|
||||
int callee_locals_size,
|
||||
bool is_top_frame) {
|
||||
assert(extra_args == 0, "NEED TO FIX");
|
||||
// NOTE: return size is in words not bytes
|
||||
// NOTE: tempcount is the current size of the java expression stack. For top most
|
||||
// frames we will allocate a full sized expression stack and not the curback
|
||||
// version that non-top frames have.
|
||||
|
||||
// Calculate the amount our frame will be adjust by the callee. For top frame
|
||||
// this is zero.
|
||||
|
||||
@ -2216,87 +2216,108 @@ int AbstractInterpreter::layout_activation(Method* method,
|
||||
// to it. So it ignores last_frame_adjust value. Seems suspicious as far
|
||||
// as getting sender_sp correct.
|
||||
|
||||
int extra_locals_size = callee_locals_size - callee_param_size;
|
||||
int monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
|
||||
int full_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
|
||||
int short_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
|
||||
int frame_words = is_top_frame ? full_frame_words : short_frame_words;
|
||||
int unused_monitor_size = 0;
|
||||
int unused_full_frame_words = 0;
|
||||
return frame_size_helper(max_stack, moncount, callee_param_size, callee_locals_size, is_top_frame,
|
||||
unused_monitor_size, unused_full_frame_words);
|
||||
}
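Editor's note: the refactoring above pulls the frame-size arithmetic into frame_size_helper(), which also hands back intermediate values through reference parameters; size_activation() ignores them, layout_activation() uses them, and the two callers can no longer drift apart. A compact sketch of that shape with invented constants (not the SPARC values):

  #include <cstdio>

  static int frame_size_helper(int max_stack, int moncount, bool is_top_frame,
                               int& monitor_size, int& full_frame_words) {
    monitor_size     = moncount * 4;                  // illustrative words per monitor
    full_frame_words = 16 + max_stack + monitor_size; // fixed part + expression stack + monitors
    int short_frame_words = 16 + (max_stack / 2) + monitor_size;
    return is_top_frame ? full_frame_words : short_frame_words;
  }

  static int size_activation(int max_stack, int moncount, bool is_top_frame) {
    int unused_monitor_size = 0, unused_full_frame_words = 0;  // outputs not needed here
    return frame_size_helper(max_stack, moncount, is_top_frame,
                             unused_monitor_size, unused_full_frame_words);
  }

  int main() { printf("%d words\n", size_activation(8, 1, true)); return 0; }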
|
||||
void AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount, // Number of slots on java expression stack in use
|
||||
int popframe_extra_args,
|
||||
int moncount, // Number of active monitors
|
||||
int caller_actual_parameters,
|
||||
int callee_param_size,
|
||||
int callee_locals_size,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
assert(popframe_extra_args == 0, "NEED TO FIX");
|
||||
// NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
|
||||
// does as far as allocating an interpreter frame.
|
||||
// Set up the method, locals, and monitors.
|
||||
// The frame interpreter_frame is guaranteed to be the right size,
|
||||
// as determined by a previous call to the size_activation() method.
|
||||
// It is also guaranteed to be walkable even though it is in a skeletal state
|
||||
// NOTE: tempcount is the current size of the java expression stack. For top most
|
||||
// frames we will allocate a full sized expression stack and not the curback
|
||||
// version that non-top frames have.
|
||||
|
||||
int monitor_size = 0;
|
||||
int full_frame_words = 0;
|
||||
int frame_words = frame_size_helper(method->max_stack(), moncount, callee_param_size, callee_locals_size,
|
||||
is_top_frame, monitor_size, full_frame_words);
|
||||
|
||||
/*
|
||||
if we actually have a frame to layout we must now fill in all the pieces. This means both
|
||||
We must now fill in all the pieces of the frame. This means both
|
||||
the interpreterState and the registers.
|
||||
*/
|
||||
if (interpreter_frame != NULL) {
|
||||
|
||||
// MUCHO HACK
|
||||
// MUCHO HACK
|
||||
|
||||
intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words);
|
||||
// 'interpreter_frame->sp()' is unbiased while 'frame_bottom' must be a biased value in 64bit mode.
|
||||
assert(((intptr_t)frame_bottom & 0xf) == 0, "SP biased in layout_activation");
|
||||
frame_bottom = (intptr_t*)((intptr_t)frame_bottom - STACK_BIAS);
|
||||
intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words);
|
||||
// 'interpreter_frame->sp()' is unbiased while 'frame_bottom' must be a biased value in 64bit mode.
|
||||
assert(((intptr_t)frame_bottom & 0xf) == 0, "SP biased in layout_activation");
|
||||
frame_bottom = (intptr_t*)((intptr_t)frame_bottom - STACK_BIAS);
|
||||
|
||||
/* Now fillin the interpreterState object */
|
||||
/* Now fillin the interpreterState object */
|
||||
|
||||
interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
|
||||
interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
|
||||
|
||||
|
||||
intptr_t* locals;
|
||||
intptr_t* locals;
|
||||
|
||||
// Calculate the postion of locals[0]. This is painful because of
|
||||
// stack alignment (same as ia64). The problem is that we can
|
||||
// not compute the location of locals from fp(). fp() will account
|
||||
// for the extra locals but it also accounts for aligning the stack
|
||||
// and we can't determine if the locals[0] was misaligned but max_locals
|
||||
// was enough to have the
|
||||
// calculate postion of locals. fp already accounts for extra locals.
|
||||
// +2 for the static long no_params() issue.
|
||||
// Calculate the postion of locals[0]. This is painful because of
|
||||
// stack alignment (same as ia64). The problem is that we can
|
||||
// not compute the location of locals from fp(). fp() will account
|
||||
// for the extra locals but it also accounts for aligning the stack
|
||||
// and we can't determine if the locals[0] was misaligned but max_locals
|
||||
// was enough to have the
|
||||
// calculate postion of locals. fp already accounts for extra locals.
|
||||
// +2 for the static long no_params() issue.
|
||||
|
||||
if (caller->is_interpreted_frame()) {
|
||||
// locals must agree with the caller because it will be used to set the
|
||||
// caller's tos when we return.
|
||||
interpreterState prev = caller->get_interpreterState();
|
||||
// stack() is prepushed.
|
||||
locals = prev->stack() + method->size_of_parameters();
|
||||
if (caller->is_interpreted_frame()) {
|
||||
// locals must agree with the caller because it will be used to set the
|
||||
// caller's tos when we return.
|
||||
interpreterState prev = caller->get_interpreterState();
|
||||
// stack() is prepushed.
|
||||
locals = prev->stack() + method->size_of_parameters();
|
||||
} else {
|
||||
// Lay out locals block in the caller adjacent to the register window save area.
|
||||
//
|
||||
// Compiled frames do not allocate a varargs area which is why this if
|
||||
// statement is needed.
|
||||
//
|
||||
intptr_t* fp = interpreter_frame->fp();
|
||||
int local_words = method->max_locals() * Interpreter::stackElementWords;
|
||||
|
||||
if (caller->is_compiled_frame()) {
|
||||
locals = fp + frame::register_save_words + local_words - 1;
|
||||
} else {
|
||||
// Lay out locals block in the caller adjacent to the register window save area.
|
||||
//
|
||||
// Compiled frames do not allocate a varargs area which is why this if
|
||||
// statement is needed.
|
||||
//
|
||||
intptr_t* fp = interpreter_frame->fp();
|
||||
int local_words = method->max_locals() * Interpreter::stackElementWords;
|
||||
|
||||
if (caller->is_compiled_frame()) {
|
||||
locals = fp + frame::register_save_words + local_words - 1;
|
||||
} else {
|
||||
locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
|
||||
}
|
||||
|
||||
locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
|
||||
}
|
||||
// END MUCHO HACK
|
||||
|
||||
intptr_t* monitor_base = (intptr_t*) cur_state;
|
||||
intptr_t* stack_base = monitor_base - monitor_size;
|
||||
/* +1 because stack is always prepushed */
|
||||
intptr_t* stack = stack_base - (tempcount + 1);
|
||||
|
||||
|
||||
BytecodeInterpreter::layout_interpreterState(cur_state,
|
||||
caller,
|
||||
interpreter_frame,
|
||||
method,
|
||||
locals,
|
||||
stack,
|
||||
stack_base,
|
||||
monitor_base,
|
||||
frame_bottom,
|
||||
is_top_frame);
|
||||
|
||||
BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
|
||||
|
||||
}
|
||||
return frame_words;
|
||||
// END MUCHO HACK
|
||||
|
||||
intptr_t* monitor_base = (intptr_t*) cur_state;
|
||||
intptr_t* stack_base = monitor_base - monitor_size;
|
||||
/* +1 because stack is always prepushed */
|
||||
intptr_t* stack = stack_base - (tempcount + 1);
|
||||
|
||||
|
||||
BytecodeInterpreter::layout_interpreterState(cur_state,
|
||||
caller,
|
||||
interpreter_frame,
|
||||
method,
|
||||
locals,
|
||||
stack,
|
||||
stack_base,
|
||||
monitor_base,
|
||||
frame_bottom,
|
||||
is_top_frame);
|
||||
|
||||
BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
|
||||
}
|
||||
|
||||
#endif // CC_INTERP
|
||||
|
@ -3531,7 +3531,7 @@ void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
// was post-decremented.) Skip this address by starting at i=1, and
// touch a few more pages below. N.B. It is important to touch all
// the way down to and including i=StackShadowPages.
for (int i = 1; i <= StackShadowPages; i++) {
for (int i = 1; i < StackShadowPages; i++) {
set((-i*offset)+STACK_BIAS, Rscratch);
st(G0, Rtsp, Rscratch);
}
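Editor's note: the loop above "bangs" one word per shadow page below the new stack pointer so that a missing guard page faults eagerly instead of deep inside a leaf frame; the hunk also changes the bound from <= to <, so the new code stops one page short of the comment's "down to and including i=StackShadowPages". A standalone sketch of the idea that only prints the offsets rather than storing below SP; page count, page size, and the bias value are example numbers:

  #include <cstdio>

  const int  kPageSize         = 4096;
  const int  kStackShadowPages = 6;

  void bang_stack(long sp_bias /* e.g. the SPARC stack bias */) {
    for (int i = 1; i < kStackShadowPages; i++) {   // mirrors the new bound in the hunk
      long offset = -(long)i * kPageSize + sp_bias;
      printf("touch [SP%+ld]\n", offset);
    }
  }

  int main() { bang_stack(2047); return 0; }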
|
||||
|
@ -233,6 +233,7 @@ inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, reloc
|
||||
}
|
||||
|
||||
inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
|
||||
insert_nop_after_cbcond();
|
||||
br(c, a, p, target(L));
|
||||
}
|
||||
|
||||
@ -248,6 +249,7 @@ inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relo
|
||||
}
|
||||
|
||||
inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
|
||||
insert_nop_after_cbcond();
|
||||
brx(c, a, p, target(L));
|
||||
}
|
||||
|
||||
@ -269,6 +271,7 @@ inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, reloc
|
||||
}
|
||||
|
||||
inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
|
||||
insert_nop_after_cbcond();
|
||||
fb(c, a, p, target(L));
|
||||
}
|
||||
|
||||
@ -318,6 +321,7 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
|
||||
}
|
||||
|
||||
inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
|
||||
insert_nop_after_cbcond();
|
||||
MacroAssembler::call( target(L), rt);
|
||||
}
|
||||
|
||||
|
@ -3355,13 +3355,16 @@ static void make_new_frames(MacroAssembler* masm, bool deopt) {
|
||||
Register O4array_size = O4;
|
||||
Label loop;
|
||||
|
||||
// Before we make new frames, check to see if stack is available.
|
||||
// Do this after the caller's return address is on top of stack
|
||||
#ifdef ASSERT
|
||||
// Compilers generate code that bang the stack by as much as the
|
||||
// interpreter would need. So this stack banging should never
|
||||
// trigger a fault. Verify that it does not on non product builds.
|
||||
if (UseStackBanging) {
|
||||
// Get total frame size for interpreted frames
|
||||
__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
|
||||
__ bang_stack_size(O4, O3, G3_scratch);
|
||||
}
|
||||
#endif
|
||||
|
||||
__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
|
||||
__ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
|
||||
@ -3409,9 +3412,11 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
ResourceMark rm;
|
||||
// setup code generation tools
|
||||
int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
|
||||
#ifdef ASSERT
|
||||
if (UseStackBanging) {
|
||||
pad += StackShadowPages*16 + 32;
|
||||
}
|
||||
#endif
|
||||
#ifdef _LP64
|
||||
CodeBuffer buffer("deopt_blob", 2100+pad, 512);
|
||||
#else
|
||||
@ -3632,9 +3637,11 @@ void SharedRuntime::generate_uncommon_trap_blob() {
|
||||
ResourceMark rm;
|
||||
// setup code generation tools
|
||||
int pad = VerifyThread ? 512 : 0;
|
||||
#ifdef ASSERT
|
||||
if (UseStackBanging) {
|
||||
pad += StackShadowPages*16 + 32;
|
||||
}
|
||||
#endif
|
||||
#ifdef _LP64
|
||||
CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
|
||||
#else
|
||||
|
@ -1193,15 +1193,16 @@ void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("Verify_Thread"); st->print("\t");
}

size_t framesize = C->frame_slots() << LogBytesPerInt;
size_t framesize = C->frame_size_in_bytes();
int bangsize = C->bang_size_in_bytes();

// Calls to C2R adapters often do not accept exceptional returns.
// We require that their callers must bang for them. But be careful, because
// some VM calls (such as call site linkage) can use several kilobytes of
// stack. But the stack safety zone should account for that.
// See bugs 4446381, 4468289, 4497237.
if (C->need_stack_bang(framesize)) {
st->print_cr("! stack bang"); st->print("\t");
if (C->need_stack_bang(bangsize)) {
st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t");
}
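Editor's note: the prolog stops deriving the byte size from 32-bit frame slots (slots << LogBytesPerInt, i.e. slots * 4) and instead asks the compiler for frame_size_in_bytes() plus a separate bang_size_in_bytes(). A one-line check of the old conversion, with an assumed slot count:

  #include <cstdio>

  int main() {
    const int LogBytesPerInt = 2;   // log2 of a 4-byte int slot
    int frame_slots = 40;           // example slot count
    printf("%d bytes\n", frame_slots << LogBytesPerInt);  // prints 160 bytes
    return 0;
  }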
|
||||
|
||||
if (Assembler::is_simm13(-framesize)) {
|
||||
@ -1225,17 +1226,18 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
|
||||
|
||||
__ verify_thread();
|
||||
|
||||
size_t framesize = C->frame_slots() << LogBytesPerInt;
|
||||
size_t framesize = C->frame_size_in_bytes();
|
||||
assert(framesize >= 16*wordSize, "must have room for reg. save area");
|
||||
assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
|
||||
int bangsize = C->bang_size_in_bytes();
|
||||
|
||||
// Calls to C2R adapters often do not accept exceptional returns.
|
||||
// We require that their callers must bang for them. But be careful, because
|
||||
// some VM calls (such as call site linkage) can use several kilobytes of
|
||||
// stack. But the stack safety zone should account for that.
|
||||
// See bugs 4446381, 4468289, 4497237.
|
||||
if (C->need_stack_bang(framesize)) {
|
||||
__ generate_stack_overflow_check(framesize);
|
||||
if (C->need_stack_bang(bangsize)) {
|
||||
__ generate_stack_overflow_check(bangsize);
|
||||
}
|
||||
|
||||
if (Assembler::is_simm13(-framesize)) {
|
||||
@ -1268,7 +1270,7 @@ int MachPrologNode::reloc() const {
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
Compile* C = ra_->C;

if( do_polling() && ra_->C->is_method_compilation() ) {
if(do_polling() && ra_->C->is_method_compilation()) {
st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");

@ -1277,8 +1279,12 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
#endif
}

if( do_polling() )
if(do_polling()) {
if (UseCBCond && !ra_->C->is_method_compilation()) {
st->print("NOP\n\t");
}
st->print("RET\n\t");
}

st->print("RESTORE");
}
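Editor's note: in non-method code the block preceding the epilogue may end in a cbcond, and RET is itself a control-transfer instruction, so the epilogue now pads a NOP in front of it under UseCBCond. A tiny sketch of the resulting epilogue shape; the flag and strings are illustrative only:

  #include <cstdio>
  #include <string>
  #include <vector>

  static bool pad_for_cbcond = true;   // stands in for UseCBCond && !is_method_compilation()

  std::vector<std::string> emit_epilogue() {
    std::vector<std::string> out;
    if (pad_for_cbcond) out.push_back("nop");   // keep RET out of the cbcond shadow
    out.push_back("ret");
    out.push_back("restore");                    // goes into the delay slot
    return out;
  }

  int main() {
    for (const std::string& s : emit_epilogue()) printf("%s\n", s.c_str());
    return 0;
  }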
|
||||
@ -1291,15 +1297,20 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
|
||||
__ verify_thread();
|
||||
|
||||
// If this does safepoint polling, then do it here
|
||||
if( do_polling() && ra_->C->is_method_compilation() ) {
|
||||
if(do_polling() && ra_->C->is_method_compilation()) {
|
||||
AddressLiteral polling_page(os::get_polling_page());
|
||||
__ sethi(polling_page, L0);
|
||||
__ relocate(relocInfo::poll_return_type);
|
||||
__ ld_ptr( L0, 0, G0 );
|
||||
__ ld_ptr(L0, 0, G0);
|
||||
}
|
||||
|
||||
// If this is a return, then stuff the restore in the delay slot
|
||||
if( do_polling() ) {
|
||||
if(do_polling()) {
|
||||
if (UseCBCond && !ra_->C->is_method_compilation()) {
|
||||
// Insert extra padding for the case when the epilogue is preceded by
|
||||
// a cbcond jump, which can't be followed by a CTI instruction
|
||||
__ nop();
|
||||
}
|
||||
__ ret();
|
||||
__ delayed()->restore();
|
||||
} else {
|
||||
@ -2538,7 +2549,7 @@ encode %{
|
||||
enc_class call_epilog %{
|
||||
if( VerifyStackAtCalls ) {
|
||||
MacroAssembler _masm(&cbuf);
|
||||
int framesize = ra_->C->frame_slots() << LogBytesPerInt;
|
||||
int framesize = ra_->C->frame_size_in_bytes();
|
||||
Register temp_reg = G3;
|
||||
__ add(SP, framesize, temp_reg);
|
||||
__ cmp(temp_reg, FP);
|
||||
@ -3330,7 +3341,18 @@ op_attrib op_cost(1); // Required cost attribute
|
||||
//----------Instruction Attributes---------------------------------------------
|
||||
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
|
||||
ins_attrib ins_size(32); // Required size attribute (in bits)
|
||||
ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
|
||||
|
||||
// avoid_back_to_back attribute is an expression that must return
|
||||
// one of the following values defined in MachNode:
|
||||
// AVOID_NONE - instruction can be placed anywhere
|
||||
// AVOID_BEFORE - instruction cannot be placed after an
|
||||
// instruction with MachNode::AVOID_AFTER
|
||||
// AVOID_AFTER - the next instruction cannot be the one
|
||||
// with MachNode::AVOID_BEFORE
|
||||
// AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at
|
||||
// the same time
|
||||
ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE);
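Editor's note: the attribute now carries one of the MachNode AVOID_* values described in the comment above instead of a plain 0/1 flag, so an instruction can state separately whether it must not follow, or must not be followed by, a sensitive neighbour. A toy model of how a scheduler could consume such values; the enum encoding and the check are illustrative, not the real MachNode definitions:

  #include <cstdio>

  enum AvoidBackToBack {
    AVOID_NONE             = 0,
    AVOID_BEFORE           = 1,                       // may not directly follow an AVOID_AFTER instruction
    AVOID_AFTER            = 2,                       // the next instruction may not be AVOID_BEFORE
    AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER
  };

  static bool needs_padding(int prev_flags, int next_flags) {
    return (prev_flags & AVOID_AFTER) && (next_flags & AVOID_BEFORE);
  }

  int main() {
    // e.g. a cbcond-style short branch is AVOID_BEFORE_AND_AFTER, a long branch is AVOID_BEFORE
    printf("%s\n", needs_padding(AVOID_BEFORE_AND_AFTER, AVOID_BEFORE) ? "insert nop" : "ok");
    return 0;
  }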
|
||||
|
||||
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
|
||||
// non-matching short branch variant of some
|
||||
// long branch?
|
||||
@ -6630,6 +6652,7 @@ instruct encodeHeapOop(iRegN dst, iRegP src) %{
|
||||
ins_encode %{
|
||||
__ encode_heap_oop($src$$Register, $dst$$Register);
|
||||
%}
|
||||
ins_avoid_back_to_back(Universe::narrow_oop_base() == NULL ? AVOID_NONE : AVOID_BEFORE);
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
@ -9199,6 +9222,7 @@ instruct branch(label labl) %{
|
||||
__ ba(*L);
|
||||
__ delayed()->nop();
|
||||
%}
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br);
|
||||
%}
|
||||
|
||||
@ -9217,7 +9241,7 @@ instruct branch_short(label labl) %{
|
||||
__ ba_short(*L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_imm);
|
||||
%}
|
||||
|
||||
@ -9231,6 +9255,7 @@ instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
|
||||
format %{ "BP$cmp $icc,$labl" %}
|
||||
// Prim = bits 24-22, Secnd = bits 31-30
|
||||
ins_encode( enc_bp( labl, cmp, icc ) );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br_cc);
|
||||
%}
|
||||
|
||||
@ -9242,6 +9267,7 @@ instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
|
||||
format %{ "BP$cmp $icc,$labl" %}
|
||||
// Prim = bits 24-22, Secnd = bits 31-30
|
||||
ins_encode( enc_bp( labl, cmp, icc ) );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br_cc);
|
||||
%}
|
||||
|
||||
@ -9260,6 +9286,7 @@ instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
|
||||
__ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
|
||||
__ delayed()->nop();
|
||||
%}
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br_cc);
|
||||
%}
|
||||
|
||||
@ -9278,6 +9305,7 @@ instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
|
||||
__ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
|
||||
__ delayed()->nop();
|
||||
%}
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br_fcc);
|
||||
%}
|
||||
|
||||
@ -9290,6 +9318,7 @@ instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
|
||||
format %{ "BP$cmp $icc,$labl\t! Loop end" %}
|
||||
// Prim = bits 24-22, Secnd = bits 31-30
|
||||
ins_encode( enc_bp( labl, cmp, icc ) );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br_cc);
|
||||
%}
|
||||
|
||||
@ -9302,6 +9331,7 @@ instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
|
||||
format %{ "BP$cmp $icc,$labl\t! Loop end" %}
|
||||
// Prim = bits 24-22, Secnd = bits 31-30
|
||||
ins_encode( enc_bp( labl, cmp, icc ) );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br_cc);
|
||||
%}
|
||||
|
||||
@ -9552,7 +9582,7 @@ instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flag
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_reg);
|
||||
%}
|
||||
|
||||
@ -9570,7 +9600,7 @@ instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flag
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_imm);
|
||||
%}
|
||||
|
||||
@ -9588,7 +9618,7 @@ instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, fla
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_reg);
|
||||
%}
|
||||
|
||||
@ -9606,7 +9636,7 @@ instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, fla
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_imm);
|
||||
%}
|
||||
|
||||
@ -9624,7 +9654,7 @@ instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flag
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_reg);
|
||||
%}
|
||||
|
||||
@ -9642,7 +9672,7 @@ instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flag
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_imm);
|
||||
%}
|
||||
|
||||
@ -9665,7 +9695,7 @@ instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, fla
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_reg);
|
||||
%}
|
||||
|
||||
@ -9687,7 +9717,7 @@ instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, f
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_reg);
|
||||
%}
|
||||
|
||||
@ -9705,7 +9735,7 @@ instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flag
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_reg);
|
||||
%}
|
||||
|
||||
@ -9723,7 +9753,7 @@ instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, fl
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_reg);
|
||||
%}
|
||||
|
||||
@ -9742,7 +9772,7 @@ instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label lab
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_reg);
|
||||
%}
|
||||
|
||||
@ -9760,7 +9790,7 @@ instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label lab
|
||||
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
|
||||
%}
|
||||
ins_short_branch(1);
|
||||
ins_avoid_back_to_back(1);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
|
||||
ins_pipe(cbcond_reg_imm);
|
||||
%}
|
||||
|
||||
@ -9777,6 +9807,7 @@ instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
|
||||
ins_cost(BRANCH_COST);
|
||||
format %{ "BR$cmp $op1,$labl" %}
|
||||
ins_encode( enc_bpr( labl, cmp, op1 ) );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br_reg);
|
||||
%}
|
||||
|
||||
@ -9789,6 +9820,7 @@ instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
|
||||
ins_cost(BRANCH_COST);
|
||||
format %{ "BR$cmp $op1,$labl" %}
|
||||
ins_encode( enc_bpr( labl, cmp, op1 ) );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br_reg);
|
||||
%}
|
||||
|
||||
@ -9801,6 +9833,7 @@ instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
|
||||
ins_cost(BRANCH_COST);
|
||||
format %{ "BR$cmp $op1,$labl" %}
|
||||
ins_encode( enc_bpr( labl, cmp, op1 ) );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br_reg);
|
||||
%}
|
||||
|
||||
@ -9841,6 +9874,7 @@ instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
|
||||
__ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
|
||||
__ delayed()->nop();
|
||||
%}
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(br_cc);
|
||||
%}
|
||||
|
||||
@ -9968,6 +10002,7 @@ instruct CallStaticJavaDirect( method meth ) %{
|
||||
ins_cost(CALL_COST);
|
||||
format %{ "CALL,static ; NOP ==> " %}
|
||||
ins_encode( Java_Static_Call( meth ), call_epilog );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(simple_call);
|
||||
%}
|
||||
|
||||
@ -10004,6 +10039,7 @@ instruct CallRuntimeDirect(method meth, l7RegP l7) %{
|
||||
format %{ "CALL,runtime" %}
|
||||
ins_encode( Java_To_Runtime( meth ),
|
||||
call_epilog, adjust_long_from_native_call );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(simple_call);
|
||||
%}
|
||||
|
||||
@ -10016,6 +10052,7 @@ instruct CallLeafDirect(method meth, l7RegP l7) %{
|
||||
ins_encode( Java_To_Runtime( meth ),
|
||||
call_epilog,
|
||||
adjust_long_from_native_call );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(simple_call);
|
||||
%}
|
||||
|
||||
@ -10028,6 +10065,7 @@ instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
|
||||
ins_encode( Java_To_Runtime( meth ),
|
||||
call_epilog,
|
||||
adjust_long_from_native_call );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(simple_call);
|
||||
%}
|
||||
|
||||
@ -10041,6 +10079,7 @@ instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
|
||||
ins_cost(CALL_COST);
|
||||
format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
|
||||
ins_encode(form_jmpl(jump_target));
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(tail_call);
|
||||
%}
|
||||
|
||||
@ -10072,6 +10111,7 @@ instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
|
||||
// opcode(Assembler::jmpl_op3, Assembler::arith_op);
|
||||
// The hack duplicates the exception oop into G3, so that CreateEx can use it there.
|
||||
// ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(tail_call);
|
||||
%}
|
||||
|
||||
@ -10102,6 +10142,7 @@ instruct RethrowException()
|
||||
// use the following format syntax
|
||||
format %{ "Jmp rethrow_stub" %}
|
||||
ins_encode(enc_rethrow);
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(tail_call);
|
||||
%}
|
||||
|
||||
@ -10130,6 +10171,7 @@ instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP
|
||||
ins_cost(DEFAULT_COST*10);
|
||||
format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
|
||||
ins_encode( enc_PartialSubtypeCheck() );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(partial_subtype_check_pipe);
|
||||
%}
|
||||
|
||||
@ -10139,6 +10181,7 @@ instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, i
|
||||
ins_cost(DEFAULT_COST*10);
|
||||
format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
|
||||
ins_encode( enc_PartialSubtypeCheck() );
|
||||
ins_avoid_back_to_back(AVOID_BEFORE);
|
||||
ins_pipe(partial_subtype_check_pipe);
|
||||
%}
|
||||
|
||||
|
@ -1564,37 +1564,23 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
|
||||
int monitor_size = method->is_synchronized() ?
|
||||
1*frame::interpreter_frame_monitor_size() : 0;
|
||||
return size_activation_helper(method->max_locals(), method->max_stack(),
|
||||
monitor_size) + call_stub_size;
|
||||
monitor_size) + call_stub_size;
|
||||
}
|
||||
|
||||
int AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount,
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_local_count,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
int AbstractInterpreter::size_activation(int max_stack,
|
||||
int temps,
|
||||
int extra_args,
|
||||
int monitors,
|
||||
int callee_params,
|
||||
int callee_locals,
|
||||
bool is_top_frame) {
|
||||
// Note: This calculation must exactly parallel the frame setup
|
||||
// in InterpreterGenerator::generate_fixed_frame.
|
||||
// If f!=NULL, set up the following variables:
|
||||
// - Lmethod
|
||||
// - Llocals
|
||||
// - Lmonitors (to the indicated number of monitors)
|
||||
// - Lesp (to the indicated number of temps)
|
||||
// The frame f (if not NULL) on entry is a description of the caller of the frame
|
||||
// we are about to layout. We are guaranteed that we will be able to fill in a
|
||||
// new interpreter frame as its callee (i.e. the stack space is allocated and
|
||||
// the amount was determined by an earlier call to this method with f == NULL).
|
||||
// On return f (if not NULL) while describe the interpreter frame we just layed out.
|
||||
|
||||
int monitor_size = moncount * frame::interpreter_frame_monitor_size();
|
||||
int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
|
||||
int monitor_size = monitors * frame::interpreter_frame_monitor_size();
|
||||
|
||||
assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
|
||||
|
||||
//
|
||||
// Note: if you look closely this appears to be doing something much different
|
||||
// than generate_fixed_frame. What is happening is this. On sparc we have to do
|
||||
@ -1619,146 +1605,171 @@ int AbstractInterpreter::layout_activation(Method* method,
|
||||
// there is no sense in messing working code.
|
||||
//
|
||||
|
||||
int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
|
||||
int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
|
||||
assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
|
||||
|
||||
int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
|
||||
monitor_size);
|
||||
int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);
|
||||
|
||||
if (interpreter_frame != NULL) {
|
||||
// The skeleton frame must already look like an interpreter frame
|
||||
// even if not fully filled out.
|
||||
assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
|
||||
return raw_frame_size;
|
||||
}
|
||||
|
||||
intptr_t* fp = interpreter_frame->fp();
|
||||
void AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount,
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_local_count,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
// Set up the following variables:
|
||||
// - Lmethod
|
||||
// - Llocals
|
||||
// - Lmonitors (to the indicated number of monitors)
|
||||
// - Lesp (to the indicated number of temps)
|
||||
// The frame caller on entry is a description of the caller of the
|
||||
// frame we are about to layout. We are guaranteed that we will be
|
||||
// able to fill in a new interpreter frame as its callee (i.e. the
|
||||
// stack space is allocated and the amount was determined by an
|
||||
// earlier call to the size_activation() method). On return caller
|
||||
// will describe the interpreter frame we just laid out.
|
||||
|
||||
JavaThread* thread = JavaThread::current();
|
||||
RegisterMap map(thread, false);
|
||||
// More verification that skeleton frame is properly walkable
|
||||
assert(fp == caller->sp(), "fp must match");
|
||||
// The skeleton frame must already look like an interpreter frame
|
||||
// even if not fully filled out.
|
||||
assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
|
||||
|
||||
intptr_t* montop = fp - rounded_vm_local_words;
|
||||
int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
|
||||
int monitor_size = moncount * frame::interpreter_frame_monitor_size();
|
||||
assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
|
||||
|
||||
// preallocate monitors (cf. __ add_monitor_to_stack)
|
||||
intptr_t* monitors = montop - monitor_size;
|
||||
intptr_t* fp = interpreter_frame->fp();
|
||||
|
||||
// preallocate stack space
|
||||
intptr_t* esp = monitors - 1 -
|
||||
(tempcount * Interpreter::stackElementWords) -
|
||||
popframe_extra_args;
|
||||
JavaThread* thread = JavaThread::current();
|
||||
RegisterMap map(thread, false);
|
||||
// More verification that skeleton frame is properly walkable
|
||||
assert(fp == caller->sp(), "fp must match");
|
||||
|
||||
int local_words = method->max_locals() * Interpreter::stackElementWords;
|
||||
NEEDS_CLEANUP;
|
||||
intptr_t* locals;
|
||||
if (caller->is_interpreted_frame()) {
|
||||
// Can force the locals area to end up properly overlapping the top of the expression stack.
|
||||
intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
|
||||
// Note that this computation means we replace size_of_parameters() values from the caller
|
||||
// interpreter frame's expression stack with our argument locals
|
||||
int parm_words = caller_actual_parameters * Interpreter::stackElementWords;
|
||||
locals = Lesp_ptr + parm_words;
|
||||
int delta = local_words - parm_words;
|
||||
int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
|
||||
*interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
|
||||
if (!is_bottom_frame) {
|
||||
// Llast_SP is set below for the current frame to SP (with the
|
||||
// extra space for the callee's locals). Here we adjust
|
||||
// Llast_SP for the caller's frame, removing the extra space
|
||||
// for the current method's locals.
|
||||
*caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
|
||||
} else {
|
||||
assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
|
||||
}
|
||||
intptr_t* montop = fp - rounded_vm_local_words;
|
||||
|
||||
// preallocate monitors (cf. __ add_monitor_to_stack)
|
||||
intptr_t* monitors = montop - monitor_size;
|
||||
|
||||
// preallocate stack space
|
||||
intptr_t* esp = monitors - 1 -
|
||||
(tempcount * Interpreter::stackElementWords) -
|
||||
popframe_extra_args;
|
||||
|
||||
int local_words = method->max_locals() * Interpreter::stackElementWords;
|
||||
NEEDS_CLEANUP;
|
||||
intptr_t* locals;
|
||||
if (caller->is_interpreted_frame()) {
|
||||
// Can force the locals area to end up properly overlapping the top of the expression stack.
|
||||
intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
|
||||
// Note that this computation means we replace size_of_parameters() values from the caller
|
||||
// interpreter frame's expression stack with our argument locals
|
||||
int parm_words = caller_actual_parameters * Interpreter::stackElementWords;
|
||||
locals = Lesp_ptr + parm_words;
|
||||
int delta = local_words - parm_words;
|
||||
int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
|
||||
*interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
|
||||
if (!is_bottom_frame) {
|
||||
// Llast_SP is set below for the current frame to SP (with the
|
||||
// extra space for the callee's locals). Here we adjust
|
||||
// Llast_SP for the caller's frame, removing the extra space
|
||||
// for the current method's locals.
|
||||
*caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
|
||||
} else {
|
||||
assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
|
||||
// Don't have Lesp available; lay out locals block in the caller
|
||||
// adjacent to the register window save area.
|
||||
//
|
||||
// Compiled frames do not allocate a varargs area which is why this if
|
||||
// statement is needed.
|
||||
//
|
||||
if (caller->is_compiled_frame()) {
|
||||
locals = fp + frame::register_save_words + local_words - 1;
|
||||
} else {
|
||||
locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
|
||||
}
|
||||
if (!caller->is_entry_frame()) {
|
||||
// Caller wants his own SP back
|
||||
int caller_frame_size = caller->cb()->frame_size();
|
||||
*interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
|
||||
assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
|
||||
}
|
||||
} else {
|
||||
assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
|
||||
// Don't have Lesp available; lay out locals block in the caller
|
||||
// adjacent to the register window save area.
|
||||
//
|
||||
// Compiled frames do not allocate a varargs area which is why this if
|
||||
// statement is needed.
|
||||
//
|
||||
if (caller->is_compiled_frame()) {
|
||||
locals = fp + frame::register_save_words + local_words - 1;
|
||||
} else {
|
||||
locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
|
||||
}
|
||||
if (!caller->is_entry_frame()) {
|
||||
// Caller wants his own SP back
|
||||
int caller_frame_size = caller->cb()->frame_size();
|
||||
*interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
|
||||
}
|
||||
}
|
||||
if (TraceDeoptimization) {
|
||||
if (caller->is_entry_frame()) {
|
||||
// make sure I5_savedSP and the entry frames notion of saved SP
|
||||
// agree. This assertion duplicate a check in entry frame code
|
||||
// but catches the failure earlier.
|
||||
assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
|
||||
"would change callers SP");
|
||||
}
|
||||
if (caller->is_entry_frame()) {
|
||||
tty->print("entry ");
|
||||
}
|
||||
if (caller->is_compiled_frame()) {
|
||||
tty->print("compiled ");
|
||||
if (caller->is_deoptimized_frame()) {
|
||||
tty->print("(deopt) ");
|
||||
}
|
||||
}
|
||||
if (caller->is_interpreted_frame()) {
tty->print("interpreted ");
}
tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
tty->print_cr("Llocals = 0x%x", locals);
tty->print_cr("Lesp = 0x%x", esp);
tty->print_cr("Lmonitors = 0x%x", monitors);
}

if (method->max_locals() > 0) {
assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
}
#ifdef _LP64
assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
#endif

*interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
*interpreter_frame->register_addr(Llocals) = (intptr_t) locals;
*interpreter_frame->register_addr(Lmonitors) = (intptr_t) monitors;
*interpreter_frame->register_addr(Lesp) = (intptr_t) esp;
// Llast_SP will be same as SP as there is no adapter space
*interpreter_frame->register_addr(Llast_SP) = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
*interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
#ifdef FAST_DISPATCH
*interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
#endif

#ifdef ASSERT
BasicObjectLock* mp = (BasicObjectLock*)monitors;

assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");

// check bounds
intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
assert(lo < monitors && montop <= hi, "monitors in bounds");
assert(lo <= esp && esp < monitors, "esp in bounds");
#endif // ASSERT
}

return raw_frame_size;
}

//----------------------------------------------------------------------------------------------------
@ -288,7 +288,7 @@ void LIR_Assembler::osr_entry() {

// build frame
ciMethod* m = compilation()->method();
__ build_frame(initial_frame_size_in_bytes());
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

// OSR buffer is
//
@ -376,7 +376,7 @@ void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() {
int LIR_Assembler::initial_frame_size_in_bytes() const {
// if rounding, must let FrameMap know!

// The frame_map records size in slots (32bit word)
@ -349,13 +349,14 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
}


void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
// Note that we do this before doing an enter(). This matches the
// ordering of C2's stack overflow check / rsp decrement and allows
// the SharedRuntime stack overflow handling to be consistent
// between the two compilers.
generate_stack_overflow_check(frame_size_in_bytes);
generate_stack_overflow_check(bang_size_in_bytes);

push(rbp);
#ifdef TIERED
@ -2342,29 +2342,42 @@ void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
|
||||
"Stack top out of range");
|
||||
}
|
||||
|
||||
int AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount, //
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
|
||||
assert(popframe_extra_args == 0, "FIX ME");
|
||||
// NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
|
||||
// does as far as allocating an interpreter frame.
|
||||
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
|
||||
// The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
|
||||
// as determined by a previous call to this method.
|
||||
// It is also guaranteed to be walkable even though it is in a skeletal state
|
||||
static int frame_size_helper(int max_stack,
|
||||
int tempcount,
|
||||
int moncount,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
bool is_top_frame,
|
||||
int& monitor_size,
|
||||
int& full_frame_size) {
|
||||
int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
|
||||
monitor_size = sizeof(BasicObjectLock) * moncount;
|
||||
|
||||
// First calculate the frame size without any java expression stack
|
||||
int short_frame_size = size_activation_helper(extra_locals_size,
|
||||
monitor_size);
|
||||
|
||||
// Now with full size expression stack
|
||||
full_frame_size = short_frame_size + max_stack * BytesPerWord;
|
||||
|
||||
// and now with only live portion of the expression stack
|
||||
short_frame_size = short_frame_size + tempcount * BytesPerWord;
|
||||
|
||||
// the size the activation is right now. Only top frame is full size
|
||||
int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
|
||||
return frame_size;
|
||||
}
|
||||
|
||||
int AbstractInterpreter::size_activation(int max_stack,
|
||||
int tempcount,
|
||||
int extra_args,
|
||||
int moncount,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
bool is_top_frame) {
|
||||
assert(extra_args == 0, "FIX ME");
|
||||
// NOTE: return size is in words not bytes
|
||||
// NOTE: tempcount is the current size of the java expression stack. For top most
|
||||
// frames we will allocate a full sized expression stack and not the curback
|
||||
// version that non-top frames have.
|
||||
|
||||
// Calculate the amount our frame will be adjust by the callee. For top frame
|
||||
// this is zero.
|
||||
@ -2374,87 +2387,102 @@ int AbstractInterpreter::layout_activation(Method* method,
|
||||
// to it. So it ignores last_frame_adjust value. Seems suspicious as far
|
||||
// as getting sender_sp correct.
|
||||
|
||||
int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
|
||||
int monitor_size = sizeof(BasicObjectLock) * moncount;
|
||||
int unused_monitor_size = 0;
|
||||
int unused_full_frame_size = 0;
|
||||
return frame_size_helper(max_stack, tempcount, moncount, callee_param_count, callee_locals,
|
||||
is_top_frame, unused_monitor_size, unused_full_frame_size)/BytesPerWord;
|
||||
}
|
||||
|
||||
// First calculate the frame size without any java expression stack
|
||||
int short_frame_size = size_activation_helper(extra_locals_size,
|
||||
monitor_size);
|
||||
void AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount, //
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
|
||||
// Now with full size expression stack
|
||||
int full_frame_size = short_frame_size + method->max_stack() * BytesPerWord;
|
||||
assert(popframe_extra_args == 0, "FIX ME");
|
||||
// NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
|
||||
// does as far as allocating an interpreter frame.
|
||||
// Set up the method, locals, and monitors.
|
||||
// The frame interpreter_frame is guaranteed to be the right size,
|
||||
// as determined by a previous call to the size_activation() method.
|
||||
// It is also guaranteed to be walkable even though it is in a skeletal state
|
||||
// NOTE: tempcount is the current size of the java expression stack. For top most
|
||||
// frames we will allocate a full sized expression stack and not the curback
|
||||
// version that non-top frames have.
|
||||
|
||||
// and now with only live portion of the expression stack
|
||||
short_frame_size = short_frame_size + tempcount * BytesPerWord;
|
||||
int monitor_size = 0;
|
||||
int full_frame_size = 0;
|
||||
int frame_size = frame_size_helper(method->max_stack(), tempcount, moncount, callee_param_count, callee_locals,
|
||||
is_top_frame, monitor_size, full_frame_size);
|
||||
|
||||
// the size the activation is right now. Only top frame is full size
|
||||
int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
|
||||
|
||||
if (interpreter_frame != NULL) {
|
||||
#ifdef ASSERT
|
||||
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
|
||||
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
|
||||
#endif
|
||||
|
||||
// MUCHO HACK
|
||||
// MUCHO HACK
|
||||
|
||||
intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
|
||||
intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
|
||||
|
||||
/* Now fillin the interpreterState object */
|
||||
/* Now fillin the interpreterState object */
|
||||
|
||||
// The state object is the first thing on the frame and easily located
|
||||
// The state object is the first thing on the frame and easily located
|
||||
|
||||
interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
|
||||
interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
|
||||
|
||||
|
||||
// Find the locals pointer. This is rather simple on x86 because there is no
|
||||
// confusing rounding at the callee to account for. We can trivially locate
|
||||
// our locals based on the current fp().
|
||||
// Note: the + 2 is for handling the "static long no_params() method" issue.
|
||||
// (too bad I don't really remember that issue well...)
|
||||
// Find the locals pointer. This is rather simple on x86 because there is no
|
||||
// confusing rounding at the callee to account for. We can trivially locate
|
||||
// our locals based on the current fp().
|
||||
// Note: the + 2 is for handling the "static long no_params() method" issue.
|
||||
// (too bad I don't really remember that issue well...)
|
||||
|
||||
intptr_t* locals;
|
||||
// If the caller is interpreted we need to make sure that locals points to the first
|
||||
// argument that the caller passed and not in an area where the stack might have been extended.
|
||||
// because the stack to stack to converter needs a proper locals value in order to remove the
|
||||
// arguments from the caller and place the result in the proper location. Hmm maybe it'd be
|
||||
// simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code
|
||||
// adjust the stack?? HMMM QQQ
|
||||
//
|
||||
if (caller->is_interpreted_frame()) {
|
||||
// locals must agree with the caller because it will be used to set the
|
||||
// caller's tos when we return.
|
||||
interpreterState prev = caller->get_interpreterState();
|
||||
// stack() is prepushed.
|
||||
locals = prev->stack() + method->size_of_parameters();
|
||||
// locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
|
||||
if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
|
||||
// os::breakpoint();
|
||||
}
|
||||
} else {
|
||||
// this is where a c2i would have placed locals (except for the +2)
|
||||
locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
|
||||
intptr_t* locals;
|
||||
// If the caller is interpreted we need to make sure that locals points to the first
|
||||
// argument that the caller passed and not in an area where the stack might have been extended.
|
||||
// because the stack to stack to converter needs a proper locals value in order to remove the
|
||||
// arguments from the caller and place the result in the proper location. Hmm maybe it'd be
|
||||
// simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code
|
||||
// adjust the stack?? HMMM QQQ
|
||||
//
|
||||
if (caller->is_interpreted_frame()) {
|
||||
// locals must agree with the caller because it will be used to set the
|
||||
// caller's tos when we return.
|
||||
interpreterState prev = caller->get_interpreterState();
|
||||
// stack() is prepushed.
|
||||
locals = prev->stack() + method->size_of_parameters();
|
||||
// locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
|
||||
if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
|
||||
// os::breakpoint();
|
||||
}
|
||||
|
||||
intptr_t* monitor_base = (intptr_t*) cur_state;
|
||||
intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
|
||||
/* +1 because stack is always prepushed */
|
||||
intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
|
||||
|
||||
|
||||
BytecodeInterpreter::layout_interpreterState(cur_state,
|
||||
caller,
|
||||
interpreter_frame,
|
||||
method,
|
||||
locals,
|
||||
stack,
|
||||
stack_base,
|
||||
monitor_base,
|
||||
frame_bottom,
|
||||
is_top_frame);
|
||||
|
||||
// BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
|
||||
} else {
|
||||
// this is where a c2i would have placed locals (except for the +2)
|
||||
locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
|
||||
}
|
||||
return frame_size/BytesPerWord;
|
||||
|
||||
intptr_t* monitor_base = (intptr_t*) cur_state;
|
||||
intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
|
||||
/* +1 because stack is always prepushed */
|
||||
intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
|
||||
|
||||
|
||||
BytecodeInterpreter::layout_interpreterState(cur_state,
|
||||
caller,
|
||||
interpreter_frame,
|
||||
method,
|
||||
locals,
|
||||
stack,
|
||||
stack_base,
|
||||
monitor_base,
|
||||
frame_bottom,
|
||||
is_top_frame);
|
||||
|
||||
// BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
|
||||
}
|
||||
|
||||
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
|
||||
|
@ -1051,7 +1051,7 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// was post-decremented.) Skip this address by starting at i=1, and
// touch a few more pages below. N.B. It is important to touch all
// the way down to and including i=StackShadowPages.
for (int i = 1; i <= StackShadowPages; i++) {
for (int i = 1; i < StackShadowPages; i++) {
// this could be any sized move but this is can be a debugging crumb
// so the bigger the better.
movptr(Address(tmp, (-i*os::vm_page_size())), size );
@ -6093,7 +6093,7 @@ void MacroAssembler::reinit_heapbase() {


// C2 compiled method's prolog code.
void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b) {

// WARNING: Initial instruction MUST be 5 bytes or longer so that
// NativeJump::patch_verified_entry will be able to patch out the entry
@ -6101,18 +6101,20 @@ void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode
// the frame allocation can be either 3 or 6 bytes. So if we don't do
// stack bang then we must use the 6 byte frame allocation even if
// we have no frame. :-(
assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");

assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove word for return addr
framesize -= wordSize;
stack_bang_size -= wordSize;

// Calls to C2R adapters often do not accept exceptional returns.
|
||||
// We require that their callers must bang for them. But be careful, because
|
||||
// some VM calls (such as call site linkage) can use several kilobytes of
|
||||
// stack. But the stack safety zone should account for that.
|
||||
// See bugs 4446381, 4468289, 4497237.
|
||||
if (stack_bang) {
|
||||
generate_stack_overflow_check(framesize);
|
||||
if (stack_bang_size > 0) {
|
||||
generate_stack_overflow_check(stack_bang_size);
|
||||
|
||||
// We always push rbp, so that on return to interpreter rbp, will be
|
||||
// restored correctly and we can correct the stack.
|
||||
|
@ -1170,7 +1170,7 @@ public:
|
||||
void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
|
||||
|
||||
// C2 compiled method's prolog code.
|
||||
void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
|
||||
void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);
|
||||
|
||||
// clear memory of size 'cnt' qwords, starting at 'base'.
|
||||
void clear_mem(Register base, Register cnt, Register rtmp);
|
||||
|
@ -3014,11 +3014,15 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
|
||||
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
|
||||
|
||||
// Stack bang to make sure there's enough room for these interpreter frames.
|
||||
#ifdef ASSERT
|
||||
// Compilers generate code that bang the stack by as much as the
|
||||
// interpreter would need. So this stack banging should never
|
||||
// trigger a fault. Verify that it does not on non product builds.
|
||||
if (UseStackBanging) {
|
||||
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
|
||||
__ bang_stack_size(rbx, rcx);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Load array of frame pcs into ECX
|
||||
__ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
|
||||
@ -3240,12 +3244,15 @@ void SharedRuntime::generate_uncommon_trap_blob() {
|
||||
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
|
||||
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
|
||||
|
||||
// Stack bang to make sure there's enough room for these interpreter frames.
|
||||
#ifdef ASSERT
|
||||
// Compilers generate code that bang the stack by as much as the
|
||||
// interpreter would need. So this stack banging should never
|
||||
// trigger a fault. Verify that it does not on non product builds.
|
||||
if (UseStackBanging) {
|
||||
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
|
||||
__ bang_stack_size(rbx, rcx);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
// Load array of frame pcs into ECX
|
||||
__ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
|
||||
|
@ -3484,11 +3484,15 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
|
||||
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
|
||||
|
||||
// Stack bang to make sure there's enough room for these interpreter frames.
|
||||
#ifdef ASSERT
|
||||
// Compilers generate code that bang the stack by as much as the
|
||||
// interpreter would need. So this stack banging should never
|
||||
// trigger a fault. Verify that it does not on non product builds.
|
||||
if (UseStackBanging) {
|
||||
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
|
||||
__ bang_stack_size(rbx, rcx);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Load address of array of frame pcs into rcx
|
||||
__ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
|
||||
@ -3682,11 +3686,15 @@ void SharedRuntime::generate_uncommon_trap_blob() {
|
||||
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
|
||||
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
|
||||
|
||||
// Stack bang to make sure there's enough room for these interpreter frames.
|
||||
#ifdef ASSERT
|
||||
// Compilers generate code that bang the stack by as much as the
|
||||
// interpreter would need. So this stack banging should never
|
||||
// trigger a fault. Verify that it does not on non product builds.
|
||||
if (UseStackBanging) {
|
||||
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
|
||||
__ bang_stack_size(rbx, rcx);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Load address of array of frame pcs into rcx (address*)
|
||||
__ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
|
||||
|
hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp (new file, 124 lines)
@ -0,0 +1,124 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "ci/ciMethod.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
|
||||
#ifndef CC_INTERP
|
||||
|
||||
// asm based interpreter deoptimization helpers
|
||||
int AbstractInterpreter::size_activation(int max_stack,
|
||||
int temps,
|
||||
int extra_args,
|
||||
int monitors,
|
||||
int callee_params,
|
||||
int callee_locals,
|
||||
bool is_top_frame) {
|
||||
// Note: This calculation must exactly parallel the frame setup
|
||||
// in AbstractInterpreterGenerator::generate_method_entry.
|
||||
|
||||
// fixed size of an interpreter frame:
|
||||
int overhead = frame::sender_sp_offset -
|
||||
frame::interpreter_frame_initial_sp_offset;
|
||||
// Our locals were accounted for by the caller (or last_frame_adjust
|
||||
// on the transistion) Since the callee parameters already account
|
||||
// for the callee's params we only need to account for the extra
|
||||
// locals.
|
||||
int size = overhead +
|
||||
(callee_locals - callee_params)*Interpreter::stackElementWords +
|
||||
monitors * frame::interpreter_frame_monitor_size() +
|
||||
temps* Interpreter::stackElementWords + extra_args;
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
void AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount,
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
// The frame interpreter_frame is guaranteed to be the right size,
|
||||
// as determined by a previous call to the size_activation() method.
|
||||
// It is also guaranteed to be walkable even though it is in a
|
||||
// skeletal state
|
||||
|
||||
int max_locals = method->max_locals() * Interpreter::stackElementWords;
|
||||
int extra_locals = (method->max_locals() - method->size_of_parameters()) *
|
||||
Interpreter::stackElementWords;
|
||||
|
||||
#ifdef ASSERT
|
||||
if (!EnableInvokeDynamic) {
|
||||
// @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
|
||||
// Probably, since deoptimization doesn't work yet.
|
||||
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
|
||||
}
|
||||
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
|
||||
#endif
|
||||
|
||||
interpreter_frame->interpreter_frame_set_method(method);
|
||||
// NOTE the difference in using sender_sp and
|
||||
// interpreter_frame_sender_sp interpreter_frame_sender_sp is
|
||||
// the original sp of the caller (the unextended_sp) and
|
||||
// sender_sp is fp+8/16 (32bit/64bit) XXX
|
||||
intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
|
||||
|
||||
#ifdef ASSERT
|
||||
if (caller->is_interpreted_frame()) {
|
||||
assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
|
||||
}
|
||||
#endif
|
||||
|
||||
interpreter_frame->interpreter_frame_set_locals(locals);
|
||||
BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
|
||||
BasicObjectLock* monbot = montop - moncount;
|
||||
interpreter_frame->interpreter_frame_set_monitor_end(monbot);
|
||||
|
||||
// Set last_sp
|
||||
intptr_t* esp = (intptr_t*) monbot -
|
||||
tempcount*Interpreter::stackElementWords -
|
||||
popframe_extra_args;
|
||||
interpreter_frame->interpreter_frame_set_last_sp(esp);
|
||||
|
||||
// All frames but the initial (oldest) interpreter frame we fill in have
|
||||
// a value for sender_sp that allows walking the stack but isn't
|
||||
// truly correct. Correct the value here.
|
||||
if (extra_locals != 0 &&
|
||||
interpreter_frame->sender_sp() ==
|
||||
interpreter_frame->interpreter_frame_sender_sp()) {
|
||||
interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
|
||||
extra_locals);
|
||||
}
|
||||
*interpreter_frame->interpreter_frame_cache_addr() =
|
||||
method->constants()->cache();
|
||||
}
|
||||
|
||||
#endif // CC_INTERP
|
@ -1686,91 +1686,6 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
|
||||
return overhead_size + method_stack + stub_code;
|
||||
}
|
||||
|
||||
// asm based interpreter deoptimization helpers
|
||||
|
||||
int AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount,
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
// Note: This calculation must exactly parallel the frame setup
|
||||
// in AbstractInterpreterGenerator::generate_method_entry.
|
||||
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
|
||||
// The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
|
||||
// as determined by a previous call to this method.
|
||||
// It is also guaranteed to be walkable even though it is in a skeletal state
|
||||
// NOTE: return size is in words not bytes
|
||||
|
||||
// fixed size of an interpreter frame:
|
||||
int max_locals = method->max_locals() * Interpreter::stackElementWords;
|
||||
int extra_locals = (method->max_locals() - method->size_of_parameters()) *
|
||||
Interpreter::stackElementWords;
|
||||
|
||||
int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
|
||||
|
||||
// Our locals were accounted for by the caller (or last_frame_adjust on the transistion)
|
||||
// Since the callee parameters already account for the callee's params we only need to account for
|
||||
// the extra locals.
|
||||
|
||||
|
||||
int size = overhead +
|
||||
((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
|
||||
(moncount*frame::interpreter_frame_monitor_size()) +
|
||||
tempcount*Interpreter::stackElementWords + popframe_extra_args;
|
||||
|
||||
if (interpreter_frame != NULL) {
|
||||
#ifdef ASSERT
|
||||
if (!EnableInvokeDynamic)
|
||||
// @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
|
||||
// Probably, since deoptimization doesn't work yet.
|
||||
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
|
||||
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
|
||||
#endif
|
||||
|
||||
interpreter_frame->interpreter_frame_set_method(method);
|
||||
// NOTE the difference in using sender_sp and interpreter_frame_sender_sp
|
||||
// interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
|
||||
// and sender_sp is fp+8
|
||||
intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
|
||||
|
||||
#ifdef ASSERT
|
||||
if (caller->is_interpreted_frame()) {
|
||||
assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
|
||||
}
|
||||
#endif
|
||||
|
||||
interpreter_frame->interpreter_frame_set_locals(locals);
|
||||
BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
|
||||
BasicObjectLock* monbot = montop - moncount;
|
||||
interpreter_frame->interpreter_frame_set_monitor_end(monbot);
|
||||
|
||||
// Set last_sp
|
||||
intptr_t* rsp = (intptr_t*) monbot -
|
||||
tempcount*Interpreter::stackElementWords -
|
||||
popframe_extra_args;
|
||||
interpreter_frame->interpreter_frame_set_last_sp(rsp);
|
||||
|
||||
// All frames but the initial (oldest) interpreter frame we fill in have a
|
||||
// value for sender_sp that allows walking the stack but isn't
|
||||
// truly correct. Correct the value here.
|
||||
|
||||
if (extra_locals != 0 &&
|
||||
interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
|
||||
interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
|
||||
}
|
||||
*interpreter_frame->interpreter_frame_cache_addr() =
|
||||
method->constants()->cache();
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
|
||||
//------------------------------------------------------------------------------------------------------------------------
|
||||
// Exceptions
|
||||
|
||||
|
@ -1695,87 +1695,6 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
|
||||
return (overhead_size + method_stack + stub_code);
|
||||
}
|
||||
|
||||
int AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount,
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
// Note: This calculation must exactly parallel the frame setup
|
||||
// in AbstractInterpreterGenerator::generate_method_entry.
|
||||
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
|
||||
// The frame interpreter_frame, if not NULL, is guaranteed to be the
|
||||
// right size, as determined by a previous call to this method.
|
||||
// It is also guaranteed to be walkable even though it is in a skeletal state
|
||||
|
||||
// fixed size of an interpreter frame:
|
||||
int max_locals = method->max_locals() * Interpreter::stackElementWords;
|
||||
int extra_locals = (method->max_locals() - method->size_of_parameters()) *
|
||||
Interpreter::stackElementWords;
|
||||
|
||||
int overhead = frame::sender_sp_offset -
|
||||
frame::interpreter_frame_initial_sp_offset;
|
||||
// Our locals were accounted for by the caller (or last_frame_adjust
|
||||
// on the transistion) Since the callee parameters already account
|
||||
// for the callee's params we only need to account for the extra
|
||||
// locals.
|
||||
int size = overhead +
|
||||
(callee_locals - callee_param_count)*Interpreter::stackElementWords +
|
||||
moncount * frame::interpreter_frame_monitor_size() +
|
||||
tempcount* Interpreter::stackElementWords + popframe_extra_args;
|
||||
if (interpreter_frame != NULL) {
|
||||
#ifdef ASSERT
|
||||
if (!EnableInvokeDynamic)
|
||||
// @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
|
||||
// Probably, since deoptimization doesn't work yet.
|
||||
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
|
||||
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
|
||||
#endif
|
||||
|
||||
interpreter_frame->interpreter_frame_set_method(method);
|
||||
// NOTE the difference in using sender_sp and
|
||||
// interpreter_frame_sender_sp interpreter_frame_sender_sp is
|
||||
// the original sp of the caller (the unextended_sp) and
|
||||
// sender_sp is fp+16 XXX
|
||||
intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
|
||||
|
||||
#ifdef ASSERT
|
||||
if (caller->is_interpreted_frame()) {
|
||||
assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
|
||||
}
|
||||
#endif
|
||||
|
||||
interpreter_frame->interpreter_frame_set_locals(locals);
|
||||
BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
|
||||
BasicObjectLock* monbot = montop - moncount;
|
||||
interpreter_frame->interpreter_frame_set_monitor_end(monbot);
|
||||
|
||||
// Set last_sp
|
||||
intptr_t* esp = (intptr_t*) monbot -
|
||||
tempcount*Interpreter::stackElementWords -
|
||||
popframe_extra_args;
|
||||
interpreter_frame->interpreter_frame_set_last_sp(esp);
|
||||
|
||||
// All frames but the initial (oldest) interpreter frame we fill in have
|
||||
// a value for sender_sp that allows walking the stack but isn't
|
||||
// truly correct. Correct the value here.
|
||||
if (extra_locals != 0 &&
|
||||
interpreter_frame->sender_sp() ==
|
||||
interpreter_frame->interpreter_frame_sender_sp()) {
|
||||
interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
|
||||
extra_locals);
|
||||
}
|
||||
*interpreter_frame->interpreter_frame_cache_addr() =
|
||||
method->constants()->cache();
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Exceptions
|
||||
|
||||
|
@ -512,14 +512,15 @@ void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
|
||||
void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
|
||||
Compile* C = ra_->C;
|
||||
|
||||
int framesize = C->frame_slots() << LogBytesPerInt;
|
||||
int framesize = C->frame_size_in_bytes();
|
||||
int bangsize = C->bang_size_in_bytes();
|
||||
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
|
||||
// Remove wordSize for return addr which is already pushed.
|
||||
framesize -= wordSize;
|
||||
|
||||
if (C->need_stack_bang(framesize)) {
|
||||
if (C->need_stack_bang(bangsize)) {
|
||||
framesize -= wordSize;
|
||||
st->print("# stack bang");
|
||||
st->print("# stack bang (%d bytes)", bangsize);
|
||||
st->print("\n\t");
|
||||
st->print("PUSH EBP\t# Save EBP");
|
||||
if (framesize) {
|
||||
@ -563,9 +564,10 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
|
||||
Compile* C = ra_->C;
|
||||
MacroAssembler _masm(&cbuf);
|
||||
|
||||
int framesize = C->frame_slots() << LogBytesPerInt;
|
||||
int framesize = C->frame_size_in_bytes();
|
||||
int bangsize = C->bang_size_in_bytes();
|
||||
|
||||
__ verified_entry(framesize, C->need_stack_bang(framesize), C->in_24_bit_fp_mode());
|
||||
__ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, C->in_24_bit_fp_mode());
|
||||
|
||||
C->set_frame_complete(cbuf.insts_size());
|
||||
|
||||
@ -589,7 +591,7 @@ int MachPrologNode::reloc() const {
|
||||
#ifndef PRODUCT
|
||||
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
|
||||
Compile *C = ra_->C;
|
||||
int framesize = C->frame_slots() << LogBytesPerInt;
|
||||
int framesize = C->frame_size_in_bytes();
|
||||
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
|
||||
// Remove two words for return addr and rbp,
|
||||
framesize -= 2*wordSize;
|
||||
@ -629,7 +631,7 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
|
||||
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
|
||||
}
|
||||
|
||||
int framesize = C->frame_slots() << LogBytesPerInt;
|
||||
int framesize = C->frame_size_in_bytes();
|
||||
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
|
||||
// Remove two words for return addr and rbp,
|
||||
framesize -= 2*wordSize;
|
||||
@ -663,7 +665,7 @@ uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
|
||||
if (C->max_vector_size() > 16) size += 3; // vzeroupper
|
||||
if (do_polling() && C->is_method_compilation()) size += 6;
|
||||
|
||||
int framesize = C->frame_slots() << LogBytesPerInt;
|
||||
int framesize = C->frame_size_in_bytes();
|
||||
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
|
||||
// Remove two words for return addr and rbp,
|
||||
framesize -= 2*wordSize;
|
||||
|
@ -713,14 +713,15 @@ void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
|
||||
void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
|
||||
Compile* C = ra_->C;
|
||||
|
||||
int framesize = C->frame_slots() << LogBytesPerInt;
|
||||
int framesize = C->frame_size_in_bytes();
|
||||
int bangsize = C->bang_size_in_bytes();
|
||||
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
|
||||
// Remove wordSize for return addr which is already pushed.
|
||||
framesize -= wordSize;
|
||||
|
||||
if (C->need_stack_bang(framesize)) {
|
||||
if (C->need_stack_bang(bangsize)) {
|
||||
framesize -= wordSize;
|
||||
st->print("# stack bang");
|
||||
st->print("# stack bang (%d bytes)", bangsize);
|
||||
st->print("\n\t");
|
||||
st->print("pushq rbp\t# Save rbp");
|
||||
if (framesize) {
|
||||
@ -751,9 +752,10 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
|
||||
Compile* C = ra_->C;
|
||||
MacroAssembler _masm(&cbuf);
|
||||
|
||||
int framesize = C->frame_slots() << LogBytesPerInt;
|
||||
int framesize = C->frame_size_in_bytes();
|
||||
int bangsize = C->bang_size_in_bytes();
|
||||
|
||||
__ verified_entry(framesize, C->need_stack_bang(framesize), false);
|
||||
__ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
|
||||
|
||||
C->set_frame_complete(cbuf.insts_size());
|
||||
|
||||
@ -786,7 +788,7 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
|
||||
st->cr(); st->print("\t");
|
||||
}
|
||||
|
||||
int framesize = C->frame_slots() << LogBytesPerInt;
|
||||
int framesize = C->frame_size_in_bytes();
|
||||
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
|
||||
// Remove word for return adr already pushed
|
||||
// and RBP
|
||||
@ -822,7 +824,7 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
|
||||
__ vzeroupper();
|
||||
}
|
||||
|
||||
int framesize = C->frame_slots() << LogBytesPerInt;
|
||||
int framesize = C->frame_size_in_bytes();
|
||||
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
|
||||
// Remove word for return adr already pushed
|
||||
// and RBP
|
||||
|
@ -916,17 +916,32 @@ InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
|
||||
return (InterpreterFrame *) fp;
|
||||
}
|
||||
|
||||
int AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount,
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
int AbstractInterpreter::size_activation(int max_stack,
|
||||
int tempcount,
|
||||
int extra_args,
|
||||
int moncount,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
bool is_top_frame) {
|
||||
int header_words = InterpreterFrame::header_words;
|
||||
int monitor_words = moncount * frame::interpreter_frame_monitor_size();
|
||||
int stack_words = is_top_frame ? max_stack : tempcount;
|
||||
int callee_extra_locals = callee_locals - callee_param_count;
|
||||
|
||||
return header_words + monitor_words + stack_words + callee_extra_locals;
|
||||
}
|
||||
|
||||
void AbstractInterpreter::layout_activation(Method* method,
|
||||
int tempcount,
|
||||
int popframe_extra_args,
|
||||
int moncount,
|
||||
int caller_actual_parameters,
|
||||
int callee_param_count,
|
||||
int callee_locals,
|
||||
frame* caller,
|
||||
frame* interpreter_frame,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
assert(popframe_extra_args == 0, "what to do?");
|
||||
assert(!is_top_frame || (!callee_locals && !callee_param_count),
|
||||
"top frame should have no caller");
|
||||
@ -935,39 +950,31 @@ int AbstractInterpreter::layout_activation(Method* method,
|
||||
// does (the full InterpreterFrame::build, that is, not the
|
||||
// one that creates empty frames for the deoptimizer).
|
||||
//
|
||||
// If interpreter_frame is not NULL then it will be filled in.
|
||||
// It's size is determined by a previous call to this method,
|
||||
// so it should be correct.
|
||||
// interpreter_frame will be filled in. It's size is determined by
|
||||
// a previous call to the size_activation() method,
|
||||
//
|
||||
// Note that tempcount is the current size of the expression
|
||||
// stack. For top most frames we will allocate a full sized
|
||||
// expression stack and not the trimmed version that non-top
|
||||
// frames have.
|
||||
|
||||
int header_words = InterpreterFrame::header_words;
|
||||
int monitor_words = moncount * frame::interpreter_frame_monitor_size();
|
||||
int stack_words = is_top_frame ? method->max_stack() : tempcount;
|
||||
int callee_extra_locals = callee_locals - callee_param_count;
|
||||
intptr_t *locals = interpreter_frame->fp() + method->max_locals();
|
||||
interpreterState istate = interpreter_frame->get_interpreterState();
|
||||
intptr_t *monitor_base = (intptr_t*) istate;
|
||||
intptr_t *stack_base = monitor_base - monitor_words;
|
||||
intptr_t *stack = stack_base - tempcount - 1;
|
||||
|
||||
if (interpreter_frame) {
|
||||
intptr_t *locals = interpreter_frame->fp() + method->max_locals();
|
||||
interpreterState istate = interpreter_frame->get_interpreterState();
|
||||
intptr_t *monitor_base = (intptr_t*) istate;
|
||||
intptr_t *stack_base = monitor_base - monitor_words;
|
||||
intptr_t *stack = stack_base - tempcount - 1;
|
||||
|
||||
BytecodeInterpreter::layout_interpreterState(istate,
|
||||
caller,
|
||||
NULL,
|
||||
method,
|
||||
locals,
|
||||
stack,
|
||||
stack_base,
|
||||
monitor_base,
|
||||
NULL,
|
||||
is_top_frame);
|
||||
}
|
||||
return header_words + monitor_words + stack_words + callee_extra_locals;
|
||||
BytecodeInterpreter::layout_interpreterState(istate,
|
||||
caller,
|
||||
NULL,
|
||||
method,
|
||||
locals,
|
||||
stack,
|
||||
stack_base,
|
||||
monitor_base,
|
||||
NULL,
|
||||
is_top_frame);
|
||||
}
|
||||
|
||||
void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
|
||||
|
@ -25,6 +25,7 @@
|
||||
package com.sun.hotspot.tools.compiler;
|
||||
|
||||
import java.io.PrintStream;
|
||||
import java.util.ArrayDeque;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
@ -40,6 +41,7 @@ public class CallSite {
|
||||
private int endNodes;
|
||||
private int endLiveNodes;
|
||||
private double timeStamp;
|
||||
private long inlineId;
|
||||
|
||||
CallSite() {
|
||||
}
|
||||
@ -94,7 +96,7 @@ public class CallSite {
|
||||
|
||||
public void print(PrintStream stream, int indent) {
|
||||
emit(stream, indent);
|
||||
String m = getMethod().getHolder().replace('/', '.') + "::" + getMethod().getName();
|
||||
String m = getMethod().getHolder() + "::" + getMethod().getName();
|
||||
if (getReason() == null) {
|
||||
stream.print(" @ " + getBci() + " " + m + " (" + getMethod().getBytes() + " bytes)");
|
||||
|
||||
@ -214,4 +216,45 @@ public class CallSite {
|
||||
return timeStamp;
|
||||
}
|
||||
|
||||
private boolean matches(CallSite other) {
|
||||
// Every late inline call site has a unique inline id. If the
|
||||
// call site we're looking for has one then use it other rely
|
||||
// on method name and bci.
|
||||
if (other.inlineId != 0) {
|
||||
return inlineId == other.inlineId;
|
||||
}
|
||||
return method.equals(other.method) && bci == other.bci;
|
||||
}
|
||||
|
||||
public CallSite findCallSite(ArrayDeque<CallSite> sites) {
|
||||
// Locate a late inline call site. Multiple chains of
|
||||
// identical call sites with the same method name/bci are
|
||||
// possible so we have to try them all until we find the late
|
||||
// inline call site that has a matching inline id.
|
||||
CallSite site = sites.pop();
|
||||
for (CallSite c : calls) {
|
||||
if (c.matches(site)) {
|
||||
if (!sites.isEmpty()) {
|
||||
CallSite res = c.findCallSite(sites);
|
||||
if (res != null) {
|
||||
sites.push(site);
|
||||
return res;
|
||||
}
|
||||
} else {
|
||||
sites.push(site);
|
||||
return c;
|
||||
}
|
||||
}
|
||||
}
|
||||
sites.push(site);
|
||||
return null;
|
||||
}
|
||||
|
||||
public long getInlineId() {
|
||||
return inlineId;
|
||||
}
|
||||
|
||||
public void setInlineId(long inlineId) {
|
||||
this.inlineId = inlineId;
|
||||
}
|
||||
}
|
||||
|
@ -31,6 +31,7 @@ package com.sun.hotspot.tools.compiler;
|
||||
|
||||
import java.io.FileReader;
|
||||
import java.io.Reader;
|
||||
import java.util.ArrayDeque;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
@ -144,9 +145,12 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
|
||||
private Stack<CallSite> scopes = new Stack<CallSite>();
|
||||
private Compilation compile;
|
||||
private CallSite site;
|
||||
private CallSite methodHandleSite;
|
||||
private Stack<Phase> phaseStack = new Stack<Phase>();
|
||||
private UncommonTrapEvent currentTrap;
|
||||
private Stack<CallSite> late_inline_scope;
|
||||
private Stack<CallSite> lateInlineScope;
|
||||
private boolean lateInlining;
|
||||
|
||||
|
||||
long parseLong(String l) {
|
||||
try {
|
||||
@ -330,18 +334,61 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
|
||||
}
|
||||
methods.put(id, m);
|
||||
} else if (qname.equals("call")) {
|
||||
site = new CallSite(bci, method(search(atts, "method")));
|
||||
if (methodHandleSite != null) {
|
||||
methodHandleSite = null;
|
||||
}
|
||||
Method m = method(search(atts, "method"));
|
||||
if (lateInlining && scopes.size() == 0) {
|
||||
// re-attempting already seen call site (late inlining for MH invokes)
|
||||
if (m != site.getMethod()) {
|
||||
if (bci != site.getBci()) {
|
||||
System.out.println(m + " bci: " + bci);
|
||||
System.out.println(site.getMethod() + " bci: " + site.getBci());
|
||||
throw new InternalError("bci mismatch after late inlining");
|
||||
}
|
||||
site.setMethod(m);
|
||||
}
|
||||
} else {
|
||||
site = new CallSite(bci, m);
|
||||
}
|
||||
site.setCount(Integer.parseInt(search(atts, "count", "0")));
|
||||
String receiver = atts.getValue("receiver");
|
||||
if (receiver != null) {
|
||||
site.setReceiver(type(receiver));
|
||||
site.setReceiver_count(Integer.parseInt(search(atts, "receiver_count")));
|
||||
}
|
||||
scopes.peek().add(site);
|
||||
int methodHandle = Integer.parseInt(search(atts, "method_handle_intrinsic", "0"));
|
||||
if (lateInlining && scopes.size() == 0) {
|
||||
// The call was added before this round of late inlining
|
||||
} else if (methodHandle == 0) {
|
||||
scopes.peek().add(site);
|
||||
} else {
|
||||
// method handle call site can be followed by another
// call (in case it is inlined). If that happens we
// discard the method handle call site. So we keep
// track of it but don't add it to the list yet.
methodHandleSite = site;
}
} else if (qname.equals("regalloc")) {
compile.setAttempts(Integer.parseInt(search(atts, "attempts")));
} else if (qname.equals("inline_fail")) {
scopes.peek().last().setReason(search(atts, "reason"));
if (methodHandleSite != null) {
scopes.peek().add(methodHandleSite);
methodHandleSite = null;
}
if (lateInlining && scopes.size() == 0) {
site.setReason(search(atts, "reason"));
lateInlining = false;
} else {
scopes.peek().last().setReason(search(atts, "reason"));
}
} else if (qname.equals("inline_success")) {
if (methodHandleSite != null) {
throw new InternalError("method handle site should have been replaced");
}
if (lateInlining && scopes.size() == 0) {
site.setReason(null);
}
} else if (qname.equals("failure")) {
failureReason = search(atts, "reason");
} else if (qname.equals("task_done")) {

@ -371,22 +418,30 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
// ignore for now
}
} else if (qname.equals("late_inline")) {
late_inline_scope = new Stack<CallSite>();
long inlineId = Long.parseLong(search(atts, "inline_id"));
lateInlineScope = new Stack<CallSite>();
site = new CallSite(-999, method(search(atts, "method")));
late_inline_scope.push(site);
site.setInlineId(inlineId);
lateInlineScope.push(site);
} else if (qname.equals("jvms")) {
// <jvms bci='4' method='java/io/DataInputStream readChar ()C' bytes='40' count='5815' iicount='20815'/>
if (currentTrap != null) {
currentTrap.addJVMS(atts.getValue("method"), Integer.parseInt(atts.getValue("bci")));
} else if (late_inline_scope != null) {
} else if (lateInlineScope != null) {
bci = Integer.parseInt(search(atts, "bci"));
site = new CallSite(bci, method(search(atts, "method")));
late_inline_scope.push(site);
lateInlineScope.push(site);
} else {
// Ignore <eliminate_allocation type='667'>,
// <eliminate_lock lock='1'>,
// <replace_string_concat arguments='2' string_alloc='0' multiple='0'>
}
} else if (qname.equals("inline_id")) {
if (methodHandleSite != null) {
throw new InternalError("method handle site should have been replaced");
}
long id = Long.parseLong(search(atts, "id"));
site.setInlineId(id);
} else if (qname.equals("nmethod")) {
String id = makeId(atts);
NMethod nm = new NMethod(Double.parseDouble(search(atts, "stamp")),

@ -396,8 +451,18 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
nmethods.put(id, nm);
events.add(nm);
} else if (qname.equals("parse")) {
if (methodHandleSite != null) {
throw new InternalError("method handle site should have been replaced");
}
Method m = method(search(atts, "method"));
if (scopes.size() == 0) {
if (lateInlining && scopes.size() == 0) {
if (site.getMethod() != m) {
System.out.println(site.getMethod());
System.out.println(m);
throw new InternalError("Unexpected method mismatch during late inlining");
}
}
if (scopes.size() == 0 && !lateInlining) {
compile.setMethod(m);
scopes.push(site);
} else {

@ -427,14 +492,19 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
if (qname.equals("parse")) {
indent -= 2;
scopes.pop();
if (scopes.size() == 0) {
lateInlining = false;
}
} else if (qname.equals("uncommon_trap")) {
currentTrap = null;
} else if (qname.equals("late_inline")) {
// Populate late inlining info.

// late_inline scopes are specified in reverse order:
if (scopes.size() != 0) {
throw new InternalError("scopes should be empty for late inline");
}
// late inline scopes are specified in reverse order:
// compiled method should be on top of stack.
CallSite caller = late_inline_scope.pop();
CallSite caller = lateInlineScope.pop();
Method m = compile.getMethod();
if (m != caller.getMethod()) {
System.out.println(m);

@ -444,28 +514,42 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants

// late_inline contains caller+bci info, convert it
// to bci+callee info used by LogCompilation.
site = compile.getLateInlineCall();
CallSite lateInlineSite = compile.getLateInlineCall();
ArrayDeque<CallSite> thisCallScopes = new ArrayDeque<CallSite>();
do {
bci = caller.getBci();
// Next inlined call.
caller = late_inline_scope.pop();
caller = lateInlineScope.pop();
CallSite callee = new CallSite(bci, caller.getMethod());
site.add(callee);
site = callee;
} while (!late_inline_scope.empty());
callee.setInlineId(caller.getInlineId());
thisCallScopes.addLast(callee);
lateInlineSite.add(callee);
lateInlineSite = callee;
} while (!lateInlineScope.empty());

site = compile.getCall().findCallSite(thisCallScopes);
if (site == null) {
System.out.println(caller.getMethod() + " bci: " + bci);
throw new InternalError("couldn't find call site");
}
lateInlining = true;

if (caller.getBci() != -999) {
System.out.println(caller.getMethod());
throw new InternalError("broken late_inline info");
}
if (site.getMethod() != caller.getMethod()) {
System.out.println(site.getMethod());
System.out.println(caller.getMethod());
throw new InternalError("call site and late_inline info don't match");
if (site.getInlineId() == caller.getInlineId()) {
site.setMethod(caller.getMethod());
} else {
System.out.println(site.getMethod());
System.out.println(caller.getMethod());
throw new InternalError("call site and late_inline info don't match");
}
}
// late_inline is followed by parse with scopes.size() == 0,
// 'site' will be pushed to scopes.
late_inline_scope = null;
lateInlineScope = null;
} else if (qname.equals("task")) {
types.clear();
methods.clear();

@ -51,15 +51,15 @@ public class Method implements Constants {

String format(int osr_bci) {
if (osr_bci >= 0) {
return getHolder().replace('/', '.') + "::" + getName() + " @ " + osr_bci + " (" + getBytes() + " bytes)";
return getHolder() + "::" + getName() + " @ " + osr_bci + " (" + getBytes() + " bytes)";
} else {
return getHolder().replace('/', '.') + "::" + getName() + " (" + getBytes() + " bytes)";
return getHolder() + "::" + getName() + " (" + getBytes() + " bytes)";
}
}

@Override
public String toString() {
return getHolder().replace('/', '.') + "::" + getName() + " (" + getBytes() + " bytes)";
return getHolder() + "::" + getName() + " (" + getBytes() + " bytes)";
}

public String getHolder() {

@ -117,4 +117,14 @@ public class Method implements Constants {
public void setFlags(String flags) {
this.flags = flags;
}

@Override
public boolean equals(Object o) {
if (o instanceof Method) {
Method other = (Method)o;
return holder.equals(other.holder) && name.equals(other.name) &&
arguments.equals(other.arguments) && returnType.equals(other.returnType);
}
return false;
}
}
@ -1613,21 +1613,20 @@ void ArchDesc::declareClasses(FILE *fp) {
// Each instruction attribute results in a virtual call of same name.
// The ins_cost is not handled here.
Attribute *attr = instr->_attribs;
bool avoid_back_to_back = false;
Attribute *avoid_back_to_back_attr = NULL;
while (attr != NULL) {
if (strcmp (attr->_ident, "ins_cost") != 0 &&
if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0) {
fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
} else if (strcmp (attr->_ident, "ins_cost") != 0 &&
strncmp(attr->_ident, "ins_field_", 10) != 0 &&
// Must match function in node.hpp: return type bool, no prefix "ins_".
strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") != 0 &&
strcmp (attr->_ident, "ins_short_branch") != 0) {
fprintf(fp, " virtual int %s() const { return %s; }\n", attr->_ident, attr->_val);
}
// Check value for ins_avoid_back_to_back, and if it is true (1), set the flag
if (!strcmp(attr->_ident, "ins_avoid_back_to_back") != 0 && attr->int_val(*this) != 0)
avoid_back_to_back = true;
if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0)
fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);

if (strcmp(attr->_ident, "ins_avoid_back_to_back") == 0) {
avoid_back_to_back_attr = attr;
}
attr = (Attribute *)attr->_next;
}

@ -1799,11 +1798,11 @@ void ArchDesc::declareClasses(FILE *fp) {
}

// flag: if this instruction should not be generated back to back.
if ( avoid_back_to_back ) {
if ( node_flags_set ) {
fprintf(fp," | Flag_avoid_back_to_back");
if (avoid_back_to_back_attr != NULL) {
if (node_flags_set) {
fprintf(fp," | (%s)", avoid_back_to_back_attr->_val);
} else {
fprintf(fp,"init_flags(Flag_avoid_back_to_back");
fprintf(fp,"init_flags((%s)", avoid_back_to_back_attr->_val);
node_flags_set = true;
}
}
@ -968,6 +968,7 @@ void CodeBuffer::verify_section_allocation() {

void CodeBuffer::log_section_sizes(const char* name) {
if (xtty != NULL) {
ttyLocker ttyl;
// log info about buffer usage
xtty->print_cr("<blob name='%s' size='%d'>", name, _total_size);
for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {

@ -546,6 +546,7 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _code(buffer_blob)
, _has_access_indexed(false)
, _current_instruction(NULL)
, _interpreter_frame_size(0)
#ifndef PRODUCT
, _last_instruction_printed(NULL)
#endif // PRODUCT

@ -88,6 +88,7 @@ class Compilation: public StackObj {
CodeOffsets _offsets;
CodeBuffer _code;
bool _has_access_indexed;
int _interpreter_frame_size; // Stack space needed in case of a deoptimization

// compilation helpers
void initialize();

@ -262,6 +263,18 @@ class Compilation: public StackObj {

// Dump inlining replay data to the stream.
void dump_inline_data(outputStream* out) { /* do nothing now */ }

// How much stack space would the interpreter need in case of a
// deoptimization (worst case)
void update_interpreter_frame_size(int size) {
if (_interpreter_frame_size < size) {
_interpreter_frame_size = size;
}
}

int interpreter_frame_size() const {
return _interpreter_frame_size;
}
};
@ -227,8 +227,38 @@ void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
_oop_map->set_oop(name);
}

// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int CodeEmitInfo::interpreter_frame_size() const {
ValueStack* state = _stack;
int size = 0;
int callee_parameters = 0;
int callee_locals = 0;
int extra_args = state->scope()->method()->max_stack() - state->stack_size();

while (state != NULL) {
int locks = state->locks_size();
int temps = state->stack_size();
bool is_top_frame = (state == _stack);
ciMethod* method = state->scope()->method();

int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
temps + callee_parameters,
extra_args,
locks,
callee_parameters,
callee_locals,
is_top_frame);
size += frame_size;

callee_parameters = method->size_of_parameters();
callee_locals = method->max_locals();
extra_args = 0;
state = state->caller_state();
}
return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}

// Implementation of IR
@ -284,6 +284,8 @@ class CodeEmitInfo: public CompilationResourceObj {

bool is_method_handle_invoke() const { return _is_method_handle_invoke; }
void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }

int interpreter_frame_size() const;
};

@ -190,6 +190,13 @@ address LIR_Assembler::pc() const {
return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
for (int i = 0; i < info_list->length(); i++) {

@ -797,7 +804,7 @@ void LIR_Assembler::emit_op2(LIR_Op2* op) {

void LIR_Assembler::build_frame() {
_masm->build_frame(initial_frame_size_in_bytes());
_masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}
@ -132,7 +132,8 @@ class LIR_Assembler: public CompilationResourceObj {
int code_offset() const;
address pc() const;

int initial_frame_size_in_bytes();
int initial_frame_size_in_bytes() const;
int bang_size_in_bytes() const;

// test for constants which can be encoded directly in instructions
static bool is_small_constant(LIR_Opr opr);
@ -2451,6 +2451,9 @@ void LinearScan::compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &vis
CodeEmitInfo* info = visitor.info_at(i);
OopMap* oop_map = first_oop_map;

// compute worst case interpreter size in case of a deoptimization
_compilation->update_interpreter_frame_size(info->interpreter_frame_size());

if (info->stack()->locks_size() != first_info->stack()->locks_size()) {
// this info has a different number of locks than the precomputed oop map
// (possible for lock and unlock instructions) -> compute oop map with
@ -39,7 +39,7 @@ class C1_MacroAssembler: public MacroAssembler {
void explicit_null_check(Register base);
|
||||
|
||||
void inline_cache_check(Register receiver, Register iCache);
|
||||
void build_frame(int frame_size_in_bytes);
|
||||
void build_frame(int frame_size_in_bytes, int bang_size_in_bytes);
|
||||
void remove_frame(int frame_size_in_bytes);
|
||||
|
||||
void unverified_entry(Register receiver, Register ic_klass);
|
||||
|
@ -237,3 +237,9 @@ void ciKlass::print_impl(outputStream* st) {
|
||||
void ciKlass::print_name_on(outputStream* st) {
|
||||
name()->print_symbol_on(st);
|
||||
}
|
||||
|
||||
const char* ciKlass::external_name() const {
|
||||
GUARDED_VM_ENTRY(
|
||||
return get_Klass()->external_name();
|
||||
)
|
||||
}
|
||||
|
@ -125,6 +125,8 @@ public:
|
||||
virtual ciKlass* exact_klass() = 0;
|
||||
|
||||
void print_name_on(outputStream* st);
|
||||
|
||||
const char* external_name() const;
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_CI_CIKLASS_HPP
|
||||
|
@ -80,6 +80,7 @@ ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
|
||||
_code_size = h_m()->code_size();
|
||||
_intrinsic_id = h_m()->intrinsic_id();
|
||||
_handler_count = h_m()->exception_table_length();
|
||||
_size_of_parameters = h_m()->size_of_parameters();
|
||||
_uses_monitors = h_m()->access_flags().has_monitor_bytecodes();
|
||||
_balanced_monitors = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
|
||||
_is_c1_compilable = !h_m()->is_not_c1_compilable();
|
||||
|
@ -71,6 +71,7 @@ class ciMethod : public ciMetadata {
|
||||
int _interpreter_invocation_count;
|
||||
int _interpreter_throwout_count;
|
||||
int _instructions_size;
|
||||
int _size_of_parameters;
|
||||
|
||||
bool _uses_monitors;
|
||||
bool _balanced_monitors;
|
||||
@ -166,6 +167,7 @@ class ciMethod : public ciMetadata {
|
||||
int exception_table_length() const { check_is_loaded(); return _handler_count; }
|
||||
int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; }
|
||||
int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; }
|
||||
int size_of_parameters() const { check_is_loaded(); return _size_of_parameters; }
|
||||
|
||||
// Code size for inlining decisions.
|
||||
int code_size_for_inlining();
|
||||
@ -241,7 +243,6 @@ class ciMethod : public ciMetadata {
|
||||
|
||||
ciField* get_field_at_bci( int bci, bool &will_link);
|
||||
ciMethod* get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
|
||||
|
||||
// Given a certain calling environment, find the monomorphic target
|
||||
// for the call. Return NULL if the call is not monomorphic in
|
||||
// its calling environment.
|
||||
|
@ -123,6 +123,10 @@ void ciSymbol::print_symbol_on(outputStream *st) {
|
||||
GUARDED_VM_ENTRY(get_symbol()->print_symbol_on(st);)
|
||||
}
|
||||
|
||||
const char* ciSymbol::as_klass_external_name() const {
|
||||
GUARDED_VM_ENTRY(return get_symbol()->as_klass_external_name(););
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// ciSymbol::make_impl
|
||||
//
|
||||
|
@ -90,6 +90,7 @@ public:
|
||||
void print_symbol() {
|
||||
print_symbol_on(tty);
|
||||
}
|
||||
const char* as_klass_external_name() const;
|
||||
|
||||
// Make a ciSymbol from a C string.
|
||||
// Consider adding to vmSymbols.hpp instead of using this constructor.
|
||||
|
@ -390,20 +390,6 @@ class MethodFamily : public ResourceObj {
|
||||
Symbol* get_exception_message() { return _exception_message; }
|
||||
Symbol* get_exception_name() { return _exception_name; }
|
||||
|
||||
// Return true if the specified klass has a static method that matches
|
||||
// the name and signature of the target method.
|
||||
bool has_matching_static(InstanceKlass* root) {
|
||||
if (_members.length() > 0) {
|
||||
Pair<Method*,QualifiedState> entry = _members.at(0);
|
||||
Method* impl = root->find_method(entry.first->name(),
|
||||
entry.first->signature());
|
||||
if ((impl != NULL) && impl->is_static()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Either sets the target or the exception error message
|
||||
void determine_target(InstanceKlass* root, TRAPS) {
|
||||
if (has_target() || throws_exception()) {
|
||||
@ -433,21 +419,19 @@ class MethodFamily : public ResourceObj {
|
||||
// If the root klass has a static method with matching name and signature
|
||||
// then do not generate an overpass method because it will hide the
|
||||
// static method during resolution.
|
||||
if (!has_matching_static(root)) {
|
||||
if (qualified_methods.length() == 0) {
|
||||
_exception_message = generate_no_defaults_message(CHECK);
|
||||
} else {
|
||||
assert(root != NULL, "Null root class");
|
||||
_exception_message = generate_method_message(root->name(), qualified_methods.at(0), CHECK);
|
||||
}
|
||||
_exception_name = vmSymbols::java_lang_AbstractMethodError();
|
||||
if (qualified_methods.length() == 0) {
|
||||
_exception_message = generate_no_defaults_message(CHECK);
|
||||
} else {
|
||||
assert(root != NULL, "Null root class");
|
||||
_exception_message = generate_method_message(root->name(), qualified_methods.at(0), CHECK);
|
||||
}
|
||||
_exception_name = vmSymbols::java_lang_AbstractMethodError();
|
||||
|
||||
// If only one qualified method is default, select that
|
||||
} else if (num_defaults == 1) {
|
||||
_selected_target = qualified_methods.at(default_index);
|
||||
|
||||
} else if (num_defaults > 1 && !has_matching_static(root)) {
|
||||
} else if (num_defaults > 1) {
|
||||
_exception_message = generate_conflicts_message(&qualified_methods,CHECK);
|
||||
_exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
|
||||
if (TraceDefaultMethods) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -299,7 +299,7 @@ class VerificationType VALUE_OBJ_CLASS_SPEC {
|
||||
int dimensions() const {
|
||||
assert(is_array(), "Must be an array");
|
||||
int index = 0;
|
||||
while (name()->byte_at(index++) == '[');
|
||||
while (name()->byte_at(index) == '[') index++;
|
||||
return index;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1943,7 +1943,7 @@ bool ClassVerifier::is_protected_access(instanceKlassHandle this_class,
|
||||
InstanceKlass* target_instance = InstanceKlass::cast(target_class);
|
||||
fieldDescriptor fd;
|
||||
if (is_method) {
|
||||
Method* m = target_instance->uncached_lookup_method(field_name, field_sig);
|
||||
Method* m = target_instance->uncached_lookup_method(field_name, field_sig, Klass::normal);
|
||||
if (m != NULL && m->is_protected()) {
|
||||
if (!this_class->is_same_class_package(m->method_holder())) {
|
||||
return true;
|
||||
@ -2280,7 +2280,8 @@ void ClassVerifier::verify_invoke_init(
|
||||
ref_class_type.name(), CHECK_VERIFY(this));
|
||||
Method* m = InstanceKlass::cast(ref_klass)->uncached_lookup_method(
|
||||
vmSymbols::object_initializer_name(),
|
||||
cp->signature_ref_at(bcs->get_index_u2()));
|
||||
cp->signature_ref_at(bcs->get_index_u2()),
|
||||
Klass::normal);
|
||||
instanceKlassHandle mh(THREAD, m->method_holder());
|
||||
if (m->is_protected() && !mh->is_same_class_package(_klass())) {
|
||||
bool assignable = current_type().is_assignable_from(
|
||||
|
@ -106,7 +106,7 @@ int CompileLog::identify(ciBaseObject* obj) {
|
||||
if (mobj->is_klass()) {
|
||||
ciKlass* klass = mobj->as_klass();
|
||||
begin_elem("klass id='%d'", id);
|
||||
name(klass->name());
|
||||
name(klass);
|
||||
if (!klass->is_loaded()) {
|
||||
print(" unloaded='1'");
|
||||
} else {
|
||||
@ -171,6 +171,15 @@ void CompileLog::name(ciSymbol* name) {
|
||||
print("'");
|
||||
}
|
||||
|
||||
void CompileLog::name(ciKlass* k) {
|
||||
print(" name='");
|
||||
if (!k->is_loaded()) {
|
||||
text()->print(k->name()->as_klass_external_name());
|
||||
} else {
|
||||
text()->print(k->external_name());
|
||||
}
|
||||
print("'");
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// CompileLog::clear_identities
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include "utilities/xmlstream.hpp"
|
||||
|
||||
class ciBaseObject;
|
||||
class ciKlass;
|
||||
class ciObject;
|
||||
class ciMetadata;
|
||||
class ciSymbol;
|
||||
@ -72,6 +73,7 @@ class CompileLog : public xmlStream {
|
||||
|
||||
void name(ciSymbol* s); // name='s'
|
||||
void name(Symbol* s) { xmlStream::name(s); }
|
||||
void name(ciKlass* k);
|
||||
|
||||
// Output an object description, return obj->ident().
|
||||
int identify(ciBaseObject* obj);
|
||||
|
@ -151,7 +151,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
|
||||
inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
|
||||
HeapWord* objPtr);
|
||||
|
||||
inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj);
|
||||
inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj, int size);
|
||||
|
||||
inline static void post_allocation_setup_array(KlassHandle klass,
|
||||
HeapWord* obj, int length);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -70,7 +70,7 @@ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
|
||||
}
|
||||
|
||||
// Support for jvmti and dtrace
|
||||
inline void post_allocation_notify(KlassHandle klass, oop obj) {
|
||||
inline void post_allocation_notify(KlassHandle klass, oop obj, int size) {
|
||||
// support low memory notifications (no-op if not enabled)
|
||||
LowMemoryDetector::detect_low_memory_for_collected_pools();
|
||||
|
||||
@ -80,18 +80,19 @@ inline void post_allocation_notify(KlassHandle klass, oop obj) {
|
||||
if (DTraceAllocProbes) {
|
||||
// support for Dtrace object alloc event (no-op most of the time)
|
||||
if (klass() != NULL && klass()->name() != NULL) {
|
||||
SharedRuntime::dtrace_object_alloc(obj);
|
||||
SharedRuntime::dtrace_object_alloc(obj, size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
|
||||
HeapWord* obj) {
|
||||
HeapWord* obj,
|
||||
int size) {
|
||||
post_allocation_setup_common(klass, obj);
|
||||
assert(Universe::is_bootstrapping() ||
|
||||
!((oop)obj)->is_array(), "must not be an array");
|
||||
// notify jvmti and dtrace
|
||||
post_allocation_notify(klass, (oop)obj);
|
||||
post_allocation_notify(klass, (oop)obj, size);
|
||||
}
|
||||
|
||||
void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
|
||||
@ -103,9 +104,10 @@ void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
|
||||
assert(length >= 0, "length should be non-negative");
|
||||
((arrayOop)obj)->set_length(length);
|
||||
post_allocation_setup_common(klass, obj);
|
||||
assert(((oop)obj)->is_array(), "must be an array");
|
||||
oop new_obj = (oop)obj;
|
||||
assert(new_obj->is_array(), "must be an array");
|
||||
// notify jvmti and dtrace (must be after length is set for dtrace)
|
||||
post_allocation_notify(klass, (oop)obj);
|
||||
post_allocation_notify(klass, new_obj, new_obj->size());
|
||||
}
|
||||
|
||||
HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) {
|
||||
@ -199,7 +201,7 @@ oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
|
||||
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
|
||||
assert(size >= 0, "int won't convert to size_t");
|
||||
HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
|
||||
post_allocation_setup_obj(klass, obj);
|
||||
post_allocation_setup_obj(klass, obj, size);
|
||||
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
|
||||
return (oop)obj;
|
||||
}
|
||||
|
@ -181,30 +181,16 @@ class AbstractInterpreter: AllStatic {
|
||||
// Deoptimization should reexecute this bytecode
|
||||
static bool bytecode_should_reexecute(Bytecodes::Code code);
|
||||
|
||||
// share implementation of size_activation and layout_activation:
|
||||
static int size_activation(Method* method,
|
||||
// deoptimization support
|
||||
static int size_activation(int max_stack,
|
||||
int temps,
|
||||
int popframe_args,
|
||||
int extra_args,
|
||||
int monitors,
|
||||
int caller_actual_parameters,
|
||||
int callee_params,
|
||||
int callee_locals,
|
||||
bool is_top_frame,
|
||||
bool is_bottom_frame) {
|
||||
return layout_activation(method,
|
||||
temps,
|
||||
popframe_args,
|
||||
monitors,
|
||||
caller_actual_parameters,
|
||||
callee_params,
|
||||
callee_locals,
|
||||
(frame*)NULL,
|
||||
(frame*)NULL,
|
||||
is_top_frame,
|
||||
is_bottom_frame);
|
||||
}
|
||||
bool is_top_frame);
|
||||
|
||||
static int layout_activation(Method* method,
|
||||
static void layout_activation(Method* method,
|
||||
int temps,
|
||||
int popframe_args,
|
||||
int monitors,
|
||||
|
@ -243,7 +243,8 @@ void LinkResolver::resolve_klass(KlassHandle& result, constantPoolHandle pool, i
|
||||
// Look up method in klasses, including static methods
|
||||
// Then look up local default methods
|
||||
void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, bool checkpolymorphism, bool in_imethod_resolve, TRAPS) {
|
||||
Method* result_oop = klass->uncached_lookup_method(name, signature);
|
||||
// Ignore overpasses so statics can be found during resolution
|
||||
Method* result_oop = klass->uncached_lookup_method(name, signature, Klass::skip_overpass);
|
||||
|
||||
// JDK 8, JVMS 5.4.3.4: Interface method resolution should
|
||||
// ignore static and non-public methods of java.lang.Object,
|
||||
@ -256,6 +257,12 @@ void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle kl
|
||||
result_oop = NULL;
|
||||
}
|
||||
|
||||
// Before considering default methods, check for an overpass in the
|
||||
// current class if a method has not been found.
|
||||
if (result_oop == NULL) {
|
||||
result_oop = InstanceKlass::cast(klass())->find_method(name, signature);
|
||||
}
|
||||
|
||||
if (result_oop == NULL) {
|
||||
Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
|
||||
if (default_methods != NULL) {
|
||||
@ -276,11 +283,11 @@ void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle kl
|
||||
// returns first instance method
|
||||
// Looks up method in classes, then looks up local default methods
|
||||
void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
|
||||
Method* result_oop = klass->uncached_lookup_method(name, signature);
|
||||
Method* result_oop = klass->uncached_lookup_method(name, signature, Klass::normal);
|
||||
result = methodHandle(THREAD, result_oop);
|
||||
while (!result.is_null() && result->is_static() && result->method_holder()->super() != NULL) {
|
||||
KlassHandle super_klass = KlassHandle(THREAD, result->method_holder()->super());
|
||||
result = methodHandle(THREAD, super_klass->uncached_lookup_method(name, signature));
|
||||
result = methodHandle(THREAD, super_klass->uncached_lookup_method(name, signature, Klass::normal));
|
||||
}
|
||||
|
||||
if (result.is_null()) {
|
||||
@ -302,7 +309,7 @@ int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
|
||||
// First check in default method array
|
||||
if (!resolved_method->is_abstract() &&
|
||||
(InstanceKlass::cast(klass())->default_methods() != NULL)) {
|
||||
int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature);
|
||||
int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature, false);
|
||||
if (index >= 0 ) {
|
||||
vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index);
|
||||
}
|
||||
@ -322,7 +329,7 @@ void LinkResolver::lookup_method_in_interfaces(methodHandle& result, KlassHandle
|
||||
// Specify 'true' in order to skip default methods when searching the
|
||||
// interfaces. Function lookup_method_in_klasses() already looked for
|
||||
// the method in the default methods table.
|
||||
result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name, signature, true));
|
||||
result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name, signature, Klass::skip_defaults));
|
||||
}
|
||||
|
||||
void LinkResolver::lookup_polymorphic_method(methodHandle& result,
|
||||
|
@ -64,10 +64,10 @@ oop ArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
|
||||
Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
|
||||
// There are no methods in an array klass but the super class (Object) has some
|
||||
assert(super(), "super klass must be present");
|
||||
return super()->uncached_lookup_method(name, signature);
|
||||
return super()->uncached_lookup_method(name, signature, mode);
|
||||
}
|
||||
|
||||
ArrayKlass::ArrayKlass(Symbol* name) {
|
||||
|
@ -86,7 +86,7 @@ class ArrayKlass: public Klass {
|
||||
objArrayOop allocate_arrayArray(int n, int length, TRAPS);
|
||||
|
||||
// Lookup operations
|
||||
Method* uncached_lookup_method(Symbol* name, Symbol* signature) const;
|
||||
Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
|
||||
|
||||
// Casting from Klass*
|
||||
static ArrayKlass* cast(Klass* k) {
|
||||
|
@ -1389,7 +1389,11 @@ static int binary_search(Array<Method*>* methods, Symbol* name) {
|
||||
|
||||
// find_method looks up the name/signature in the local methods array
|
||||
Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const {
|
||||
return InstanceKlass::find_method(methods(), name, signature);
|
||||
return find_method_impl(name, signature, false);
|
||||
}
|
||||
|
||||
Method* InstanceKlass::find_method_impl(Symbol* name, Symbol* signature, bool skipping_overpass) const {
|
||||
return InstanceKlass::find_method_impl(methods(), name, signature, skipping_overpass);
|
||||
}
|
||||
|
||||
// find_instance_method looks up the name/signature in the local methods array
|
||||
@ -1406,40 +1410,49 @@ Method* InstanceKlass::find_instance_method(
|
||||
// find_method looks up the name/signature in the local methods array
|
||||
Method* InstanceKlass::find_method(
|
||||
Array<Method*>* methods, Symbol* name, Symbol* signature) {
|
||||
int hit = find_method_index(methods, name, signature);
|
||||
return InstanceKlass::find_method_impl(methods, name, signature, false);
|
||||
}
|
||||
|
||||
Method* InstanceKlass::find_method_impl(
|
||||
Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass) {
|
||||
int hit = find_method_index(methods, name, signature, skipping_overpass);
|
||||
return hit >= 0 ? methods->at(hit): NULL;
|
||||
}
|
||||
|
||||
// Used directly for default_methods to find the index into the
|
||||
// default_vtable_indices, and indirectly by find_method
|
||||
// find_method_index looks in the local methods array to return the index
|
||||
// of the matching name/signature
|
||||
// of the matching name/signature. If overpass methods are being ignored,
|
||||
// the search continues to find a potential non-overpass match. This capability
|
||||
// is important during method resolution to prefer a static method, for example,
|
||||
// over an overpass method.
|
||||
int InstanceKlass::find_method_index(
|
||||
Array<Method*>* methods, Symbol* name, Symbol* signature) {
|
||||
Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass) {
|
||||
int hit = binary_search(methods, name);
|
||||
if (hit != -1) {
|
||||
Method* m = methods->at(hit);
|
||||
// Do linear search to find matching signature. First, quick check
|
||||
// for common case
|
||||
if (m->signature() == signature) return hit;
|
||||
// for common case, ignoring overpasses if requested.
|
||||
if ((m->signature() == signature) && (!skipping_overpass || !m->is_overpass())) return hit;
|
||||
|
||||
// search downwards through overloaded methods
|
||||
int i;
|
||||
for (i = hit - 1; i >= 0; --i) {
|
||||
Method* m = methods->at(i);
|
||||
assert(m->is_method(), "must be method");
|
||||
if (m->name() != name) break;
|
||||
if (m->signature() == signature) return i;
|
||||
if ((m->signature() == signature) && (!skipping_overpass || !m->is_overpass())) return i;
|
||||
}
|
||||
// search upwards
|
||||
for (i = hit + 1; i < methods->length(); ++i) {
|
||||
Method* m = methods->at(i);
|
||||
assert(m->is_method(), "must be method");
|
||||
if (m->name() != name) break;
|
||||
if (m->signature() == signature) return i;
|
||||
if ((m->signature() == signature) && (!skipping_overpass || !m->is_overpass())) return i;
|
||||
}
|
||||
// not found
|
||||
#ifdef ASSERT
|
||||
int index = linear_search(methods, name, signature);
|
||||
int index = skipping_overpass ? -1 : linear_search(methods, name, signature);
|
||||
assert(index == -1, err_msg("binary search should have found entry %d", index));
|
||||
#endif
|
||||
}
|
||||
@ -1465,16 +1478,16 @@ int InstanceKlass::find_method_by_name(
|
||||
|
||||
// uncached_lookup_method searches both the local class methods array and all
|
||||
// superclasses methods arrays, skipping any overpass methods in superclasses.
|
||||
Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
|
||||
Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
|
||||
MethodLookupMode lookup_mode = mode;
|
||||
Klass* klass = const_cast<InstanceKlass*>(this);
|
||||
bool dont_ignore_overpasses = true; // For the class being searched, find its overpasses.
|
||||
while (klass != NULL) {
|
||||
Method* method = InstanceKlass::cast(klass)->find_method(name, signature);
|
||||
if ((method != NULL) && (dont_ignore_overpasses || !method->is_overpass())) {
|
||||
Method* method = InstanceKlass::cast(klass)->find_method_impl(name, signature, (lookup_mode == skip_overpass));
|
||||
if (method != NULL) {
|
||||
return method;
|
||||
}
|
||||
klass = InstanceKlass::cast(klass)->super();
|
||||
dont_ignore_overpasses = false; // Ignore overpass methods in all superclasses.
|
||||
lookup_mode = skip_overpass; // Always ignore overpass methods in superclasses
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
@ -1489,7 +1502,7 @@ Method* InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name,
|
||||
}
|
||||
// Look up interfaces
|
||||
if (m == NULL) {
|
||||
m = lookup_method_in_all_interfaces(name, signature, false);
|
||||
m = lookup_method_in_all_interfaces(name, signature, normal);
|
||||
}
|
||||
return m;
|
||||
}
|
||||
@ -1499,7 +1512,7 @@ Method* InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name,
|
||||
// They should only be found in the initial InterfaceMethodRef
|
||||
Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
|
||||
Symbol* signature,
|
||||
bool skip_default_methods) const {
|
||||
MethodLookupMode mode) const {
|
||||
Array<Klass*>* all_ifs = transitive_interfaces();
|
||||
int num_ifs = all_ifs->length();
|
||||
InstanceKlass *ik = NULL;
|
||||
@ -1507,7 +1520,7 @@ Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
|
||||
ik = InstanceKlass::cast(all_ifs->at(i));
|
||||
Method* m = ik->lookup_method(name, signature);
|
||||
if (m != NULL && m->is_public() && !m->is_static() &&
|
||||
(!skip_default_methods || !m->is_default_method())) {
|
||||
((mode != skip_defaults) || !m->is_default_method())) {
|
||||
return m;
|
||||
}
|
||||
}
|
||||
|
@ -490,14 +490,14 @@ class InstanceKlass: public Klass {
|
||||
static Method* find_instance_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
|
||||
|
||||
// find a local method index in default_methods (returns -1 if not found)
|
||||
static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature);
|
||||
static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass);
|
||||
|
||||
// lookup operation (returns NULL if not found)
|
||||
Method* uncached_lookup_method(Symbol* name, Symbol* signature) const;
|
||||
Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
|
||||
|
||||
// lookup a method in all the interfaces that this class implements
|
||||
// (returns NULL if not found)
|
||||
Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature, bool skip_default_methods) const;
|
||||
Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
|
||||
|
||||
// lookup a method in local defaults then in all interfaces
|
||||
// (returns NULL if not found)
|
||||
@ -1020,6 +1020,10 @@ private:
|
||||
// Returns the array class with this class as element type
|
||||
Klass* array_klass_impl(bool or_null, TRAPS);
|
||||
|
||||
// find a local method (returns NULL if not found)
|
||||
Method* find_method_impl(Symbol* name, Symbol* signature, bool skipping_overpass) const;
|
||||
static Method* find_method_impl(Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass);
|
||||
|
||||
// Free CHeap allocated fields.
|
||||
void release_C_heap_structures();
|
||||
public:
|
||||
|
@ -129,7 +129,7 @@ bool Klass::compute_is_subtype_of(Klass* k) {
|
||||
}
|
||||
|
||||
|
||||
Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
|
||||
Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
|
||||
#ifdef ASSERT
|
||||
tty->print_cr("Error: uncached_lookup_method called on a klass oop."
|
||||
" Likely error: reflection method does not correctly"
|
||||
|
@ -154,6 +154,8 @@ class Klass : public Metadata {
|
||||
void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw();
|
||||
|
||||
public:
|
||||
enum MethodLookupMode { normal, skip_overpass, skip_defaults };
|
||||
|
||||
bool is_klass() const volatile { return true; }
|
||||
|
||||
// super
|
||||
@ -391,10 +393,10 @@ class Klass : public Metadata {
|
||||
virtual void initialize(TRAPS);
|
||||
// lookup operation for MethodLookupCache
|
||||
friend class MethodLookupCache;
|
||||
virtual Method* uncached_lookup_method(Symbol* name, Symbol* signature) const;
|
||||
virtual Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
|
||||
public:
|
||||
Method* lookup_method(Symbol* name, Symbol* signature) const {
|
||||
return uncached_lookup_method(name, signature);
|
||||
return uncached_lookup_method(name, signature, normal);
|
||||
}
|
||||
|
||||
// array class with specific rank
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -622,7 +622,7 @@ bool klassVtable::needs_new_vtable_entry(methodHandle target_method,
|
||||
// this check for all access permissions.
|
||||
InstanceKlass *sk = InstanceKlass::cast(super);
|
||||
if (sk->has_miranda_methods()) {
|
||||
if (sk->lookup_method_in_all_interfaces(name, signature, false) != NULL) {
|
||||
if (sk->lookup_method_in_all_interfaces(name, signature, Klass::normal) != NULL) {
|
||||
return false; // found a matching miranda; we do not need a new entry
|
||||
}
|
||||
}
|
||||
@ -698,7 +698,7 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
|
||||
&& mo->method_holder() != NULL
|
||||
&& mo->method_holder()->super() != NULL)
|
||||
{
|
||||
mo = mo->method_holder()->super()->uncached_lookup_method(name, signature);
|
||||
mo = mo->method_holder()->super()->uncached_lookup_method(name, signature, Klass::normal);
|
||||
}
|
||||
if (mo == NULL || mo->access_flags().is_private() ) {
|
||||
// super class hierarchy does not implement it or protection is different
|
||||
@ -743,7 +743,7 @@ void klassVtable::add_new_mirandas_to_lists(
|
||||
if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all?
|
||||
InstanceKlass *sk = InstanceKlass::cast(super);
|
||||
// check if it is a duplicate of a super's miranda
|
||||
if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), false) == NULL) {
|
||||
if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), Klass::normal) == NULL) {
|
||||
new_mirandas->append(im);
|
||||
}
|
||||
if (all_mirandas != NULL) {
|
||||
|
@ -266,14 +266,17 @@ CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
|
||||
|
||||
// Allow inlining decisions to be delayed
|
||||
class LateInlineCallGenerator : public DirectCallGenerator {
|
||||
private:
|
||||
// unique id for log compilation
|
||||
jlong _unique_id;
|
||||
|
||||
protected:
|
||||
CallGenerator* _inline_cg;
|
||||
|
||||
virtual bool do_late_inline_check(JVMState* jvms) { return true; }
|
||||
|
||||
public:
|
||||
LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
|
||||
DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
|
||||
DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}
|
||||
|
||||
virtual bool is_late_inline() const { return true; }
|
||||
|
||||
@ -283,6 +286,8 @@ class LateInlineCallGenerator : public DirectCallGenerator {
|
||||
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
|
||||
Compile *C = Compile::current();
|
||||
|
||||
C->log_inline_id(this);
|
||||
|
||||
// Record that this call site should be revisited once the main
|
||||
// parse is finished.
|
||||
if (!is_mh_late_inline()) {
|
||||
@ -304,6 +309,14 @@ class LateInlineCallGenerator : public DirectCallGenerator {
|
||||
C->print_inlining_move_to(this);
|
||||
C->print_inlining_update_delayed(this);
|
||||
}
|
||||
|
||||
virtual void set_unique_id(jlong id) {
|
||||
_unique_id = id;
|
||||
}
|
||||
|
||||
virtual jlong unique_id() const {
|
||||
return _unique_id;
|
||||
}
|
||||
};
|
||||
|
||||
void LateInlineCallGenerator::do_late_inline() {
|
||||
@ -368,6 +381,8 @@ void LateInlineCallGenerator::do_late_inline() {
|
||||
|
||||
C->print_inlining_move_to(this);
|
||||
|
||||
C->log_late_inline(this);
|
||||
|
||||
// This check is done here because for_method_handle_inline() method
|
||||
// needs jvms for inlined state.
|
||||
if (!do_late_inline_check(jvms)) {
|
||||
@ -375,17 +390,6 @@ void LateInlineCallGenerator::do_late_inline() {
|
||||
return;
|
||||
}
|
||||
|
||||
CompileLog* log = C->log();
|
||||
if (log != NULL) {
|
||||
log->head("late_inline method='%d'", log->identify(method()));
|
||||
JVMState* p = jvms;
|
||||
while (p != NULL) {
|
||||
log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
|
||||
p = p->caller();
|
||||
}
|
||||
log->tail("late_inline");
|
||||
}
|
||||
|
||||
// Setup default node notes to be picked up by the inlining
|
||||
Node_Notes* old_nn = C->default_node_notes();
|
||||
if (old_nn != NULL) {
|
||||
@ -438,11 +442,12 @@ class LateInlineMHCallGenerator : public LateInlineCallGenerator {
|
||||
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
|
||||
JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
|
||||
|
||||
Compile* C = Compile::current();
|
||||
if (_input_not_const) {
|
||||
// inlining won't be possible, so no need to enqueue right now.
|
||||
call_node()->set_generator(this);
|
||||
} else {
|
||||
Compile::current()->add_late_inline(this);
|
||||
C->add_late_inline(this);
|
||||
}
|
||||
return new_jvms;
|
||||
}
|
||||
@ -483,6 +488,9 @@ class LateInlineStringCallGenerator : public LateInlineCallGenerator {
|
||||
|
||||
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
|
||||
Compile *C = Compile::current();
|
||||
|
||||
C->log_inline_id(this);
|
||||
|
||||
C->add_string_late_inline(this);
|
||||
|
||||
JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
|
||||
@ -505,6 +513,8 @@ class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
|
||||
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
|
||||
Compile *C = Compile::current();
|
||||
|
||||
C->log_inline_id(this);
|
||||
|
||||
C->add_boxing_late_inline(this);
|
||||
|
||||
JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
|
||||
@ -786,6 +796,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
|
||||
} else {
|
||||
const char* msg = "receiver not constant";
|
||||
if (PrintInlining) C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
|
||||
C->log_inline_failure(msg);
|
||||
}
|
||||
}
|
||||
break;
|
||||
@ -858,6 +869,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
|
||||
} else {
|
||||
const char* msg = "member_name not constant";
|
||||
if (PrintInlining) C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
|
||||
C->log_inline_failure(msg);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@ -84,6 +84,9 @@ class CallGenerator : public ResourceObj {
|
||||
|
||||
virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }
|
||||
|
||||
virtual void set_unique_id(jlong id) { fatal("unique id only for late inlines"); };
|
||||
virtual jlong unique_id() const { fatal("unique id only for late inlines"); return 0; };
|
||||
|
||||
// Note: It is possible for a CG to be both inline and virtual.
|
||||
// (The hashCode intrinsic does a vtable check and an inlined fast path.)
|
||||
|
||||
|
@ -608,6 +608,39 @@ void JVMState::adapt_position(int delta) {
|
||||
}
|
||||
}
|
||||
|
||||
// Mirror the stack size calculation in the deopt code
|
||||
// How much stack space would we need at this point in the program in
|
||||
// case of deoptimization?
|
||||
int JVMState::interpreter_frame_size() const {
|
||||
const JVMState* jvms = this;
|
||||
int size = 0;
|
||||
int callee_parameters = 0;
|
||||
int callee_locals = 0;
|
||||
int extra_args = method()->max_stack() - stk_size();
|
||||
|
||||
while (jvms != NULL) {
|
||||
int locks = jvms->nof_monitors();
|
||||
int temps = jvms->stk_size();
|
||||
bool is_top_frame = (jvms == this);
|
||||
ciMethod* method = jvms->method();
|
||||
|
||||
int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
|
||||
temps + callee_parameters,
|
||||
extra_args,
|
||||
locks,
|
||||
callee_parameters,
|
||||
callee_locals,
|
||||
is_top_frame);
|
||||
size += frame_size;
|
||||
|
||||
callee_parameters = method->size_of_parameters();
|
||||
callee_locals = method->max_locals();
|
||||
extra_args = 0;
|
||||
jvms = jvms->caller();
|
||||
}
|
||||
return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
|
||||
}
|
||||
|
||||
//=============================================================================
|
||||
uint CallNode::cmp( const Node &n ) const
|
||||
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
|
||||
|
@ -300,6 +300,7 @@ public:
|
||||
JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
|
||||
void set_map_deep(SafePointNode *map);// reset map for all callers
|
||||
void adapt_position(int delta); // Adapt offsets in in-array after adding an edge.
|
||||
int interpreter_frame_size() const;
|
||||
|
||||
#ifndef PRODUCT
|
||||
void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
|
||||
|
@ -440,6 +440,14 @@ int Compile::frame_size_in_words() const {
|
||||
return words;
|
||||
}
|
||||
|
||||
// To bang the stack of this compiled method we use the stack size
|
||||
// that the interpreter would need in case of a deoptimization. This
|
||||
// removes the need to bang the stack in the deoptimization blob which
|
||||
// in turn simplifies stack overflow handling.
|
||||
int Compile::bang_size_in_bytes() const {
|
||||
return MAX2(_interpreter_frame_size, frame_size_in_bytes());
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
//------------------------------CompileWrapper---------------------------------
|
||||
class CompileWrapper : public StackObj {
|
||||
@ -664,7 +672,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
|
||||
_print_inlining_list(NULL),
|
||||
_print_inlining_stream(NULL),
|
||||
_print_inlining_idx(0),
|
||||
_preserve_jvm_state(0) {
|
||||
_preserve_jvm_state(0),
|
||||
_interpreter_frame_size(0) {
|
||||
C = this;
|
||||
|
||||
CompileWrapper cw(this);
|
||||
@ -969,7 +978,8 @@ Compile::Compile( ciEnv* ci_env,
|
||||
_print_inlining_stream(NULL),
|
||||
_print_inlining_idx(0),
|
||||
_preserve_jvm_state(0),
|
||||
_allowed_reasons(0) {
|
||||
_allowed_reasons(0),
|
||||
_interpreter_frame_size(0) {
|
||||
C = this;
|
||||
|
||||
#ifndef PRODUCT
|
||||
@ -3078,8 +3088,12 @@ void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_
|
||||
Node* m = n->in(i);
|
||||
++i;
|
||||
if (m != NULL && !frc._visited.test_set(m->_idx)) {
|
||||
if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
|
||||
if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) {
|
||||
// compute worst case interpreter size in case of a deoptimization
|
||||
update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size());
|
||||
|
||||
sfpt.push(m);
|
||||
}
|
||||
cnt = m->req();
|
||||
nstack.push(n, i); // put on stack parent and next input's index
|
||||
n = m;
|
||||
@ -3851,7 +3865,7 @@ void Compile::print_inlining_assert_ready() {
|
||||
|
||||
void Compile::dump_inlining() {
|
||||
bool do_print_inlining = print_inlining() || print_intrinsics();
|
||||
if (do_print_inlining) {
|
||||
if (do_print_inlining || log() != NULL) {
|
||||
// Print inlining message for candidates that we couldn't inline
|
||||
// for lack of space
|
||||
for (int i = 0; i < _late_inlines.length(); i++) {
|
||||
@ -3861,6 +3875,7 @@ void Compile::dump_inlining() {
|
||||
if (do_print_inlining) {
|
||||
cg->print_inlining_late(msg);
|
||||
}
|
||||
log_late_inline_failure(cg, msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -3871,6 +3886,48 @@ void Compile::dump_inlining() {
|
||||
}
|
||||
}
|
||||
|
||||
void Compile::log_late_inline(CallGenerator* cg) {
|
||||
if (log() != NULL) {
|
||||
log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
|
||||
cg->unique_id());
|
||||
JVMState* p = cg->call_node()->jvms();
|
||||
while (p != NULL) {
|
||||
log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
|
||||
p = p->caller();
|
||||
}
|
||||
log()->tail("late_inline");
|
||||
}
|
||||
}
|
||||
|
||||
void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
|
||||
log_late_inline(cg);
|
||||
if (log() != NULL) {
|
||||
log()->inline_fail(msg);
|
||||
}
|
||||
}
|
||||
|
||||
void Compile::log_inline_id(CallGenerator* cg) {
|
||||
if (log() != NULL) {
|
||||
// The LogCompilation tool needs a unique way to identify late
|
||||
// inline call sites. This id must be unique for this call site in
|
||||
// this compilation. Try to have it unique across compilations as
|
||||
// well because it can be convenient when grepping through the log
|
||||
// file.
|
||||
// Distinguish OSR compilations from others in case CICountOSR is
|
||||
// on.
|
||||
jlong id = ((jlong)unique()) + (((jlong)compile_id()) << 33) + (CICountOSR && is_osr_compilation() ? ((jlong)1) << 32 : 0);
|
||||
cg->set_unique_id(id);
|
||||
log()->elem("inline_id id='" JLONG_FORMAT "'", id);
|
||||
}
|
||||
}
|
||||
|
||||
void Compile::log_inline_failure(const char* msg) {
|
||||
if (C->log() != NULL) {
|
||||
C->log()->inline_fail(msg);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Dump inlining replay data to the stream.
|
||||
// Don't change thread state and acquire any locks.
|
||||
void Compile::dump_inline_data(outputStream* out) {
|
||||
@ -4048,8 +4105,8 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
|
||||
worklist.push(root());
|
||||
for (uint next = 0; next < worklist.size(); ++next) {
|
||||
Node *n = worklist.at(next);
|
||||
const Type* t = igvn.type(n);
|
||||
assert(t == t->remove_speculative(), "no more speculative types");
|
||||
const Type* t = igvn.type_or_null(n);
|
||||
assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
|
||||
if (n->is_Type()) {
|
||||
t = n->as_Type()->type();
|
||||
assert(t == t->remove_speculative(), "no more speculative types");
|
||||
|
@ -440,6 +440,8 @@ class Compile : public Phase {
|
||||
void print_inlining_push();
|
||||
PrintInliningBuffer& print_inlining_current();
|
||||
|
||||
void log_late_inline_failure(CallGenerator* cg, const char* msg);
|
||||
|
||||
public:
|
||||
|
||||
outputStream* print_inlining_stream() const {
|
||||
@ -459,6 +461,10 @@ class Compile : public Phase {
|
||||
print_inlining_stream()->print(ss.as_string());
|
||||
}
|
||||
|
||||
void log_late_inline(CallGenerator* cg);
|
||||
void log_inline_id(CallGenerator* cg);
|
||||
void log_inline_failure(const char* msg);
|
||||
|
||||
void* replay_inline_data() const { return _replay_inline_data; }
|
||||
|
||||
// Dump inlining replay data to the stream.
|
||||
@ -478,6 +484,7 @@ class Compile : public Phase {
|
||||
RegMask _FIRST_STACK_mask; // All stack slots usable for spills (depends on frame layout)
|
||||
Arena* _indexSet_arena; // control IndexSet allocation within PhaseChaitin
|
||||
void* _indexSet_free_block_list; // free list of IndexSet bit blocks
|
||||
int _interpreter_frame_size;
|
||||
|
||||
uint _node_bundling_limit;
|
||||
Bundle* _node_bundling_base; // Information for instruction bundling
|
||||
@ -935,6 +942,7 @@ class Compile : public Phase {
|
||||
PhaseRegAlloc* regalloc() { return _regalloc; }
|
||||
int frame_slots() const { return _frame_slots; }
|
||||
int frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
|
||||
int frame_size_in_bytes() const { return _frame_slots << LogBytesPerInt; }
|
||||
RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
|
||||
Arena* indexSet_arena() { return _indexSet_arena; }
|
||||
void* indexSet_free_block_list() { return _indexSet_free_block_list; }
|
||||
@ -946,6 +954,13 @@ class Compile : public Phase {
|
||||
bool need_stack_bang(int frame_size_in_bytes) const;
|
||||
bool need_register_stack_bang() const;
|
||||
|
||||
void update_interpreter_frame_size(int size) {
|
||||
if (_interpreter_frame_size < size) {
|
||||
_interpreter_frame_size = size;
|
||||
}
|
||||
}
|
||||
int bang_size_in_bytes() const;
|
||||
|
||||
void set_matcher(Matcher* m) { _matcher = m; }
|
||||
//void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
|
||||
void set_indexSet_arena(Arena* a) { _indexSet_arena = a; }
|
||||
|
@ -104,6 +104,9 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
}
}
if (callee->is_method_handle_intrinsic()) {
log->print(" method_handle_intrinsic='1'");
}
log->end_elem();
}

@ -296,6 +299,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
if (call_does_dispatch) {
const char* msg = "virtual call";
if (PrintInlining) print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
C->log_inline_failure(msg);
return CallGenerator::for_virtual_call(callee, vtable_index);
} else {
// Class Hierarchy Analysis or Type Profile reveals a unique target,
@ -711,7 +711,7 @@ void ConnectionGraph::add_final_edges(Node *n) {
Node *val = n->in(MemNode::ValueIn);
PointsToNode* ptn = ptnode_adr(val->_idx);
assert(ptn != NULL, "node should be registered");
ptn->set_escape_state(PointsToNode::GlobalEscape);
set_escape_state(ptn, PointsToNode::GlobalEscape);
// Add edge to object for unsafe access with offset.
PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
assert(adr_ptn != NULL, "node should be registered");
@ -1150,6 +1150,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
// Now force out all loop-invariant dominating tests. The optimizer
// finds some, but we _know_ they are all useless.
peeled_dom_test_elim(loop,old_new);
loop->record_for_igvn();
}

//------------------------------is_invariant-----------------------------
@ -3172,17 +3172,16 @@ bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early
bool had_error = false;
#ifdef ASSERT
if (early != C->root()) {
// Make sure that there's a dominance path from use to LCA
Node* d = use;
while (d != LCA) {
d = idom(d);
// Make sure that there's a dominance path from LCA to early
Node* d = LCA;
while (d != early) {
if (d == C->root()) {
tty->print_cr("*** Use %d isn't dominated by def %s", use->_idx, n->_idx);
n->dump();
use->dump();
dump_bad_graph("Bad graph detected in compute_lca_of_uses", n, early, LCA);
tty->print_cr("*** Use %d isn't dominated by def %d ***", use->_idx, n->_idx);
had_error = true;
break;
}
d = idom(d);
}
}
#endif
@ -3435,6 +3434,13 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
_igvn._worklist.push(n); // Maybe we'll normalize it, if no more loops.
}

#ifdef ASSERT
if (_verify_only && !n->is_CFG()) {
// Check def-use domination.
compute_lca_of_uses(n, get_ctrl(n), true /* verify */);
}
#endif

// CFG and pinned nodes already handled
if( n->in(0) ) {
if( n->in(0)->is_top() ) return; // Dead?
@ -2700,6 +2700,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
// Inhibit more partial peeling on this loop
new_head_clone->set_partial_peel_loop();
C->set_major_progress();
loop->record_for_igvn();

#if !defined(PRODUCT)
if (TracePartialPeeling) {
@ -210,7 +210,14 @@ public:
bool may_be_short_branch() const { return (flags() & Flag_may_be_short_branch) != 0; }

// Avoid back to back some instructions on some CPUs.
bool avoid_back_to_back() const { return (flags() & Flag_avoid_back_to_back) != 0; }
enum AvoidBackToBackFlag { AVOID_NONE = 0,
AVOID_BEFORE = Flag_avoid_back_to_back_before,
AVOID_AFTER = Flag_avoid_back_to_back_after,
AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER };

bool avoid_back_to_back(AvoidBackToBackFlag flag_value) const {
return (flags() & flag_value) == flag_value;
}

// instruction implemented with a call
bool has_call() const { return (flags() & Flag_has_call) != 0; }
@ -645,17 +645,18 @@ public:

// Flags are sorted by usage frequency.
enum NodeFlags {
Flag_is_Copy = 0x01, // should be first bit to avoid shift
Flag_rematerialize = Flag_is_Copy << 1,
Flag_is_Copy = 0x01, // should be first bit to avoid shift
Flag_rematerialize = Flag_is_Copy << 1,
Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
Flag_is_macro = Flag_needs_anti_dependence_check << 1,
Flag_is_Con = Flag_is_macro << 1,
Flag_is_cisc_alternate = Flag_is_Con << 1,
Flag_is_dead_loop_safe = Flag_is_cisc_alternate << 1,
Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
Flag_avoid_back_to_back = Flag_may_be_short_branch << 1,
Flag_has_call = Flag_avoid_back_to_back << 1,
Flag_is_expensive = Flag_has_call << 1,
Flag_is_macro = Flag_needs_anti_dependence_check << 1,
Flag_is_Con = Flag_is_macro << 1,
Flag_is_cisc_alternate = Flag_is_Con << 1,
Flag_is_dead_loop_safe = Flag_is_cisc_alternate << 1,
Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
Flag_avoid_back_to_back_before = Flag_may_be_short_branch << 1,
Flag_avoid_back_to_back_after = Flag_avoid_back_to_back_before << 1,
Flag_has_call = Flag_avoid_back_to_back_after << 1,
Flag_is_expensive = Flag_has_call << 1,
_max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
};
@ -165,8 +165,13 @@ bool Compile::need_stack_bang(int frame_size_in_bytes) const {
// Determine if we need to generate a stack overflow check.
// Do it if the method is not a stub function and
// has java calls or has frame size > vm_page_size/8.
// The debug VM checks that deoptimization doesn't trigger an
// unexpected stack overflow (compiled method stack banging should
// guarantee it doesn't happen) so we always need the stack bang in
// a debug VM.
return (UseStackBanging && stub_function() == NULL &&
(has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
(has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3
DEBUG_ONLY(|| true)));
}

bool Compile::need_register_stack_bang() const {
@ -411,7 +416,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
blk_size += nop_size;
}
}
if (mach->avoid_back_to_back()) {
if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
// Nop is inserted between "avoid back to back" instructions.
// ScheduleAndBundle() can rearrange nodes in a block,
// check for all offsets inside this block.
@ -439,7 +444,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
last_call_adr = blk_starts[i]+blk_size;
}
// Remember end of avoid_back_to_back offset
if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back()) {
if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
}
}
@ -525,11 +530,11 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
int new_size = replacement->size(_regalloc);
int diff = br_size - new_size;
assert(diff >= (int)nop_size, "short_branch size should be smaller");
// Conservatively take into accound padding between
// Conservatively take into account padding between
// avoid_back_to_back branches. Previous branch could be
// converted into avoid_back_to_back branch during next
// rounds.
if (needs_padding && replacement->avoid_back_to_back()) {
if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
jmp_offset[i] += nop_size;
diff -= nop_size;
}
@ -548,7 +553,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
}
} // (mach->may_be_short_branch())
if (mach != NULL && (mach->may_be_short_branch() ||
mach->avoid_back_to_back())) {
mach->avoid_back_to_back(MachNode::AVOID_AFTER))) {
last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
}
blk_starts[i+1] -= adjust_block_start;
@ -1313,7 +1318,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
padding = nop_size;
}
if (padding == 0 && mach->avoid_back_to_back() &&
if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
current_offset == last_avoid_back_to_back_offset) {
// Avoid back to back some instructions.
padding = nop_size;
@ -1407,7 +1412,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
int new_size = replacement->size(_regalloc);
assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
// Insert padding between avoid_back_to_back branches.
if (needs_padding && replacement->avoid_back_to_back()) {
if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
MachNode *nop = new (this) MachNopNode();
block->insert_node(nop, j++);
_cfg->map_node_to_block(nop, block);
@ -1515,7 +1520,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
last_call_offset = current_offset;
}

if (n->is_Mach() && n->as_Mach()->avoid_back_to_back()) {
if (n->is_Mach() && n->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
// Avoid back to back some instructions.
last_avoid_back_to_back_offset = current_offset;
}
@ -831,154 +831,111 @@ PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
}
}

/**
* Initialize worklist for each node.
*/
void PhaseIterGVN::init_worklist(Node* first) {
Unique_Node_List to_process;
to_process.push(first);

while (to_process.size() > 0) {
Node* n = to_process.pop();
if (!_worklist.member(n)) {
_worklist.push(n);

uint cnt = n->req();
for(uint i = 0; i < cnt; i++) {
Node* m = n->in(i);
if (m != NULL) {
to_process.push(m);
}
}
}
}
}

#ifndef PRODUCT
void PhaseIterGVN::verify_step(Node* n) {
_verify_window[_verify_counter % _verify_window_size] = n;
++_verify_counter;
ResourceMark rm;
ResourceArea *area = Thread::current()->resource_area();
VectorSet old_space(area), new_space(area);
if (C->unique() < 1000 ||
0 == _verify_counter % (C->unique() < 10000 ? 10 : 100)) {
++_verify_full_passes;
Node::verify_recur(C->root(), -1, old_space, new_space);
}
const int verify_depth = 4;
for ( int i = 0; i < _verify_window_size; i++ ) {
Node* n = _verify_window[i];
if ( n == NULL ) continue;
if( n->in(0) == NodeSentinel ) { // xform_idom
_verify_window[i] = n->in(1);
--i; continue;
if (VerifyIterativeGVN) {
_verify_window[_verify_counter % _verify_window_size] = n;
++_verify_counter;
ResourceMark rm;
ResourceArea* area = Thread::current()->resource_area();
VectorSet old_space(area), new_space(area);
if (C->unique() < 1000 ||
0 == _verify_counter % (C->unique() < 10000 ? 10 : 100)) {
++_verify_full_passes;
Node::verify_recur(C->root(), -1, old_space, new_space);
}
// Typical fanout is 1-2, so this call visits about 6 nodes.
Node::verify_recur(n, verify_depth, old_space, new_space);
}
}
#endif


//------------------------------init_worklist----------------------------------
// Initialize worklist for each node.
void PhaseIterGVN::init_worklist( Node *n ) {
if( _worklist.member(n) ) return;
_worklist.push(n);
uint cnt = n->req();
for( uint i =0 ; i < cnt; i++ ) {
Node *m = n->in(i);
if( m ) init_worklist(m);
}
}

//------------------------------optimize---------------------------------------
void PhaseIterGVN::optimize() {
debug_only(uint num_processed = 0;);
#ifndef PRODUCT
{
_verify_counter = 0;
_verify_full_passes = 0;
const int verify_depth = 4;
for ( int i = 0; i < _verify_window_size; i++ ) {
_verify_window[i] = NULL;
Node* n = _verify_window[i];
if ( n == NULL ) continue;
if( n->in(0) == NodeSentinel ) { // xform_idom
_verify_window[i] = n->in(1);
--i; continue;
}
// Typical fanout is 1-2, so this call visits about 6 nodes.
Node::verify_recur(n, verify_depth, old_space, new_space);
}
}
#endif
}

#ifdef ASSERT
Node* prev = NULL;
uint rep_cnt = 0;
#endif
uint loop_count = 0;

// Pull from worklist; transform node;
// If node has changed: update edge info and put uses on worklist.
while( _worklist.size() ) {
if (C->check_node_count(NodeLimitFudgeFactor * 2,
"out of nodes optimizing method")) {
return;
}
Node *n = _worklist.pop();
if (++loop_count >= K * C->live_nodes()) {
debug_only(n->dump(4);)
assert(false, "infinite loop in PhaseIterGVN::optimize");
C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
return;
}
#ifdef ASSERT
if (n == prev) {
if (++rep_cnt > 3) {
n->dump(4);
assert(false, "loop in Ideal transformation");
void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) {
if (TraceIterativeGVN) {
uint wlsize = _worklist.size();
const Type* newtype = type_or_null(n);
if (nn != n) {
// print old node
tty->print("< ");
if (oldtype != newtype && oldtype != NULL) {
oldtype->dump();
}
} else {
rep_cnt = 0;
do { tty->print("\t"); } while (tty->position() < 16);
tty->print("<");
n->dump();
}
prev = n;
#endif
if (TraceIterativeGVN && Verbose) {
tty->print(" Pop ");
NOT_PRODUCT( n->dump(); )
debug_only(if( (num_processed++ % 100) == 0 ) _worklist.print_set();)
if (oldtype != newtype || nn != n) {
// print new node and/or new type
if (oldtype == NULL) {
tty->print("* ");
} else if (nn != n) {
tty->print("> ");
} else {
tty->print("= ");
}
if (newtype == NULL) {
tty->print("null");
} else {
newtype->dump();
}
do { tty->print("\t"); } while (tty->position() < 16);
nn->dump();
}

if (n->outcnt() != 0) {

#ifndef PRODUCT
uint wlsize = _worklist.size();
const Type* oldtype = type_or_null(n);
#endif //PRODUCT

Node *nn = transform_old(n);

#ifndef PRODUCT
if (TraceIterativeGVN) {
const Type* newtype = type_or_null(n);
if (nn != n) {
// print old node
tty->print("< ");
if (oldtype != newtype && oldtype != NULL) {
oldtype->dump();
}
do { tty->print("\t"); } while (tty->position() < 16);
tty->print("<");
n->dump();
}
if (oldtype != newtype || nn != n) {
// print new node and/or new type
if (oldtype == NULL) {
tty->print("* ");
} else if (nn != n) {
tty->print("> ");
} else {
tty->print("= ");
}
if (newtype == NULL) {
tty->print("null");
} else {
newtype->dump();
}
do { tty->print("\t"); } while (tty->position() < 16);
nn->dump();
}
if (Verbose && wlsize < _worklist.size()) {
tty->print(" Push {");
while (wlsize != _worklist.size()) {
Node* pushed = _worklist.at(wlsize++);
tty->print(" %d", pushed->_idx);
}
tty->print_cr(" }");
}
if (Verbose && wlsize < _worklist.size()) {
tty->print(" Push {");
while (wlsize != _worklist.size()) {
Node* pushed = _worklist.at(wlsize++);
tty->print(" %d", pushed->_idx);
}
if( VerifyIterativeGVN && nn != n ) {
verify_step((Node*) NULL); // ignore n, it might be subsumed
}
#endif
} else if (!n->is_top()) {
remove_dead_node(n);
tty->print_cr(" }");
}
if (nn != n) {
// ignore n, it might be subsumed
verify_step((Node*) NULL);
}
}
}

#ifndef PRODUCT
void PhaseIterGVN::init_verifyPhaseIterGVN() {
_verify_counter = 0;
_verify_full_passes = 0;
for (int i = 0; i < _verify_window_size; i++) {
_verify_window[i] = NULL;
}
}

void PhaseIterGVN::verify_PhaseIterGVN() {
C->verify_graph_edges();
if( VerifyOpto && allow_progress() ) {
// Must turn off allow_progress to enable assert and break recursion
@ -998,21 +955,78 @@ void PhaseIterGVN::optimize() {
igvn2.set_allow_progress(true);
}
}
if ( VerifyIterativeGVN && PrintOpto ) {
if ( _verify_counter == _verify_full_passes )
if (VerifyIterativeGVN && PrintOpto) {
if (_verify_counter == _verify_full_passes) {
tty->print_cr("VerifyIterativeGVN: %d transforms and verify passes",
_verify_full_passes);
else
} else {
tty->print_cr("VerifyIterativeGVN: %d transforms, %d full verify passes",
_verify_counter, _verify_full_passes);
}
}
#endif
}
#endif /* PRODUCT */

#ifdef ASSERT
/**
* Dumps information that can help to debug the problem. A debug
* build fails with an assert.
*/
void PhaseIterGVN::dump_infinite_loop_info(Node* n) {
n->dump(4);
_worklist.dump();
assert(false, "infinite loop in PhaseIterGVN::optimize");
}

/**
* Prints out information about IGVN if the 'verbose' option is used.
*/
void PhaseIterGVN::trace_PhaseIterGVN_verbose(Node* n, int num_processed) {
if (TraceIterativeGVN && Verbose) {
tty->print(" Pop ");
n->dump();
if ((num_processed % 100) == 0) {
_worklist.print_set();
}
}
}
#endif /* ASSERT */

void PhaseIterGVN::optimize() {
DEBUG_ONLY(uint num_processed = 0;)
NOT_PRODUCT(init_verifyPhaseIterGVN();)

uint loop_count = 0;
// Pull from worklist and transform the node. If the node has changed,
// update edge info and put uses on worklist.
while(_worklist.size()) {
if (C->check_node_count(NodeLimitFudgeFactor * 2, "Out of nodes")) {
return;
}
Node* n = _worklist.pop();
if (++loop_count >= K * C->live_nodes()) {
DEBUG_ONLY(dump_infinite_loop_info(n);)
C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
return;
}
DEBUG_ONLY(trace_PhaseIterGVN_verbose(n, num_processed++);)
if (n->outcnt() != 0) {
NOT_PRODUCT(const Type* oldtype = type_or_null(n));
// Do the transformation
Node* nn = transform_old(n);
NOT_PRODUCT(trace_PhaseIterGVN(n, nn, oldtype);)
} else if (!n->is_top()) {
remove_dead_node(n);
}
}
NOT_PRODUCT(verify_PhaseIterGVN();)
}


//------------------register_new_node_with_optimizer---------------------------
// Register a new node with the optimizer. Update the types array, the def-use
// info. Put on worklist.
/**
* Register a new node with the optimizer. Update the types array, the def-use
* info. Put on worklist.
*/
Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
set_type_bottom(n);
_worklist.push(n);
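The rewritten optimize() above leans on DEBUG_ONLY and NOT_PRODUCT wrappers instead of explicit #ifndef PRODUCT blocks. A minimal sketch of how such conditional-compilation macros behave is shown below; the macro definitions here are simplified stand-ins for the real ones in HotSpot's utility headers, and the function is purely illustrative.

#include <cstdio>

// Simplified stand-ins: DEBUG_ONLY(code) keeps 'code' only in debug (ASSERT) builds,
// NOT_PRODUCT(code) keeps it in every non-product build, and both expand to nothing otherwise.
#ifdef ASSERT
  #define DEBUG_ONLY(code) code
#else
  #define DEBUG_ONLY(code)
#endif

#ifndef PRODUCT
  #define NOT_PRODUCT(code) code
#else
  #define NOT_PRODUCT(code)
#endif

static void optimize_sketch() {
  DEBUG_ONLY(unsigned num_processed = 0;)            // disappears entirely in product builds
  for (int i = 0; i < 3; i++) {
    DEBUG_ONLY(num_processed++;)                     // debug-only bookkeeping
    NOT_PRODUCT(printf("processed node %d\n", i);)   // tracing available in non-product builds
  }
  DEBUG_ONLY(printf("total processed: %u\n", num_processed);)
}

int main() {
  optimize_sketch();
  return 0;
}

Factoring the tracing and verification into helpers wrapped this way keeps the hot loop readable in all build configurations, which appears to be the intent of the refactoring in this hunk.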
@ -1038,32 +1052,29 @@ Node *PhaseIterGVN::transform( Node *n ) {
return transform_old(n);
}

//------------------------------transform_old----------------------------------
Node *PhaseIterGVN::transform_old( Node *n ) {
#ifndef PRODUCT
debug_only(uint loop_count = 0;);
set_transforms();
#endif
Node *PhaseIterGVN::transform_old(Node* n) {
DEBUG_ONLY(uint loop_count = 0;);
NOT_PRODUCT(set_transforms());

// Remove 'n' from hash table in case it gets modified
_table.hash_delete(n);
if( VerifyIterativeGVN ) {
assert( !_table.find_index(n->_idx), "found duplicate entry in table");
if (VerifyIterativeGVN) {
assert(!_table.find_index(n->_idx), "found duplicate entry in table");
}

// Apply the Ideal call in a loop until it no longer applies
Node *k = n;
Node* k = n;
DEBUG_ONLY(dead_loop_check(k);)
DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
Node *i = k->Ideal(this, /*can_reshape=*/true);
Node* i = k->Ideal(this, /*can_reshape=*/true);
assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
#ifndef PRODUCT
if( VerifyIterativeGVN )
verify_step(k);
if( i && VerifyOpto ) {
if( !allow_progress() ) {
if (i->is_Add() && i->outcnt() == 1) {
verify_step(k);
if (i && VerifyOpto ) {
if (!allow_progress()) {
if (i->is_Add() && (i->outcnt() == 1)) {
// Switched input to left side because this is the only use
} else if( i->is_If() && (i->in(0) == NULL) ) {
} else if (i->is_If() && (i->in(0) == NULL)) {
// This IF is dead because it is dominated by an equivalent IF When
// dominating if changed, info is not propagated sparsely to 'this'
// Propagating this info further will spuriously identify other
@ -1071,35 +1082,38 @@ Node *PhaseIterGVN::transform_old( Node *n ) {
return i;
} else
set_progress();
} else
} else {
set_progress();
}
}
#endif

while( i ) {
while (i != NULL) {
#ifndef PRODUCT
debug_only( if( loop_count >= K ) i->dump(4); )
assert(loop_count < K, "infinite loop in PhaseIterGVN::transform");
debug_only( loop_count++; )
if (loop_count >= K) {
dump_infinite_loop_info(i);
}
loop_count++;
#endif
assert((i->_idx >= k->_idx) || i->is_top(), "Idealize should return new nodes, use Identity to return old nodes");
// Made a change; put users of original Node on worklist
add_users_to_worklist( k );
add_users_to_worklist(k);
// Replacing root of transform tree?
if( k != i ) {
if (k != i) {
// Make users of old Node now use new.
subsume_node( k, i );
subsume_node(k, i);
k = i;
}
DEBUG_ONLY(dead_loop_check(k);)
// Try idealizing again
DEBUG_ONLY(is_new = (k->outcnt() == 0);)
i = k->Ideal(this, /*can_reshape=*/true);
assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
assert(i != k || is_new || (i->outcnt() > 0), "don't return dead nodes");
#ifndef PRODUCT
if( VerifyIterativeGVN )
verify_step(k);
if( i && VerifyOpto ) set_progress();
verify_step(k);
if (i && VerifyOpto) {
set_progress();
}
#endif
}

@ -1107,48 +1121,49 @@ Node *PhaseIterGVN::transform_old( Node *n ) {
ensure_type_or_null(k);

// See what kind of values 'k' takes on at runtime
const Type *t = k->Value(this);
const Type* t = k->Value(this);
assert(t != NULL, "value sanity");

// Since I just called 'Value' to compute the set of run-time values
// for this Node, and 'Value' is non-local (and therefore expensive) I'll
// cache Value. Later requests for the local phase->type of this Node can
// use the cached Value instead of suffering with 'bottom_type'.
if (t != type_or_null(k)) {
NOT_PRODUCT( set_progress(); )
NOT_PRODUCT( inc_new_values();)
if (type_or_null(k) != t) {
#ifndef PRODUCT
inc_new_values();
set_progress();
#endif
set_type(k, t);
// If k is a TypeNode, capture any more-precise type permanently into Node
k->raise_bottom_type(t);
// Move users of node to worklist
add_users_to_worklist( k );
add_users_to_worklist(k);
}

// If 'k' computes a constant, replace it with a constant
if( t->singleton() && !k->is_Con() ) {
NOT_PRODUCT( set_progress(); )
Node *con = makecon(t); // Make a constant
add_users_to_worklist( k );
subsume_node( k, con ); // Everybody using k now uses con
if (t->singleton() && !k->is_Con()) {
NOT_PRODUCT(set_progress();)
Node* con = makecon(t); // Make a constant
add_users_to_worklist(k);
subsume_node(k, con); // Everybody using k now uses con
return con;
}

// Now check for Identities
i = k->Identity(this); // Look for a nearby replacement
if( i != k ) { // Found? Return replacement!
NOT_PRODUCT( set_progress(); )
add_users_to_worklist( k );
subsume_node( k, i ); // Everybody using k now uses i
i = k->Identity(this); // Look for a nearby replacement
if (i != k) { // Found? Return replacement!
NOT_PRODUCT(set_progress();)
add_users_to_worklist(k);
subsume_node(k, i); // Everybody using k now uses i
return i;
}

// Global Value Numbering
i = hash_find_insert(k); // Check for pre-existing node
if( i && (i != k) ) {
if (i && (i != k)) {
// Return the pre-existing node if it isn't dead
NOT_PRODUCT( set_progress(); )
add_users_to_worklist( k );
subsume_node( k, i ); // Everybody using k now uses i
NOT_PRODUCT(set_progress();)
add_users_to_worklist(k);
subsume_node(k, i); // Everybody using k now uses i
return i;
}
@ -1514,6 +1529,21 @@ void PhaseCCP::do_transform() {
C->set_root( transform(C->root())->as_Root() );
assert( C->top(), "missing TOP node" );
assert( C->root(), "missing root" );

// Eagerly remove castPP nodes here. CastPP nodes might not be
// removed in the subsequent IGVN phase if a node that changes
// in(1) of a castPP is processed prior to the castPP node.
for (uint i = 0; i < _worklist.size(); i++) {
Node* n = _worklist.at(i);

if (n->is_ConstraintCast()) {
Node* nn = n->Identity(this);
if (nn != n) {
replace_node(n, nn);
--i;
}
}
}
}

//------------------------------transform--------------------------------------
@ -440,6 +440,17 @@ public:
// and dominator info to a fixed point.
void optimize();

#ifndef PRODUCT
void trace_PhaseIterGVN(Node* n, Node* nn, const Type* old_type);
void init_verifyPhaseIterGVN();
void verify_PhaseIterGVN();
#endif

#ifdef ASSERT
void dump_infinite_loop_info(Node* n);
void trace_PhaseIterGVN_verbose(Node* n, int num_processed);
#endif

// Register a new node with the iter GVN pass without transforming it.
// Used when we need to restructure a Region/Phi area and all the Regions
// and Phis need to complete this one big transform before any other
@ -2420,7 +2420,7 @@ JNI_END

DEFINE_SETSTATICFIELD(jboolean, bool, Boolean, 'Z', z
, HOTSPOT_JNI_SETSTATICBOOLEANFIELD_ENTRY(env, clazz, (uintptr_t)fieldID, value),
HOTSPOT_JNI_SETBOOLEANFIELD_RETURN())
HOTSPOT_JNI_SETSTATICBOOLEANFIELD_RETURN())
DEFINE_SETSTATICFIELD(jbyte, byte, Byte, 'B', b
, HOTSPOT_JNI_SETSTATICBYTEFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value),
HOTSPOT_JNI_SETSTATICBYTEFIELD_RETURN())
@ -1215,7 +1215,8 @@ JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, job
// get run() method
Method* m_oop = object->klass()->uncached_lookup_method(
vmSymbols::run_method_name(),
vmSymbols::void_object_signature());
vmSymbols::void_object_signature(),
Klass::normal);
methodHandle m (THREAD, m_oop);
if (m.is_null() || !m->is_method() || !m()->is_public() || m()->is_static()) {
THROW_MSG_0(vmSymbols::java_lang_InternalError(), "No run method");
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -408,7 +408,7 @@ address NativeLookup::base_library_lookup(const char* class_name, const char* me

// Find method and invoke standard lookup
methodHandle method (THREAD,
klass->uncached_lookup_method(m_name, s_name));
klass->uncached_lookup_method(m_name, s_name, Klass::normal));
address result = lookup(method, in_base_library, CATCH);
assert(in_base_library, "must be in basic library");
guarantee(result != NULL, "must be non NULL");
@ -53,7 +53,8 @@ void AdvancedThresholdPolicy::initialize() {
}

set_c1_count(MAX2(count / 3, 1));
set_c2_count(MAX2(count - count / 3, 1));
set_c2_count(MAX2(count - c1_count(), 1));
FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());

// Some inlining tuning
#ifdef X86
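The same c1/c2 split appears again in SimpleThresholdPolicy further down. Deriving c2 from the already-clamped c1 count rather than from count / 3 a second time matters for very small thread counts; a small worked example is sketched below (max2 is a hypothetical stand-in for HotSpot's MAX2, the values are illustrative):

#include <cstdio>

// Hypothetical stand-in for HotSpot's MAX2 template.
static int max2(int a, int b) { return a > b ? a : b; }

int main() {
  int count = 2;                            // e.g. a very small compiler thread budget
  int c1     = max2(count / 3, 1);          // 2/3 == 0, clamped up to 1
  int old_c2 = max2(count - count / 3, 1);  // 2 - 0 == 2, so c1 + c2 == 3 > count
  int new_c2 = max2(count - c1, 1);         // 2 - 1 == 1, so c1 + c2 == 2 == count
  printf("c1=%d old_c2=%d new_c2=%d\n", c1, old_c2, new_c2);
  return 0;
}

With the new formula the two counts always add up to the configured total, which keeps the CICompilerCount ergonomics set just below consistent.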
@ -2402,6 +2402,10 @@ bool Arguments::check_vm_args_consistency() {
const int num_min_compiler_threads = (TieredCompilation && (TieredStopAtLevel >= CompLevel_full_optimization)) ? 2 : 1;
status &=verify_min_value(CICompilerCount, num_min_compiler_threads, "CICompilerCount");

if (!FLAG_IS_DEFAULT(CICompilerCount) && !FLAG_IS_DEFAULT(CICompilerCountPerCPU) && CICompilerCountPerCPU) {
warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
}

return status;
}
@ -182,6 +182,7 @@ void NonTieredCompPolicy::initialize() {
// max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
// May help big-app startup time.
_compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1);
FLAG_SET_ERGO(intx, CICompilerCount, _compiler_count);
} else {
_compiler_count = CICompilerCount;
}
@ -420,15 +420,9 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
// frame[number_of_frames - 1 ] = on_stack_size(youngest)
// frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
// frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
int caller_parms = callee_parameters;
if ((index == array->frames() - 1) && caller_was_method_handle) {
caller_parms = 0;
}
frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(caller_parms,
callee_parameters,
frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
callee_locals,
index == 0,
index == array->frames() - 1,
popframe_extra_args);
// This pc doesn't have to be perfect just good enough to identify the frame
// as interpreted so the skeleton frame will be walkable
@ -775,10 +775,13 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
// going to be unwound. Dispatch to a shared runtime stub
// which will cause the StackOverflowError to be fabricated
// and processed.
// For stack overflow in deoptimization blob, cleanup thread.
if (thread->deopt_mark() != NULL) {
Deoptimization::cleanup_deopt_info(thread, NULL);
}
// Stack overflow should never occur during deoptimization:
// the compiled method bangs the stack by as much as the
// interpreter would need in case of a deoptimization. The
// deoptimization blob and uncommon trap blob bang the stack
// in a debug VM to verify the correctness of the compiled
// method stack banging.
assert(thread->deopt_mark() == NULL, "no stack overflow from deopt blob/uncommon trap");
Events::log_exception(thread, "StackOverflowError at " INTPTR_FORMAT, pc);
return StubRoutines::throw_StackOverflowError_entry();
}
@ -946,14 +949,13 @@ jlong SharedRuntime::get_java_tid(Thread* thread) {
* it gets turned into a tail-call on sparc, which runs into dtrace bug
* 6254741. Once that is fixed we can remove the dummy return value.
*/
int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
return dtrace_object_alloc_base(Thread::current(), o);
int SharedRuntime::dtrace_object_alloc(oopDesc* o, int size) {
return dtrace_object_alloc_base(Thread::current(), o, size);
}

int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o, int size) {
assert(DTraceAllocProbes, "wrong call");
Klass* klass = o->klass();
int size = o->size();
Symbol* name = klass->name();
HOTSPOT_OBJECT_ALLOC(
get_java_tid(thread),
@ -258,8 +258,8 @@ class SharedRuntime: AllStatic {
static void register_finalizer(JavaThread* thread, oopDesc* obj);

// dtrace notifications
static int dtrace_object_alloc(oopDesc* o);
static int dtrace_object_alloc_base(Thread* thread, oopDesc* o);
static int dtrace_object_alloc(oopDesc* o, int size);
static int dtrace_object_alloc_base(Thread* thread, oopDesc* o, int size);
static int dtrace_method_entry(JavaThread* thread, Method* m);
static int dtrace_method_exit(JavaThread* thread, Method* m);
@ -142,7 +142,8 @@ void SimpleThresholdPolicy::initialize() {
count = MAX2(log2_intptr(os::active_processor_count()), 1) * 3 / 2;
}
set_c1_count(MAX2(count / 3, 1));
set_c2_count(MAX2(count - count / 3, 1));
set_c2_count(MAX2(count - c1_count(), 1));
FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());
}

void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {
@ -418,24 +418,20 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,

}

int vframeArrayElement::on_stack_size(int caller_actual_parameters,
int callee_parameters,
int vframeArrayElement::on_stack_size(int callee_parameters,
int callee_locals,
bool is_top_frame,
bool is_bottom_frame,
int popframe_extra_stack_expression_els) const {
assert(method()->max_locals() == locals()->size(), "just checking");
int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
int temps = expressions()->size();
return Interpreter::size_activation(method(),
return Interpreter::size_activation(method()->max_stack(),
temps + callee_parameters,
popframe_extra_stack_expression_els,
locks,
caller_actual_parameters,
callee_parameters,
callee_locals,
is_top_frame,
is_bottom_frame);
is_top_frame);
}
@ -85,10 +85,8 @@ class vframeArrayElement : public _ValueObj {

// Returns the on stack word size for this frame
// callee_parameters is the number of callee locals residing inside this frame
int on_stack_size(int caller_actual_parameters,
int callee_parameters,
int on_stack_size(int callee_parameters,
int callee_locals,
bool is_bottom_frame,
bool is_top_frame,
int popframe_extra_stack_expression_els) const;