Merge
commit dd47e02845
@@ -247,3 +247,4 @@ cd3825b2983045784d6fc6d1729c799b08215752 jdk8-b120
135f0c7af57ebace31383d8877f47e32172759ff jdk9-b02
fd8d51bdf9aadf7ae83e65e8655c53581017c363 jdk9-b03
cb4c3440bc2748101923e2488506e61009ab1bf5 jdk9-b04
8c63f0b6ada282f27e3a80125e53c3be603f9af7 jdk9-b05
@@ -350,8 +350,23 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],

AC_MSG_CHECKING([flags for boot jdk java command] )

# Disable special log output when a debug build is used as Boot JDK...
ADD_JVM_ARG_IF_OK([-XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput],boot_jdk_jvmargs,[$JAVA])

# Apply user provided options.
ADD_JVM_ARG_IF_OK([$with_boot_jdk_jvmargs],boot_jdk_jvmargs,[$JAVA])

AC_MSG_RESULT([$boot_jdk_jvmargs])

# For now, general JAVA_FLAGS are the same as the boot jdk jvmargs
JAVA_FLAGS=$boot_jdk_jvmargs
AC_SUBST(JAVA_FLAGS)

AC_MSG_CHECKING([flags for boot jdk java command for big workloads])

# Starting amount of heap memory.
ADD_JVM_ARG_IF_OK([-Xms64M],boot_jdk_jvmargs,[$JAVA])
ADD_JVM_ARG_IF_OK([-Xms64M],boot_jdk_jvmargs_big,[$JAVA])

# Maximum amount of heap memory.
# Maximum stack size.
@@ -366,20 +381,24 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
JVM_MAX_HEAP=1600M
STACK_SIZE=1536
fi
ADD_JVM_ARG_IF_OK([-Xmx$JVM_MAX_HEAP],boot_jdk_jvmargs,[$JAVA])
ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs,[$JAVA])
ADD_JVM_ARG_IF_OK([-Xmx$JVM_MAX_HEAP],boot_jdk_jvmargs_big,[$JAVA])
ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs_big,[$JAVA])

# Disable special log output when a debug build is used as Boot JDK...
ADD_JVM_ARG_IF_OK([-XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput],boot_jdk_jvmargs,[$JAVA])
AC_MSG_RESULT([$boot_jdk_jvmargs_big])

# Apply user provided options.
ADD_JVM_ARG_IF_OK([$with_boot_jdk_jvmargs],boot_jdk_jvmargs,[$JAVA])
JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big
AC_SUBST(JAVA_FLAGS_BIG)

AC_MSG_RESULT([$boot_jdk_jvmargs])

# For now, general JAVA_FLAGS are the same as the boot jdk jvmargs
JAVA_FLAGS=$boot_jdk_jvmargs
AC_MSG_CHECKING([flags for boot jdk java command for small workloads])

AC_SUBST(BOOT_JDK_JVMARGS, $boot_jdk_jvmargs)
AC_SUBST(JAVA_FLAGS, $JAVA_FLAGS)
# Use serial gc for small short lived tools if possible
ADD_JVM_ARG_IF_OK([-XX:+UseSerialGC],boot_jdk_jvmargs_small,[$JAVA])
ADD_JVM_ARG_IF_OK([-Xms32M],boot_jdk_jvmargs_small,[$JAVA])
ADD_JVM_ARG_IF_OK([-Xmx512M],boot_jdk_jvmargs_small,[$JAVA])

AC_MSG_RESULT([$boot_jdk_jvmargs_small])

JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small
AC_SUBST(JAVA_FLAGS_SMALL)
])
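[Note: ADD_JVM_ARG_IF_OK probes each candidate flag against the boot JDK before accepting it. A minimal sketch of the shell this macro expands to; the full expansion appears verbatim in the generated-configure.sh hunks below:]

  # Run "$JAVA <arg> -version"; keep the arg only if a version string is
  # printed and no warning appears in the output.
  OUTPUT=`$JAVA -Xms64M -version 2>&1`
  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
    boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xms64M"
  fi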
@@ -133,6 +133,26 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_LIBS],
C_FLAG_REORDER=''
CXX_FLAG_REORDER=''

if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# Linking is different on MacOSX
SHARED_LIBRARY_FLAGS="-dynamiclib -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG"
SET_EXECUTABLE_ORIGIN='-Xlinker -rpath -Xlinker @loader_path/.'
SET_SHARED_LIBRARY_ORIGIN="$SET_EXECUTABLE_ORIGIN"
SET_SHARED_LIBRARY_NAME='-Xlinker -install_name -Xlinker @rpath/[$]1'
SET_SHARED_LIBRARY_MAPFILE=''
else
# Default works for linux, might work on other platforms as well.
SHARED_LIBRARY_FLAGS='-shared'
SET_EXECUTABLE_ORIGIN='-Xlinker -rpath -Xlinker \$$$$ORIGIN[$]1'
SET_SHARED_LIBRARY_ORIGIN="-Xlinker -z -Xlinker origin $SET_EXECUTABLE_ORIGIN"
SET_SHARED_LIBRARY_NAME='-Xlinker -soname=[$]1'
SET_SHARED_LIBRARY_MAPFILE='-Xlinker -version-script=[$]1'
fi
elif test "x$TOOLCHAIN_TYPE" = xclang; then
PICFLAG=''
C_FLAG_REORDER=''
CXX_FLAG_REORDER=''

if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# Linking is different on MacOSX
SHARED_LIBRARY_FLAGS="-dynamiclib -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG"
@@ -242,6 +262,8 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
# Generate make dependency files
if test "x$TOOLCHAIN_TYPE" = xgcc; then
C_FLAG_DEPS="-MMD -MF"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
C_FLAG_DEPS="-MMD -MF"
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
C_FLAG_DEPS="-xMMD -xMF"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
@@ -260,6 +282,9 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
CFLAGS_DEBUG_SYMBOLS="-g"
CXXFLAGS_DEBUG_SYMBOLS="-g"
fi
elif test "x$TOOLCHAIN_TYPE" = xclang; then
CFLAGS_DEBUG_SYMBOLS="-g"
CXXFLAGS_DEBUG_SYMBOLS="-g"
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
CFLAGS_DEBUG_SYMBOLS="-g -xs"
CXXFLAGS_DEBUG_SYMBOLS="-g0 -xs"
@@ -315,6 +340,20 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
C_O_FLAG_NORM="-O2"
C_O_FLAG_NONE="-O0"
fi
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# On MacOSX we optimize for size, something
# we should do for all platforms?
C_O_FLAG_HIGHEST="-Os"
C_O_FLAG_HI="-Os"
C_O_FLAG_NORM="-Os"
C_O_FLAG_NONE=""
else
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3"
C_O_FLAG_NORM="-O2"
C_O_FLAG_NONE="-O0"
fi
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3 -qstrict"
@@ -653,6 +653,9 @@ LIBDL
LIBM
LIBZIP_CAN_USE_MMAP
USE_EXTERNAL_LIBZ
USE_EXTERNAL_LIBPNG
PNG_LIBS
PNG_CFLAGS
USE_EXTERNAL_LIBGIF
USE_EXTERNAL_LIBJPEG
ALSA_LIBS
@@ -793,8 +796,9 @@ JAXWS_TOPDIR
JAXP_TOPDIR
CORBA_TOPDIR
LANGTOOLS_TOPDIR
JAVA_FLAGS_SMALL
JAVA_FLAGS_BIG
JAVA_FLAGS
BOOT_JDK_JVMARGS
JAVAC_FLAGS
BOOT_JDK_SOURCETARGET
JARSIGNER
@@ -1071,6 +1075,7 @@ with_alsa
with_alsa_include
with_alsa_lib
with_giflib
with_libpng
with_zlib
with_stdc__lib
with_msvcr_dll
@@ -1183,6 +1188,8 @@ FREETYPE_CFLAGS
FREETYPE_LIBS
ALSA_CFLAGS
ALSA_LIBS
PNG_CFLAGS
PNG_LIBS
LIBFFI_CFLAGS
LIBFFI_LIBS
CCACHE'
@@ -1921,6 +1928,8 @@ Optional Packages:
  --with-alsa-lib         specify directory for the alsa library
  --with-giflib           use giflib from build system or OpenJDK source
                          (system, bundled) [bundled]
  --with-libpng           use libpng from build system or OpenJDK source
                          (system, bundled) [bundled]
  --with-zlib             use zlib from build system or OpenJDK source
                          (system, bundled) [bundled]
  --with-stdc++lib=<static>,<dynamic>,<default>
@@ -2045,6 +2054,8 @@ Some influential environment variables:
              linker flags for FREETYPE, overriding pkg-config
  ALSA_CFLAGS C compiler flags for ALSA, overriding pkg-config
  ALSA_LIBS   linker flags for ALSA, overriding pkg-config
  PNG_CFLAGS  C compiler flags for PNG, overriding pkg-config
  PNG_LIBS    linker flags for PNG, overriding pkg-config
  LIBFFI_CFLAGS
              C compiler flags for LIBFFI, overriding pkg-config
  LIBFFI_LIBS linker flags for LIBFFI, overriding pkg-config
@@ -4221,7 +4232,7 @@ TOOLCHAIN_DESCRIPTION_xlc="IBM XL C/C++"
#CUSTOM_AUTOCONF_INCLUDE

# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1394011255
DATE_WHEN_GENERATED=1394794899

###############################################################################
#
@@ -25856,67 +25867,6 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command " >&5
$as_echo_n "checking flags for boot jdk java command ... " >&6; }

# Starting amount of heap memory.

$ECHO "Check if jvm arg is ok: -Xms64M" >&5
$ECHO "Command: $JAVA -Xms64M -version" >&5
OUTPUT=`$JAVA -Xms64M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs="$boot_jdk_jvmargs -Xms64M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi

# Maximum amount of heap memory.
# Maximum stack size.
if test "x$BUILD_NUM_BITS" = x32; then
JVM_MAX_HEAP=1100M
STACK_SIZE=768
else
# Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
# pointers are used. Apparently, we need to increase the heap and stack
# space for the jvm. More specifically, when running javac to build huge
# jdk batch
JVM_MAX_HEAP=1600M
STACK_SIZE=1536
fi

$ECHO "Check if jvm arg is ok: -Xmx$JVM_MAX_HEAP" >&5
$ECHO "Command: $JAVA -Xmx$JVM_MAX_HEAP -version" >&5
OUTPUT=`$JAVA -Xmx$JVM_MAX_HEAP -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs="$boot_jdk_jvmargs -Xmx$JVM_MAX_HEAP"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi

$ECHO "Check if jvm arg is ok: -XX:ThreadStackSize=$STACK_SIZE" >&5
$ECHO "Command: $JAVA -XX:ThreadStackSize=$STACK_SIZE -version" >&5
OUTPUT=`$JAVA -XX:ThreadStackSize=$STACK_SIZE -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:ThreadStackSize=$STACK_SIZE"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi

# Disable special log output when a debug build is used as Boot JDK...

$ECHO "Check if jvm arg is ok: -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" >&5
@@ -25957,9 +25907,133 @@ $as_echo "$boot_jdk_jvmargs" >&6; }
# For now, general JAVA_FLAGS are the same as the boot jdk jvmargs
JAVA_FLAGS=$boot_jdk_jvmargs

BOOT_JDK_JVMARGS=$boot_jdk_jvmargs

JAVA_FLAGS=$JAVA_FLAGS

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for big workloads" >&5
$as_echo_n "checking flags for boot jdk java command for big workloads... " >&6; }

# Starting amount of heap memory.

$ECHO "Check if jvm arg is ok: -Xms64M" >&5
$ECHO "Command: $JAVA -Xms64M -version" >&5
OUTPUT=`$JAVA -Xms64M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xms64M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi

# Maximum amount of heap memory.
# Maximum stack size.
if test "x$BUILD_NUM_BITS" = x32; then
JVM_MAX_HEAP=1100M
STACK_SIZE=768
else
# Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
# pointers are used. Apparently, we need to increase the heap and stack
# space for the jvm. More specifically, when running javac to build huge
# jdk batch
JVM_MAX_HEAP=1600M
STACK_SIZE=1536
fi

$ECHO "Check if jvm arg is ok: -Xmx$JVM_MAX_HEAP" >&5
$ECHO "Command: $JAVA -Xmx$JVM_MAX_HEAP -version" >&5
OUTPUT=`$JAVA -Xmx$JVM_MAX_HEAP -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xmx$JVM_MAX_HEAP"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi

$ECHO "Check if jvm arg is ok: -XX:ThreadStackSize=$STACK_SIZE" >&5
$ECHO "Command: $JAVA -XX:ThreadStackSize=$STACK_SIZE -version" >&5
OUTPUT=`$JAVA -XX:ThreadStackSize=$STACK_SIZE -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -XX:ThreadStackSize=$STACK_SIZE"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_big" >&5
$as_echo "$boot_jdk_jvmargs_big" >&6; }

JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for small workloads" >&5
$as_echo_n "checking flags for boot jdk java command for small workloads... " >&6; }

# Use serial gc for small short lived tools if possible

$ECHO "Check if jvm arg is ok: -XX:+UseSerialGC" >&5
$ECHO "Command: $JAVA -XX:+UseSerialGC -version" >&5
OUTPUT=`$JAVA -XX:+UseSerialGC -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -XX:+UseSerialGC"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi

$ECHO "Check if jvm arg is ok: -Xms32M" >&5
$ECHO "Command: $JAVA -Xms32M -version" >&5
OUTPUT=`$JAVA -Xms32M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xms32M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi

$ECHO "Check if jvm arg is ok: -Xmx512M" >&5
$ECHO "Command: $JAVA -Xmx512M -version" >&5
OUTPUT=`$JAVA -Xmx512M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xmx512M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_small" >&5
$as_echo "$boot_jdk_jvmargs_small" >&6; }

JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small
@@ -26297,8 +26371,28 @@ fi
# Use indirect variable referencing
toolchain_var_name=VALID_TOOLCHAINS_$OPENJDK_BUILD_OS
VALID_TOOLCHAINS=${!toolchain_var_name}
# First toolchain type in the list is the default
DEFAULT_TOOLCHAIN=${VALID_TOOLCHAINS%% *}

if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# On Mac OS X, default toolchain to clang after Xcode 5
XCODE_VERSION_OUTPUT=`xcodebuild -version 2>&1 | $HEAD -n 1`
$ECHO "$XCODE_VERSION_OUTPUT" | $GREP "Xcode " > /dev/null
if test $? -ne 0; then
as_fn_error $? "Failed to determine Xcode version." "$LINENO" 5
fi
XCODE_MAJOR_VERSION=`$ECHO $XCODE_VERSION_OUTPUT | \
$SED -e 's/^Xcode \([1-9][0-9.]*\)/\1/' | \
$CUT -f 1 -d .`
{ $as_echo "$as_me:${as_lineno-$LINENO}: Xcode major version: $XCODE_MAJOR_VERSION" >&5
$as_echo "$as_me: Xcode major version: $XCODE_MAJOR_VERSION" >&6;}
if test $XCODE_MAJOR_VERSION -ge 5; then
DEFAULT_TOOLCHAIN="clang"
else
DEFAULT_TOOLCHAIN="gcc"
fi
else
# First toolchain type in the list is the default
DEFAULT_TOOLCHAIN=${VALID_TOOLCHAINS%% *}
fi

if test "x$with_toolchain_type" = xlist; then
# List all toolchains
@@ -41221,6 +41315,26 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
C_FLAG_REORDER=''
CXX_FLAG_REORDER=''

if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# Linking is different on MacOSX
SHARED_LIBRARY_FLAGS="-dynamiclib -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG"
SET_EXECUTABLE_ORIGIN='-Xlinker -rpath -Xlinker @loader_path/.'
SET_SHARED_LIBRARY_ORIGIN="$SET_EXECUTABLE_ORIGIN"
SET_SHARED_LIBRARY_NAME='-Xlinker -install_name -Xlinker @rpath/$1'
SET_SHARED_LIBRARY_MAPFILE=''
else
# Default works for linux, might work on other platforms as well.
SHARED_LIBRARY_FLAGS='-shared'
SET_EXECUTABLE_ORIGIN='-Xlinker -rpath -Xlinker \$$$$ORIGIN$1'
SET_SHARED_LIBRARY_ORIGIN="-Xlinker -z -Xlinker origin $SET_EXECUTABLE_ORIGIN"
SET_SHARED_LIBRARY_NAME='-Xlinker -soname=$1'
SET_SHARED_LIBRARY_MAPFILE='-Xlinker -version-script=$1'
fi
elif test "x$TOOLCHAIN_TYPE" = xclang; then
PICFLAG=''
C_FLAG_REORDER=''
CXX_FLAG_REORDER=''

if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# Linking is different on MacOSX
SHARED_LIBRARY_FLAGS="-dynamiclib -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG"
@@ -41297,6 +41411,8 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
# Generate make dependency files
if test "x$TOOLCHAIN_TYPE" = xgcc; then
C_FLAG_DEPS="-MMD -MF"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
C_FLAG_DEPS="-MMD -MF"
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
C_FLAG_DEPS="-xMMD -xMF"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
@@ -41315,6 +41431,9 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
CFLAGS_DEBUG_SYMBOLS="-g"
CXXFLAGS_DEBUG_SYMBOLS="-g"
fi
elif test "x$TOOLCHAIN_TYPE" = xclang; then
CFLAGS_DEBUG_SYMBOLS="-g"
CXXFLAGS_DEBUG_SYMBOLS="-g"
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
CFLAGS_DEBUG_SYMBOLS="-g -xs"
CXXFLAGS_DEBUG_SYMBOLS="-g0 -xs"
@@ -41370,6 +41489,20 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
C_O_FLAG_NORM="-O2"
C_O_FLAG_NONE="-O0"
fi
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# On MacOSX we optimize for size, something
# we should do for all platforms?
C_O_FLAG_HIGHEST="-Os"
C_O_FLAG_HI="-Os"
C_O_FLAG_NORM="-Os"
C_O_FLAG_NONE=""
else
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3"
C_O_FLAG_NORM="-O2"
C_O_FLAG_NONE="-O0"
fi
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3 -qstrict"
@@ -46789,6 +46922,118 @@ fi
fi

###############################################################################
#
# Check for the png library
#

# Check whether --with-libpng was given.
if test "${with_libpng+set}" = set; then :
withval=$with_libpng;
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for which libpng to use" >&5
$as_echo_n "checking for which libpng to use... " >&6; }

# default is bundled
DEFAULT_LIBPNG=bundled

#
# if user didn't specify, use DEFAULT_LIBPNG
#
if test "x${with_libpng}" = "x"; then
with_libpng=${DEFAULT_LIBPNG}
fi

if test "x${with_libpng}" = "xbundled"; then
USE_EXTERNAL_LIBPNG=false
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: bundled" >&5
$as_echo "bundled" >&6; }
elif test "x${with_libpng}" = "xsystem"; then

pkg_failed=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PNG" >&5
$as_echo_n "checking for PNG... " >&6; }

if test -n "$PNG_CFLAGS"; then
pkg_cv_PNG_CFLAGS="$PNG_CFLAGS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libpng\""; } >&5
($PKG_CONFIG --exists --print-errors "libpng") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_PNG_CFLAGS=`$PKG_CONFIG --cflags "libpng" 2>/dev/null`
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test -n "$PNG_LIBS"; then
pkg_cv_PNG_LIBS="$PNG_LIBS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libpng\""; } >&5
($PKG_CONFIG --exists --print-errors "libpng") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_PNG_LIBS=`$PKG_CONFIG --libs "libpng" 2>/dev/null`
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi

if test $pkg_failed = yes; then

if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
_pkg_short_errors_supported=yes
else
_pkg_short_errors_supported=no
fi
if test $_pkg_short_errors_supported = yes; then
PNG_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "libpng" 2>&1`
else
PNG_PKG_ERRORS=`$PKG_CONFIG --print-errors "libpng" 2>&1`
fi
# Put the nasty error message in config.log where it belongs
echo "$PNG_PKG_ERRORS" >&5

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
LIBPNG_FOUND=no
elif test $pkg_failed = untried; then
LIBPNG_FOUND=no
else
PNG_CFLAGS=$pkg_cv_PNG_CFLAGS
PNG_LIBS=$pkg_cv_PNG_LIBS
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
LIBPNG_FOUND=yes
fi
if test "x${LIBPNG_FOUND}" = "xyes"; then
USE_EXTERNAL_LIBPNG=true
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: system" >&5
$as_echo "system" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: system not found" >&5
$as_echo "system not found" >&6; }
as_fn_error $? "--with-libpng=system specified, but no libpng found!" "$LINENO" 5
fi
else
as_fn_error $? "Invalid value of --with-libpng: ${with_libpng}, use 'system' or 'bundled'" "$LINENO" 5
fi

###############################################################################
#
# Check for the zlib library
@@ -652,6 +652,46 @@ AC_DEFUN_ONCE([LIB_SETUP_MISC_LIBS],
fi
AC_SUBST(USE_EXTERNAL_LIBGIF)

###############################################################################
#
# Check for the png library
#

AC_ARG_WITH(libpng, [AS_HELP_STRING([--with-libpng],
[use libpng from build system or OpenJDK source (system, bundled) @<:@bundled@:>@])])

AC_MSG_CHECKING([for which libpng to use])

# default is bundled
DEFAULT_LIBPNG=bundled

#
# if user didn't specify, use DEFAULT_LIBPNG
#
if test "x${with_libpng}" = "x"; then
with_libpng=${DEFAULT_LIBPNG}
fi

if test "x${with_libpng}" = "xbundled"; then
USE_EXTERNAL_LIBPNG=false
AC_MSG_RESULT([bundled])
elif test "x${with_libpng}" = "xsystem"; then
PKG_CHECK_MODULES(PNG, libpng,
[ LIBPNG_FOUND=yes ],
[ LIBPNG_FOUND=no ])
if test "x${LIBPNG_FOUND}" = "xyes"; then
USE_EXTERNAL_LIBPNG=true
AC_MSG_RESULT([system])
else
AC_MSG_RESULT([system not found])
AC_MSG_ERROR([--with-libpng=system specified, but no libpng found!])
fi
else
AC_MSG_ERROR([Invalid value of --with-libpng: ${with_libpng}, use 'system' or 'bundled'])
fi
AC_SUBST(USE_EXTERNAL_LIBPNG)

###############################################################################
#
# Check for the zlib library
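[Note: PKG_CHECK_MODULES(PNG, libpng) expands to the pkg-config probe shown in the generated-configure.sh hunk above. A minimal hand-run equivalent, assuming pkg-config and a system libpng are installed; the flag values in the comments are illustrative, not from this diff:]

  if pkg-config --exists libpng; then
    PNG_CFLAGS=`pkg-config --cflags libpng`   # e.g. -I/usr/include/libpng
    PNG_LIBS=`pkg-config --libs libpng`       # e.g. -lpng
  else
    echo "--with-libpng=system specified, but no libpng found!" >&2
  fi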
@@ -254,7 +254,6 @@ BUILD_HOTSPOT=@BUILD_HOTSPOT@

# The boot jdk to use
BOOT_JDK:=@BOOT_JDK@
BOOT_JDK_JVMARGS:=@BOOT_JDK_JVMARGS@
BOOT_RTJAR:=@BOOT_RTJAR@
BOOT_TOOLSJAR=$(BOOT_JDK)/lib/tools.jar

@@ -442,8 +441,11 @@ POST_STRIP_CMD:=@POST_STRIP_CMD@
POST_MCS_CMD:=@POST_MCS_CMD@

JAVA_FLAGS:=@JAVA_FLAGS@
JAVA_FLAGS_BIG:=@JAVA_FLAGS_BIG@
JAVA_FLAGS_SMALL:=@JAVA_FLAGS_SMALL@

JAVA=@FIXPATH@ @JAVA@ $(JAVA_FLAGS)
JAVA=@FIXPATH@ @JAVA@ $(JAVA_FLAGS_BIG) $(JAVA_FLAGS)
JAVA_SMALL=@FIXPATH@ @JAVA@ $(JAVA_FLAGS_SMALL) $(JAVA_FLAGS)

JAVAC:=@FIXPATH@ @JAVAC@
# Hotspot sets this variable before reading the SPEC when compiling sa-jdi.jar. Avoid
@@ -454,7 +456,7 @@ JAVAH:=@FIXPATH@ @JAVAH@

JAR:=@FIXPATH@ @JAR@

NATIVE2ASCII:=@FIXPATH@ @NATIVE2ASCII@
NATIVE2ASCII:=@FIXPATH@ @NATIVE2ASCII@ $(addprefix -J, $(JAVA_FLAGS_SMALL))

JARSIGNER:=@FIXPATH@ @JARSIGNER@

@@ -645,6 +647,15 @@ INSTALL_SHAREDSTATEDIR=@sharedstatedir@
# Read-only single-machine data
INSTALL_SYSCONFDIR=@sysconfdir@

####################################################
#
# Libraries
#

USE_EXTERNAL_LIBPNG:=@USE_EXTERNAL_LIBPNG@
PNG_LIBS:=@PNG_LIBS@
PNG_CFLAGS:=@PNG_CFLAGS@

####################################################
#
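[Illustration, not part of the diff: with the defaults probed by configure above, the two wrappers would typically expand to invocations like the following; the exact flags depend on what the boot JDK accepted:]

  # $(JAVA): big workloads such as running javac over the whole JDK
  java -Xms64M -Xmx1600M -XX:ThreadStackSize=1536 ...
  # $(JAVA_SMALL): small short-lived tools
  java -XX:+UseSerialGC -Xms32M -Xmx512M ...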
@@ -96,8 +96,27 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETERMINE_TOOLCHAIN_TYPE],
# Use indirect variable referencing
toolchain_var_name=VALID_TOOLCHAINS_$OPENJDK_BUILD_OS
VALID_TOOLCHAINS=${!toolchain_var_name}
# First toolchain type in the list is the default
DEFAULT_TOOLCHAIN=${VALID_TOOLCHAINS%% *}

if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# On Mac OS X, default toolchain to clang after Xcode 5
XCODE_VERSION_OUTPUT=`xcodebuild -version 2>&1 | $HEAD -n 1`
$ECHO "$XCODE_VERSION_OUTPUT" | $GREP "Xcode " > /dev/null
if test $? -ne 0; then
AC_MSG_ERROR([Failed to determine Xcode version.])
fi
XCODE_MAJOR_VERSION=`$ECHO $XCODE_VERSION_OUTPUT | \
$SED -e 's/^Xcode \(@<:@1-9@:>@@<:@0-9.@:>@*\)/\1/' | \
$CUT -f 1 -d .`
AC_MSG_NOTICE([Xcode major version: $XCODE_MAJOR_VERSION])
if test $XCODE_MAJOR_VERSION -ge 5; then
DEFAULT_TOOLCHAIN="clang"
else
DEFAULT_TOOLCHAIN="gcc"
fi
else
# First toolchain type in the list is the default
DEFAULT_TOOLCHAIN=${VALID_TOOLCHAINS%% *}
fi

if test "x$with_toolchain_type" = xlist; then
# List all toolchains
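[Note: a minimal sketch of the Xcode version probe above, runnable in a shell on OS X; the sed/cut pipeline reduces a banner such as "Xcode 5.1.1" to the major version:]

  XCODE_VERSION_OUTPUT=`xcodebuild -version 2>&1 | head -n 1`    # e.g. "Xcode 5.1.1"
  XCODE_MAJOR_VERSION=`echo $XCODE_VERSION_OUTPUT | \
      sed -e 's/^Xcode \([1-9][0-9.]*\)/\1/' | cut -f 1 -d .`    # e.g. "5"
  if test $XCODE_MAJOR_VERSION -ge 5; then
    DEFAULT_TOOLCHAIN="clang"
  else
    DEFAULT_TOOLCHAIN="gcc"
  fi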
@@ -407,3 +407,4 @@ ce2d7e46f3c7e41241f3b407705a4071323a11ab jdk9-b00
b188446de75bda5fc52d102cddf242c3ef5ecbdf jdk9-b02
b2fee789d23f3cdabb3db4e51af43038e5692d3a jdk9-b03
3812c088b9456ee22c933e88aee1ece71f4e783a jdk9-b04
bdc5311e1db7598589b77015119b821bf8c828bd jdk9-b05
@@ -70,6 +70,10 @@ ifndef CC_INTERP
FORCE_TIERED=1
endif
endif
# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
ifneq (,$(filter $(ARCH),ppc64 pp64le))
FORCE_TIERED=0
endif

ifdef LP64
ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
@@ -1,6 +1,6 @@
#! /bin/sh
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@ MFLAGS=`
echo "$MFLAGS" \
| sed '
s/^-/ -/
s/ -\([^ ][^ ]*\)j/ -\1 -j/
s/ -\([^ I][^ I]*\)j/ -\1 -j/
s/ -j[0-9][0-9]*/ -j/
s/ -j\([^ ]\)/ -j -\1/
s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
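[Illustration of the sed change above, under the assumption that MFLAGS can carry -I options whose argument contains a 'j': the old pattern treated any clustered flag group ending in 'j' as a job flag, so an include path such as "-Imy/jdk" could be split apart; excluding 'I' from the character class leaves such options alone:]

  MFLAGS=" -Imy/jdk"
  # old: s/ -\([^ ][^ ]*\)j/ -\1 -j/    rewrites this to " -Imy/ -jdk"
  # new: s/ -\([^ I][^ I]*\)j/ -\1 -j/  does not match options starting with I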
@@ -260,7 +260,7 @@ ifeq ($(USE_CLANG), true)
WARNINGS_ARE_ERRORS += -Wno-empty-body
endif

WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wformat=2 -Wno-error=format-nonliteral
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wformat=2

ifeq ($(USE_CLANG),)
# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@@ -66,6 +66,10 @@ ifndef CC_INTERP
FORCE_TIERED=1
endif
endif
# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
ifneq (,$(filter $(ARCH),ppc64 pp64le))
FORCE_TIERED=0
endif

ifdef LP64
ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
@@ -215,7 +215,7 @@ ifeq ($(USE_CLANG), true)
WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
endif

WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wno-error=format-nonliteral
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2

ifeq ($(USE_CLANG),)
# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@@ -25,6 +25,9 @@

# Setup common to Zero (non-Shark) and Shark versions of VM

# override this from the main file because some version of llvm do not like -Wundef
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wunused-function -Wunused-value

# The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized
OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
# The copied fdlibm routines in sharedRuntimeTrans.o must not be optimized
@@ -118,7 +118,7 @@ endif
# Compiler warnings are treated as errors
WARNINGS_ARE_ERRORS = -Werror
# Enable these warnings. See 'info gcc' about details on these options
WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef -Wformat=2 -Wno-error=format-nonliteral
WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef -Wformat=2
CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
# Special cases
CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
@@ -124,6 +124,7 @@ class Argument VALUE_OBJ_CLASS_SPEC {
}
};

#if !defined(ABI_ELFv2)
// A ppc64 function descriptor.
struct FunctionDescriptor VALUE_OBJ_CLASS_SPEC {
private:
@@ -161,6 +162,7 @@ struct FunctionDescriptor VALUE_OBJ_CLASS_SPEC {
_env = (address) 0xbad;
}
};
#endif

class Assembler : public AbstractAssembler {
protected:
@@ -1067,6 +1069,7 @@ class Assembler : public AbstractAssembler {
// Emit an address.
inline address emit_addr(const address addr = NULL);

#if !defined(ABI_ELFv2)
// Emit a function descriptor with the specified entry point, TOC,
// and ENV. If the entry point is NULL, the descriptor will point
// just past the descriptor.
@@ -1074,6 +1077,7 @@ class Assembler : public AbstractAssembler {
inline address emit_fd(address entry = NULL,
address toc = (address) FunctionDescriptor::friend_toc,
address env = (address) FunctionDescriptor::friend_env);
#endif

/////////////////////////////////////////////////////////////////////////////////////
// PPC instructions
@@ -55,6 +55,7 @@ inline address Assembler::emit_addr(const address addr) {
return start;
}

#if !defined(ABI_ELFv2)
// Emit a function descriptor with the specified entry point, TOC, and
// ENV. If the entry point is NULL, the descriptor will point just
// past the descriptor.
@@ -73,6 +74,7 @@ inline address Assembler::emit_fd(address entry, address toc, address env) {

return (address)fd;
}
#endif

// Issue an illegal instruction. 0 is guaranteed to be an illegal instruction.
inline void Assembler::illtrap() { Assembler::emit_int32(0); }
@@ -1136,7 +1136,9 @@ address CppInterpreterGenerator::generate_native_entry(void) {
// (outgoing C args), R3_ARG1 to R10_ARG8, and F1_ARG1 to
// F13_ARG13.
__ mr(R3_ARG1, R18_locals);
#if !defined(ABI_ELFv2)
__ ld(signature_handler_fd, 0, signature_handler_fd);
#endif
__ call_stub(signature_handler_fd);
// reload method
__ ld(R19_method, state_(_method));
@@ -1295,8 +1297,13 @@ address CppInterpreterGenerator::generate_native_entry(void) {
// native result acrosss the call. No oop is present

__ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
__ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
relocInfo::none);
#endif
__ bind(sync_check_done);

//=============================================================================
@@ -1346,9 +1353,9 @@ address CppInterpreterGenerator::generate_native_entry(void) {
// notify here, we'll drop it on the floor.

__ notify_method_exit(true/*native method*/,
ilgl /*illegal state (not used for native methods)*/);

ilgl /*illegal state (not used for native methods)*/,
InterpreterMacroAssembler::NotifyJVMTI,
false /*check_exceptions*/);

//=============================================================================
// Handle exceptions
@@ -1413,7 +1420,7 @@ address CppInterpreterGenerator::generate_native_entry(void) {
// First, pop to caller's frame.
__ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1 /* set to return pc */, R22_tmp2);

__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);
// Get the address of the exception handler.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
R16_thread,
@@ -2545,7 +2552,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
__ mr(R4_ARG2, R3_ARG1); // ARG2 := ARG1

// Find the address of the "catch_exception" stub.
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
R16_thread,
R4_ARG2);
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,10 +42,6 @@
#include "runtime/vframeArray.hpp"
#endif

#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif

#ifdef ASSERT
void RegisterMap::check_location_valid() {
}
@@ -89,7 +85,10 @@ frame frame::sender_for_entry_frame(RegisterMap *map) const {

frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
// Pass callers initial_caller_sp as unextended_sp.
return frame(sender_sp(), sender_pc(), (intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp);
return frame(sender_sp(), sender_pc(),
CC_INTERP_ONLY((intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp)
NOT_CC_INTERP((intptr_t*)get_ijava_state()->sender_sp)
);
}

frame frame::sender_for_compiled_frame(RegisterMap *map) const {
@@ -183,6 +182,9 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
interpreterState istate = get_interpreterState();
address lresult = (address)istate + in_bytes(BytecodeInterpreter::native_lresult_offset());
address fresult = (address)istate + in_bytes(BytecodeInterpreter::native_fresult_offset());
#else
address lresult = (address)&(get_ijava_state()->lresult);
address fresult = (address)&(get_ijava_state()->fresult);
#endif

switch (method->result_type()) {
@@ -259,7 +261,21 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
values.describe(frame_no, (intptr_t*)&(istate->_native_fresult), " native_fresult");
values.describe(frame_no, (intptr_t*)&(istate->_native_lresult), " native_lresult");
#else
Unimplemented();
#define DESCRIBE_ADDRESS(name) \
values.describe(frame_no, (intptr_t*)&(get_ijava_state()->name), #name);

DESCRIBE_ADDRESS(method);
DESCRIBE_ADDRESS(locals);
DESCRIBE_ADDRESS(monitors);
DESCRIBE_ADDRESS(cpoolCache);
DESCRIBE_ADDRESS(bcp);
DESCRIBE_ADDRESS(esp);
DESCRIBE_ADDRESS(mdx);
DESCRIBE_ADDRESS(top_frame_sp);
DESCRIBE_ADDRESS(sender_sp);
DESCRIBE_ADDRESS(oop_tmp);
DESCRIBE_ADDRESS(lresult);
DESCRIBE_ADDRESS(fresult);
#endif
}
}
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,10 +29,6 @@
#include "runtime/synchronizer.hpp"
#include "utilities/top.hpp"

#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif

// C frame layout on PPC-64.
//
// In this figure the stack grows upwards, while memory grows
@@ -50,7 +46,7 @@
// [C_FRAME]
//
// C_FRAME:
// 0 [ABI_112]
// 0 [ABI_REG_ARGS]
// 112 CARG_9: outgoing arg 9 (arg_1 ... arg_8 via gpr_3 ... gpr_{10})
// ...
// 40+M*8 CARG_M: outgoing arg M (M is the maximum of outgoing args taken over all call sites in the procedure)
@@ -77,7 +73,7 @@
// 32 reserved
// 40 space for TOC (=R2) register for next call
//
// ABI_112:
// ABI_REG_ARGS:
// 0 [ABI_48]
// 48 CARG_1: spill slot for outgoing arg 1. used by next callee.
// ... ...
@@ -95,23 +91,25 @@
log_2_of_alignment_in_bits = 7
};

// ABI_48:
struct abi_48 {
// ABI_MINFRAME:
struct abi_minframe {
uint64_t callers_sp;
uint64_t cr; //_16
uint64_t lr;
#if !defined(ABI_ELFv2)
uint64_t reserved1; //_16
uint64_t reserved2;
#endif
uint64_t toc; //_16
// nothing to add here!
// aligned to frame::alignment_in_bytes (16)
};

enum {
abi_48_size = sizeof(abi_48)
abi_minframe_size = sizeof(abi_minframe)
};

struct abi_112 : abi_48 {
struct abi_reg_args : abi_minframe {
uint64_t carg_1;
uint64_t carg_2; //_16
uint64_t carg_3;
@@ -124,13 +122,13 @@
};

enum {
abi_112_size = sizeof(abi_112)
abi_reg_args_size = sizeof(abi_reg_args)
};

#define _abi(_component) \
(offset_of(frame::abi_112, _component))
(offset_of(frame::abi_reg_args, _component))

struct abi_112_spill : abi_112 {
struct abi_reg_args_spill : abi_reg_args {
// additional spill slots
uint64_t spill_ret;
uint64_t spill_fret; //_16
@@ -138,11 +136,11 @@
};

enum {
abi_112_spill_size = sizeof(abi_112_spill)
abi_reg_args_spill_size = sizeof(abi_reg_args_spill)
};

#define _abi_112_spill(_component) \
(offset_of(frame::abi_112_spill, _component))
#define _abi_reg_args_spill(_component) \
(offset_of(frame::abi_reg_args_spill, _component))

// non-volatile GPRs:

@@ -195,7 +193,85 @@
#define _spill_nonvolatiles_neg(_component) \
(int)(-frame::spill_nonvolatiles_size + offset_of(frame::spill_nonvolatiles, _component))

// Frame layout for the Java interpreter on PPC64.

#ifndef CC_INTERP
// Frame layout for the Java template interpreter on PPC64.
//
// Diffs to the CC_INTERP are marked with 'X'.
//
// TOP_IJAVA_FRAME:
//
// 0 [TOP_IJAVA_FRAME_ABI]
// alignment (optional)
// [operand stack]
// [monitors] (optional)
// X[IJAVA_STATE]
// note: own locals are located in the caller frame.
//
// PARENT_IJAVA_FRAME:
//
// 0 [PARENT_IJAVA_FRAME_ABI]
// alignment (optional)
// [callee's Java result]
// [callee's locals w/o arguments]
// [outgoing arguments]
// [used part of operand stack w/o arguments]
// [monitors] (optional)
// X[IJAVA_STATE]
//

struct parent_ijava_frame_abi : abi_minframe {
};

enum {
parent_ijava_frame_abi_size = sizeof(parent_ijava_frame_abi)
};

#define _parent_ijava_frame_abi(_component) \
(offset_of(frame::parent_ijava_frame_abi, _component))

struct top_ijava_frame_abi : abi_reg_args {
};

enum {
top_ijava_frame_abi_size = sizeof(top_ijava_frame_abi)
};

#define _top_ijava_frame_abi(_component) \
(offset_of(frame::top_ijava_frame_abi, _component))

struct ijava_state {
#ifdef ASSERT
uint64_t ijava_reserved; // Used for assertion.
uint64_t ijava_reserved2; // Inserted for alignment.
#endif
uint64_t method;
uint64_t locals;
uint64_t monitors;
uint64_t cpoolCache;
uint64_t bcp;
uint64_t esp;
uint64_t mdx;
uint64_t top_frame_sp; // Maybe define parent_frame_abi and move there.
uint64_t sender_sp;
// Slots only needed for native calls. Maybe better to move elsewhere.
uint64_t oop_tmp;
uint64_t lresult;
uint64_t fresult;
// Aligned to frame::alignment_in_bytes (16).
};

enum {
ijava_state_size = sizeof(ijava_state)
};

#define _ijava_state_neg(_component) \
(int) (-frame::ijava_state_size + offset_of(frame::ijava_state, _component))

#else // CC_INTERP:

// Frame layout for the Java C++ interpreter on PPC64.
//
// This frame layout provides a C-like frame for every Java frame.
//
@@ -242,7 +318,7 @@
// [ENTRY_FRAME_LOCALS]
//
// PARENT_IJAVA_FRAME_ABI:
// 0 [ABI_48]
// 0 [ABI_MINFRAME]
// top_frame_sp
// initial_caller_sp
//
@@ -258,7 +334,7 @@

// PARENT_IJAVA_FRAME_ABI

struct parent_ijava_frame_abi : abi_48 {
struct parent_ijava_frame_abi : abi_minframe {
// SOE registers.
// C2i adapters spill their top-frame stack-pointer here.
uint64_t top_frame_sp; // carg_1
@@ -285,7 +361,7 @@
uint64_t carg_6_unused; //_16 carg_6
uint64_t carg_7_unused; // carg_7
// Use arg8 for storing frame_manager_lr. The size of
// top_ijava_frame_abi must match abi_112.
// top_ijava_frame_abi must match abi_reg_args.
uint64_t frame_manager_lr; //_16 carg_8
// nothing to add here!
// aligned to frame::alignment_in_bytes (16)
@@ -298,6 +374,8 @@
#define _top_ijava_frame_abi(_component) \
(offset_of(frame::top_ijava_frame_abi, _component))

#endif // CC_INTERP

// ENTRY_FRAME

struct entry_frame_locals {
@@ -395,8 +473,8 @@
intptr_t* fp() const { return _fp; }

// Accessors for ABIs
inline abi_48* own_abi() const { return (abi_48*) _sp; }
inline abi_48* callers_abi() const { return (abi_48*) _fp; }
inline abi_minframe* own_abi() const { return (abi_minframe*) _sp; }
inline abi_minframe* callers_abi() const { return (abi_minframe*) _fp; }

private:

@@ -421,6 +499,14 @@
#ifdef CC_INTERP
// Additional interface for interpreter frames:
inline interpreterState get_interpreterState() const;
#else
inline ijava_state* get_ijava_state() const;
// Some convenient register frame setters/getters for deoptimization.
inline intptr_t* interpreter_frame_esp() const;
inline void interpreter_frame_set_cpcache(ConstantPoolCache* cp);
inline void interpreter_frame_set_esp(intptr_t* esp);
inline void interpreter_frame_set_top_frame_sp(intptr_t* top_frame_sp);
inline void interpreter_frame_set_sender_sp(intptr_t* sender_sp);
#endif // CC_INTERP

// Size of a monitor in bytes.
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,6 @@
#ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
#define CPU_PPC_VM_FRAME_PPC_INLINE_HPP

#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif

// Inline functions for ppc64 frames:

// Find codeblob and set deopt_state.
@@ -199,6 +195,75 @@ inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
interpreterState istate = get_interpreterState();
return &istate->_constants;
}

#else // !CC_INTERP

// Template Interpreter frame value accessors.

inline frame::ijava_state* frame::get_ijava_state() const {
return (ijava_state*) ((uintptr_t)fp() - ijava_state_size);
}

inline intptr_t** frame::interpreter_frame_locals_addr() const {
return (intptr_t**) &(get_ijava_state()->locals);
}
inline intptr_t* frame::interpreter_frame_bcx_addr() const {
return (intptr_t*) &(get_ijava_state()->bcp);
}
inline intptr_t* frame::interpreter_frame_mdx_addr() const {
return (intptr_t*) &(get_ijava_state()->mdx);
}
// Pointer beyond the "oldest/deepest" BasicObjectLock on stack.
inline BasicObjectLock* frame::interpreter_frame_monitor_end() const {
return (BasicObjectLock *) get_ijava_state()->monitors;
}

inline BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
return (BasicObjectLock *) get_ijava_state();
}

// SAPJVM ASc 2012-11-21. Return register stack slot addr at which currently interpreted method is found
inline Method** frame::interpreter_frame_method_addr() const {
return (Method**) &(get_ijava_state()->method);
}
inline ConstantPoolCache** frame::interpreter_frame_cpoolcache_addr() const {
return (ConstantPoolCache**) &(get_ijava_state()->cpoolCache);
}
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
return (ConstantPoolCache**) &(get_ijava_state()->cpoolCache);
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
return (oop *) &(get_ijava_state()->oop_tmp);
}
inline intptr_t* frame::interpreter_frame_esp() const {
return (intptr_t*) get_ijava_state()->esp;
}

// Convenient setters
inline void frame::interpreter_frame_set_monitor_end(BasicObjectLock* end) { get_ijava_state()->monitors = (intptr_t) end;}
inline void frame::interpreter_frame_set_cpcache(ConstantPoolCache* cp) { *frame::interpreter_frame_cpoolcache_addr() = cp; }
inline void frame::interpreter_frame_set_esp(intptr_t* esp) { get_ijava_state()->esp = (intptr_t) esp; }
inline void frame::interpreter_frame_set_top_frame_sp(intptr_t* top_frame_sp) { get_ijava_state()->top_frame_sp = (intptr_t) top_frame_sp; }
inline void frame::interpreter_frame_set_sender_sp(intptr_t* sender_sp) { get_ijava_state()->sender_sp = (intptr_t) sender_sp; }

inline intptr_t* frame::interpreter_frame_expression_stack() const {
return (intptr_t*)interpreter_frame_monitor_end() - 1;
}

inline jint frame::interpreter_frame_expression_stack_direction() {
return -1;
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
return ((intptr_t*) get_ijava_state()->esp) + Interpreter::stackElementWords;
}

inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
return &interpreter_frame_tos_address()[offset];
}

#endif // CC_INTERP

inline int frame::interpreter_frame_monitor_size() {
File diff suppressed because it is too large
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* Copyright 2012, 2014 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -29,7 +29,7 @@
|
||||
#include "assembler_ppc.inline.hpp"
|
||||
#include "interpreter/invocationCounter.hpp"
|
||||
|
||||
// This file specializes the assembler with interpreter-specific macros
|
||||
// This file specializes the assembler with interpreter-specific macros.
|
||||
|
||||
|
||||
class InterpreterMacroAssembler: public MacroAssembler {
|
||||
@ -39,15 +39,176 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
|
||||
void null_check_throw(Register a, int offset, Register temp_reg);
|
||||
|
||||
// Handy address generation macros
|
||||
void branch_to_entry(address entry, Register Rscratch);
|
||||
|
||||
// Handy address generation macros.
|
||||
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
|
||||
#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
|
||||
|
||||
#ifdef CC_INTERP
|
||||
#define state_(field_name) in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R14_state
|
||||
#define prev_state_(field_name) in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R15_prev_state
|
||||
void pop (TosState state) {}; // Not needed.
|
||||
void push(TosState state) {}; // Not needed.
|
||||
#endif
|
||||
|
||||
#ifndef CC_INTERP
|
||||
virtual void check_and_handle_popframe(Register java_thread);
|
||||
virtual void check_and_handle_earlyret(Register java_thread);
|
||||
|
||||
// Base routine for all dispatches.
|
||||
void dispatch_base(TosState state, address* table);
|
||||
|
||||
void load_earlyret_value(TosState state, Register Rscratch1);
|
||||
|
||||
static const Address l_tmp;
|
||||
static const Address d_tmp;
|
||||
|
||||
// dispatch routines
|
||||
void dispatch_next(TosState state, int step = 0);
|
||||
void dispatch_via (TosState state, address* table);
|
||||
void load_dispatch_table(Register dst, address* table);
|
||||
void dispatch_Lbyte_code(TosState state, Register bytecode, address* table, bool verify = false);
|
||||
|
||||
// Called by shared interpreter generator.
|
||||
void dispatch_prolog(TosState state, int step = 0);
|
||||
void dispatch_epilog(TosState state, int step = 0);
|
||||
|
||||
// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls.
|
||||
void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
|
||||
void super_call_VM(Register thread_cache, Register oop_result, Register last_java_sp,
|
||||
address entry_point, Register arg_1, Register arg_2, bool check_exception = true);
|
||||
|
||||
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
|
||||
// a subtype of super_klass. Blows registers tmp1, tmp2 and tmp3.
|
||||
void gen_subtype_check(Register sub_klass, Register super_klass,
|
||||
Register tmp1, Register tmp2, Register tmp3, Label &ok_is_subtype);
|
||||
|
||||
// Load object from cpool->resolved_references(index).
|
||||
void load_resolved_reference_at_index(Register result, Register index);
|
||||
|
||||
void generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1);
|
||||
void load_receiver(Register Rparam_count, Register Rrecv_dst);
|
||||
|
||||
// helpers for expression stack
|
||||
void pop_i( Register r = R17_tos);
|
||||
void pop_ptr( Register r = R17_tos);
|
||||
void pop_l( Register r = R17_tos);
|
||||
void pop_f(FloatRegister f = F15_ftos);
|
||||
void pop_d(FloatRegister f = F15_ftos );
|
||||
|
||||
void push_i( Register r = R17_tos);
|
||||
void push_ptr( Register r = R17_tos);
|
||||
void push_l( Register r = R17_tos);
|
||||
void push_f(FloatRegister f = F15_ftos );
|
||||
void push_d(FloatRegister f = F15_ftos);
|
||||
|
||||
void push_2ptrs(Register first, Register second);
|
||||
|
||||
void push_l_pop_d(Register l = R17_tos, FloatRegister d = F15_ftos);
|
||||
void push_d_pop_l(FloatRegister d = F15_ftos, Register l = R17_tos);
|
||||
|
||||
void pop (TosState state); // transition vtos -> state
|
||||
void push(TosState state); // transition state -> vtos
|
||||
void empty_expression_stack(); // Resets both Lesp and SP.
|
||||
|
||||
public:
// Load values from bytecode stream:

enum signedOrNot { Signed, Unsigned };
enum setCCOrNot { set_CC, dont_set_CC };

void get_2_byte_integer_at_bcp(int bcp_offset,
Register Rdst,
signedOrNot is_signed);

void get_4_byte_integer_at_bcp(int bcp_offset,
Register Rdst,
signedOrNot is_signed = Unsigned);

void get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size);

void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));


// common code

void field_offset_at(int n, Register tmp, Register dest, Register base);
int field_offset_at(Register object, address bcp, int offset);
void fast_iaaccess(int n, address bcp);
void fast_iagetfield(address bcp);
void fast_iaputfield(address bcp, bool do_store_check);

void index_check(Register array, Register index, int index_shift, Register tmp, Register res);
void index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res);

void get_const(Register Rdst);
void get_constant_pool(Register Rdst);
void get_constant_pool_cache(Register Rdst);
void get_cpool_and_tags(Register Rcpool, Register Rtags);
void is_a(Label& L);

// Java Call Helpers
void call_from_interpreter(Register Rtarget_method, Register Rret_addr, Register Rscratch1, Register Rscratch2);

// --------------------------------------------------

void unlock_if_synchronized_method(TosState state, bool throw_monitor_exception = true,
bool install_monitor_exception = true);

// Removes the current activation (incl. unlocking of monitors).
// Additionally this code is used for earlyReturn in which case we
// want to skip throwing an exception and installing an exception.
void remove_activation(TosState state,
bool throw_monitor_exception = true,
bool install_monitor_exception = true);
void merge_frames(Register Rtop_frame_sp, Register return_pc, Register Rscratch1, Register Rscratch2); // merge top frames

void add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2);

// Local variable access helpers
void load_local_int(Register Rdst_value, Register Rdst_address, Register Rindex);
void load_local_long(Register Rdst_value, Register Rdst_address, Register Rindex);
void load_local_ptr(Register Rdst_value, Register Rdst_address, Register Rindex);
void load_local_float(FloatRegister Rdst_value, Register Rdst_address, Register Rindex);
void load_local_double(FloatRegister Rdst_value, Register Rdst_address, Register Rindex);
void store_local_int(Register Rvalue, Register Rindex);
void store_local_long(Register Rvalue, Register Rindex);
void store_local_ptr(Register Rvalue, Register Rindex);
void store_local_float(FloatRegister Rvalue, Register Rindex);
void store_local_double(FloatRegister Rvalue, Register Rindex);

// Call VM for std frames
// Special call VM versions that check for exceptions and forward exception
// via short cut (not via expensive forward exception stub).
void check_and_forward_exception(Register Rscratch1, Register Rscratch2);
void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
// Should not be used:
void call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions = true) {ShouldNotReachHere();}
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true) {ShouldNotReachHere();}
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true) {ShouldNotReachHere();}
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true) {ShouldNotReachHere();}

Address first_local_in_stack();

enum LoadOrStore { load, store };
void static_iload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
void static_aload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
void static_dload_or_store(int which_local, LoadOrStore direction);

void save_interpreter_state(Register scratch);
void restore_interpreter_state(Register scratch, bool bcp_and_mdx_only = false);

void increment_backedge_counter(const Register Rcounters, Register Rtmp, Register Rtmp2, Register Rscratch);
void test_backedge_count_for_osr(Register backedge_count, Register branch_bcp, Register Rtmp);

void record_static_call_in_profile(Register Rentry, Register Rtmp);
void record_receiver_call_in_profile(Register Rklass, Register Rentry, Register Rtmp);
#endif // !CC_INTERP

void get_method_counters(Register method, Register Rcounters, Label& skip);
void increment_invocation_counter(Register iv_be_count, Register Rtmp1, Register Rtmp2_r0);

@ -55,12 +216,59 @@ class InterpreterMacroAssembler: public MacroAssembler {
void lock_object (Register lock_reg, Register obj_reg);
void unlock_object(Register lock_reg, bool check_for_exceptions = true);

#ifndef CC_INTERP

// Interpreter profiling operations
void set_method_data_pointer_for_bcp();
void test_method_data_pointer(Label& zero_continue);
void verify_method_data_pointer();
void test_invocation_counter_for_mdp(Register invocation_count, Register Rscratch, Label &profile_continue);

void set_mdp_data_at(int constant, Register value);

void increment_mdp_data_at(int constant, Register counter_addr, Register Rbumped_count, bool decrement = false);

void increment_mdp_data_at(Register counter_addr, Register Rbumped_count, bool decrement = false);
void increment_mdp_data_at(Register reg, int constant, Register scratch, Register Rbumped_count, bool decrement = false);

void set_mdp_flag_at(int flag_constant, Register scratch);
void test_mdp_data_at(int offset, Register value, Label& not_equal_continue, Register test_out);

void update_mdp_by_offset(int offset_of_disp, Register scratch);
void update_mdp_by_offset(Register reg, int offset_of_disp,
Register scratch);
void update_mdp_by_constant(int constant);
void update_mdp_for_ret(TosState state, Register return_bci);

void profile_taken_branch(Register scratch, Register bumped_count);
void profile_not_taken_branch(Register scratch1, Register scratch2);
void profile_call(Register scratch1, Register scratch2);
void profile_final_call(Register scratch1, Register scratch2);
void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2, bool receiver_can_be_null);
void profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2);
void profile_typecheck_failed(Register Rscratch1, Register Rscratch2);
void profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2);
void profile_switch_default(Register scratch1, Register scratch2);
void profile_switch_case(Register index, Register scratch1,Register scratch2, Register scratch3);
void profile_null_seen(Register Rscratch1, Register Rscratch2);
void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);

#endif // !CC_INTERP

// Debugging
void verify_oop(Register reg, TosState state = atos); // only if +VerifyOops && state == atos
#ifndef CC_INTERP
void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
void verify_FPU(int stack_depth, TosState state = ftos);
#endif // !CC_INTERP

// support for jvmdi/jvmpi
typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;

// Support for jvmdi/jvmpi.
void notify_method_entry();
void notify_method_exit(bool is_native_method, TosState state);
void notify_method_exit(bool is_native_method, TosState state,
NotifyMethodExitMode mode, bool check_exceptions);

#ifdef CC_INTERP
// Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME

@ -109,8 +109,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
}

void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
#if !defined(ABI_ELFv2)
// Emit fd for current codebuffer. Needs patching!
__ emit_fd();
#endif

// Generate code to handle arguments.
iterate(fingerprint);
@ -127,11 +129,13 @@ void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprin
// Implementation of SignatureHandlerLibrary

void SignatureHandlerLibrary::pd_set_handler(address handler) {
#if !defined(ABI_ELFv2)
// patch fd here.
FunctionDescriptor* fd = (FunctionDescriptor*) handler;

fd->set_entry(handler + (int)sizeof(FunctionDescriptor));
assert(fd->toc() == (address)0xcafe, "need to adjust TOC here");
#endif
}

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,10 +51,6 @@
#include "c1/c1_Runtime1.hpp"
#endif

#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC"
#endif

#define __ _masm->

#ifdef PRODUCT
@ -128,13 +124,13 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
const Register target_sp = R28_tmp8;
const FloatRegister floatSlot = F0;

address entry = __ emit_fd();
address entry = __ function_entry();

__ save_LR_CR(R0);
__ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
// We use target_sp for storing arguments in the C frame.
__ mr(target_sp, R1_SP);
__ push_frame_abi112_nonvolatiles(0, R11_scratch1);
__ push_frame_reg_args_nonvolatiles(0, R11_scratch1);

__ mr(arg_java, R3_ARG1);

@ -147,7 +143,8 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
#ifdef CC_INTERP
__ ld(R19_method, state_(_method));
#else
__ unimplemented("slow signature handler 1");
__ ld(R19_method, 0, target_sp);
__ ld(R19_method, _ijava_state_neg(method), R19_method);
#endif

// Get the result handler.
@ -157,7 +154,8 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
#ifdef CC_INTERP
__ ld(R19_method, state_(_method));
#else
__ unimplemented("slow signature handler 2");
__ ld(R19_method, 0, target_sp);
__ ld(R19_method, _ijava_state_neg(method), R19_method);
#endif

{
@ -453,7 +451,7 @@ address InterpreterGenerator::generate_abstract_entry(void) {
//
// Registers alive
// R16_thread - JavaThread*
// R19_method - callee's methodOop (method to be invoked)
// R19_method - callee's method (method to be invoked)
// R1_SP - SP prepared such that caller's outgoing args are near top
// LR - return address to caller
//
@ -474,7 +472,7 @@ address InterpreterGenerator::generate_abstract_entry(void) {

// Push a new C frame and save LR.
__ save_LR_CR(R0);
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);

// This is not a leaf but we have a JavaFrameAnchor now and we will
// check (create) exceptions afterward so this is ok.
@ -491,7 +489,12 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// Return to frame manager, it will handle the pending exception.
__ blr();
#else
Unimplemented();
// We don't know our caller, so jump to the general forward exception stub,
// which will also pop our full frame off. Satisfy the interface of
// SharedRuntime::generate_forward_exception()
__ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
__ mtctr(R11_scratch1);
__ bctr();
#endif

return entry;
@ -500,8 +503,9 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_accessor_entry(void) {
if(!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods)))
if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
return NULL;
}

Label Lslow_path, Lacquire;

@ -586,10 +590,14 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// Load from branch table and dispatch (volatile case: one instruction ahead)
__ sldi(Rflags, Rflags, LogBytesPerWord);
__ cmpwi(CCR6, Rscratch, 1); // volatile?
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
}
__ ldx(Rbtable, Rbtable, Rflags);

__ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
}
__ mtctr(Rbtable);
__ bctr();

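The branch-table dispatch above is worth a note: each table slot holds the address of a type-specific load sequence, and the volatile variant of that sequence starts exactly one instruction (a memory fence) before the non-volatile entry point, so selecting it is a subtraction instead of a second table. A minimal model of that arithmetic, assuming fixed 4-byte PPC instructions; the names and addresses are illustrative, not HotSpot's:

#include <cstdint>

int main() {
  const uintptr_t BytesPerInstWord = 4;   // fixed PPC instruction size
  uintptr_t nonvolatile_entry = 0x1000;   // table slot target, just after the fence
  bool is_volatile = true;                // does the field require acquire semantics?
  // The volatile entry sits one instruction earlier, where the fence was emitted.
  uintptr_t entry = nonvolatile_entry - (is_volatile ? BytesPerInstWord : 0);
  return entry == 0x0ffc ? 0 : 1;         // fence lives at entry - 4
}

On CPUs where support_IRIW_for_not_multiple_copy_atomic_cpu is false, the patch skips the shift and subtraction entirely, so both cases land on the non-volatile entry.
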
@ -605,7 +613,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
}
assert(all_uninitialized != all_initialized, "consistency"); // either or

__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
@ -614,7 +622,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {

if (branch_table[itos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[itos] = __ pc(); // non-volatile_entry point
__ lwax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@ -623,7 +631,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {

if (branch_table[ltos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ltos] = __ pc(); // non-volatile_entry point
__ ldx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@ -632,7 +640,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {

if (branch_table[btos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[btos] = __ pc(); // non-volatile_entry point
__ lbzx(R3_RET, Rclass_or_obj, Roffset);
__ extsb(R3_RET, R3_RET);
@ -642,7 +650,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {

if (branch_table[ctos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ctos] = __ pc(); // non-volatile_entry point
__ lhzx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@ -651,7 +659,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {

if (branch_table[stos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[stos] = __ pc(); // non-volatile_entry point
__ lhax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@ -660,7 +668,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {

if (branch_table[atos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[atos] = __ pc(); // non-volatile_entry point
__ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
__ verify_oop(R3_RET);
@ -683,10 +691,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
#endif

__ bind(Lslow_path);
assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
__ load_const_optimized(Rscratch, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
__ mtctr(Rscratch);
__ bctr();
__ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
__ flush();

return entry;
@ -773,10 +778,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// Generate regular method entry.
__ bind(slow_path);
assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
__ load_const_optimized(R11_scratch1, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
__ mtctr(R11_scratch1);
__ bctr();
__ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
__ flush();

return entry;

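Both entry generators above replace the same three-instruction tail (load the target constant, mtctr, bctr) with a single branch_to_entry helper. A reconstruction of what such a helper plausibly does, for illustration only — the real body lives elsewhere in MacroAssembler and may differ in detail:

// Illustrative reconstruction, not the patch's code.
void branch_to_entry_sketch(MacroAssembler* masm, address entry, Register tmp) {
  masm->load_const_optimized(tmp, entry, R0); // materialize the target address
  masm->mtctr(tmp);                           // move it into the count register
  masm->bctr();                               // branch via CTR
}
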
@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,15 +28,23 @@

public:

// Stack index relative to tos (which points at value)
// Stack index relative to tos (which points at value).
static int expr_index_at(int i) {
return stackElementWords * i;
}

// Already negated by c++ interpreter
// Already negated by c++ interpreter.
static int local_index_at(int i) {
assert(i <= 0, "local direction already negated");
return stackElementWords * i;
}

#ifndef CC_INTERP
// The offset in bytes to access an expression stack slot
// relative to the esp pointer.
static int expr_offset_in_bytes(int slot) {
return stackElementSize * slot + wordSize;
}
#endif

#endif // CPU_PPC_VM_INTERPRETER_PPC_HPP

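A worked example of the index helpers above, under the usual 64-bit values (stackElementWords == 1, stackElementSize == 8, wordSize == 8); these constants come from shared interpreter code, so treat them as assumptions here:

#include <cassert>

static const int stackElementWords = 1; // assumed 64-bit value
static const int stackElementSize  = 8; // assumed 64-bit value
static const int wordSize          = 8; // assumed 64-bit value

static int expr_index_at(int i)        { return stackElementWords * i; }
static int expr_offset_in_bytes(int s) { return stackElementSize * s + wordSize; }

int main() {
  assert(expr_index_at(2) == 2);        // two slots above the tos pointer
  assert(expr_offset_in_bytes(0) == 8); // slot 0 sits one word above esp
  return 0;
}
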
@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,10 +26,6 @@
#ifndef CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
#define CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP

#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif

public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:

@ -594,7 +594,13 @@ void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool
"can't identify emitted call");
} else {
// variant 1:

#if defined(ABI_ELFv2)
nop();
calculate_address_from_global_toc(R12, dest, true, true, false);
mtctr(R12);
nop();
nop();
#else
mr(R0, R11); // spill R11 -> R0.

// Load the destination address into CTR,
@ -604,6 +610,7 @@ void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool
mtctr(R11);
mr(R11, R0); // spill R11 <- R0.
nop();
#endif

// do the call/jump
if (link) {
@ -912,16 +919,16 @@ void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
}
}

// Push a frame of size `bytes' plus abi112 on top.
void MacroAssembler::push_frame_abi112(unsigned int bytes, Register tmp) {
push_frame(bytes + frame::abi_112_size, tmp);
// Push a frame of size `bytes' plus abi_reg_args on top.
void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) {
push_frame(bytes + frame::abi_reg_args_size, tmp);
}

// Set up a new C frame with a spill area for non-volatile GPRs and
// additional space for local variables.
void MacroAssembler::push_frame_abi112_nonvolatiles(unsigned int bytes,
Register tmp) {
push_frame(bytes + frame::abi_112_size + frame::spill_nonvolatiles_size, tmp);
void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes,
Register tmp) {
push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp);
}

// Pop current C frame.
@ -929,6 +936,42 @@ void MacroAssembler::pop_frame() {
ld(R1_SP, _abi(callers_sp), R1_SP);
}

#if defined(ABI_ELFv2)
address MacroAssembler::branch_to(Register r_function_entry, bool and_link) {
// TODO(asmundak): make sure the caller uses R12 as function descriptor
// most of the time.
if (R12 != r_function_entry) {
mr(R12, r_function_entry);
}
mtctr(R12);
// Do a call or a branch.
if (and_link) {
bctrl();
} else {
bctr();
}
_last_calls_return_pc = pc();

return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address MacroAssembler::call_c(Register r_function_entry) {
return branch_to(r_function_entry, /*and_link=*/true);
}

// For tail calls: only branch, don't link, so callee returns to caller of this function.
address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) {
return branch_to(r_function_entry, /*and_link=*/false);
}

address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) {
load_const(R12, function_entry, R0);
return branch_to(R12, /*and_link=*/true);
}

#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// Updates and returns _last_calls_return_pc.
@ -1077,6 +1120,7 @@ address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
}
return _last_calls_return_pc;
}
#endif

void MacroAssembler::call_VM_base(Register oop_result,
Register last_java_sp,
@ -1091,8 +1135,11 @@ void MacroAssembler::call_VM_base(Register oop_result,

// ARG1 must hold thread address.
mr(R3_ARG1, R16_thread);

#if defined(ABI_ELFv2)
address return_pc = call_c(entry_point, relocInfo::none);
#else
address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
#endif

reset_last_Java_frame();

@ -1113,7 +1160,11 @@ void MacroAssembler::call_VM_base(Register oop_result,

void MacroAssembler::call_VM_leaf_base(address entry_point) {
BLOCK_COMMENT("call_VM_leaf {");
#if defined(ABI_ELFv2)
call_c(entry_point, relocInfo::none);
#else
call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
#endif
BLOCK_COMMENT("} call_VM_leaf");
}

@ -2227,7 +2278,7 @@ void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offs
// VM call needs frame to access(write) O register.
if (needs_frame) {
save_LR_CR(Rtmp1);
push_frame_abi112(0, Rtmp2);
push_frame_reg_args(0, Rtmp2);
}

if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
@ -2361,7 +2412,8 @@ void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, R
#ifdef CC_INTERP
ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
#else
Unimplemented();
address entry = pc();
load_const_optimized(tmp1, entry);
#endif

set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
@ -2421,6 +2473,16 @@ void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck)
}
}

void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
if (UseCompressedClassPointers) {
if (val == noreg) {
val = R0;
li(val, 0);
}
stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed
}
}

int MacroAssembler::instr_size_for_decode_klass_not_null() {
if (!UseCompressedClassPointers) return 0;
int num_instrs = 1; // shift or move
@ -3006,13 +3068,13 @@ void MacroAssembler::verify_oop(Register oop, const char* msg) {
mr(R0, tmp);
// kill tmp
save_LR_CR(tmp);
push_frame_abi112(nbytes_save, tmp);
push_frame_reg_args(nbytes_save, tmp);
// restore tmp
mr(tmp, R0);
save_volatile_gprs(R1_SP, 112); // except R0
// load FunctionDescriptor**
// load FunctionDescriptor** / entry_address *
load_const(tmp, fd);
// load FunctionDescriptor*
// load FunctionDescriptor* / entry_address
ld(tmp, 0, tmp);
mr(R4_ARG2, oop);
load_const(R3_ARG1, (address)msg);
@ -3092,3 +3154,15 @@ void MacroAssembler::zap_from_to(Register low, int before, Register high, int af
}

#endif // !PRODUCT

SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
assert(sizeof(bool) == 1, "PowerPC ABI");
masm->lbz(temp, simm16_offset, temp);
masm->cmpwi(CCR0, temp, 0);
masm->beq(CCR0, _label);
}

SkipIfEqualZero::~SkipIfEqualZero() {
_masm->bind(_label);
}

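The constructor/destructor pair above forms an RAII guard over emitted code: construction emits a flag load, a compare against zero, and a forward branch; destruction binds the branch target. A hypothetical usage sketch — the function and flag names here are made up for illustration:

// Hypothetical generator fragment; TraceMyFeature is an assumed develop flag.
static bool TraceMyFeature = false;

void emit_guarded_trace(MacroAssembler* masm, Register rtmp) {
  SkipIfEqualZero skip(masm, rtmp, &TraceMyFeature);
  // ... code emitted here is jumped over at run-time while the flag is false ...
} // ~SkipIfEqualZero() binds the skip target here
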
@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -279,12 +279,12 @@ class MacroAssembler: public Assembler {
// Push a frame of size `bytes'. No abi space provided.
void push_frame(unsigned int bytes, Register tmp);

// Push a frame of size `bytes' plus abi112 on top.
void push_frame_abi112(unsigned int bytes, Register tmp);
// Push a frame of size `bytes' plus abi_reg_args on top.
void push_frame_reg_args(unsigned int bytes, Register tmp);

// Set up a new C frame with a spill area for non-volatile GPRs and additional
// space for local variables
void push_frame_abi112_nonvolatiles(unsigned int bytes, Register tmp);
void push_frame_reg_args_nonvolatiles(unsigned int bytes, Register tmp);

// pop current C frame
void pop_frame();
@ -296,17 +296,31 @@ class MacroAssembler: public Assembler {
private:
address _last_calls_return_pc;

#if defined(ABI_ELFv2)
// Generic version of a call to C function.
// Updates and returns _last_calls_return_pc.
address branch_to(Register function_entry, bool and_link);
#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// Updates and returns _last_calls_return_pc.
address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
#endif

public:

// Get the pc where the last call will return to. Returns _last_calls_return_pc.
inline address last_calls_return_pc();

#if defined(ABI_ELFv2)
// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address call_c(Register function_entry);
// For tail calls: only branch, don't link, so callee returns to caller of this function.
address call_c_and_return_to_caller(Register function_entry);
address call_c(address function_entry, relocInfo::relocType rt);
#else
// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address call_c(Register function_descriptor);
@ -315,6 +329,7 @@ class MacroAssembler: public Assembler {
address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
Register toc);
#endif

protected:

@ -551,12 +566,14 @@ class MacroAssembler: public Assembler {

// Load heap oop and decompress. Loaded oop may not be null.
inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
/*specify if d must stay uncompressed*/ Register tmp = noreg);

// Null allowed.
inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);

// Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
inline void encode_heap_oop_not_null(Register d);
inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
inline void decode_heap_oop_not_null(Register d);

// Null allowed.
@ -566,6 +583,7 @@ class MacroAssembler: public Assembler {
void load_klass(Register dst, Register src);
void load_klass_with_trap_null_check(Register dst, Register src);
void store_klass(Register dst_oop, Register klass, Register tmp = R0);
void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
static int instr_size_for_decode_klass_not_null();
void decode_klass_not_null(Register dst, Register src = noreg);
void encode_klass_not_null(Register dst, Register src = noreg);
@ -649,6 +667,11 @@ class MacroAssembler: public Assembler {
void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}

// Convenience method returning function entry. For the ELFv1 case
// creates function descriptor at the current address and returns
// the pointer to it. For the ELFv2 case returns the current address.
inline address function_entry();

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

@ -673,4 +696,21 @@ class MacroAssembler: public Assembler {
void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};

// class SkipIfEqualZero:
//
// Instantiating this class will result in assembly code being output that will
// jump around any code emitted between the creation of the instance and its
// automatic destruction at the end of a scope block, depending on the value of
// the flag passed to the constructor, which will be checked at run-time.
class SkipIfEqualZero : public StackObj {
private:
MacroAssembler* _masm;
Label _label;

public:
// 'Temp' is a temp register that this object can use (and trash).
explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
~SkipIfEqualZero();
};

#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -321,6 +321,15 @@ inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstan
}
}

inline void MacroAssembler::store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1, Register tmp) {
if (UseCompressedOops) {
Register compressedOop = encode_heap_oop_not_null((tmp != noreg) ? tmp : d, d);
stw(compressedOop, offs, s1);
} else {
std(d, offs, s1);
}
}

inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1) {
if (UseCompressedOops) {
lwz(d, offs, s1);
@ -330,13 +339,17 @@ inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, R
}
}

inline void MacroAssembler::encode_heap_oop_not_null(Register d) {
inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register src) {
Register current = (src!=noreg) ? src : d; // Compressed oop is in d if no src provided.
if (Universe::narrow_oop_base() != NULL) {
sub(d, d, R30);
sub(d, current, R30);
current = d;
}
if (Universe::narrow_oop_shift() != 0) {
srdi(d, d, LogMinObjAlignmentInBytes);
srdi(d, current, LogMinObjAlignmentInBytes);
current = d;
}
return current; // Encoded oop is in this register.
}

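For readers unfamiliar with compressed oops, the encode above is plain pointer arithmetic: subtract the heap base (kept in R30 in this port) and shift right by the object-alignment log; decode is the inverse. The returned register lets store_heap_oop_not_null compress into a scratch register so the original oop stays intact. A self-contained model with an assumed base and shift — the real values come from Universe::narrow_oop_base() and Universe::narrow_oop_shift():

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t narrow_oop_base  = 0x0000001000000000ULL; // assumption
  const unsigned  narrow_oop_shift = 3; // assumed LogMinObjAlignmentInBytes

  uintptr_t oop     = narrow_oop_base + 0x12340;                      // 8-byte aligned
  uintptr_t narrow  = (oop - narrow_oop_base) >> narrow_oop_shift;    // encode
  uintptr_t decoded = (narrow << narrow_oop_shift) + narrow_oop_base; // decode
  assert(decoded == oop); // round-trips only for aligned, non-null oops
  return 0;
}
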
inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
@ -385,4 +398,10 @@ inline void MacroAssembler::trap_range_check_ge(Register a, int si16) {
twi(traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, si16);
}

#if defined(ABI_ELFv2)
inline address MacroAssembler::function_entry() { return pc(); }
#else
inline address MacroAssembler::function_entry() { return emit_fd(); }
#endif

#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP

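Background, not part of the patch: under the ELFv1 ABI every C function is reached through a three-doubleword function descriptor, which is why function_entry() must emit one; under ELFv2, functions are entered at their code address directly, with the convention that R12 holds that address at the global entry point so the callee can compute its TOC — presumably why branch_to earlier forces the target into R12. A sketch of the standard ELFv1 descriptor layout, mirroring the FunctionDescriptor accessors used throughout this patch:

#include <cstdint>

struct FunctionDescriptorModel { // illustrative stand-in, not HotSpot's type
  uint64_t entry; // address of the first instruction
  uint64_t toc;   // callee's TOC (R2) value
  uint64_t env;   // environment pointer, unused by C
};

static_assert(sizeof(FunctionDescriptorModel) == 24,
              "ELFv1 descriptors are three 8-byte doublewords");

int main() { return 0; }
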
@ -453,11 +453,11 @@ void trace_method_handle_stub(const char* adaptername,

if (Verbose) {
tty->print_cr("Registers:");
const int abi_offset = frame::abi_112_size / 8;
const int abi_offset = frame::abi_reg_args_size / 8;
for (int i = R3->encoding(); i <= R12->encoding(); i++) {
Register r = as_Register(i);
int count = i - R3->encoding();
// The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_112_size)).
// The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_reg_args_size)).
tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[abi_offset + count]);
if ((count + 1) % 4 == 0) {
tty->cr();
@ -524,9 +524,9 @@ void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adapt
__ save_LR_CR(R0);
__ mr(R0, R1_SP); // saved_sp
assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0");
// push_frame_abi112 only uses R0 if nbytes_save is wider than 16 bit
__ push_frame_abi112(nbytes_save, R0);
__ save_volatile_gprs(R1_SP, frame::abi_112_size); // Except R0.
// Push_frame_reg_args only uses R0 if nbytes_save is wider than 16 bit.
__ push_frame_reg_args(nbytes_save, R0);
__ save_volatile_gprs(R1_SP, frame::abi_reg_args_size); // Except R0.

__ load_const(R3_ARG1, (address)adaptername);
__ mr(R4_ARG2, R23_method_handle);

@ -1008,7 +1008,11 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
}

int MachCallRuntimeNode::ret_addr_offset() {
#if defined(ABI_ELFv2)
return 28;
#else
return 40;
#endif
}

//=============================================================================
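Since PPC instructions are uniformly 4 bytes, the two return values correspond to a 7-instruction (ELFv2) versus 10-instruction (ELFv1) runtime-call sequence; the assert in the call encoder further down checks the offset against the code actually emitted. The arithmetic, for the record:

// Illustrative arithmetic only; the constant name is a local stand-in.
constexpr int kBytesPerInstWord = 4;
static_assert(28 == 7  * kBytesPerInstWord, "ELFv2 call site");
static_assert(40 == 10 * kBytesPerInstWord, "ELFv1 call site");
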
@ -3674,6 +3678,10 @@ encode %{
MacroAssembler _masm(&cbuf);
const address start_pc = __ pc();

#if defined(ABI_ELFv2)
address entry= !($meth$$method) ? NULL : (address)$meth$$method;
__ call_c(entry, relocInfo::runtime_call_type);
#else
// The function we're going to call.
FunctionDescriptor fdtemp;
const FunctionDescriptor* fd = !($meth$$method) ? &fdtemp : (FunctionDescriptor*)$meth$$method;
@ -3684,6 +3692,7 @@ encode %{
// Put entry, env, toc into the constant pool, this needs up to 3 constant
// pool entries; call_c_using_toc will optimize the call.
__ call_c_using_toc(fd, relocInfo::runtime_call_type, Rtoc);
#endif

// Check the ret_addr_offset.
assert(((MachCallRuntimeNode*)this)->ret_addr_offset() == __ last_calls_return_pc() - start_pc,
@ -3699,20 +3708,25 @@ encode %{
__ mtctr($src$$Register);
%}

// postalloc expand emitter for runtime leaf calls.
// Postalloc expand emitter for runtime leaf calls.
enc_class postalloc_expand_java_to_runtime_call(method meth, iRegLdst toc) %{
loadConLNodesTuple loadConLNodes_Entry;
#if defined(ABI_ELFv2)
jlong entry_address = (jlong) this->entry_point();
assert(entry_address, "need address here");
loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper(entry_address),
OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
#else
// Get the struct that describes the function we are about to call.
FunctionDescriptor* fd = (FunctionDescriptor*) this->entry_point();
assert(fd, "need fd here");
jlong entry_address = (jlong) fd->entry();
// new nodes
loadConLNodesTuple loadConLNodes_Entry;
loadConLNodesTuple loadConLNodes_Env;
loadConLNodesTuple loadConLNodes_Toc;
MachNode *mtctr = NULL;
MachCallLeafNode *call = NULL;

// Create nodes and operands for loading the entry point.
loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->entry()),
loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper(entry_address),
OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));


@ -3733,8 +3747,9 @@ encode %{
// Create nodes and operands for loading the Toc point.
loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->toc()),
OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
#endif // ABI_ELFv2
// mtctr node
mtctr = new (C) CallLeafDirect_mtctrNode();
MachNode *mtctr = new (C) CallLeafDirect_mtctrNode();

assert(loadConLNodes_Entry._last != NULL, "entry must exist");
mtctr->add_req(0, loadConLNodes_Entry._last);
@ -3743,10 +3758,10 @@ encode %{
mtctr->_opnds[1] = new (C) iRegLdstOper();

// call node
call = new (C) CallLeafDirectNode();
MachCallLeafNode *call = new (C) CallLeafDirectNode();

call->_opnds[0] = _opnds[0];
call->_opnds[1] = new (C) methodOper((intptr_t) fd->entry()); // may get set later
call->_opnds[1] = new (C) methodOper((intptr_t) entry_address); // May get set later.

// Make the new call node look like the old one.
call->_name = _name;
@ -3773,8 +3788,10 @@ encode %{
// These must be required edges, as the registers are live up to
// the call. Else the constants are handled as kills.
call->add_req(mtctr);
#if !defined(ABI_ELFv2)
call->add_req(loadConLNodes_Env._last);
call->add_req(loadConLNodes_Toc._last);
#endif

// ...as well as prec
for (uint i = req(); i < len(); ++i) {
@ -3787,10 +3804,12 @@ encode %{
// Insert the new nodes.
if (loadConLNodes_Entry._large_hi) nodes->push(loadConLNodes_Entry._large_hi);
if (loadConLNodes_Entry._last) nodes->push(loadConLNodes_Entry._last);
#if !defined(ABI_ELFv2)
if (loadConLNodes_Env._large_hi) nodes->push(loadConLNodes_Env._large_hi);
if (loadConLNodes_Env._last) nodes->push(loadConLNodes_Env._last);
if (loadConLNodes_Toc._large_hi) nodes->push(loadConLNodes_Toc._large_hi);
if (loadConLNodes_Toc._last) nodes->push(loadConLNodes_Toc._last);
#endif
nodes->push(mtctr);
nodes->push(call);
%}
@ -3837,7 +3856,7 @@ frame %{
// out_preserve_stack_slots for calls to C. Supports the var-args
// backing area for register parms.
//
varargs_C_out_slots_killed(((frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size));
varargs_C_out_slots_killed(((frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size));

// The after-PROLOG location of the return address. Location of
// return address specifies a type (REG or STACK) and a number

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -579,15 +579,27 @@ REGISTER_DECLARATION(FloatRegister, F13_ARG13, F13); // volatile

// Register declarations to be used in frame manager assembly code.
// Use only non-volatile registers in order to keep values across C-calls.
#ifdef CC_INTERP
REGISTER_DECLARATION(Register, R14_state, R14); // address of new cInterpreter.
REGISTER_DECLARATION(Register, R15_prev_state, R15); // address of old cInterpreter
#else // CC_INTERP
REGISTER_DECLARATION(Register, R14_bcp, R14);
REGISTER_DECLARATION(Register, R15_esp, R15);
REGISTER_DECLARATION(FloatRegister, F15_ftos, F15);
#endif // CC_INTERP
REGISTER_DECLARATION(Register, R16_thread, R16); // address of current thread
REGISTER_DECLARATION(Register, R17_tos, R17); // address of Java tos (prepushed).
REGISTER_DECLARATION(Register, R18_locals, R18); // address of first param slot (receiver).
REGISTER_DECLARATION(Register, R19_method, R19); // address of current method
#ifndef DONT_USE_REGISTER_DEFINES
#ifdef CC_INTERP
#define R14_state AS_REGISTER(Register, R14)
#define R15_prev_state AS_REGISTER(Register, R15)
#else // CC_INTERP
#define R14_bcp AS_REGISTER(Register, R14)
#define R15_esp AS_REGISTER(Register, R15)
#define F15_ftos AS_REGISTER(FloatRegister, F15)
#endif // CC_INTERP
#define R16_thread AS_REGISTER(Register, R16)
#define R17_tos AS_REGISTER(Register, R17)
#define R18_locals AS_REGISTER(Register, R18)
@ -608,6 +620,14 @@ REGISTER_DECLARATION(Register, R26_tmp6, R26);
REGISTER_DECLARATION(Register, R27_tmp7, R27);
REGISTER_DECLARATION(Register, R28_tmp8, R28);
REGISTER_DECLARATION(Register, R29_tmp9, R29);
#ifndef CC_INTERP
REGISTER_DECLARATION(Register, R24_dispatch_addr, R24);
REGISTER_DECLARATION(Register, R25_templateTableBase, R25);
REGISTER_DECLARATION(Register, R26_monitor, R26);
REGISTER_DECLARATION(Register, R27_constPoolCache, R27);
REGISTER_DECLARATION(Register, R28_mdx, R28);
#endif // CC_INTERP

#ifndef DONT_USE_REGISTER_DEFINES
#define R21_tmp1 AS_REGISTER(Register, R21)
#define R22_tmp2 AS_REGISTER(Register, R22)
@ -618,6 +638,16 @@ REGISTER_DECLARATION(Register, R29_tmp9, R29);
#define R27_tmp7 AS_REGISTER(Register, R27)
#define R28_tmp8 AS_REGISTER(Register, R28)
#define R29_tmp9 AS_REGISTER(Register, R29)
#ifndef CC_INTERP
// Lmonitors : monitor pointer
// LcpoolCache: constant pool cache
// mdx: method data index
#define R24_dispatch_addr AS_REGISTER(Register, R24)
#define R25_templateTableBase AS_REGISTER(Register, R25)
#define R26_monitor AS_REGISTER(Register, R26)
#define R27_constPoolCache AS_REGISTER(Register, R27)
#define R28_mdx AS_REGISTER(Register, R28)
#endif

#define CCR4_is_synced AS_REGISTER(ConditionRegister, CCR4)
#endif

@ -87,7 +87,7 @@ void OptoRuntime::generate_exception_blob() {

address start = __ pc();

int frame_size_in_bytes = frame::abi_112_size;
int frame_size_in_bytes = frame::abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

// Exception pc is 'return address' for stack walker.
@ -99,7 +99,7 @@ void OptoRuntime::generate_exception_blob() {

// Save callee-saved registers.
// Push a C frame for the exception blob. It is needed for the C call later on.
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);

// This call does all the hard work. It checks if an exception handler
// exists in the method.
@ -109,8 +109,12 @@ void OptoRuntime::generate_exception_blob() {
__ set_last_Java_frame(/*sp=*/R1_SP, noreg);

__ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
__ call_c((address) OptoRuntime::handle_exception_C, relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, OptoRuntime::handle_exception_C),
relocInfo::none);
#endif
address calls_return_pc = __ last_calls_return_pc();
# ifdef ASSERT
__ cmpdi(CCR0, R3_RET, 0);
@ -162,7 +166,11 @@ void OptoRuntime::generate_exception_blob() {
__ bind(mh_callsite);
__ mr(R31, R3_RET); // Save branch address.
__ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
__ call_c((address) adjust_SP_for_methodhandle_callsite, relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, adjust_SP_for_methodhandle_callsite), relocInfo::none);
#endif
// Returns unextended_sp in R3_RET.

__ mtctr(R31); // Move address of exception handler to SR_CTR.

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ class RegisterSaver {
return_pc_is_thread_saved_exception_pc
};

static OopMap* push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
int return_pc_adjustment,
@ -200,12 +200,12 @@ static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
RegisterSaver_LiveIntReg( R30 ), // r30 must be the last register
};

OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
int return_pc_adjustment,
ReturnPCLocation return_pc_location) {
// Push an abi112-frame and store all registers which may be live.
// Push an abi_reg_args-frame and store all registers which may be live.
// If requested, create an OopMap: Record volatile registers as
// callee-save values in an OopMap so their save locations will be
// propagated to the RegisterMap of the caller frame during
@ -221,7 +221,7 @@ OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler*
sizeof(RegisterSaver::LiveRegType);
const int register_save_size = regstosave_num * reg_size;
const int frame_size_in_bytes = round_to(register_save_size, frame::alignment_in_bytes)
+ frame::abi_112_size;
+ frame::abi_reg_args_size;
*out_frame_size_in_bytes = frame_size_in_bytes;
const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
const int register_save_offset = frame_size_in_bytes - register_save_size;
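A worked instance of the frame sizing just computed, under assumed values (16-byte stack alignment and the 112-byte ELFv1 register-argument area; both really come from frame::):

#include <cassert>

static int round_to(int x, int align) { return (x + align - 1) & ~(align - 1); }

int main() {
  const int alignment_in_bytes = 16;     // assumption
  const int abi_reg_args_size  = 112;    // ELFv1 value; 96 under ELFv2
  const int register_save_size = 21 * 8; // e.g. 21 live 8-byte registers
  int frame_size_in_bytes =
      round_to(register_save_size, alignment_in_bytes) + abi_reg_args_size;
  assert(frame_size_in_bytes == 176 + 112); // 168 rounds up to 176
  return 0;
}
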
@ -229,7 +229,7 @@ OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler*
// OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;

BLOCK_COMMENT("push_frame_abi112_and_save_live_registers {");
BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");

// Save r30 in the last slot of the not yet pushed frame so that we
// can use it as scratch reg.
@ -294,7 +294,7 @@ OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler*
offset += reg_size;
}

BLOCK_COMMENT("} push_frame_abi112_and_save_live_registers");
BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");

// And we're done.
return map;
@ -699,15 +699,19 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,

int i;
VMReg reg;
// Leave room for C-compatible ABI_112.
int stk = (frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
// Leave room for C-compatible ABI_REG_ARGS.
int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
int arg = 0;
int freg = 0;

// Avoid passing C arguments in the wrong stack slots.
#if defined(ABI_ELFv2)
assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96,
"passing C arguments in wrong stack slots");
#else
assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
"passing C arguments in wrong stack slots");

#endif
// We fill-out regs AND regs2 if an argument must be passed in a
// register AND in a stack slot. If regs2 is NULL in such a
// situation, we bail-out with a fatal error.
|
||||
|
||||
#ifdef CC_INTERP
|
||||
const Register tos = R17_tos;
|
||||
#else
|
||||
const Register tos = R15_esp;
|
||||
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
|
||||
#endif
|
||||
|
||||
// load TOS
|
||||
@ -971,7 +978,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
|
||||
const BasicType *sig_bt,
|
||||
const VMRegPair *regs) {
|
||||
|
||||
// Load method's entry-point from methodOop.
|
||||
// Load method's entry-point from method.
|
||||
__ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
|
||||
__ mtctr(R12_scratch2);
|
||||
|
||||
@ -992,7 +999,10 @@ static void gen_i2c_adapter(MacroAssembler *masm,
|
||||
|
||||
#ifdef CC_INTERP
|
||||
const Register ld_ptr = R17_tos;
|
||||
#else
|
||||
const Register ld_ptr = R15_esp;
|
||||
#endif
|
||||
|
||||
const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
|
||||
const int num_value_regs = sizeof(value_regs) / sizeof(Register);
|
||||
int value_regs_index = 0;
|
||||
@ -1083,8 +1093,8 @@ static void gen_i2c_adapter(MacroAssembler *masm,
|
||||
}
|
||||
}
|
||||
|
||||
BLOCK_COMMENT("Store method oop");
|
||||
// Store method oop into thread->callee_target.
|
||||
BLOCK_COMMENT("Store method");
|
||||
// Store method into thread->callee_target.
|
||||
// We might end up in handle_wrong_method if the callee is
|
||||
// deoptimized as we race thru here. If that happens we don't want
|
||||
// to take a safepoint because the caller frame will look
|
||||
@ -1504,7 +1514,11 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
|
||||
|
||||
__ block_comment("block_for_jni_critical");
|
||||
address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
|
||||
#if defined(ABI_ELFv2)
|
||||
__ call_c(entry_point, relocInfo::runtime_call_type);
|
||||
#else
|
||||
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
|
||||
#endif
|
||||
address start = __ pc() - __ offset(),
|
||||
calls_return_pc = __ last_calls_return_pc();
|
||||
oop_maps->add_gc_map(calls_return_pc - start, map);
|
||||
@ -1877,7 +1891,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
// Layout of the native wrapper frame:
|
||||
// (stack grows upwards, memory grows downwards)
|
||||
//
|
||||
// NW [ABI_112] <-- 1) R1_SP
|
||||
// NW [ABI_REG_ARGS] <-- 1) R1_SP
|
||||
// [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset
|
||||
// [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives)
|
||||
// klass <-- 4) R1_SP + klass_offset
|
||||
@ -2211,8 +2225,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
// slow case of monitor enter. Inline a special case of call_VM that
|
||||
// disallows any pending_exception.
|
||||
|
||||
// Save argument registers and leave room for C-compatible ABI_112.
|
||||
int frame_size = frame::abi_112_size +
|
||||
// Save argument registers and leave room for C-compatible ABI_REG_ARGS.
|
||||
int frame_size = frame::abi_reg_args_size +
|
||||
round_to(total_c_args * wordSize, frame::alignment_in_bytes);
|
||||
__ mr(R11_scratch1, R1_SP);
|
||||
RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);
|
||||
@ -2250,9 +2264,12 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
|
||||
// The JNI call
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
#if defined(ABI_ELFv2)
|
||||
__ call_c(native_func, relocInfo::runtime_call_type);
|
||||
#else
|
||||
FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
|
||||
__ call_c(fd_native_method, relocInfo::runtime_call_type);
|
||||
#endif
|
||||
|
||||
|
||||
// Now, we are back from the native code.
|
||||
@ -2606,8 +2623,12 @@ static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
|
||||
#ifdef CC_INTERP
|
||||
__ std(R1_SP, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
|
||||
#else
|
||||
Unimplemented();
|
||||
#ifdef ASSERT
|
||||
__ load_const_optimized(pc_reg, 0x5afe);
|
||||
__ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
|
||||
#endif
|
||||
__ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP);
|
||||
#endif // CC_INTERP
|
||||
__ addi(number_of_frames_reg, number_of_frames_reg, -1);
|
||||
__ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
|
||||
__ addi(pcs_reg, pcs_reg, wordSize);
|
||||
@ -2679,7 +2700,15 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
|
||||
__ std(R12_scratch2, _abi(lr), R1_SP);
|
||||
|
||||
// Initialize initial_caller_sp.
|
||||
#ifdef CC_INTERP
|
||||
__ std(frame_size_reg/*old_sp*/, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
|
||||
#else
|
||||
#ifdef ASSERT
|
||||
__ load_const_optimized(pc_reg, 0x5afe);
|
||||
__ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
|
||||
#endif
|
||||
__ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP);
|
||||
#endif // CC_INTERP
|
||||
|
||||
#ifdef ASSERT
|
||||
// Make sure that there is at least one entry in the array.
|
||||
@ -2724,7 +2753,7 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
OopMapSet *oop_maps = new OopMapSet();
|
||||
|
||||
// size of ABI112 plus spill slots for R3_RET and F1_RET.
|
||||
const int frame_size_in_bytes = frame::abi_112_spill_size;
|
||||
const int frame_size_in_bytes = frame::abi_reg_args_spill_size;
|
||||
const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
|
||||
int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
|
||||
|
||||
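The abi_112 names being renamed throughout these hunks encoded a byte count that is only right for ELFv1: a 48-byte fixed frame header plus a 64-byte register-argument save area, 112 bytes in all. ELFv2 shrinks the fixed header, so the commit switches to layout-describing names (abi_reg_args, push_frame_reg_args, _abi_reg_args_spill). A sketch of the arithmetic, using the documented ABI constants rather than values copied from frame_ppc.hpp:

// ELFv1: 6 header doublewords + 8 argument-save doublewords.
const int elfv1_header_bytes   = 6 * 8;                    // 48
const int elfv1_reg_args_bytes = elfv1_header_bytes + 64;  // 112, the old "abi_112"
// ELFv2 trims the header to 4 doublewords: 32 + 64 = 96 bytes for the same frame.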
@ -2757,11 +2786,11 @@ void SharedRuntime::generate_deopt_blob() {

// Push the "unpack frame"
// Save everything in sight.
map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ true,
return_pc_adjustment_no_exception,
RegisterSaver::return_pc_is_lr);
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ true,
return_pc_adjustment_no_exception,
RegisterSaver::return_pc_is_lr);
assert(map != NULL, "OopMap must have been created");

__ li(exec_mode_reg, Deoptimization::Unpack_deopt);
@ -2787,11 +2816,11 @@ void SharedRuntime::generate_deopt_blob() {
// Push the "unpack frame".
// Save everything in sight.
assert(R4 == R4_ARG2, "exception pc must be in r4");
RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
return_pc_adjustment_exception,
RegisterSaver::return_pc_is_r4);
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
return_pc_adjustment_exception,
RegisterSaver::return_pc_is_r4);

// Deopt during an exception. Save exec mode for unpack_frames.
__ li(exec_mode_reg, Deoptimization::Unpack_exception);
@ -2876,8 +2905,8 @@ void SharedRuntime::generate_deopt_blob() {
// ...).

// Spill live volatile registers since we'll do a call.
__ std( R3_RET, _abi_112_spill(spill_ret), R1_SP);
__ stfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
__ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP);
__ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP);

// Let the unpacker layout information in the skeletal frames just
// allocated.
@ -2889,8 +2918,8 @@ void SharedRuntime::generate_deopt_blob() {
__ reset_last_Java_frame();

// Restore the volatiles saved above.
__ ld( R3_RET, _abi_112_spill(spill_ret), R1_SP);
__ lfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
__ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP);
__ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP);

// Pop the unpack frame.
__ pop_frame();
@ -2900,10 +2929,16 @@ void SharedRuntime::generate_deopt_blob() {
// optional c2i, caller of deoptee, ...).

// Initialize R14_state.
#ifdef CC_INTERP
__ ld(R14_state, 0, R1_SP);
__ addi(R14_state, R14_state, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
// Also inititialize R15_prev_state.
__ restore_prev_state();
#else
__ restore_interpreter_state(R11_scratch1);
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif // CC_INTERP

// Return to the interpreter entry point.
__ blr();
@ -2930,7 +2965,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
Register unc_trap_reg = R23_tmp3;

OopMapSet* oop_maps = new OopMapSet();
int frame_size_in_bytes = frame::abi_112_size;
int frame_size_in_bytes = frame::abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

// stack: (deoptee, optional i2c, caller_of_deoptee, ...).
@ -2943,7 +2978,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ save_LR_CR(R11_scratch1);

// Push an "uncommon_trap" frame.
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);

// stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).

@ -2996,7 +3031,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// interpreter frames just created.

// Push a simple "unpack frame" here.
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);

// stack: (unpack frame, skeletal interpreter frame, ..., optional
// skeletal interpreter frame, optional c2i, caller of deoptee,
@ -3022,11 +3057,17 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// stack: (top interpreter frame, ..., optional interpreter frame,
// optional c2i, caller of deoptee, ...).

#ifdef CC_INTERP
// Initialize R14_state, ...
__ ld(R11_scratch1, 0, R1_SP);
__ addi(R14_state, R11_scratch1, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
// also initialize R15_prev_state.
__ restore_prev_state();
#else
__ restore_interpreter_state(R11_scratch1);
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif // CC_INTERP

// Return to the interpreter entry point.
__ blr();

@ -3064,11 +3105,11 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
}

// Save registers, fpu state, and flags.
map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map=*/ true,
/*return_pc_adjustment=*/0,
return_pc_location);
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map=*/ true,
/*return_pc_adjustment=*/0,
return_pc_location);

// The following is basically a call_VM. However, we need the precise
// address of the call in order to generate an oopmap. Hence, we do all the
@ -3104,7 +3145,6 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
frame_size_in_bytes,
/*restore_ctr=*/true);

BLOCK_COMMENT("  Jump to forward_exception_entry.");
// Jump to forward_exception_entry, with the issuing PC in LR
// so it looks like the original nmethod called forward_exception_entry.
@ -3151,11 +3191,11 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha

address start = __ pc();

map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map*/ true,
/*return_pc_adjustment*/ 0,
RegisterSaver::return_pc_is_lr);
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map*/ true,
/*return_pc_adjustment*/ 0,
RegisterSaver::return_pc_is_lr);

// Use noreg as last_Java_pc, the return pc will be reconstructed
// from the physical frame.
@ -3189,7 +3229,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha

RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);

// Get the returned methodOop.
// Get the returned method.
__ get_vm_result_2(R19_method);

__ bctr();
@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,15 +39,10 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_aix
# include "thread_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#include "runtime/thread.inline.hpp"

#define __ _masm->

@ -79,11 +74,11 @@ class StubGenerator: public StubCodeGenerator {

StubCodeMark mark(this, "StubRoutines", "call_stub");

address start = __ emit_fd();
address start = __ function_entry();

// some sanity checks
assert((sizeof(frame::abi_48) % 16) == 0, "unaligned");
assert((sizeof(frame::abi_112) % 16) == 0, "unaligned");
assert((sizeof(frame::abi_minframe) % 16) == 0, "unaligned");
assert((sizeof(frame::abi_reg_args) % 16) == 0, "unaligned");
assert((sizeof(frame::spill_nonvolatiles) % 16) == 0, "unaligned");
assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
assert((sizeof(frame::entry_frame_locals) % 16) == 0, "unaligned");
@ -221,7 +216,7 @@ class StubGenerator: public StubCodeGenerator {
{
BLOCK_COMMENT("Call frame manager or native entry.");
// Call frame manager or native entry.
Register r_new_arg_entry = R14_state;
Register r_new_arg_entry = R14; // PPC_state;
assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
r_arg_method, r_arg_thread);

@ -234,7 +229,11 @@ class StubGenerator: public StubCodeGenerator {
// R16_thread - JavaThread*

// Tos must point to last argument - element_size.
#ifdef CC_INTERP
const Register tos = R17_tos;
#else
const Register tos = R15_esp;
#endif
__ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);

// initialize call_stub locals (step 2)
@ -248,8 +247,11 @@ class StubGenerator: public StubCodeGenerator {
assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");

// Set R15_prev_state to 0 for simplifying checks in callee.
#ifdef CC_INTERP
__ li(R15_prev_state, 0);

#else
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif
// Stack on entry to frame manager / native entry:
//
// F0 [TOP_IJAVA_FRAME_ABI]
@ -444,7 +446,7 @@ class StubGenerator: public StubCodeGenerator {

// Save LR/CR and copy exception pc (LR) into R4_ARG2.
__ save_LR_CR(R4_ARG2);
__ push_frame_abi112(0, R0);
__ push_frame_reg_args(0, R0);
// Find exception handler.
__ call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address),
@ -519,7 +521,7 @@ class StubGenerator: public StubCodeGenerator {
MacroAssembler* masm = new MacroAssembler(&code);

OopMapSet* oop_maps = new OopMapSet();
int frame_size_in_bytes = frame::abi_112_size;
int frame_size_in_bytes = frame::abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

StubCodeMark mark(this, "StubRoutines", "throw_exception");
@ -529,7 +531,7 @@ class StubGenerator: public StubCodeGenerator {
__ save_LR_CR(R11_scratch1);

// Push a frame.
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);

address frame_complete_pc = __ pc();

@ -551,8 +553,11 @@ class StubGenerator: public StubCodeGenerator {
if (arg2 != noreg) {
__ mr(R5_ARG3, arg2);
}
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry),
relocInfo::none);
#if defined(ABI_ELFv2)
__ call_c(runtime_entry, relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
#endif

// Set an oopmap for the call site.
oop_maps->add_gc_map((int)(gc_map_pc - start), map);
@ -614,7 +619,7 @@ class StubGenerator: public StubCodeGenerator {
// With G1, don't generate the call if we statically know that the target in uninitialized
if (!dest_uninitialized) {
const int spill_slots = 4 * wordSize;
const int frame_size = frame::abi_112_size + spill_slots;
const int frame_size = frame::abi_reg_args_size + spill_slots;
Label filtered;

// Is marking active?
@ -628,7 +633,7 @@ class StubGenerator: public StubCodeGenerator {
__ beq(CCR0, filtered);

__ save_LR_CR(R0);
__ push_frame_abi112(spill_slots, R0);
__ push_frame_reg_args(spill_slots, R0);
__ std(from, frame_size - 1 * wordSize, R1_SP);
__ std(to, frame_size - 2 * wordSize, R1_SP);
__ std(count, frame_size - 3 * wordSize, R1_SP);
@ -672,7 +677,7 @@ class StubGenerator: public StubCodeGenerator {
if (branchToEnd) {
__ save_LR_CR(R0);
// We need this frame only to spill LR.
__ push_frame_abi112(0, R0);
__ push_frame_reg_args(0, R0);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
__ pop_frame();
__ restore_LR_CR(R0);
@ -742,7 +747,7 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");

// Implemented as in ClearArray.
address start = __ emit_fd();
address start = __ function_entry();

Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned)
Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
@ -820,7 +825,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_handler_for_unsafe_access() {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
address start = __ emit_fd();
address start = __ function_entry();
__ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
return start;
}
@ -861,7 +866,7 @@ class StubGenerator: public StubCodeGenerator {
// to read from the safepoint polling page.
address generate_load_from_poll() {
StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
address start = __ emit_fd();
address start = __ function_entry();
__ unimplemented("StubRoutines::verify_oop", 95); // TODO PPC port
return start;
}
@ -885,7 +890,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_fill(BasicType t, bool aligned, const char* name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();

const Register to = R3_ARG1; // source array address
const Register value = R4_ARG2; // fill value
@ -1123,7 +1128,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_disjoint_byte_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();

Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
@ -1254,15 +1259,21 @ class StubGenerator: public StubCodeGenerator {
//
address generate_conjoint_byte_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();

Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
Register tmp3 = R8_ARG6;

#if defined(ABI_ELFv2)
address nooverlap_target = aligned ?
StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
StubRoutines::jbyte_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
#endif

array_overlap_test(nooverlap_target, 0);
// Do reverse copy. We assume the case of actual overlap is rare enough
@ -1345,7 +1356,7 @@ class StubGenerator: public StubCodeGenerator {
Register tmp3 = R8_ARG6;
Register tmp4 = R9_ARG7;

address start = __ emit_fd();
address start = __ function_entry();

Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
// don't try anything fancy if arrays don't have many elements
@ -1474,15 +1485,21 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_conjoint_short_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();

Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
Register tmp3 = R8_ARG6;

#if defined(ABI_ELFv2)
address nooverlap_target = aligned ?
StubRoutines::arrayof_jshort_disjoint_arraycopy() :
StubRoutines::jshort_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
#endif

array_overlap_test(nooverlap_target, 1);

@ -1597,7 +1614,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_disjoint_int_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
generate_disjoint_int_copy_core(aligned);
__ blr();
return start;
@ -1681,11 +1698,17 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_conjoint_int_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();

#if defined(ABI_ELFv2)
address nooverlap_target = aligned ?
StubRoutines::arrayof_jint_disjoint_arraycopy() :
StubRoutines::jint_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
#endif

array_overlap_test(nooverlap_target, 2);

@ -1767,7 +1790,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_disjoint_long_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
generate_disjoint_long_copy_core(aligned);
__ blr();

@ -1849,11 +1872,17 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_conjoint_long_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();

#if defined(ABI_ELFv2)
address nooverlap_target = aligned ?
StubRoutines::arrayof_jlong_disjoint_arraycopy() :
StubRoutines::jlong_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
#endif

array_overlap_test(nooverlap_target, 3);
generate_conjoint_long_copy_core(aligned);
@ -1875,11 +1904,17 @@ class StubGenerator: public StubCodeGenerator {
address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
StubCodeMark mark(this, "StubRoutines", name);

address start = __ emit_fd();
address start = __ function_entry();

#if defined(ABI_ELFv2)
address nooverlap_target = aligned ?
StubRoutines::arrayof_oop_disjoint_arraycopy() :
StubRoutines::oop_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
#endif

gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);

@ -1910,7 +1945,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();

gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);

@ -1991,7 +2026,7 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);

// Entry point, pc or function descriptor.
*entry = __ emit_fd();
*entry = __ function_entry();

// Load *adr into R4_ARG2, may fault.
*fault_pc = __ pc();
@ -2056,7 +2091,7 @@ class StubGenerator: public StubCodeGenerator {
guarantee(!UseAESIntrinsics, "not yet implemented.");
}

// PPC uses stubs for safefetch.
// Safefetch stubs.
generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
&StubRoutines::_safefetch32_fault_pc,
&StubRoutines::_safefetch32_continuation_pc);
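The SafeFetch32 stub registered above is how the VM reads memory that may be unmapped: the load emitted at _safefetch32_fault_pc is allowed to trap, and the signal handler resumes at _safefetch32_continuation_pc, which returns a caller-supplied default instead. Conceptually (this C++ mirrors the stub's contract, not the generated PPC assembly; the name matches the StubRoutines fields above):

int SafeFetch32_contract(int* adr, int errValue) {
  // Real stub: the load sits at fault_pc. If it faults, the VM's signal
  // handler redirects execution to continuation_pc, which returns errValue.
  return *adr;  // may fault; errValue comes back on the recovery path
}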
hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.hpp (new file, 44 lines)
@ -0,0 +1,44 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2013, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP
#define CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP

protected:
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);

void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
void unlock_method(bool check_exceptions = true);

void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label& continue_entry);

void generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals);
void generate_stack_overflow_check(Register Rframe_size, Register Rscratch1);

#endif // CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP
hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp (new file, 1813 lines; file diff suppressed because it is too large)
hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.hpp (new file, 41 lines)
@ -0,0 +1,41 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2013, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
#define CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP

protected:

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI

const static int InterpreterCodeSize = 210*K;

#endif // CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
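InterpreterCodeSize above uses HotSpot's K constant (1024, from globalDefinitions.hpp), so the new PPC template interpreter reserves about 210 KB for generated interpreter code. The arithmetic, spelled out:

// K is HotSpot's kilobyte constant.
const int K_check = 1024;
const int InterpreterCodeSize_check = 210 * K_check;  // 215040 bytes, ~210 KB

As the comment says, the number is checked by a guarantee at interpreter generation time, and +PrintInterpreter (a VM option that, depending on JDK version, may require a debug build or diagnostic options to be unlocked) reports the size actually used.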
hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp (new file, 4082 lines; file diff suppressed because it is too large)
hotspot/src/cpu/ppc/vm/templateTable_ppc_64.hpp (new file, 38 lines)
@ -0,0 +1,38 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2013, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
#define CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP

static void prepare_invoke(int byte_no, Register Rmethod, Register Rret_addr, Register Rindex, Register Rrecv, Register Rflags, Register Rscratch);
static void invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2);
static void generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp);
static void invokeinterface_object_method(Register Rrecv_klass, Register Rret, Register Rflags, Register Rindex, Register Rtemp, Register Rtemp2);

// Branch_conditional which takes TemplateTable::Condition.
static void branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert = false);
static void if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0);

#endif // CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
@ -24,7 +24,8 @@
*/

#include "precompiled.hpp"
#include "assembler_ppc.inline.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
@ -168,7 +169,7 @@ void VM_Version::determine_section_size() {

uint32_t *code = (uint32_t *)a->pc();
// Emit code.
void (*test1)() = (void(*)())(void *)a->emit_fd();
void (*test1)() = (void(*)())(void *)a->function_entry();

Label l1;

@ -242,7 +243,7 @@ void VM_Version::determine_section_size() {
a->blr();

// Emit code.
void (*test2)() = (void(*)())(void *)a->emit_fd();
void (*test2)() = (void(*)())(void *)a->function_entry();
// uint32_t *code = (uint32_t *)a->pc();

Label l2;

@ -383,8 +384,12 @@ void VM_Version::determine_section_size() {
#endif // COMPILER2

void VM_Version::determine_features() {
#if defined(ABI_ELFv2)
const int code_size = (num_features+1+2*7)*BytesPerInstWord; // TODO(asmundak): calculation is incorrect.
#else
// 7 InstWords for each call (function descriptor + blr instruction).
const int code_size = (num_features+1+2*7)*BytesPerInstWord;
#endif
int features = 0;

// create test area
@ -398,7 +403,7 @@ void VM_Version::determine_features() {
MacroAssembler* a = new MacroAssembler(&cb);

// Emit code.
void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->emit_fd();
void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
uint32_t *code = (uint32_t *)a->pc();
// Don't use R0 in ldarx.
// Keep R3_ARG1 unmodified, it contains &field (see below).
@ -415,7 +420,7 @@ void VM_Version::determine_features() {
a->blr();

// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->emit_fd();
void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry();
a->dcbz(R3_ARG1); // R3_ARG1 = addr
a->blr();
@ -413,16 +413,15 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
// Update standard invocation counters
__ increment_invocation_counter(Rcounters, O0, G4_scratch);
if (ProfileInterpreter) {
Address interpreter_invocation_counter(Rcounters, 0,
Address interpreter_invocation_counter(Rcounters,
in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
__ ld(interpreter_invocation_counter, G4_scratch);
__ inc(G4_scratch);
__ st(G4_scratch, interpreter_invocation_counter);
}

Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit);
__ sethi(invocation_limit);
__ ld(invocation_limit, G3_scratch);
AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
__ load_contents(invocation_limit, G3_scratch);
__ cmp(O0, G3_scratch);
__ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
__ delayed()->nop();
@ -439,7 +438,7 @@ address InterpreterGenerator::generate_empty_entry(void) {
// do nothing for empty methods (do not even increment invocation counter)
if ( UseFastEmptyMethods) {
// If we need a safepoint check, generate full interpreter entry.
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, frame_manager_entry);
@ -471,7 +470,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
if ( UseFastAccessorMethods) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
@ -486,8 +485,8 @@ address InterpreterGenerator::generate_accessor_entry(void) {

// read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!)
__ ld_ptr(Address(G5_method, 0, in_bytes(Method::const_offset())), G1_scratch);
__ ld(Address(G1_scratch, 0, in_bytes(ConstMethod::codes_offset())), G1_scratch);
__ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
__ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);

// move index @ 2 far left then to the right most two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
@ -590,15 +589,15 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
const Register Gtmp1 = G3_scratch ;
const Register Gtmp2 = G1_scratch;
const Register RconstMethod = Gtmp1;
const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address constMethod(G5_method, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));

bool inc_counter = UseCompiler || CountCompiledCalls;

// make sure registers are different!
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));

Label Lentry;
__ bind(Lentry);
@ -643,7 +642,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// At this point Lstate points to new interpreter state
//

const Address do_not_unlock_if_synchronized(G2_thread, 0,
const Address do_not_unlock_if_synchronized(G2_thread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
// Since at this point in the method invocation the exception handler
// would try to exit the monitor of synchronized methods which hasn't
@ -717,17 +716,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {

{ Label L;
__ ld_ptr(STATE(_method), G5_method);
__ ld_ptr(Address(G5_method, 0, in_bytes(Method::signature_handler_offset())), G3_scratch);
__ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
__ tst(G3_scratch);
__ brx(Assembler::notZero, false, Assembler::pt, L);
__ delayed()->nop();
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), G5_method, false);
__ ld_ptr(STATE(_method), G5_method);

Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr(G2_thread, in_bytes(Thread::pending_exception_offset()));
__ ld_ptr(exception_addr, G3_scratch);
__ br_notnull_short(G3_scratch, Assembler::pn, pending_exception_present);
__ ld_ptr(Address(G5_method, 0, in_bytes(Method::signature_handler_offset())), G3_scratch);
__ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
__ bind(L);
}

@ -771,13 +770,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ br( Assembler::zero, false, Assembler::pt, not_static);
__ delayed()->
// get native function entry point(O0 is a good temp until the very end)
ld_ptr(Address(G5_method, 0, in_bytes(Method::native_function_offset())), O0);
ld_ptr(Address(G5_method, in_bytes(Method::native_function_offset())), O0);
// for static methods insert the mirror argument
const int mirror_offset = in_bytes(Klass::java_mirror_offset());

__ ld_ptr(Address(G5_method, 0, in_bytes(Method:: const_offset())), O1);
__ ld_ptr(Address(O1, 0, in_bytes(ConstMethod::constants_offset())), O1);
__ ld_ptr(Address(O1, 0, ConstantPool::pool_holder_offset_in_bytes()), O1);
__ ld_ptr(Address(G5_method, in_bytes(Method:: const_offset())), O1);
__ ld_ptr(Address(O1, in_bytes(ConstMethod::constants_offset())), O1);
__ ld_ptr(Address(O1, ConstantPool::pool_holder_offset_in_bytes()), O1);
__ ld_ptr(O1, mirror_offset, O1);
// where the mirror handle body is allocated:
#ifdef ASSERT
@ -831,18 +830,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// flush the windows now. We don't care about the current (protection) frame
// only the outer frames

__ flush_windows();
__ flushw();

// mark windows as flushed
Address flags(G2_thread,
0,
in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
__ set(JavaFrameAnchor::flushed, G3_scratch);
__ st(G3_scratch, flags);

// Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset()));
Address thread_state(G2_thread, in_bytes(JavaThread::thread_state_offset()));
#ifdef ASSERT
{ Label L;
__ ld(thread_state, G3_scratch);
@ -867,7 +865,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Block, if necessary, before resuming in _thread_in_Java state.
// In order for GC to work, don't clear the last_Java_sp until after blocking.
{ Label no_block;
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
AddressLiteral sync_state(SafepointSynchronize::address_of_state());

// Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization
@ -890,7 +888,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {

Label L;
Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
Address suspend_state(G2_thread, in_bytes(JavaThread::suspend_flags_offset()));
__ br(Assembler::notEqual, false, Assembler::pn, L);
__ delayed()->
ld(suspend_state, G3_scratch);
@ -965,7 +963,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {

// handle exceptions (exception handling will handle unlocking!)
{ Label L;
Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));

__ ld_ptr(exception_addr, Gtemp);
__ tst(Gtemp);
@ -1055,8 +1053,8 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
assert_different_registers(state, prev_state);
assert_different_registers(prev_state, G3_scratch);
const Register Gtmp = G3_scratch;
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address constMethod (G5_method, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));

// slop factor is two extra slots on the expression stack so that
// we always have room to store a result when returning from a call without parameters
@ -1075,7 +1073,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register

if (native) {
const Register RconstMethod = Gtmp;
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
__ ld_ptr(constMethod, RconstMethod);
__ lduh( size_of_parameters, Gtmp );
__ calc_mem_param_words(Gtmp, Gtmp); // space for native call parameters passed on the stack in words
@ -1246,8 +1244,8 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
if (init_value != noreg) {
Label clear_loop;
const Register RconstMethod = O1;
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));

// NOTE: If you change the frame layout, this code will need to
// be updated!
@ -1496,11 +1494,11 @@ void CppInterpreterGenerator::adjust_callers_stack(Register args) {
//
// assert_different_registers(state, prev_state);
const Register Gtmp = G3_scratch;
const RconstMethod = G3_scratch;
const Register RconstMethod = G3_scratch;
const Register tmp = O2;
const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
const Address constMethod(G5_method, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));

__ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, tmp);
@ -1555,8 +1553,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
const Register Gtmp1 = G3_scratch;
// const Register Lmirror = L1; // native mirror (native calls only)

const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address constMethod (G5_method, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));

address entry_point = __ pc();
__ mov(G0, prevState); // no current activation
@ -1709,7 +1707,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {

// We want exception in the thread no matter what we ultimately decide about frame type.

Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));
__ verify_thread();
__ st_ptr(O0, exception_addr);
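Nearly every hunk in this SPARC file makes the same mechanical change: the old Address constructor took an extra explicit 0 displacement argument, and absolute addresses were formed with a sethi/ld pair, while the reworked API takes just (base, byte_offset) and uses AddressLiteral plus load_contents for absolutes. A before/after sketch built from identifiers that appear in this diff (constructor shapes as used here, not the full overload set):

// Old style: explicit zero argument, sethi/ld for absolute addresses.
//   Address limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit);
//   __ sethi(limit); __ ld(limit, G3_scratch);
// New style: base register + byte offset; absolutes become AddressLiteral.
//   Address counter(Rcounters, in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
//   AddressLiteral limit((address)&InvocationCounter::InterpreterInvocationLimit);
//   __ load_contents(limit, G3_scratch);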
@ -827,6 +827,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
}

if (is_interpreted_frame()) {
#ifndef CC_INTERP
DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
DESCRIBE_FP_OFFSET(interpreter_frame_padding);
@ -837,6 +838,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
if ((esp >= sp()) && (esp < fp())) {
values.describe(-1, esp, "*Lesp");
}
#endif
}

if (!is_compiled_frame()) {
@ -2497,6 +2497,24 @@ void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Regis
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch1, Register scratch2,
Condition cond, Label *where) {
ld(counter_addr, scratch1);
add(scratch1, increment, scratch1);
if (is_simm13(mask)) {
andcc(scratch1, mask, G0);
} else {
set(mask, scratch2);
andcc(scratch1, scratch2, G0);
}
br(cond, false, Assembler::pn, *where);
delayed()->st(scratch1, counter_addr);
}
#endif /* CC_INTERP */

// Inline assembly for:
@ -2646,20 +2664,3 @@ void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_na
}
#endif // CC_INTERP
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch1, Register scratch2,
Condition cond, Label *where) {
ld(counter_addr, scratch1);
add(scratch1, increment, scratch1);
if (is_simm13(mask)) {
andcc(scratch1, mask, G0);
} else {
set(mask, scratch2);
andcc(scratch1, scratch2, G0);
}
br(cond, false, Assembler::pn, *where);
delayed()->st(scratch1, counter_addr);
}
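The two hunks above relocate increment_mask_and_jump earlier in the file; the function itself implements HotSpot's masked profiling counters: bump the counter by increment, AND it with mask (andcc, so the condition codes are set), and branch when the masked value satisfies cond, with the delay-slot store writing the bumped counter back. With the usual parameters the branch fires once per power-of-two bumps rather than on each one. A worked example with illustrative values:

// With increment = 8 and mask = 0x3F8, the masked value is zero once
// every 128 increments, so the jump fires on every 128th invocation.
int counter = 0;
counter += 8;                          // ld + add
bool jump = ((counter & 0x3F8) == 0);  // andcc; br tests this condition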
@ -23,7 +23,8 @@
*/

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"
@ -1089,6 +1089,21 @@ void Assembler::andl(Register dst, Register src) {
emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnl(Register dst, Register src1, Register src2) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode(dst, src1, src2);
emit_int8((unsigned char)0xF2);
emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::andnl(Register dst, Register src1, Address src2) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38(dst, src1, src2);
emit_int8((unsigned char)0xF2);
emit_operand(dst, src2);
}

void Assembler::bsfl(Register dst, Register src) {
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
@ -1110,6 +1125,51 @@ void Assembler::bswapl(Register reg) { // bswap
emit_int8((unsigned char)(0xC8 | encode));
}

void Assembler::blsil(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode(rbx, dst, src);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsil(Register dst, Address src) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38(rbx, dst, src);
emit_int8((unsigned char)0xF3);
emit_operand(rbx, src);
}

void Assembler::blsmskl(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode(rdx, dst, src);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsmskl(Register dst, Address src) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38(rdx, dst, src);
emit_int8((unsigned char)0xF3);
emit_operand(rdx, src);
}

void Assembler::blsrl(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode(rcx, dst, src);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::blsrl(Register dst, Address src) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38(rcx, dst, src);
emit_int8((unsigned char)0xF3);
emit_operand(rcx, src);
}

void Assembler::call(Label& L, relocInfo::relocType rtype) {
// suspect disp32 is always good
int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
@ -2878,6 +2938,24 @@ void Assembler::testl(Register dst, Address src) {
emit_operand(dst, src);
}

void Assembler::tzcntl(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
emit_int8((unsigned char)0xF3);
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
emit_int8((unsigned char)0xBC);
emit_int8((unsigned char)0xC0 | encode);
}

void Assembler::tzcntq(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
emit_int8((unsigned char)0xF3);
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
emit_int8((unsigned char)0xBC);
emit_int8((unsigned char)(0xC0 | encode));
}
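The BMI1 instructions added here each compute a one-instruction bit trick; their scalar semantics (the architectural definitions, independent of HotSpot) are:

// 32-bit forms; the q variants below do the same at 64 bits.
uint32_t andn_sem  (uint32_t a, uint32_t b) { return ~a & b; }      // andnl(dst, a, b)
uint32_t blsi_sem  (uint32_t x)             { return x & (0 - x); } // isolate lowest set bit
uint32_t blsmsk_sem(uint32_t x)             { return x ^ (x - 1); } // mask up to lowest set bit
uint32_t blsr_sem  (uint32_t x)             { return x & (x - 1); } // clear lowest set bit
// tzcnt counts trailing zero bits and, unlike bsf, is defined for a
// zero input (it returns the operand width, 32 or 64).

Note that the rbx/rdx/rcx first arguments of the blsi/blsmsk/blsr emitters are not register operands: they encode the /3, /2 and /1 opcode-extension field of the VEX.0F38 F3 instruction group.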
void Assembler::ucomisd(XMMRegister dst, Address src) {
|
||||
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
|
||||
emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
|
||||
@ -4837,6 +4915,21 @@ void Assembler::andq(Register dst, Register src) {
|
||||
emit_arith(0x23, 0xC0, dst, src);
|
||||
}
|
||||
|
||||
void Assembler::andnq(Register dst, Register src1, Register src2) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
int encode = vex_prefix_0F38_and_encode_q(dst, src1, src2);
|
||||
emit_int8((unsigned char)0xF2);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
}
|
||||
|
||||
void Assembler::andnq(Register dst, Register src1, Address src2) {
|
||||
InstructionMark im(this);
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
vex_prefix_0F38_q(dst, src1, src2);
|
||||
emit_int8((unsigned char)0xF2);
|
||||
emit_operand(dst, src2);
|
||||
}
|
||||
|
||||
void Assembler::bsfq(Register dst, Register src) {
|
||||
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
|
||||
emit_int8(0x0F);
|
||||
@ -4858,6 +4951,51 @@ void Assembler::bswapq(Register reg) {
|
||||
emit_int8((unsigned char)(0xC8 | encode));
|
||||
}
|
||||
|
||||
void Assembler::blsiq(Register dst, Register src) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
int encode = vex_prefix_0F38_and_encode_q(rbx, dst, src);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
}
|
||||
|
||||
void Assembler::blsiq(Register dst, Address src) {
|
||||
InstructionMark im(this);
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
vex_prefix_0F38_q(rbx, dst, src);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_operand(rbx, src);
|
||||
}
|
||||
|
||||
void Assembler::blsmskq(Register dst, Register src) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
int encode = vex_prefix_0F38_and_encode_q(rdx, dst, src);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
}
|
||||
|
||||
void Assembler::blsmskq(Register dst, Address src) {
|
||||
InstructionMark im(this);
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
vex_prefix_0F38_q(rdx, dst, src);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_operand(rdx, src);
|
||||
}
|
||||
|
||||
void Assembler::blsrq(Register dst, Register src) {
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
int encode = vex_prefix_0F38_and_encode_q(rcx, dst, src);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_int8((unsigned char)(0xC0 | encode));
|
||||
}
|
||||
|
||||
void Assembler::blsrq(Register dst, Address src) {
|
||||
InstructionMark im(this);
|
||||
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
|
||||
vex_prefix_0F38_q(rcx, dst, src);
|
||||
emit_int8((unsigned char)0xF3);
|
||||
emit_operand(rcx, src);
|
||||
}
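
For readers unfamiliar with BMI1, the bit-level identities behind these instructions are worth keeping in mind, since the .ad match rules later in this commit pattern-match exactly these expression shapes. A self-contained sketch (illustration only; the real instructions also set flags):

// Scalar models of the BMI1 operations encoded above.
#include <cstdint>
#include <cassert>

static uint64_t blsi_model(uint64_t x)   { return x & (0 - x); }   // isolate lowest set bit
static uint64_t blsmsk_model(uint64_t x) { return x ^ (x - 1); }   // mask up to lowest set bit
static uint64_t blsr_model(uint64_t x)   { return x & (x - 1); }   // clear lowest set bit
static uint64_t andn_model(uint64_t a, uint64_t b) { return ~a & b; }

int main() {
  assert(blsi_model(0b10100)   == 0b00100);
  assert(blsmsk_model(0b10100) == 0b00111);
  assert(blsr_model(0b10100)   == 0b10000);
  assert(andn_model(0b1100, 0b1010) == 0b0010);
  return 0;
}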

void Assembler::cdqq() {
  prefix(REX_W);
  emit_int8((unsigned char)0x99);

@ -590,10 +590,35 @@ private:
    vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256);
  }

  void vex_prefix_0F38(Register dst, Register nds, Address src) {
    bool vex_w = false;
    bool vector256 = false;
    vex_prefix(src, nds->encoding(), dst->encoding(),
               VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
  }

  void vex_prefix_0F38_q(Register dst, Register nds, Address src) {
    bool vex_w = true;
    bool vector256 = false;
    vex_prefix(src, nds->encoding(), dst->encoding(),
               VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
  }

  int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
                            VexSimdPrefix pre, VexOpcode opc,
                            bool vex_w, bool vector256);

  int vex_prefix_0F38_and_encode(Register dst, Register nds, Register src) {
    bool vex_w = false;
    bool vector256 = false;
    return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
                                 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
  }

  int vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src) {
    bool vex_w = true;
    bool vector256 = false;
    return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
                                 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
  }
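
The `_q` helpers differ from their 32-bit counterparts only in VEX.W. A compact sketch of the fields being selected (an assumption-laden simplification, not a working encoder — names here are hypothetical):

// Sketch of the encoding choices the helpers above make for BMI1:
// opcode map 0F38, no SIMD prefix, scalar length, VEX.W picking the width.
struct VexSelection {
  bool     w;          // VEX.W: false => 32-bit form (andnl), true => 64-bit (_q, andnq)
  bool     vector256;  // VEX.L: always false here (scalar GPR instructions)
  unsigned map;        // opcode map: 2 selects the 0F38 escape
};

inline VexSelection bmi1_selection(bool quad) {
  return VexSelection{quad, false, 2u};
}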

  int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
                            VexSimdPrefix pre, bool vector256 = false,
                            VexOpcode opc = VEX_OPCODE_0F) {

@ -897,6 +922,27 @@ private:
  void andq(Register dst, Address src);
  void andq(Register dst, Register src);

  // BMI instructions
  void andnl(Register dst, Register src1, Register src2);
  void andnl(Register dst, Register src1, Address src2);
  void andnq(Register dst, Register src1, Register src2);
  void andnq(Register dst, Register src1, Address src2);

  void blsil(Register dst, Register src);
  void blsil(Register dst, Address src);
  void blsiq(Register dst, Register src);
  void blsiq(Register dst, Address src);

  void blsmskl(Register dst, Register src);
  void blsmskl(Register dst, Address src);
  void blsmskq(Register dst, Register src);
  void blsmskq(Register dst, Address src);

  void blsrl(Register dst, Register src);
  void blsrl(Register dst, Address src);
  void blsrq(Register dst, Register src);
  void blsrq(Register dst, Address src);

  void bsfl(Register dst, Register src);
  void bsrl(Register dst, Register src);

@ -1574,6 +1620,9 @@ private:
  void testq(Register dst, int32_t imm32);
  void testq(Register dst, Register src);

  // BMI - count trailing zeros
  void tzcntl(Register dst, Register src);
  void tzcntq(Register dst, Register src);

  // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  void ucomisd(XMMRegister dst, Address src);

@ -250,7 +250,7 @@ inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
  return op1 - op2;
}

inline jint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
  return ((juint) op1) >> (op2 & 0x1f);
}

@ -574,7 +574,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
                        MethodCounters::invocation_counter_offset() +
                        InvocationCounter::counter_offset());
  const Address backedge_counter (rax,
                        MethodCounter::backedge_counter_offset() +
                        MethodCounters::backedge_counter_offset() +
                        InvocationCounter::counter_offset());

  __ get_method_counters(rbx, rax, done);

@ -982,16 +982,18 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // to save/restore.
  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());

  // rsi/r13 == state/locals rdi == prevstate
  const Register locals = rdi;

  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);
  {
    const Address constMethod       (rbx, Method::const_offset());
    const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
    __ movptr(rcx, constMethod);
    __ load_unsigned_short(rcx, size_of_parameters);
  }

  // rbx: Method*
  // rcx: size of parameters

@ -1111,14 +1113,16 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
  const Register method = rbx;
  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
  const Register t      = InterpreterRuntime::SignatureHandlerGenerator::temp();    // rcx|rscratch1
  const Address constMethod       (method, Method::const_offset());
  const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());

  // allocate space for parameters
  // allocate space for parameters
  __ movptr(method, STATE(_method));
  __ verify_method_ptr(method);
  __ movptr(t, constMethod);
  __ load_unsigned_short(t, size_of_parameters);
  {
    const Address constMethod       (method, Method::const_offset());
    const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
    __ movptr(t, constMethod);
    __ load_unsigned_short(t, size_of_parameters);
  }
  __ shll(t, 2);
#ifdef _LP64
  __ subptr(rsp, t);

@ -2221,7 +2225,6 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();         break;
    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();      break;
    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();      break;
    case Interpreter::method_handle          : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;

    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru

@ -2229,7 +2232,10 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);      break;
    case Interpreter::java_lang_math_sqrt    : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     : // fall thru
      entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
    case Interpreter::java_lang_ref_reference_get
                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
    default                                  : ShouldNotReachHere(); break;

@ -2451,4 +2457,22 @@ int AbstractInterpreter::layout_activation(Method* method,
  return frame_size/BytesPerWord;
}

bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin   : // fall thru
    case Interpreter::java_lang_math_cos   : // fall thru
    case Interpreter::java_lang_math_tan   : // fall thru
    case Interpreter::java_lang_math_abs   : // fall thru
    case Interpreter::java_lang_math_log   : // fall thru
    case Interpreter::java_lang_math_log10 : // fall thru
    case Interpreter::java_lang_math_sqrt  : // fall thru
    case Interpreter::java_lang_math_pow   : // fall thru
    case Interpreter::java_lang_math_exp   :
      return false;
    default:
      return true;
  }
}

#endif // CC_INTERP (all)

@ -687,6 +687,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {

void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
#ifndef CC_INTERP
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);

@ -695,6 +696,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
#endif
  }
}
#endif

@ -135,5 +135,11 @@ define_pd_global(uintx, TypeProfileLevel, 111);
                                                                            \
  product(bool, UseCountLeadingZerosInstruction, false,                     \
          "Use count leading zeros instruction")                            \
                                                                            \
  product(bool, UseCountTrailingZerosInstruction, false,                    \
          "Use count trailing zeros instruction")                           \
                                                                            \
  product(bool, UseBMI1Instructions, false,                                 \
          "Use BMI instructions")

#endif // CPU_X86_VM_GLOBALS_X86_HPP

@ -266,20 +266,6 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, R
  addptr(cache, tmp);            // construct pointer to cache entry
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {

@ -678,6 +664,20 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a

#endif /* !CC_INTERP */

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}


// Lock object
//

@ -1359,6 +1359,19 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  jcc(cond, *where);
}
#endif /* CC_INTERP */
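
A plain C model of the sequence this assembler helper emits may make the flag-free reading easier (a sketch under stated assumptions; increment_mask_and_test is a hypothetical name, and the common condition is "masked value is zero" for profiling-counter overflow):

#include <cstdint>

// Model of increment_mask_and_jump: bump the in-memory counter, mask the new
// value, and report whether the masked value triggers the branch.
static bool increment_mask_and_test(uint32_t* counter_addr, int increment,
                                    uint32_t mask) {
  *counter_addr += (uint32_t)increment;
  return (*counter_addr & mask) == 0;  // caller jumps to the overflow path if true
}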

@ -1430,17 +1443,3 @@ void InterpreterMacroAssembler::notify_method_exit(
    NOT_CC_INTERP(pop(state));
  }
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  jcc(cond, *where);
}

@ -77,7 +77,6 @@
  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
  void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
  void get_method_counters(Register method, Register mcs, Label& skip);

  // load cpool->resolved_references(index);
  void load_resolved_reference_at_index(Register result, Register index);

@ -156,6 +155,7 @@
                         bool install_monitor_exception = true,
                         bool notify_jvmdi = true);
#endif /* !CC_INTERP */
  void get_method_counters(Register method, Register mcs, Label& skip);

  // Debugging
  void verify_oop(Register reg, TosState state = atos);    // only if +VerifyOops && state == atos

@ -271,20 +271,6 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
  addptr(cache, tmp);  // construct pointer to cache entry
}

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {

@ -676,6 +662,21 @@ void InterpreterMacroAssembler::remove_activation(

#endif // C_INTERP

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::notZero, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  movptr(mcs, Address(method, Method::method_counters_offset()));
  testptr(mcs, mcs);
  jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}


// Lock object
//
// Args:

@ -1423,6 +1424,20 @@ void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  jcc(cond, *where);
}
#endif // !CC_INTERP


@ -1491,16 +1506,3 @@ void InterpreterMacroAssembler::notify_method_exit(
  }
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  jcc(cond, *where);
}

@ -99,7 +99,6 @@
  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
  void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
  void get_method_counters(Register method, Register mcs, Label& skip);

  // load cpool->resolved_references(index);
  void load_resolved_reference_at_index(Register result, Register index);

@ -172,6 +171,7 @@
                         bool install_monitor_exception = true,
                         bool notify_jvmdi = true);
#endif // CC_INTERP
  void get_method_counters(Register method, Register mcs, Label& skip);

  // Object locking
  void lock_object (Register lock_reg);

@ -229,10 +229,12 @@ address InterpreterGenerator::generate_abstract_entry(void) {

  // abstract method entry

#ifndef CC_INTERP
  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
#endif

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

@ -310,10 +310,12 @@ address InterpreterGenerator::generate_abstract_entry(void) {

  // abstract method entry

#ifndef CC_INTERP
  // pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
#endif

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,

@ -429,7 +429,7 @@ void VM_Version::get_processor_features() {
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),

@ -455,7 +455,9 @@ void VM_Version::get_processor_features() {
               (supports_ht() ? ", ht": ""),
               (supports_tsc() ? ", tsc": ""),
               (supports_tscinv_bit() ? ", tscinvbit": ""),
               (supports_tscinv() ? ", tscinv": ""));
               (supports_tscinv() ? ", tscinv": ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""));
  _features_str = strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what

@ -600,13 +602,6 @@ void VM_Version::get_processor_features() {
    }
  }

  // Use count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  }

  // some defaults for AMD family 15h
  if ( cpu_family() == 0x15 ) {
    // On family 15h processors default is no sw prefetch

@ -692,6 +687,35 @@ void VM_Version::get_processor_features() {
  }
#endif // COMPILER2

  // Use count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  if (supports_bmi1()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  // Use count trailing zeros instruction if available
  if (supports_bmi1()) {
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      UseCountTrailingZerosInstruction = UseBMI1Instructions;
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }
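
For context, the `supports_bmi1()` predicate ultimately comes from a CPUID probe. A standalone sketch of that probe follows (an assumption about the toolchain: GCC/Clang's <cpuid.h> on x86; BMI1 is reported in CPUID leaf 7, subleaf 0, EBX bit 3):

#include <cpuid.h>

// Hypothetical standalone equivalent of the feature bit behind supports_bmi1().
static bool cpu_has_bmi1() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (__get_cpuid_max(0, 0) < 7) return false;   // leaf 7 not supported
  __cpuid_count(7, 0, eax, ebx, ecx, edx);
  return (ebx & (1u << 3)) != 0;                 // EBX bit 3 = BMI1
}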

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {

@ -141,7 +141,8 @@ public:
  struct {
    uint32_t LahfSahf     : 1,
             CmpLegacy    : 1,
                          : 4,
                          : 3,
             lzcnt_intel  : 1,
             lzcnt        : 1,
             sse4a        : 1,
             misalignsse  : 1,

@ -251,7 +252,9 @@ protected:
    CPU_AVX2   = (1 << 18),
    CPU_AES    = (1 << 19),
    CPU_ERMS   = (1 << 20), // enhanced 'rep movsb/stosb' instructions
    CPU_CLMUL  = (1 << 21) // carryless multiply for CRC
    CPU_CLMUL  = (1 << 21), // carryless multiply for CRC
    CPU_BMI1   = (1 << 22),
    CPU_BMI2   = (1 << 23)
  } cpuFeatureFlags;

  enum {

@ -423,6 +426,8 @@ protected:
      if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
        result |= CPU_AVX2;
    }
    if (_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
      result |= CPU_BMI1;
    if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
      result |= CPU_TSC;
    if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)

@ -444,6 +449,13 @@ protected:
      if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0)
        result |= CPU_SSE4A;
    }
    // Intel features.
    if (is_intel()) {
      if (_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
        result |= CPU_BMI2;
      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
        result |= CPU_LZCNT;
    }

    return result;
  }

@ -560,7 +572,8 @@ public:
  static bool supports_aes()   { return (_cpuFeatures & CPU_AES) != 0; }
  static bool supports_erms()  { return (_cpuFeatures & CPU_ERMS) != 0; }
  static bool supports_clmul() { return (_cpuFeatures & CPU_CLMUL) != 0; }

  static bool supports_bmi1()  { return (_cpuFeatures & CPU_BMI1) != 0; }
  static bool supports_bmi2()  { return (_cpuFeatures & CPU_BMI2) != 0; }
  // Intel features
  static bool is_intel_family_core() { return is_intel() &&
                                       extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }

@ -5163,6 +5163,19 @@ instruct countLeadingZerosL_bsr(rRegI dst, eRegL src, eFlagsReg cr) %{
%}

instruct countTrailingZerosI(rRegI dst, rRegI src, eFlagsReg cr) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));
  effect(KILL cr);

  format %{ "TZCNT $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    __ tzcntl($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosI_bsf(rRegI dst, rRegI src, eFlagsReg cr) %{
  predicate(!UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));
  effect(KILL cr);

@ -5182,6 +5195,30 @@ instruct countTrailingZerosI(rRegI dst, rRegI src, eFlagsReg cr) %{
%}

instruct countTrailingZerosL(rRegI dst, eRegL src, eFlagsReg cr) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP dst, KILL cr);

  format %{ "TZCNT $dst, $src.lo\t# count trailing zeros (long) \n\t"
            "JNC done\n\t"
            "TZCNT $dst, $src.hi\n\t"
            "ADD $dst, 32\n"
            "done:" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Label done;
    __ tzcntl(Rdst, Rsrc);
    __ jccb(Assembler::carryClear, done);
    __ tzcntl(Rdst, HIGH_FROM_LOW(Rsrc));
    __ addl(Rdst, BitsPerInt);
    __ bind(done);
  %}
  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosL_bsf(rRegI dst, eRegL src, eFlagsReg cr) %{
  predicate(!UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP dst, KILL cr);

@ -8027,6 +8064,123 @@ instruct andI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
  ins_pipe( ialu_mem_imm );
%}

// BMI1 instructions
instruct andnI_rReg_rReg_rReg(rRegI dst, rRegI src1, rRegI src2, immI_M1 minus_1, eFlagsReg cr) %{
  match(Set dst (AndI (XorI src1 minus_1) src2));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "ANDNL $dst, $src1, $src2" %}

  ins_encode %{
    __ andnl($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct andnI_rReg_rReg_mem(rRegI dst, rRegI src1, memory src2, immI_M1 minus_1, eFlagsReg cr) %{
  match(Set dst (AndI (XorI src1 minus_1) (LoadI src2) ));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "ANDNL $dst, $src1, $src2" %}

  ins_encode %{
    __ andnl($dst$$Register, $src1$$Register, $src2$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct blsiI_rReg_rReg(rRegI dst, rRegI src, immI0 imm_zero, eFlagsReg cr) %{
  match(Set dst (AndI (SubI imm_zero src) src));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "BLSIL $dst, $src" %}

  ins_encode %{
    __ blsil($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct blsiI_rReg_mem(rRegI dst, memory src, immI0 imm_zero, eFlagsReg cr) %{
  match(Set dst (AndI (SubI imm_zero (LoadI src) ) (LoadI src) ));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "BLSIL $dst, $src" %}

  ins_encode %{
    __ blsil($dst$$Register, $src$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct blsmskI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, eFlagsReg cr)
%{
  match(Set dst (XorI (AddI src minus_1) src));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "BLSMSKL $dst, $src" %}

  ins_encode %{
    __ blsmskl($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}

instruct blsmskI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, eFlagsReg cr)
%{
  match(Set dst (XorI (AddI (LoadI src) minus_1) (LoadI src) ));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "BLSMSKL $dst, $src" %}

  ins_encode %{
    __ blsmskl($dst$$Register, $src$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

instruct blsrI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, eFlagsReg cr)
%{
  match(Set dst (AndI (AddI src minus_1) src) );
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "BLSRL $dst, $src" %}

  ins_encode %{
    __ blsrl($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}

instruct blsrI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, eFlagsReg cr)
%{
  match(Set dst (AndI (AddI (LoadI src) minus_1) (LoadI src) ));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "BLSRL $dst, $src" %}

  ins_encode %{
    __ blsrl($dst$$Register, $src$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Or Instructions
// Or Register with Register
instruct orI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{

@ -8649,6 +8803,210 @@ instruct andL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
  ins_pipe( ialu_reg_long_mem );
%}

// BMI1 instructions
instruct andnL_eReg_eReg_eReg(eRegL dst, eRegL src1, eRegL src2, immL_M1 minus_1, eFlagsReg cr) %{
  match(Set dst (AndL (XorL src1 minus_1) src2));
  predicate(UseBMI1Instructions);
  effect(KILL cr, TEMP dst);

  format %{ "ANDNL $dst.lo, $src1.lo, $src2.lo\n\t"
            "ANDNL $dst.hi, $src1.hi, $src2.hi"
         %}

  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc1 = $src1$$Register;
    Register Rsrc2 = $src2$$Register;
    __ andnl(Rdst, Rsrc1, Rsrc2);
    __ andnl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc1), HIGH_FROM_LOW(Rsrc2));
  %}
  ins_pipe(ialu_reg_reg_long);
%}

instruct andnL_eReg_eReg_mem(eRegL dst, eRegL src1, memory src2, immL_M1 minus_1, eFlagsReg cr) %{
  match(Set dst (AndL (XorL src1 minus_1) (LoadL src2) ));
  predicate(UseBMI1Instructions);
  effect(KILL cr, TEMP dst);

  ins_cost(125);
  format %{ "ANDNL $dst.lo, $src1.lo, $src2\n\t"
            "ANDNL $dst.hi, $src1.hi, $src2+4"
         %}

  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc1 = $src1$$Register;
    Address src2_hi = Address::make_raw($src2$$base, $src2$$index, $src2$$scale, $src2$$disp + 4, relocInfo::none);

    __ andnl(Rdst, Rsrc1, $src2$$Address);
    __ andnl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc1), src2_hi);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct blsiL_eReg_eReg(eRegL dst, eRegL src, immL0 imm_zero, eFlagsReg cr) %{
  match(Set dst (AndL (SubL imm_zero src) src));
  predicate(UseBMI1Instructions);
  effect(KILL cr, TEMP dst);

  format %{ "MOVL $dst.hi, 0\n\t"
            "BLSIL $dst.lo, $src.lo\n\t"
            "JNZ done\n\t"
            "BLSIL $dst.hi, $src.hi\n"
            "done:"
         %}

  ins_encode %{
    Label done;
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ movl(HIGH_FROM_LOW(Rdst), 0);
    __ blsil(Rdst, Rsrc);
    __ jccb(Assembler::notZero, done);
    __ blsil(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
    __ bind(done);
  %}
  ins_pipe(ialu_reg);
%}
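
The split-register expansion above is easy to misread, so a scalar model may help (illustration only): BLSI sets ZF when its input is zero, so the JNZ skips the high word whenever the low word already contained the lowest set bit.

#include <cstdint>

// Hypothetical model of the 32-bit-pair BLSI sequence.
static uint64_t blsi64_from_halves(uint32_t lo, uint32_t hi) {
  uint32_t rhi = 0;                    // MOVL $dst.hi, 0
  uint32_t rlo = lo & (0 - lo);        // BLSIL $dst.lo, $src.lo
  if (rlo == 0) {                      // JNZ done (taken when lo != 0)
    rhi = hi & (0 - hi);               // BLSIL $dst.hi, $src.hi
  }
  return ((uint64_t)rhi << 32) | rlo;
}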

instruct blsiL_eReg_mem(eRegL dst, memory src, immL0 imm_zero, eFlagsReg cr) %{
  match(Set dst (AndL (SubL imm_zero (LoadL src) ) (LoadL src) ));
  predicate(UseBMI1Instructions);
  effect(KILL cr, TEMP dst);

  ins_cost(125);
  format %{ "MOVL $dst.hi, 0\n\t"
            "BLSIL $dst.lo, $src\n\t"
            "JNZ done\n\t"
            "BLSIL $dst.hi, $src+4\n"
            "done:"
         %}

  ins_encode %{
    Label done;
    Register Rdst = $dst$$Register;
    Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);

    __ movl(HIGH_FROM_LOW(Rdst), 0);
    __ blsil(Rdst, $src$$Address);
    __ jccb(Assembler::notZero, done);
    __ blsil(HIGH_FROM_LOW(Rdst), src_hi);
    __ bind(done);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct blsmskL_eReg_eReg(eRegL dst, eRegL src, immL_M1 minus_1, eFlagsReg cr)
%{
  match(Set dst (XorL (AddL src minus_1) src));
  predicate(UseBMI1Instructions);
  effect(KILL cr, TEMP dst);

  format %{ "MOVL $dst.hi, 0\n\t"
            "BLSMSKL $dst.lo, $src.lo\n\t"
            "JNC done\n\t"
            "BLSMSKL $dst.hi, $src.hi\n"
            "done:"
         %}

  ins_encode %{
    Label done;
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ movl(HIGH_FROM_LOW(Rdst), 0);
    __ blsmskl(Rdst, Rsrc);
    __ jccb(Assembler::carryClear, done);
    __ blsmskl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
    __ bind(done);
  %}

  ins_pipe(ialu_reg);
%}
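
Again a scalar model clarifies the flag use (illustration only): BLSMSK sets CF exactly when its input is zero, so the JNC stops after the low word unless the mask has to continue into the high word, in which case the low result is already all ones.

#include <cstdint>

// Hypothetical model of the 32-bit-pair BLSMSK sequence.
static uint64_t blsmsk64_from_halves(uint32_t lo, uint32_t hi) {
  uint32_t rhi = 0;                    // MOVL $dst.hi, 0
  uint32_t rlo = lo ^ (lo - 1u);       // BLSMSKL $dst.lo (all ones when lo == 0)
  if (lo == 0) {                       // JNC done (CF set only when lo == 0)
    rhi = hi ^ (hi - 1u);              // BLSMSKL $dst.hi, $src.hi
  }
  return ((uint64_t)rhi << 32) | rlo;
}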

instruct blsmskL_eReg_mem(eRegL dst, memory src, immL_M1 minus_1, eFlagsReg cr)
%{
  match(Set dst (XorL (AddL (LoadL src) minus_1) (LoadL src) ));
  predicate(UseBMI1Instructions);
  effect(KILL cr, TEMP dst);

  ins_cost(125);
  format %{ "MOVL $dst.hi, 0\n\t"
            "BLSMSKL $dst.lo, $src\n\t"
            "JNC done\n\t"
            "BLSMSKL $dst.hi, $src+4\n"
            "done:"
         %}

  ins_encode %{
    Label done;
    Register Rdst = $dst$$Register;
    Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);

    __ movl(HIGH_FROM_LOW(Rdst), 0);
    __ blsmskl(Rdst, $src$$Address);
    __ jccb(Assembler::carryClear, done);
    __ blsmskl(HIGH_FROM_LOW(Rdst), src_hi);
    __ bind(done);
  %}

  ins_pipe(ialu_reg_mem);
%}

instruct blsrL_eReg_eReg(eRegL dst, eRegL src, immL_M1 minus_1, eFlagsReg cr)
%{
  match(Set dst (AndL (AddL src minus_1) src) );
  predicate(UseBMI1Instructions);
  effect(KILL cr, TEMP dst);

  format %{ "MOVL $dst.hi, $src.hi\n\t"
            "BLSRL $dst.lo, $src.lo\n\t"
            "JNC done\n\t"
            "BLSRL $dst.hi, $src.hi\n"
            "done:"
         %}

  ins_encode %{
    Label done;
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ movl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
    __ blsrl(Rdst, Rsrc);
    __ jccb(Assembler::carryClear, done);
    __ blsrl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
    __ bind(done);
  %}

  ins_pipe(ialu_reg);
%}

instruct blsrL_eReg_mem(eRegL dst, memory src, immL_M1 minus_1, eFlagsReg cr)
%{
  match(Set dst (AndL (AddL (LoadL src) minus_1) (LoadL src) ));
  predicate(UseBMI1Instructions);
  effect(KILL cr, TEMP dst);

  ins_cost(125);
  format %{ "MOVL $dst.hi, $src+4\n\t"
            "BLSRL $dst.lo, $src\n\t"
            "JNC done\n\t"
            "BLSRL $dst.hi, $src+4\n"
            "done:"
         %}

  ins_encode %{
    Label done;
    Register Rdst = $dst$$Register;
    Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);
    __ movl(HIGH_FROM_LOW(Rdst), src_hi);
    __ blsrl(Rdst, $src$$Address);
    __ jccb(Assembler::carryClear, done);
    __ blsrl(HIGH_FROM_LOW(Rdst), src_hi);
    __ bind(done);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Or Long Register with Register
instruct orl_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
  match(Set dst (OrL dst src));

@ -6022,6 +6022,19 @@ instruct countLeadingZerosL_bsr(rRegI dst, rRegL src, rFlagsReg cr) %{
%}

instruct countTrailingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));
  effect(KILL cr);

  format %{ "tzcntl $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    __ tzcntl($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosI_bsf(rRegI dst, rRegI src, rFlagsReg cr) %{
  predicate(!UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));
  effect(KILL cr);

@ -6041,6 +6054,19 @@ instruct countTrailingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
%}

instruct countTrailingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));
  effect(KILL cr);

  format %{ "tzcntq $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ tzcntq($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosL_bsf(rRegI dst, rRegL src, rFlagsReg cr) %{
  predicate(!UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));
  effect(KILL cr);

@ -8622,6 +8648,122 @@ instruct andI_mem_imm(memory dst, immI src, rFlagsReg cr)
  ins_pipe(ialu_mem_imm);
%}

// BMI1 instructions
instruct andnI_rReg_rReg_mem(rRegI dst, rRegI src1, memory src2, immI_M1 minus_1, rFlagsReg cr) %{
  match(Set dst (AndI (XorI src1 minus_1) (LoadI src2)));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "andnl $dst, $src1, $src2" %}

  ins_encode %{
    __ andnl($dst$$Register, $src1$$Register, $src2$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct andnI_rReg_rReg_rReg(rRegI dst, rRegI src1, rRegI src2, immI_M1 minus_1, rFlagsReg cr) %{
  match(Set dst (AndI (XorI src1 minus_1) src2));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "andnl $dst, $src1, $src2" %}

  ins_encode %{
    __ andnl($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct blsiI_rReg_rReg(rRegI dst, rRegI src, immI0 imm_zero, rFlagsReg cr) %{
  match(Set dst (AndI (SubI imm_zero src) src));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "blsil $dst, $src" %}

  ins_encode %{
    __ blsil($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct blsiI_rReg_mem(rRegI dst, memory src, immI0 imm_zero, rFlagsReg cr) %{
  match(Set dst (AndI (SubI imm_zero (LoadI src) ) (LoadI src) ));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "blsil $dst, $src" %}

  ins_encode %{
    __ blsil($dst$$Register, $src$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct blsmskI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, rFlagsReg cr)
%{
  match(Set dst (XorI (AddI (LoadI src) minus_1) (LoadI src) ) );
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "blsmskl $dst, $src" %}

  ins_encode %{
    __ blsmskl($dst$$Register, $src$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct blsmskI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, rFlagsReg cr)
%{
  match(Set dst (XorI (AddI src minus_1) src));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "blsmskl $dst, $src" %}

  ins_encode %{
    __ blsmskl($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}

instruct blsrI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, rFlagsReg cr)
%{
  match(Set dst (AndI (AddI src minus_1) src) );
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "blsrl $dst, $src" %}

  ins_encode %{
    __ blsrl($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg_mem);
%}

instruct blsrI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, rFlagsReg cr)
%{
  match(Set dst (AndI (AddI (LoadI src) minus_1) (LoadI src) ) );
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "blsrl $dst, $src" %}

  ins_encode %{
    __ blsrl($dst$$Register, $src$$Address);
  %}

  ins_pipe(ialu_reg);
%}

// Or Instructions
// Or Register with Register
instruct orI_rReg(rRegI dst, rRegI src, rFlagsReg cr)

@ -8853,6 +8995,122 @@ instruct andL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
  ins_pipe(ialu_mem_imm);
%}

// BMI1 instructions
instruct andnL_rReg_rReg_mem(rRegL dst, rRegL src1, memory src2, immL_M1 minus_1, rFlagsReg cr) %{
  match(Set dst (AndL (XorL src1 minus_1) (LoadL src2)));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "andnq $dst, $src1, $src2" %}

  ins_encode %{
    __ andnq($dst$$Register, $src1$$Register, $src2$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct andnL_rReg_rReg_rReg(rRegL dst, rRegL src1, rRegL src2, immL_M1 minus_1, rFlagsReg cr) %{
  match(Set dst (AndL (XorL src1 minus_1) src2));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "andnq $dst, $src1, $src2" %}

  ins_encode %{
    __ andnq($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct blsiL_rReg_rReg(rRegL dst, rRegL src, immL0 imm_zero, rFlagsReg cr) %{
  match(Set dst (AndL (SubL imm_zero src) src));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "blsiq $dst, $src" %}

  ins_encode %{
    __ blsiq($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct blsiL_rReg_mem(rRegL dst, memory src, immL0 imm_zero, rFlagsReg cr) %{
  match(Set dst (AndL (SubL imm_zero (LoadL src) ) (LoadL src) ));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "blsiq $dst, $src" %}

  ins_encode %{
    __ blsiq($dst$$Register, $src$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct blsmskL_rReg_mem(rRegL dst, memory src, immL_M1 minus_1, rFlagsReg cr)
%{
  match(Set dst (XorL (AddL (LoadL src) minus_1) (LoadL src) ) );
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "blsmskq $dst, $src" %}

  ins_encode %{
    __ blsmskq($dst$$Register, $src$$Address);
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct blsmskL_rReg_rReg(rRegL dst, rRegL src, immL_M1 minus_1, rFlagsReg cr)
%{
  match(Set dst (XorL (AddL src minus_1) src));
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "blsmskq $dst, $src" %}

  ins_encode %{
    __ blsmskq($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}

instruct blsrL_rReg_rReg(rRegL dst, rRegL src, immL_M1 minus_1, rFlagsReg cr)
%{
  match(Set dst (AndL (AddL src minus_1) src) );
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  format %{ "blsrq $dst, $src" %}

  ins_encode %{
    __ blsrq($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}

instruct blsrL_rReg_mem(rRegL dst, memory src, immL_M1 minus_1, rFlagsReg cr)
%{
  match(Set dst (AndL (AddL (LoadL src) minus_1) (LoadL src)) );
  predicate(UseBMI1Instructions);
  effect(KILL cr);

  ins_cost(125);
  format %{ "blsrq $dst, $src" %}

  ins_encode %{
    __ blsrq($dst$$Register, $src$$Address);
  %}

  ins_pipe(ialu_reg);
%}

// Or Instructions
// Or Register with Register
instruct orL_rReg(rRegL dst, rRegL src, rFlagsReg cr)

@ -1135,15 +1135,10 @@ jlong os::javaTimeNanos() {
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  {
    // gettimeofday - based on time in seconds since the Epoch thus does not wrap
    info_ptr->max_value = ALL_64_BITS;

    // gettimeofday is a real time clock so it skips
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  }

  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

@ -2799,105 +2794,6 @@ size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}

#define NANOSECS_PER_MILLISEC 1000000

int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // Prevent nasty overflow in deadline calculation
  // by handling long sleeps similar to solaris or windows.
  const jlong limit = INT_MAX;
  int result;
  while (millis > limit) {
    if ((result = os::sleep(thread, limit, interruptible)) != OS_OK) {
      return result;
    }
    millis -= limit;
  }

  ParkEvent * const slp = thread->_SleepEvent;
  slp->reset();
  OrderAccess::fence();

  if (interruptible) {
    jlong prevtime = javaTimeNanos();

    // Prevent precision loss and too long sleeps
    jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;

    for (;;) {
      if (os::is_interrupted(thread, true)) {
        return OS_INTRPT;
      }

      jlong newtime = javaTimeNanos();

      assert(newtime >= prevtime, "time moving backwards");
      // Doing prevtime and newtime in microseconds doesn't help precision,
      // and trying to round up to avoid lost milliseconds can result in a
      // too-short delay.
      millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;

      if (millis <= 0) {
        return OS_OK;
      }

      // Stop sleeping if we passed the deadline
      if (newtime >= deadline) {
        return OS_OK;
      }

      prevtime = newtime;

      {
        assert(thread->is_Java_thread(), "sanity check");
        JavaThread *jt = (JavaThread *) thread;
        ThreadBlockInVM tbivm(jt);
        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

        jt->set_suspend_equivalent();

        slp->park(millis);

        // were we externally suspended while we were waiting?
        jt->check_and_wait_while_suspended();
      }
    }
  } else {
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jlong prevtime = javaTimeNanos();

    // Prevent precision loss and too long sleeps
    jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;

    for (;;) {
      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
      // the 1st iteration ...
      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        // - HS14 Commented out as not implemented.
        // - TODO Maybe we should implement it?
        //assert(!Aix::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
      }

      if (millis <= 0) break;

      if (newtime >= deadline) {
        break;
      }

      prevtime = newtime;
      slp->park(millis);
    }
    return OS_OK;
  }
}
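
The overflow guard at the top of the (now removed) os::sleep is the subtle part: `millis * NANOSECS_PER_MILLISEC` can overflow a 64-bit deadline for huge sleeps, so long sleeps are chopped into INT_MAX-millisecond chunks first. A minimal self-contained sketch of that idea (illustration only; sleep_chunked is a hypothetical helper):

#include <climits>
#include <chrono>
#include <thread>

// Split an arbitrarily long sleep into bounded chunks so the nanosecond
// deadline arithmetic on each chunk cannot overflow.
static void sleep_chunked(long long millis) {
  const long long limit = INT_MAX;
  while (millis > 0) {
    long long chunk = millis < limit ? millis : limit;
    std::this_thread::sleep_for(std::chrono::milliseconds(chunk));
    millis -= chunk;
  }
}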

void os::naked_short_sleep(jlong ms) {
  struct timespec req;

@ -3246,50 +3142,6 @@ static void do_resume(OSThread* osthread) {
  guarantee(osthread->sr.is_running(), "Must be running!");
}

////////////////////////////////////////////////////////////////////////////////
// interrupt support

void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  if (!osthread->interrupted()) {
    osthread->set_interrupted(true);
    // More than one thread can get here with the same value of osthread,
    // resulting in multiple notifications. We do, however, want the store
    // to interrupted() to be visible to other threads before we execute unpark().
    OrderAccess::fence();
    ParkEvent * const slp = thread->_SleepEvent;
    if (slp != NULL) slp->unpark();
  }

  // For JSR166. Unpark even if interrupt status already was set
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();

}

bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  bool interrupted = osthread->interrupted();

  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    // consider thread->_SleepEvent->reset() ... optional optimization
  }

  return interrupted;
}

///////////////////////////////////////////////////////////////////////////////////
// signal handling (except suspend/resume)


@ -283,4 +283,10 @@ inline int os::set_sock_opt(int fd, int level, int optname,
                            const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}

inline bool os::supports_monotonic_clock() {
  // mread_real_time() is monotonic on AIX (see os::javaTimeNanos() comments)
  return true;
}

#endif // OS_AIX_VM_OS_AIX_INLINE_HPP

@ -5284,7 +5284,6 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {

static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  static bool proc_task_unchecked = true;
  static const char *proc_stat_path = "/proc/%d/stat";
  pid_t  tid = thread->osthread()->thread_id();
  char *s;
  char stat[2048];

@ -5297,6 +5296,8 @@ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  long ldummy;
  FILE *fp;

  snprintf(proc_name, 64, "/proc/%d/stat", tid);

  // The /proc/<tid>/stat aggregates per-process usage on
  // new Linux kernels 2.6+ where NPTL is supported.
  // The /proc/self/task/<tid>/stat still has the per-thread usage.

@ -5308,12 +5309,11 @@ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
    proc_task_unchecked = false;
    fp = fopen("/proc/self/task", "r");
    if (fp != NULL) {
      proc_stat_path = "/proc/self/task/%d/stat";
      snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
      fclose(fp);
    }
  }

  sprintf(proc_name, proc_stat_path, tid);
  fp = fopen(proc_name, "r");
  if ( fp == NULL ) return -1;
  statlen = fread(stat, 1, 2047, fp);
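
The effect of this hunk is to build the path eagerly with bounded snprintf calls instead of keeping a format string around for a later unbounded sprintf. A compact model of the resulting selection logic (illustration only; thread_stat_path is a hypothetical helper):

#include <cstdio>
#include <cstddef>

// Prefer the per-thread /proc/self/task/<tid>/stat when the task directory
// exists (NPTL kernels); otherwise fall back to /proc/<tid>/stat.
static void thread_stat_path(char* buf, std::size_t len, int tid) {
  std::FILE* fp = std::fopen("/proc/self/task", "r");
  if (fp != NULL) {
    std::fclose(fp);
    std::snprintf(buf, len, "/proc/self/task/%d/stat", tid);
  } else {
    std::snprintf(buf, len, "/proc/%d/stat", tid);
  }
}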
@ -3619,13 +3619,14 @@ bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  bool interrupted = osthread->interrupted();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing
  // depending on the timing. Checking the thread's interrupt event confirms
  // a real interrupt occurred, which prevents spurious wakeups.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
@ -660,6 +660,7 @@ int InstructForm::memory_operand(FormDict &globals) const {
  int USE_of_memory = 0;
  int DEF_of_memory = 0;
  const char* last_memory_DEF = NULL; // to test DEF/USE pairing in asserts
  const char* last_memory_USE = NULL;
  Component *unique = NULL;
  Component *comp = NULL;
  ComponentList &components = (ComponentList &)_components;

@ -681,7 +682,16 @@ int InstructForm::memory_operand(FormDict &globals) const {
        assert(0 == strcmp(last_memory_DEF, comp->_name), "every memory DEF is followed by a USE of the same name");
        last_memory_DEF = NULL;
      }
      USE_of_memory++;
      // Handles same memory being used multiple times in the case of BMI1 instructions.
      if (last_memory_USE != NULL) {
        if (strcmp(comp->_name, last_memory_USE) != 0) {
          USE_of_memory++;
        }
      } else {
        USE_of_memory++;
      }
      last_memory_USE = comp->_name;

      if (DEF_of_memory == 0)  // defs take precedence
        unique = comp;
    } else {
|
||||
|
||||
bool need_mem_bar = false;
|
||||
if (method()->name() == ciSymbol::object_initializer_name() &&
|
||||
scope()->wrote_final()) {
|
||||
(scope()->wrote_final() || (AlwaysSafeConstructors && scope()->wrote_fields()))) {
|
||||
need_mem_bar = true;
|
||||
}
|
||||
|
||||
@ -1550,6 +1550,10 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
|
||||
scope()->set_wrote_final();
|
||||
}
|
||||
|
||||
if (code == Bytecodes::_putfield) {
|
||||
scope()->set_wrote_fields();
|
||||
}
|
||||
|
||||
const int offset = !needs_patching ? field->offset() : -1;
|
||||
switch (code) {
|
||||
case Bytecodes::_getstatic: {
|
||||
@ -3767,11 +3771,14 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
|
||||
}
|
||||
|
||||
// now perform tests that are based on flag settings
|
||||
if (callee->force_inline()) {
|
||||
if (inline_level() > MaxForceInlineLevel) INLINE_BAILOUT("MaxForceInlineLevel");
|
||||
print_inlining(callee, "force inline by annotation");
|
||||
} else if (callee->should_inline()) {
|
||||
print_inlining(callee, "force inline by CompileOracle");
|
||||
if (callee->force_inline() || callee->should_inline()) {
|
||||
if (inline_level() > MaxForceInlineLevel ) INLINE_BAILOUT("MaxForceInlineLevel");
|
||||
if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep");
|
||||
|
||||
const char* msg = "";
|
||||
if (callee->force_inline()) msg = "force inline by annotation";
|
||||
if (callee->should_inline()) msg = "force inline by CompileOracle";
|
||||
print_inlining(callee, msg);
|
||||
} else {
|
||||
// use heuristic controls on inlining
|
||||
if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("inlining too deep");
|
||||
|
@ -142,6 +142,7 @@ IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMe
  _number_of_locks    = 0;
  _monitor_pairing_ok = method->has_balanced_monitors();
  _wrote_final        = false;
  _wrote_fields       = false;
  _start              = NULL;

  if (osr_bci == -1) {

@ -150,6 +150,7 @@ class IRScope: public CompilationResourceObj {
  int           _number_of_locks;    // the number of monitor lock slots needed
  bool          _monitor_pairing_ok; // the monitor pairing info
  bool          _wrote_final;        // has written final field
  bool          _wrote_fields;       // has written fields
  BlockBegin*   _start;              // the start block, successors are method entries

  BitMap        _requires_phi_function; // bit is set if phi functions at loop headers are necessary for a local variable

@ -184,6 +185,9 @@ class IRScope: public CompilationResourceObj {
  BlockBegin*   start() const        { return _start; }
  void          set_wrote_final()    { _wrote_final = true; }
  bool          wrote_final() const  { return _wrote_final; }
  void          set_wrote_fields()   { _wrote_fields = true; }
  bool          wrote_fields() const { return _wrote_fields; }
};

@ -1734,7 +1734,8 @@ void LIRGenerator::do_StoreField(StoreField* x) {
                    (info ? new CodeEmitInfo(info) : NULL));
  }

  if (is_volatile && !needs_patching) {
  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
  if (needs_atomic_access && !needs_patching) {
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;

@ -1807,7 +1808,8 @@ void LIRGenerator::do_LoadField(LoadField* x) {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && !needs_patching) {
  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
  if (needs_atomic_access && !needs_patching) {
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
@ -809,11 +809,10 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);

#ifndef PRODUCT
  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
#endif // PRODUCT
  bool deoptimize_for_volatile = false;
  bool deoptimize_for_atomic = false;
  int patch_field_offset = -1;
  KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
  KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code

@ -839,11 +838,24 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
    // is the path for patching field offsets. load_klass is only
    // used for patching references to oops which don't need special
    // handling in the volatile case.

    deoptimize_for_volatile = result.access_flags().is_volatile();

#ifndef PRODUCT
    // If we are patching a field which should be atomic, then
    // the generated code is not correct either, force deoptimizing.
    // We need to only cover T_LONG and T_DOUBLE fields, as we can
    // break access atomicity only for them.

    // Strictly speaking, the deoptimization on 64-bit platforms
    // is unnecessary, and T_LONG stores on 32-bit platforms need
    // to be handled by special patching code when AlwaysAtomicAccesses
    // becomes a product feature. At this point, we are still going
    // for the deoptimization for consistency against volatile
    // accesses.

    patch_field_type = result.field_type();
#endif
    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));

  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = NULL;
    switch (code) {

@ -918,13 +930,19 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
      ShouldNotReachHere();
  }

  if (deoptimize_for_volatile) {
    // At compile time we assumed the field wasn't volatile but after
    // loading it turns out it was volatile so we have to throw the
  if (deoptimize_for_volatile || deoptimize_for_atomic) {
    // At compile time we assumed the field wasn't volatile/atomic but after
    // loading it turns out it was volatile/atomic so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      tty->print_cr("Deoptimizing for patching volatile field reference");
      if (deoptimize_for_volatile) {
        tty->print_cr("Deoptimizing for patching volatile field reference");
      }
      if (deoptimize_for_atomic) {
        tty->print_cr("Deoptimizing for patching atomic field reference");
      }
    }

    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
@ -724,6 +724,11 @@ ciMethod* ciMethod::find_monomorphic_target(ciInstanceKlass* caller,

  VM_ENTRY_MARK;

  // Disable CHA for default methods for now
  if (root_m->get_Method()->is_default_method()) {
    return NULL;
  }

  methodHandle target;
  {
    MutexLocker locker(Compile_lock);

@ -87,8 +87,9 @@ void ciMethodData::load_extra_data() {
  DataLayout* dp_dst  = extra_data_base();
  for (;; dp_src = MethodData::next_extra(dp_src), dp_dst = MethodData::next_extra(dp_dst)) {
    assert(dp_src < end_src, "moved past end of extra data");
    assert(dp_src->tag() == dp_dst->tag(), err_msg("should be same tags %d != %d", dp_src->tag(), dp_dst->tag()));
    switch(dp_src->tag()) {
    // New traps in the MDO can be added as we translate the copy so
    // look at the entries in the copy.
    switch(dp_dst->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      ciSpeculativeTrapData* data_dst = new ciSpeculativeTrapData(dp_dst);
      SpeculativeTrapData*   data_src = new SpeculativeTrapData(dp_src);

@ -102,7 +103,7 @@ void ciMethodData::load_extra_data() {
      // An empty slot or ArgInfoData entry marks the end of the trap data
      return;
    default:
      fatal(err_msg("bad tag = %d", dp_src->tag()));
      fatal(err_msg("bad tag = %d", dp_dst->tag()));
    }
  }
}
@ -198,14 +198,12 @@ CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {

@ -221,7 +219,6 @@ void CodeCache::free(CodeBlob* cb) {

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}

@ -244,12 +241,6 @@ void CodeCache::commit(CodeBlob* cb) {
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var) for (CodeBlob *var = first() ; var != NULL; var = next(var) )

@ -269,7 +260,7 @@ bool CodeCache::contains(void *p) {
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potientially look up non_entrant methods
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

@ -741,17 +732,26 @@ void CodeCache::report_codemem_full() {
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
    wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: %d", freelist_length());
  tty->print_cr("Allocated in freelist:          %dkB", bytes_allocated_in_freelist()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      %dkB", (int)(wasted_bytes/K));
  tty->print_cr("Segment map size:               %dkB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
@ -774,7 +774,7 @@ void CodeCache::print_internals() {
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  CodeBlob *cb;

@ -798,13 +798,11 @@ void CodeCache::print_internals() {
      if(nm->is_not_entrant()) { nmethodNotEntrant++; }
      if(nm->is_zombie()) { nmethodZombie++; }
      if(nm->is_unloaded()) { nmethodUnloaded++; }
      if(nm->is_native_method()) { nmethodNative++; }
      if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

      if(nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
        max_nm_size = MAX2(max_nm_size, nm->size());
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;

@ -820,18 +818,19 @@ void CodeCache::print_internals() {
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets,0,sizeof(int) * bucketLimit);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if(nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
      buckets[nm->size() / bucketSize]++;
    }
  }
}

  tty->print_cr("Code Cache Entries (total of %d)",total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d",nmethodCount);

@ -858,6 +857,7 @@ void CodeCache::print_internals() {
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
  print_memory_overhead();
}

#endif // !PRODUCT
@ -58,12 +58,13 @@ class CodeCache : AllStatic {
  static bool _needs_cache_clean;
  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()

  static void verify_if_often() PRODUCT_RETURN;

  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;

  static int _codemem_full_count;
  static size_t bytes_allocated_in_freelist() { return _heap->allocated_in_freelist(); }
  static int    allocated_segments()          { return _heap->allocated_segments(); }
  static size_t freelist_length()             { return _heap->freelist_length(); }

 public:

@ -78,7 +79,6 @@ class CodeCache : AllStatic {
  static int alignment_unit();                   // guaranteed alignment of all CodeBlobs
  static int alignment_offset();                 // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
  static void free(CodeBlob* cb);                // frees a CodeBlob
  static void flush();                           // flushes all CodeBlobs
  static bool contains(void *p);                 // returns whether p is included
  static void blobs_do(void f(CodeBlob* cb));    // iterates over all CodeBlobs
  static void blobs_do(CodeBlobClosure* f);      // iterates over all CodeBlobs

@ -150,6 +150,7 @@ class CodeCache : AllStatic {
  // Printing/debugging
  static void print();                           // prints summary
  static void print_internals();
  static void print_memory_overhead();
  static void verify();                          // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
  static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -273,8 +273,8 @@ class DebugInfoReadStream : public CompressedReadStream {
  }
  Method* read_method() {
    Method* o = (Method*)(code()->metadata_at(read_int()));
    assert(o == NULL ||
           o->is_metaspace_object(), "meta data only");
    // is_metadata() is a faster check than is_metaspace_object()
    assert(o == NULL || o->is_metadata(), "meta data only");
    return o;
  }
  ScopeValue* read_object_value();
@ -725,13 +725,13 @@ Klass* Dependencies::DepStream::context_type() {
}

// ----------------- DependencySignature --------------------------------------
bool DependencySignature::equals(DependencySignature* sig) const {
  if ((type() != sig->type()) || (args_count() != sig->args_count())) {
bool DependencySignature::equals(DependencySignature const& s1, DependencySignature const& s2) {
  if ((s1.type() != s2.type()) || (s1.args_count() != s2.args_count())) {
    return false;
  }

  for (int i = 0; i < sig->args_count(); i++) {
    if (arg(i) != sig->arg(i)) {
  for (int i = 0; i < s1.args_count(); i++) {
    if (s1.arg(i) != s2.arg(i)) {
      return false;
    }
  }

@ -527,7 +527,7 @@ class Dependencies: public ResourceObj {
};


class DependencySignature : public GenericHashtableEntry<DependencySignature, ResourceObj> {
class DependencySignature : public ResourceObj {
 private:
  int           _args_count;
  uintptr_t     _argument_hash[Dependencies::max_arg_count];

@ -542,12 +542,13 @@ class DependencySignature : public GenericHashtableEntry<DependencySignature, Re
    }
  }

  bool equals(DependencySignature* sig) const;
  uintptr_t key() const { return _argument_hash[0] >> 2; }
  static bool     equals(DependencySignature const& s1, DependencySignature const& s2);
  static unsigned hash  (DependencySignature const& s1) { return s1.arg(0) >> 2; }

  int args_count()             const { return _args_count; }
  uintptr_t arg(int idx)       const { return _argument_hash[idx]; }
  Dependencies::DepType type() const { return _type; }

};
@ -39,6 +39,7 @@
#include "prims/jvmtiImpl.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"

@ -2135,7 +2136,11 @@ void nmethod::check_all_dependencies(DepChange& changes) {
  // Turn off dependency tracing while actually testing dependencies.
  NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );

  GenericHashtable<DependencySignature, ResourceObj>* table = new GenericHashtable<DependencySignature, ResourceObj>(11027);
  typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
                            &DependencySignature::equals, 11027> DepTable;

  DepTable* table = new DepTable();

  // Iterate over live nmethods and check dependencies of all nmethods that are not
  // marked for deoptimization. A particular dependency is only checked once.
  for(nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {

@ -2143,9 +2148,10 @@ void nmethod::check_all_dependencies(DepChange& changes) {
    for (Dependencies::DepStream deps(nm); deps.next(); ) {
      // Construct abstraction of a dependency.
      DependencySignature* current_sig = new DependencySignature(deps);
      // Determine if 'deps' is already checked. table->add() returns
      // 'true' if the dependency was added (i.e., was not in the hashtable).
      if (table->add(current_sig)) {

      // Determine if dependency is already checked. table->put(...) returns
      // 'true' if the dependency is added (i.e., was not in the hashtable).
      if (table->put(*current_sig, 1)) {
        if (deps.check_dependency() != NULL) {
          // Dependency checking failed. Print out information about the failed
          // dependency and finally fail with an assert. We can fail here, since
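For reference, a rough standalone illustration of the dedup pattern the new DepTable provides: check each signature's dependency only on first insertion. std::unordered_set stands in for HotSpot's internal ResourceHashtable, and the Signature type here is a hypothetical stand-in for DependencySignature.

#include <cstdint>
#include <cstdio>
#include <unordered_set>
#include <vector>

struct Signature {
  int type;
  std::vector<uintptr_t> args;
  bool operator==(const Signature& o) const {
    return type == o.type && args == o.args;   // mirrors equals(): type, then args
  }
};

struct SignatureHash {
  size_t operator()(const Signature& s) const {
    return s.args.empty() ? 0 : (size_t)(s.args[0] >> 2);  // mirrors hash(): arg(0) >> 2
  }
};

int main() {
  std::unordered_set<Signature, SignatureHash> table;
  Signature a{1, {0x1000, 0x2000}};
  Signature b{1, {0x1000, 0x2000}};  // duplicate of a
  // insert(...).second plays the role of table->put(...) returning true
  // only when the signature was not already present.
  printf("first insert: %d\n", table.insert(a).second);   // 1 -> check the dependency
  printf("second insert: %d\n", table.insert(b).second);  // 0 -> already checked, skip
  return 0;
}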
@ -374,25 +374,8 @@ static void usage() {
  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"

#define RANGE0 "[*" RANGEBASE "]"
#define RANGEDOT "[*" RANGEBASE ".]"
#define RANGESLASH "[*" RANGEBASE "/]"


// Accept several syntaxes for these patterns
//  original syntax
//    cmd  java.lang.String foo
//  PrintCompilation syntax
//    cmd  java.lang.String::foo
//  VM syntax
//    cmd  java/lang/String[. ]foo
//

static const char* patterns[] = {
  "%*[ \t]%255" RANGEDOT " " "%255" RANGE0 "%n",
  "%*[ \t]%255" RANGEDOT "::" "%255" RANGE0 "%n",
  "%*[ \t]%255" RANGESLASH "%*[ .]" "%255" RANGE0 "%n",
};

static MethodMatcher::Mode check_mode(char name[], const char*& error_msg) {
  int match = MethodMatcher::Exact;
  while (name[0] == '*') {

@ -421,12 +404,10 @@ static bool scan_line(const char * line,
                      int* bytes_read, const char*& error_msg) {
  *bytes_read = 0;
  error_msg = NULL;
  for (uint i = 0; i < ARRAY_SIZE(patterns); i++) {
    if (2 == sscanf(line, patterns[i], class_name, method_name, bytes_read)) {
      *c_mode = check_mode(class_name, error_msg);
      *m_mode = check_mode(method_name, error_msg);
      return *c_mode != MethodMatcher::Unknown && *m_mode != MethodMatcher::Unknown;
    }
  if (2 == sscanf(line, "%*[ \t]%255" RANGESLASH "%*[ ]" "%255" RANGE0 "%n", class_name, method_name, bytes_read)) {
    *c_mode = check_mode(class_name, error_msg);
    *m_mode = check_mode(method_name, error_msg);
    return *c_mode != MethodMatcher::Unknown && *m_mode != MethodMatcher::Unknown;
  }
  return false;
}
@ -280,6 +280,16 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
    if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
        ((gc_cause != GCCause::_java_lang_system_gc) ||
         UseAdaptiveSizePolicyWithSystemGC)) {
      // Swap the survivor spaces if from_space is empty. The
      // resize_young_gen() called below is normally used after
      // a successful young GC and swapping of survivor spaces;
      // otherwise, it will fail to resize the young gen with
      // the current implementation.
      if (young_gen->from_space()->is_empty()) {
        young_gen->from_space()->clear(SpaceDecorator::Mangle);
        young_gen->swap_spaces();
      }

      // Calculate optimal free space amounts
      assert(young_gen->max_size() >
             young_gen->from_space()->capacity_in_bytes() +

@ -318,12 +328,8 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {

      heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

      // Don't resize the young generation at an major collection.  A
      // desired young generation size may have been calculated but
      // resizing the young generation complicates the code because the
      // resizing of the old generation may have moved the boundary
      // between the young generation and the old generation.  Let the
      // young generation resizing happen at the minor collections.
      heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                             size_policy->calculated_survivor_size_in_bytes());
    }
    if (PrintAdaptiveSizePolicy) {
      gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
@ -43,6 +43,7 @@
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"

@ -2115,6 +2116,16 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
    if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
        ((gc_cause != GCCause::_java_lang_system_gc) ||
         UseAdaptiveSizePolicyWithSystemGC)) {
      // Swap the survivor spaces if from_space is empty. The
      // resize_young_gen() called below is normally used after
      // a successful young GC and swapping of survivor spaces;
      // otherwise, it will fail to resize the young gen with
      // the current implementation.
      if (young_gen->from_space()->is_empty()) {
        young_gen->from_space()->clear(SpaceDecorator::Mangle);
        young_gen->swap_spaces();
      }

      // Calculate optimal free space amounts
      assert(young_gen->max_size() >
             young_gen->from_space()->capacity_in_bytes() +

@ -2154,12 +2165,8 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
      heap->resize_old_gen(
        size_policy->calculated_old_free_size_in_bytes());

      // Don't resize the young generation at an major collection.  A
      // desired young generation size may have been calculated but
      // resizing the young generation complicates the code because the
      // resizing of the old generation may have moved the boundary
      // between the young generation and the old generation.  Let the
      // young generation resizing happen at the minor collections.
      heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                             size_policy->calculated_survivor_size_in_bytes());
    }
    if (PrintAdaptiveSizePolicy) {
      gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
@ -3475,7 +3475,7 @@ BytecodeInterpreter::print() {
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO)
#if !defined(ZERO) && defined(PPC)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);

@ -376,6 +376,9 @@ class TemplateTable: AllStatic {
#ifdef TARGET_ARCH_MODEL_ppc_32
# include "templateTable_ppc_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc_64
# include "templateTable_ppc_64.hpp"
#endif

};
#endif /* !CC_INTERP */
@ -304,10 +304,13 @@ void GenCollectorPolicy::initialize_flags() {
  }

  // Now take the actual NewSize into account. We will silently increase NewSize
  // if the user specified a smaller value.
  // if the user specified a smaller or unaligned value.
  smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment));
  if (smallest_new_size != NewSize) {
    FLAG_SET_ERGO(uintx, NewSize, smallest_new_size);
    // Do not use FLAG_SET_ERGO to update NewSize here, since this will override
    // whether NewSize was set on the command line or not. This information is
    // needed later when setting the initial and minimum young generation size.
    NewSize = smallest_new_size;
  }
  _initial_gen0_size = NewSize;
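For reference, a minimal sketch of the power-of-two align-down that align_size_down performs above; the real HotSpot helper lives in its utility headers, and this version and its numbers are illustrative only. 'alignment' must be a power of two, as the generation alignment is.

#include <cstdint>
#include <cstdio>

static uintptr_t align_size_down(uintptr_t size, uintptr_t alignment) {
  return size & ~(alignment - 1);  // clear the low bits
}

int main() {
  // e.g. a size of 5000K with a 1024K alignment rounds down to 4096K
  printf("%lu\n", (unsigned long)align_size_down(5000 * 1024, 1024 * 1024));
  return 0;
}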
@ -78,6 +78,10 @@ void FileMapInfo::fail_continue(const char *msg, ...) {
  va_start(ap, msg);
  if (RequireSharedSpaces) {
    fail(msg, ap);
  } else {
    if (PrintSharedSpaces) {
      tty->print_cr("UseSharedSpaces: %s", msg);
    }
  }
  va_end(ap);
  UseSharedSpaces = false;
@ -43,6 +43,7 @@ CodeHeap::CodeHeap() {
  _next_segment      = 0;
  _freelist          = NULL;
  _freelist_segments = 0;
  _freelist_length   = 0;
}


@ -53,7 +54,7 @@ void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  while (p < q) *p++ = 0xFF;
  while (p < q) *p++ = free_sentinel;
}


@ -67,7 +68,7 @@ void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == 0xFF) i = 1;
    if (i == free_sentinel) i = 1;
  }
}

@ -139,11 +140,6 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
}


void CodeHeap::release() {
  Unimplemented();
}


bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();

@ -157,8 +153,8 @@ bool CodeHeap::expand_by(size_t size) {
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  // expand _segmap space
  size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
  if (ds > 0) {
    if (!_segmap.expand_by(ds)) return false;
  if ((ds > 0) && !_segmap.expand_by(ds)) {
    return false;
  }
  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
  // initialize additional segmap entries

@ -167,12 +163,6 @@ bool CodeHeap::expand_by(size_t size) {
  return true;
}


void CodeHeap::shrink_by(size_t size) {
  Unimplemented();
}


void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
@ -180,26 +170,23 @@ void CodeHeap::clear() {


void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
  size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
  size_t number_of_segments = size_to_segments(instance_size + header_size());
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy request from freelist
  debug_only(verify());
  NOT_PRODUCT(verify());
  HeapBlock* block = search_freelist(number_of_segments, is_critical);
  debug_only(if (VerifyCodeCacheOften) verify());
  NOT_PRODUCT(verify());

  if (block != NULL) {
    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must be marked free");
#ifdef ASSERT
    memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
#endif
    DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  if (number_of_segments < CodeCacheMinBlockLength) {
    number_of_segments = CodeCacheMinBlockLength;
  }
  number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

  if (!is_critical) {
    // Make sure the allocation fits in the unallocated heap without using

@ -215,9 +202,7 @@ void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
    HeapBlock* b = block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
#ifdef ASSERT
    memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
#endif
    DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
    return b->allocated_space();
  } else {
    return NULL;
@ -230,28 +215,56 @@ void CodeHeap::deallocate(void* p) {
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
#ifdef ASSERT
  memset((void *)b->allocated_space(),
         badCodeHeapFreeVal,
         segments_to_size(b->length()) - sizeof(HeapBlock));
#endif
  DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,
                    segments_to_size(b->length()) - sizeof(HeapBlock)));
  add_to_freelist(b);

  debug_only(if (VerifyCodeCacheOften) verify());
  NOT_PRODUCT(verify());
}

/**
 * Uses the segment map to find the start (header) of an nmethod. This works as follows:
 * The memory of the code cache is divided into 'segments'. The size of a segment is
 * determined by -XX:CodeCacheSegmentSize=XX. Allocation in the code cache can only
 * happen at segment boundaries. A pointer in the code cache can be mapped to a segment
 * by calling segment_for(addr). Each time memory is requested from the code cache,
 * the segmap is updated accordingly. See the following example, which illustrates the
 * state of the code cache and the segment map: (seg -> segment, nm -> nmethod)
 *
 *          code cache        segmap
 *         -----------      ---------
 * seg 1   | nm 1    |  ->  | 0    |
 * seg 2   | nm 1    |  ->  | 1    |
 * ...     | nm 1    |  ->  | ..   |
 * seg m   | nm 2    |  ->  | 0    |
 * seg m+1 | nm 2    |  ->  | 1    |
 * ...     | nm 2    |  ->  | 2    |
 * ...     | nm 2    |  ->  | ..   |
 * ...     | nm 2    |  ->  | 0xFE |
 * seg m+n | nm 2    |  ->  | 1    |
 * ...     | nm 2    |  ->  |      |
 *
 * A value of '0' in the segmap indicates that this segment contains the beginning of
 * an nmethod. Let's walk through a simple example: If we want to find the start of
 * an nmethod that falls into seg 2, we read the value of segmap[2]. The value
 * is an offset that points to the segment that contains the start of the nmethod.
 * Another example: If we want to get the start of nm 2, and we happen to get a pointer
 * that points to seg m+n, we first read segmap[m+n], which returns '1'. So we have to
 * do one more read of segmap[m+n-1] to finally get the segment header.
 */
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t i = segment_for(p);
  address b = (address)_segmap.low();
  if (b[i] == 0xFF) {
  size_t seg_idx = segment_for(p);
  address seg_map = (address)_segmap.low();
  if (is_segment_unused(seg_map[seg_idx])) {
    return NULL;
  }
  while (b[i] > 0) i -= (int)b[i];
  HeapBlock* h = block_at(i);
  while (seg_map[seg_idx] > 0) {
    seg_idx -= (int)seg_map[seg_idx];
  }

  HeapBlock* h = block_at(seg_idx);
  if (h->free()) {
    return NULL;
  }
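For reference, a standalone sketch of the segment-map walk documented above. The map contents below are invented for illustration; the repeated backward hops of the loop cover the case where offsets restart after 0xFE in the real map.

#include <cstdio>

enum { free_sentinel = 0xFF };

// Find the header segment for seg_idx, or -1 if the segment is unused.
static int find_block_start(const unsigned char* seg_map, int seg_idx) {
  if (seg_map[seg_idx] == free_sentinel) return -1;
  while (seg_map[seg_idx] > 0) {
    seg_idx -= seg_map[seg_idx];  // hop backward toward the '0' header entry
  }
  return seg_idx;
}

int main() {
  // Two blocks: nm 1 covers segments 0-2, nm 2 covers segments 3-6.
  const unsigned char seg_map[] = {0, 1, 2, 0, 1, 2, 3, free_sentinel};
  printf("start of seg 5: %d\n", find_block_start(seg_map, 5));  // -> 3
  printf("start of seg 2: %d\n", find_block_start(seg_map, 2));  // -> 0
  printf("start of seg 7: %d\n", find_block_start(seg_map, 7));  // -> -1 (unused)
  return 0;
}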
@ -272,7 +285,7 @@ size_t CodeHeap::alignment_offset() const {
}

// Finds the next free heapblock. If the current one is free, it is returned.
void* CodeHeap::next_free(HeapBlock *b) const {
void* CodeHeap::next_free(HeapBlock* b) const {
  // Since free blocks are merged, there is max. one free block
  // between two used ones
  if (b != NULL && b->free()) b = next_block(b);

@ -287,7 +300,7 @@ HeapBlock* CodeHeap::first_block() const {
  return NULL;
}

HeapBlock *CodeHeap::block_start(void *q) const {
HeapBlock* CodeHeap::block_start(void* q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;

@ -312,6 +325,10 @@ size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

int CodeHeap::allocated_segments() const {
  return (int)_next_segment;
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);

@ -325,7 +342,7 @@ size_t CodeHeap::heap_unallocated_capacity() const {

// Free list management

FreeBlock *CodeHeap::following_block(FreeBlock *b) {
FreeBlock* CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}
@ -343,7 +360,7 @@ void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
}

// Try to merge this block with the following block
void CodeHeap::merge_right(FreeBlock *a) {
bool CodeHeap::merge_right(FreeBlock* a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");

@ -353,13 +370,20 @@ void CodeHeap::merge_right(FreeBlock *a) {
    // Update find_start map
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
    _freelist_length--;
    return true;
  }
  return false;
}

void CodeHeap::add_to_freelist(HeapBlock *a) {

void CodeHeap::add_to_freelist(HeapBlock* a) {
  FreeBlock* b = (FreeBlock*)a;
  _freelist_length++;

  assert(b != _freelist, "cannot be removed twice");


  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();

@ -371,95 +395,96 @@ void CodeHeap::add_to_freelist(HeapBlock *a) {
    return;
  }

  // Scan for right place to put into list. List
  // is sorted by increasing addresses
  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;
  while(cur != NULL && cur < b) {
    assert(prev == NULL || prev < cur, "must be ordered");
    prev = cur;
    cur  = cur->link();
  }

  assert( (prev == NULL && b < _freelist) ||
          (prev < b && (cur == NULL || b < cur)), "list must be ordered");

  if (prev == NULL) {
  // Since the freelist is ordered (smaller addresses -> larger addresses) and the
  // element we want to insert into the freelist has a smaller address than the first
  // element, we can simply add 'b' as the first element and we are done.
  if (b < _freelist) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
  } else {
    insert_after(prev, b);
    return;
  }

  // Scan for right place to put into list. List
  // is sorted by increasing addresses
  FreeBlock* prev = _freelist;
  FreeBlock* cur  = _freelist->link();
  while(cur != NULL && cur < b) {
    assert(prev < cur, "Freelist must be ordered");
    prev = cur;
    cur  = cur->link();
  }
  assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
  insert_after(prev, b);
}
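For reference, a small standalone sketch of the address-ordered insert that add_to_freelist() performs, on a plain singly linked list. Node is a stand-in for FreeBlock, and the merge of adjacent blocks is omitted to keep the sketch small.

#include <cstdio>

struct Node {
  int addr;     // stands in for the block's address
  Node* next;
};

// Insert n so the list stays sorted by increasing address.
static Node* insert_sorted(Node* head, Node* n) {
  if (head == nullptr || n->addr < head->addr) {
    n->next = head;           // new first element, done
    return n;
  }
  Node* prev = head;
  Node* cur = head->next;
  while (cur != nullptr && cur->addr < n->addr) {
    prev = cur;
    cur = cur->next;
  }
  n->next = cur;              // splice in between prev and cur
  prev->next = n;
  return head;
}

int main() {
  Node a{10, nullptr}, b{30, nullptr}, c{20, nullptr};
  Node* head = insert_sorted(nullptr, &a);
  head = insert_sorted(head, &b);
  head = insert_sorted(head, &c);
  for (Node* p = head; p != nullptr; p = p->next) printf("%d ", p->addr);  // 10 20 30
  printf("\n");
  return 0;
}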
// Search freelist for an entry on the list with the best fit
// Return NULL if no one was found
/**
 * Search freelist for an entry on the list with the best fit.
 * @return NULL, if no one was found
 */
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
  FreeBlock *best_block = NULL;
  FreeBlock *best_prev  = NULL;
  size_t best_length = 0;
  FreeBlock* found_block  = NULL;
  FreeBlock* found_prev   = NULL;
  size_t     found_length = 0;

  // Search for smallest block which is bigger than length
  FreeBlock *prev = NULL;
  FreeBlock *cur = _freelist;
  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;
  const size_t critical_boundary = (size_t)high_boundary() - CodeCacheMinimumFreeSpace;

  // Search for first block that fits
  while(cur != NULL) {
    size_t l = cur->length();
    if (l >= length && (best_block == NULL || best_length > l)) {

    if (cur->length() >= length) {
      // Non critical allocations are not allowed to use the last part of the code heap.
      if (!is_critical) {
        // Make sure the end of the allocation doesn't cross into the last part of the code heap
        if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
          // the freelist is sorted by address - if one fails, all consecutive will also fail.
          break;
        }
      // Make sure the end of the allocation doesn't cross into the last part of the code heap.
      if (!is_critical && (((size_t)cur + length) > critical_boundary)) {
        // The freelist is sorted by address - if one fails, all consecutive will also fail.
        break;
      }
      // Remember block, its previous element, and its length
      found_block  = cur;
      found_prev   = prev;
      found_length = found_block->length();

      // Remember best block, its previous element, and its length
      best_block  = cur;
      best_prev   = prev;
      best_length = best_block->length();
      break;
    }

    // Next element in list
    prev = cur;
    cur  = cur->link();
  }

  if (best_block == NULL) {
  if (found_block == NULL) {
    // None found
    return NULL;
  }

  assert((best_prev == NULL && _freelist == best_block ) ||
         (best_prev != NULL && best_prev->link() == best_block), "sanity check");

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (best_length < length + CodeCacheMinBlockLength) {
    length = best_length;
    if (best_prev == NULL) {
      assert(_freelist == best_block, "sanity check");
  if (found_length - length < CodeCacheMinBlockLength) {
    _freelist_length--;
    length = found_length;
    if (found_prev == NULL) {
      assert(_freelist == found_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      assert((found_prev->link() == found_block), "sanity check");
      // Unmap element
      best_prev->set_link(best_block->link());
      found_prev->set_link(found_block->link());
    }
  } else {
    // Truncate block and return a pointer to the following block
    best_block->set_length(best_length - length);
    best_block = following_block(best_block);
    // Set used bit and length on new block
    size_t beg = segment_for(best_block);
    found_block->set_length(found_length - length);
    found_block = following_block(found_block);

    size_t beg = segment_for(found_block);
    mark_segmap_as_used(beg, beg + length);
    best_block->set_length(length);
    found_block->set_length(length);
  }

  best_block->set_used();
  found_block->set_used();
  _freelist_segments -= length;
  return best_block;
  return found_block;
}
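For reference, a toy sketch of the first-fit-with-truncation policy introduced above: take the first block that is large enough, and if the leftover would be smaller than the minimum block size, consume the whole block instead of splitting. Sizes are abstract segment counts and min_block stands in for CodeCacheMinBlockLength.

#include <cstdio>

struct Block { size_t length; };

// Returns the number of segments actually consumed from 'b', or 0 if no fit.
static size_t take_first_fit(Block* b, size_t request, size_t min_block) {
  if (b->length < request) return 0;        // does not fit, keep scanning
  if (b->length - request < min_block) {
    size_t taken = b->length;               // too small to split: take it all
    b->length = 0;                          // block leaves the freelist
    return taken;
  }
  b->length -= request;                     // split: shrink the free block
  return request;
}

int main() {
  Block a{12};
  printf("consumed: %zu, left: %zu\n", take_first_fit(&a, 8, 2), a.length);  // 8, 4
  Block c{9};
  printf("consumed: %zu, left: %zu\n", take_first_fit(&c, 8, 2), c.length);  // 9, 0
  return 0;
}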
//----------------------------------------------------------------------------
@ -471,33 +496,34 @@ void CodeHeap::print() {
  tty->print_cr("The Heap");
}

#endif

void CodeHeap::verify() {
  // Count the number of blocks on the freelist, and the amount of space
  // represented.
  int count = 0;
  size_t len = 0;
  for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
    len += b->length();
    count++;
  }
  if (VerifyCodeCache) {
    size_t len = 0;
    int count = 0;
    for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      len += b->length();
      count++;
      // Check if we have merged all free blocks
      assert(merge_right(b) == false, "Missed merging opportunity");
    }
    // Verify that freelist contains the right amount of free space
    assert(len == _freelist_segments, "wrong freelist");

  // Verify that freelist contains the right amount of free space
  //  guarantee(len == _freelist_segments, "wrong freelist");
    for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
      if (h->free()) count--;
    }
    // Verify that the freelist contains the same number of blocks
    // as free blocks found on the full list.
    assert(count == 0, "missing free blocks");

  // Verify that the number of free blocks is not out of hand.
  static int free_block_threshold = 10000;
  if (count > free_block_threshold) {
    warning("CodeHeap: # of free blocks > %d", free_block_threshold);
    // Double the warning limit
    free_block_threshold *= 2;
    // Verify that the number of free blocks is not out of hand.
    static int free_block_threshold = 10000;
    if (count > free_block_threshold) {
      warning("CodeHeap: # of free blocks > %d", free_block_threshold);
      // Double the warning limit
      free_block_threshold *= 2;
    }
  }

  // Verify that the freelist contains the same number of free blocks that is
  // found on the full list.
  for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
    if (h->free()) count--;
  }
  //  guarantee(count == 0, "missing free blocks");
}

#endif
@ -92,24 +92,28 @@ class CodeHeap : public CHeapObj<mtCode> {

  FreeBlock*   _freelist;
  size_t       _freelist_segments;              // No. of segments in freelist
  int          _freelist_length;

  enum { free_sentinel = 0xFF };

  // Helper functions
  size_t size_to_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; }
  size_t segments_to_size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; }

  size_t segment_for(void* p) const      { return ((char*)p - _memory.low()) >> _log2_segment_size; }
  bool   is_segment_unused(int val) const { return val == free_sentinel; }
  HeapBlock* block_at(size_t i) const    { return (HeapBlock*)(_memory.low() + (i << _log2_segment_size)); }

  void mark_segmap_as_free(size_t beg, size_t end);
  void mark_segmap_as_used(size_t beg, size_t end);

  // Freelist management helpers
  FreeBlock* following_block(FreeBlock *b);
  FreeBlock* following_block(FreeBlock* b);
  void insert_after(FreeBlock* a, FreeBlock* b);
  void merge_right (FreeBlock* a);
  bool merge_right (FreeBlock* a);

  // Toplevel freelist management
  void add_to_freelist(HeapBlock *b);
  void add_to_freelist(HeapBlock* b);
  FreeBlock* search_freelist(size_t length, bool is_critical);

  // Iteration helpers

@ -120,20 +124,18 @@ class CodeHeap : public CHeapObj<mtCode> {

  // to perform additional actions on creation of executable code
  void on_code_mapping(char* base, size_t size);
  void clear();                                 // clears all heap contents

 public:
  CodeHeap();

  // Heap extents
  bool reserve(size_t reserved_size, size_t committed_size, size_t segment_size);
  void release();                               // releases all allocated memory
  bool expand_by(size_t size);                  // expands committed memory by size
  void shrink_by(size_t size);                  // shrinks committed memory by size
  void clear();                                 // clears all heap contents

  // Memory allocation
  void* allocate  (size_t size, bool is_critical);  // allocates a block of size or returns NULL
  void  deallocate(void* p);                    // deallocates a block
  void  deallocate(void* p);                        // deallocates a block

  // Attributes
  char* low_boundary() const                    { return _memory.low_boundary (); }

@ -141,12 +143,13 @@ class CodeHeap : public CHeapObj<mtCode> {
  char* high_boundary() const                   { return _memory.high_boundary(); }

  bool  contains(const void* p) const           { return low_boundary() <= p && p < high(); }
  void* find_start(void* p)     const;          // returns the block containing p or NULL
  size_t alignment_unit()       const;          // alignment of any block
  size_t alignment_offset()     const;          // offset of first byte of any block, within the enclosing alignment unit
  static size_t header_size();                  // returns the header size for each heap block
  void* find_start(void* p)     const;          // returns the block containing p or NULL
  size_t alignment_unit()       const;          // alignment of any block
  size_t alignment_offset()     const;          // offset of first byte of any block, within the enclosing alignment unit
  static size_t header_size();                  // returns the header size for each heap block

  // Iteration
  size_t allocated_in_freelist() const          { return _freelist_segments * CodeCacheSegmentSize; }
  int    freelist_length()       const          { return _freelist_length; } // number of elements in the freelist

  // returns the first block or NULL
  void* first() const                           { return next_free(first_block()); }
@ -156,6 +159,7 @@ class CodeHeap : public CHeapObj<mtCode> {
  // Statistics
  size_t capacity() const;
  size_t max_capacity() const;
  int    allocated_segments() const;
  size_t allocated_capacity() const;
  size_t unallocated_capacity() const           { return max_capacity() - allocated_capacity(); }

@ -164,7 +168,7 @@ private:

 public:
  // Debugging
  void verify();
  void verify() PRODUCT_RETURN;
  void print()  PRODUCT_RETURN;
};
@ -1295,6 +1295,7 @@ void ConstantPool::copy_entry_to(constantPoolHandle from_cp, int from_i,
  } break;

  case JVM_CONSTANT_UnresolvedClass:
  case JVM_CONSTANT_UnresolvedClassInError:
  {
    // Can be resolved after checking tag, so check the slot first.
    CPSlot entry = from_cp->slot_at(from_i);

@ -42,6 +42,7 @@ class Metadata : public MetaspaceObj {
  // Rehashing support for tables containing pointers to this
  unsigned int new_hash(juint seed)   { ShouldNotReachHere(); return 0; }

  virtual bool is_metadata()   const volatile { return true; }
  virtual bool is_klass()      const volatile { return false; }
  virtual bool is_method()     const volatile { return false; }
  virtual bool is_methodData() const volatile { return false; }
@ -108,12 +108,16 @@ class Method : public Metadata {
#endif
  u2 _method_size;          // size of this object
  u1 _intrinsic_id;         // vmSymbols::intrinsic_id (0 == _none)
  u1 _jfr_towrite       : 1, // Flags
     _caller_sensitive  : 1,
     _force_inline      : 1,
     _hidden            : 1,
     _dont_inline       : 1,
                        : 3;

  // Flags
  enum Flags {
    _jfr_towrite      = 1 << 0,
    _caller_sensitive = 1 << 1,
    _force_inline     = 1 << 2,
    _dont_inline      = 1 << 3,
    _hidden           = 1 << 4
  };
  u1 _flags;

#ifndef PRODUCT
  int _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)

@ -759,16 +763,41 @@ class Method : public Metadata {
  void init_intrinsic_id();     // updates from _none if a match
  static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);

  bool jfr_towrite()                { return _jfr_towrite; }
  void set_jfr_towrite(bool x)      { _jfr_towrite = x; }
  bool caller_sensitive()           { return _caller_sensitive; }
  void set_caller_sensitive(bool x) { _caller_sensitive = x; }
  bool force_inline()               { return _force_inline; }
  void set_force_inline(bool x)     { _force_inline = x; }
  bool dont_inline()                { return _dont_inline; }
  void set_dont_inline(bool x)      { _dont_inline = x; }
  bool is_hidden()                  { return _hidden; }
  void set_hidden(bool x)           { _hidden = x; }
  bool jfr_towrite() {
    return (_flags & _jfr_towrite) != 0;
  }
  void set_jfr_towrite(bool x) {
    _flags = x ? (_flags | _jfr_towrite) : (_flags & ~_jfr_towrite);
  }

  bool caller_sensitive() {
    return (_flags & _caller_sensitive) != 0;
  }
  void set_caller_sensitive(bool x) {
    _flags = x ? (_flags | _caller_sensitive) : (_flags & ~_caller_sensitive);
  }

  bool force_inline() {
    return (_flags & _force_inline) != 0;
  }
  void set_force_inline(bool x) {
    _flags = x ? (_flags | _force_inline) : (_flags & ~_force_inline);
  }

  bool dont_inline() {
    return (_flags & _dont_inline) != 0;
  }
  void set_dont_inline(bool x) {
    _flags = x ? (_flags | _dont_inline) : (_flags & ~_dont_inline);
  }

  bool is_hidden() {
    return (_flags & _hidden) != 0;
  }
  void set_hidden(bool x) {
    _flags = x ? (_flags | _hidden) : (_flags & ~_hidden);
  }

  ConstMethod::MethodType method_type() const {
    return _constMethod->method_type();
  }
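For reference, a standalone sketch of the one-byte flag-word pattern that replaces the bitfield above; only one flag's accessor pair is shown, and the surrounding struct is invented for the example.

#include <cstdint>
#include <cstdio>

typedef uint8_t u1;

enum Flags {
  _jfr_towrite      = 1 << 0,
  _caller_sensitive = 1 << 1,
  _force_inline     = 1 << 2,
  _dont_inline      = 1 << 3,
  _hidden           = 1 << 4
};

struct MethodFlags {
  u1 _flags = 0;
  bool force_inline() const { return (_flags & _force_inline) != 0; }
  void set_force_inline(bool x) {
    // set the bit when x is true, clear it otherwise
    _flags = x ? (_flags | _force_inline) : (_flags & (u1)~_force_inline);
  }
};

int main() {
  MethodFlags m;
  m.set_force_inline(true);
  printf("force_inline: %d\n", m.force_inline());   // 1
  m.set_force_inline(false);
  printf("force_inline: %d\n", m.force_inline());   // 0
  return 0;
}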
@ -1071,7 +1071,8 @@ void MethodData::post_initialize(BytecodeStream* stream) {
}

// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(methodHandle method, int size, TRAPS) {
MethodData::MethodData(methodHandle method, int size, TRAPS)
  : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {
  No_Safepoint_Verifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm;
  // Set the method back-pointer.

@ -1235,7 +1236,7 @@ DataLayout* MethodData::next_extra(DataLayout* dp) {
  return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
}

ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp) {
ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
  DataLayout* end = extra_data_limit();

  for (;; dp = next_extra(dp)) {

@ -1257,10 +1258,11 @@ ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout
      if (m != NULL) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        // data->method() may be null in case of a concurrent
        // allocation. Assume it's for the same method and use that
        // allocation. Maybe it's for the same method. Try to use that
        // entry in that case.
        if (dp->bci() == bci) {
          if (data->method() == NULL) {
            assert(concurrent, "impossible because no concurrent allocation");
            return NULL;
          } else if (data->method() == m) {
            return data;

@ -1289,40 +1291,40 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi
  // Allocation in the extra data space has to be atomic because not
  // all entries have the same size and non atomic concurrent
  // allocation would result in a corrupted extra data space.
  while (true) {
    ProfileData* result = bci_to_extra_data_helper(bci, m, dp);
    if (result != NULL) {
  ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
  if (result != NULL) {
    return result;
  }

  if (create_if_missing && dp < end) {
    MutexLocker ml(&_extra_data_lock);
    // Check again now that we have the lock. Another thread may
    // have added extra data entries.
    ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
    if (result != NULL || dp >= end) {
      return result;
    }

    if (create_if_missing && dp < end) {
      assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
      assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
      u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
      // SpeculativeTrapData is 2 slots. Make sure we have room.
      if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
        return NULL;
      }
      DataLayout temp;
      temp.initialize(tag, bci, 0);
      // May have been set concurrently
      if (dp->header() != temp.header() && !dp->atomic_set_header(temp.header())) {
        // Allocation failure because of concurrent allocation. Try
        // again.
        continue;
      }
      assert(dp->tag() == tag, "sane");
      assert(dp->bci() == bci, "no concurrent allocation");
      if (tag == DataLayout::bit_data_tag) {
        return new BitData(dp);
      } else {
        // If being allocated concurrently, one trap may be lost
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        data->set_method(m);
        return data;
      }
    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
      return NULL;
    }
    DataLayout temp;
    temp.initialize(tag, bci, 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
    return NULL;
  }
  return NULL;
}
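For reference, a standalone sketch of the compare-and-swap slot claim that the lock-based code above replaces: a zero header means "free", and a successful compare-exchange from zero claims the slot. std::atomic stands in for HotSpot's Atomic::cmpxchg_ptr.

#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<intptr_t> header{0};  // 0 == unallocated slot

// Returns true if this caller claimed the slot, false if someone else did.
static bool atomic_set_header(intptr_t value) {
  intptr_t expected = 0;
  return header.compare_exchange_strong(expected, value);
}

int main() {
  printf("first claim: %d\n", atomic_set_header(0x42));   // 1: we claimed it
  printf("second claim: %d\n", atomic_set_header(0x43));  // 0: already taken, retry
  return 0;
}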
@ -190,12 +190,6 @@ public:
void set_header(intptr_t value) {
  _header._bits = value;
}
bool atomic_set_header(intptr_t value) {
  if (Atomic::cmpxchg_ptr(value, (volatile intptr_t*)&_header._bits, 0) == 0) {
    return true;
  }
  return false;
}
intptr_t header() {
  return _header._bits;
}

@ -2047,10 +2041,12 @@ private:
// Cached hint for bci_to_dp and bci_to_data
int _hint_di;

Mutex _extra_data_lock;

MethodData(methodHandle method, int size, TRAPS);
public:
static MethodData* allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS);
MethodData() {}; // For ciMethodData
MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData

bool is_methodData() const volatile { return true; }

@ -2155,7 +2151,7 @@ private:
// What is the index of the first data entry?
int first_di() const { return 0; }

ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp);
ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent);
// Find or create an extra ProfileData:
ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
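The header CAS helper is thus retired in favor of a per-MethodData mutex, initialized in the constructor, plus a 'concurrent' flag on the lookup helper. A rough portable sketch of the new locking discipline, assuming std::mutex in place of HotSpot's Mutex/MutexLocker classes; the exact contract of 'concurrent' lives in methodData.cpp and is only suggested here:

#include <mutex>

class MethodDataSketch {
  std::mutex _extra_data_lock;   // counterpart of the new Mutex member
public:
  void* bci_to_extra_data(int bci, bool create_if_missing) {
    // Serialize all traversal/allocation of the extra-data section.
    std::lock_guard<std::mutex> guard(_extra_data_lock);
    // ... walk the extra-data section; allocate a slot if missing ...
    (void)bci; (void)create_if_missing;
    return nullptr;  // placeholder: nothing allocated in this sketch
  }
};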
@ -452,7 +452,7 @@
  product(bool, EliminateAutoBox, true, \
          "Control optimizations for autobox elimination") \
  \
  experimental(bool, UseImplicitStableValues, false, \
  diagnostic(bool, UseImplicitStableValues, true, \
          "Mark well-known stable fields as such (e.g. String.value)") \
  \
  product(intx, AutoBoxCacheMax, 128, \

@ -650,7 +650,7 @@
  experimental(bool, ReplaceInParentMaps, false, \
          "Propagate type improvements in callers of inlinee if possible") \
  \
  experimental(bool, UseTypeSpeculation, false, \
  product(bool, UseTypeSpeculation, true, \
          "Speculatively propagate types from profiles") \
  \
  diagnostic(bool, UseInlineDepthForSpeculativeTypes, true, \
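These two hunks only re-tier the flags (UseImplicitStableValues from experimental to diagnostic, UseTypeSpeculation from experimental to product) and enable both by default. Conceptually, each entry in this table expands to a global variable plus registration metadata; a deliberately simplified stand-in for the real HotSpot macro machinery, for readers unfamiliar with the *_globals.hpp pattern:

#include <cstdio>

// Simplified: the real macros also record the doc string, flag kind
// (product/diagnostic/experimental) and command-line registration.
#define product(type, name, value, doc) type name = value;

product(bool, UseTypeSpeculation, true,
        "Speculatively propagate types from profiles")

int main() {
  // -XX:+UseTypeSpeculation / -XX:-UseTypeSpeculation toggle the global.
  std::printf("UseTypeSpeculation=%d\n", (int)UseTypeSpeculation);
  return 0;
}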
@ -3007,22 +3007,28 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
  }

  Node* cast_obj = NULL;
  const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
  // We may not have profiling here or it may not help us. If we have
  // a speculative type use it to perform an exact cast.
  ciKlass* spec_obj_type = obj_type->speculative_type();
  if (spec_obj_type != NULL ||
      (data != NULL &&
       // Counter has never been decremented (due to cast failure).
       // ...This is a reasonable thing to expect. It is true of
       // all casts inserted by javac to implement generic types.
       data->as_CounterData()->count() >= 0)) {
    cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
    if (cast_obj != NULL) {
      if (failure_control != NULL) // failure is now impossible
        (*failure_control) = top();
      // adjust the type of the phi to the exact klass:
      phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
  if (tk->klass_is_exact()) {
    // The following optimization tries to statically cast the speculative type of the object
    // (for example obtained during profiling) to the type of the superklass and then do a
    // dynamic check that the type of the object is what we expect. To work correctly
    // for checkcast and aastore the type of superklass should be exact.
    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
    // We may not have profiling here or it may not help us. If we have
    // a speculative type use it to perform an exact cast.
    ciKlass* spec_obj_type = obj_type->speculative_type();
    if (spec_obj_type != NULL ||
        (data != NULL &&
         // Counter has never been decremented (due to cast failure).
         // ...This is a reasonable thing to expect. It is true of
         // all casts inserted by javac to implement generic types.
         data->as_CounterData()->count() >= 0)) {
      cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
      if (cast_obj != NULL) {
        if (failure_control != NULL) // failure is now impossible
          (*failure_control) = top();
        // adjust the type of the phi to the exact klass:
        phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
      }
    }
  }
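The net effect of this hunk: the profile-guided cast is now attempted only when the superklass type is exact, since (per the added comment) statically casting to the speculative type and then dynamically checking it is only sound for checkcast and aastore when the target type is exact. A schematic of the resulting control flow; function and parameter names are illustrative, not HotSpot's:

bool try_speculative_cast(bool superklass_is_exact,
                          bool have_speculative_type,
                          bool cast_never_failed) {
  if (!superklass_is_exact) {
    return false;  // new precondition introduced by this hunk
  }
  if (have_speculative_type || cast_never_failed) {
    // Emit the exact cast plus dynamic check; on success the phi is
    // narrowed to the exact klass and failure_control becomes unreachable.
    return true;
  }
  return false;
}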
@ -3180,7 +3180,8 @@ bool LibraryCallKit::inline_native_currentThread() {
// private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
bool LibraryCallKit::inline_native_isInterrupted() {
  // Add a fast path to t.isInterrupted(clear_int):
  //   (t == Thread.current() && (!TLS._osthread._interrupted || !clear_int))
  //   (t == Thread.current() &&
  //    (!TLS._osthread._interrupted || WINDOWS_ONLY(false) NOT_WINDOWS(!clear_int)))
  //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
  // So, in the common case that the interrupt bit is false,
  // we avoid making a call into the VM. Even if the interrupt bit

@ -3237,6 +3238,7 @@ bool LibraryCallKit::inline_native_isInterrupted() {
  // drop through to next case
  set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)));

#ifndef TARGET_OS_FAMILY_windows
  // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
  Node* clr_arg = argument(1);
  Node* cmp_arg = _gvn.transform(new (C) CmpINode(clr_arg, intcon(0)));

@ -3250,6 +3252,10 @@ bool LibraryCallKit::inline_native_isInterrupted() {

  // drop through to next case
  set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)));
#else
  // To return true on Windows you must read the _interrupted field
  // and check the event state, i.e. take the slow path.
#endif // TARGET_OS_FAMILY_windows

  // (d) Otherwise, go to the slow path.
  slow_region->add_req(control());
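The intrinsic now emits the second fast path only on non-Windows platforms; on Windows, returning true requires consulting the interrupt event, which means taking the slow path. The decision tree the generated IR encodes, written out as plain C++ (a sketch of the logic, not the actual node construction):

bool is_interrupted_fast_path(bool is_current_thread,
                              bool interrupted_bit,
                              bool clear_int,
                              bool on_windows,
                              bool (*slow_path)(bool)) {
  if (!is_current_thread) return slow_path(clear_int);  // (a) not Thread.current()
  if (!interrupted_bit)   return false;                 // (b) common case, no VM call
  if (!on_windows && !clear_int) return true;           // (c) 2nd fast path, non-Windows only
  return slow_path(clear_int);                          // (d) slow path
}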
@ -1922,6 +1922,105 @@ OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
  return OptoReg::as_OptoReg(regs.first());
}

// This function identifies sub-graphs in which a 'load' node is
// input to two different nodes, and such that it can be matched
// with BMI instructions like blsi, blsr, etc.
// Example: b = -a[i] & a[i] can be matched to blsi r32, m32.
// The graph is (AndL (SubL Con0 LoadL*) LoadL*), where LoadL*
// refers to the same node.
#ifdef X86
// Match the generic fused operations pattern (op1 (op2 Con{ConType} mop) mop)
// This is a temporary solution until we make DAGs expressible in ADL.
template<typename ConType>
class FusedPatternMatcher {
  Node* _op1_node;
  Node* _mop_node;
  int _con_op;

  static int match_next(Node* n, int next_op, int next_op_idx) {
    if (n->in(1) == NULL || n->in(2) == NULL) {
      return -1;
    }

    if (next_op_idx == -1) { // n is commutative, try rotations
      if (n->in(1)->Opcode() == next_op) {
        return 1;
      } else if (n->in(2)->Opcode() == next_op) {
        return 2;
      }
    } else {
      assert(next_op_idx > 0 && next_op_idx <= 2, "Bad argument index");
      if (n->in(next_op_idx)->Opcode() == next_op) {
        return next_op_idx;
      }
    }
    return -1;
  }
public:
  FusedPatternMatcher(Node* op1_node, Node *mop_node, int con_op) :
    _op1_node(op1_node), _mop_node(mop_node), _con_op(con_op) { }

  bool match(int op1, int op1_op2_idx,  // op1 and the index of the op1->op2 edge, -1 if op1 is commutative
             int op2, int op2_con_idx,  // op2 and the index of the op2->con edge, -1 if op2 is commutative
             typename ConType::NativeType con_value) {
    if (_op1_node->Opcode() != op1) {
      return false;
    }
    if (_mop_node->outcnt() > 2) {
      return false;
    }
    op1_op2_idx = match_next(_op1_node, op2, op1_op2_idx);
    if (op1_op2_idx == -1) {
      return false;
    }
    // Memory operation must be the other edge
    int op1_mop_idx = (op1_op2_idx & 1) + 1;

    // Check that the mop node is really what we want
    if (_op1_node->in(op1_mop_idx) == _mop_node) {
      Node *op2_node = _op1_node->in(op1_op2_idx);
      if (op2_node->outcnt() > 1) {
        return false;
      }
      assert(op2_node->Opcode() == op2, "Should be");
      op2_con_idx = match_next(op2_node, _con_op, op2_con_idx);
      if (op2_con_idx == -1) {
        return false;
      }
      // Memory operation must be the other edge
      int op2_mop_idx = (op2_con_idx & 1) + 1;
      // Check that the memory operation is the same node
      if (op2_node->in(op2_mop_idx) == _mop_node) {
        // Now check the constant
        const Type* con_type = op2_node->in(op2_con_idx)->bottom_type();
        if (con_type != Type::TOP && ConType::as_self(con_type)->get_con() == con_value) {
          return true;
        }
      }
    }
    return false;
  }
};


bool Matcher::is_bmi_pattern(Node *n, Node *m) {
  if (n != NULL && m != NULL) {
    if (m->Opcode() == Op_LoadI) {
      FusedPatternMatcher<TypeInt> bmii(n, m, Op_ConI);
      return bmii.match(Op_AndI, -1, Op_SubI, 1, 0) ||
             bmii.match(Op_AndI, -1, Op_AddI, -1, -1) ||
             bmii.match(Op_XorI, -1, Op_AddI, -1, -1);
    } else if (m->Opcode() == Op_LoadL) {
      FusedPatternMatcher<TypeLong> bmil(n, m, Op_ConL);
      return bmil.match(Op_AndL, -1, Op_SubL, 1, 0) ||
             bmil.match(Op_AndL, -1, Op_AddL, -1, -1) ||
             bmil.match(Op_XorL, -1, Op_AddL, -1, -1);
    }
  }
  return false;
}
#endif // X86

// A method-klass-holder may be passed in the inline_cache_reg
// and then expanded into the inline_cache_reg and a method_oop register
// defined in ad_<arch>.cpp
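What is_bmi_pattern recognizes are the classic BMI1 identities: blsi computes x & -x (isolate lowest set bit), blsr computes x & (x - 1) (reset lowest set bit), and blsmsk computes x ^ (x - 1). The match() calls encode exactly these shapes, e.g. bmii.match(Op_AndI, -1, Op_SubI, 1, 0) is (AndI (SubI 0 load) load) for blsi, and the AddI/-1 variants are x - 1 written as x + (-1). A self-contained check of the identities themselves:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0b101100;
  assert((x & (0 - x)) == 0b000100);  // blsi: isolate lowest set bit
  assert((x & (x - 1)) == 0b101000);  // blsr: reset lowest set bit
  assert((x ^ (x - 1)) == 0b000111);  // blsmsk: mask up to lowest set bit
  return 0;
}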
@ -2077,6 +2176,14 @@ void Matcher::find_shared( Node *n ) {
  set_shared(m->in(AddPNode::Base)->in(1));
}

// If 'n' and 'm' are part of a graph for a BMI instruction, clone this node.
#ifdef X86
if (UseBMI1Instructions && is_bmi_pattern(n, m)) {
  mstack.push(m, Visit);
  continue;
}
#endif

// Clone addressing expressions as they are "free" in memory access instructions
if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
  // Some inputs for address expression are not put on stack
@ -79,6 +79,9 @@ class Matcher : public PhaseTransform {

  // Find shared Nodes, or Nodes that otherwise are Matcher roots
  void find_shared( Node *n );
#ifdef X86
  bool is_bmi_pattern(Node *n, Node *m);
#endif

  // Debug and profile information for nodes in old space:
  GrowableArray<Node_Notes*>* _old_node_note_array;
@ -1593,35 +1593,33 @@ LoadNode::load_array_final_field(const TypeKlassPtr *tkls,

// Try to constant-fold a stable array element.
static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
  assert(ary->const_oop(), "array should be constant");
  assert(ary->is_stable(), "array should be stable");

  if (ary->const_oop() != NULL) {
    // Decode the results of GraphKit::array_element_address.
    ciArray* aobj = ary->const_oop()->as_array();
    ciConstant con = aobj->element_value_by_offset(off);
  // Decode the results of GraphKit::array_element_address.
  ciArray* aobj = ary->const_oop()->as_array();
  ciConstant con = aobj->element_value_by_offset(off);

    if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
      const Type* con_type = Type::make_from_constant(con);
      if (con_type != NULL) {
        if (con_type->isa_aryptr()) {
          // Join with the array element type, in case it is also stable.
          int dim = ary->stable_dimension();
          con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
        }
        if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
          con_type = con_type->make_narrowoop();
        }
#ifndef PRODUCT
        if (TraceIterativeGVN) {
          tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
          con_type->dump(); tty->cr();
        }
#endif //PRODUCT
        return con_type;
  if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
    const Type* con_type = Type::make_from_constant(con);
    if (con_type != NULL) {
      if (con_type->isa_aryptr()) {
        // Join with the array element type, in case it is also stable.
        int dim = ary->stable_dimension();
        con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
      }
      if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
        con_type = con_type->make_narrowoop();
      }
#ifndef PRODUCT
      if (TraceIterativeGVN) {
        tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
        con_type->dump(); tty->cr();
      }
#endif //PRODUCT
      return con_type;
    }
  }

  return NULL;
}

@ -1641,7 +1639,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
  // Try to guess loaded type from pointer type
  if (tp->isa_aryptr()) {
    const TypeAryPtr* ary = tp->is_aryptr();
    const Type *t = ary->elem();
    const Type* t = ary->elem();

    // Determine whether the reference is beyond the header or not, by comparing
    // the offset against the offset of the start of the array's data.

@ -1653,10 +1651,9 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
    const bool off_beyond_header = ((uint)off >= (uint)min_base_off);

    // Try to constant-fold a stable array element.
    if (FoldStableValues && ary->is_stable()) {
      // Make sure the reference is not into the header
      if (off_beyond_header && off != Type::OffsetBot) {
        assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant");
    if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
      // Make sure the reference is not into the header and the offset is constant
      if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
        if (con_type != NULL) {
          return con_type;
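After this change, fold_stable_ary_elem's contract is that the caller has already checked for a constant, stable array, and the load is folded only when the element no longer holds its default (null/zero) value: a default element may still be written later, so folding it would be wrong. A minimal model of that rule, with hypothetical types standing in for HotSpot's ci* classes:

#include <cstddef>

struct FoldResult { bool folded; int value; };

FoldResult fold_stable_elem(const int* const_array,  // nullptr if not a compile-time constant
                            size_t index, bool is_stable) {
  if (const_array == nullptr || !is_stable) return {false, 0};
  int v = const_array[index];
  if (v == 0) return {false, 0};  // default value: may still be overwritten later
  return {true, v};               // safe to constant-fold the load
}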