Author: J. Duke
Date:   2017-08-25 12:39:05 +02:00
Commit: 727a3fa93a
2936 changed files with 172965 additions and 77113 deletions

View File

@ -444,3 +444,4 @@ a133a7d1007b1456bc62824382fd8ac93b45d329 jdk-10+17
b803e6cff41e72a1e6d8782e1ef7c25a6e3e5ee3 jdk-10+19
d2982a786f53814367698e63efe6349c9128e1db jdk-9+180
b656dea9398ef601f7fc08d1a5157a560e0ccbe0 jdk-9+181
682e2a6df836f4731f92eb2ddcd467075047f6ea jdk-10+20

View File

@ -187,6 +187,9 @@ TOOLCHAIN_MISC_CHECKS
# Setup the JTReg Regression Test Harness.
TOOLCHAIN_SETUP_JTREG
# Setup Jib dependency tool
TOOLCHAIN_SETUP_JIB
FLAGS_SETUP_INIT_FLAGS
# Now we can test some aspects on the target using configure macros.
@ -220,7 +223,7 @@ BASIC_COMPILE_FIXPATH
LIB_DETERMINE_DEPENDENCIES
LIB_SETUP_LIBRARIES
# Hotspot setup depends on lib checks (AOT needs libelf).
# Hotspot setup depends on lib checks.
HOTSPOT_SETUP_JVM_FEATURES

View File

@ -760,6 +760,19 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
[
FLAGS_SETUP_ABI_PROFILE
# Optional POSIX functionality needed by the JVM
#
# Check if clock_gettime is available and in which library. This indicates
# availability of CLOCK_MONOTONIC for hotspot. But we don't need to link, so
# don't let it update LIBS.
save_LIBS="$LIBS"
AC_SEARCH_LIBS(clock_gettime, rt, [HAS_CLOCK_GETTIME=true], [])
if test "x$LIBS" = "x-lrt "; then
CLOCK_GETTIME_IN_LIBRT=true
fi
LIBS="$save_LIBS"
FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER([TARGET])
FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER([BUILD], [OPENJDK_BUILD_])
@ -897,7 +910,7 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
$2CFLAGS_JDK="[$]$2CFLAGS_JDK -xc99=%none -xCC -errshort=tags -Xa -v -mt -W0,-noglobal"
$2CXXFLAGS_JDK="[$]$2CXXFLAGS_JDK -errtags=yes +w -mt -features=no%except -DCC_NOEX -norunpath -xnolib"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -D_REENTRANT -D__STDC_FORMAT_MACROS"
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -D_REENTRANT"
$2CFLAGS_JDK="[$]$2CFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
$2CXXFLAGS_JDK="[$]$2CXXFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
@ -962,6 +975,11 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
fi
fi
# Always enable optional macros for VM.
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -D__STDC_FORMAT_MACROS"
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -D__STDC_LIMIT_MACROS"
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -D__STDC_CONSTANT_MACROS"
# Setup target OS define. Use OS target name but in upper case.
OPENJDK_$1_OS_UPPERCASE=`$ECHO $OPENJDK_$1_OS | $TR 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'`
$2COMMON_CCXXFLAGS_JDK="[$]$2COMMON_CCXXFLAGS_JDK -D$OPENJDK_$1_OS_UPPERCASE"
@ -981,6 +999,16 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
$2COMMON_CCXXFLAGS_JDK="[$]$2COMMON_CCXXFLAGS_JDK -DDEBUG"
fi
# Optional POSIX functionality needed by the VM
if test "x$HAS_CLOCK_GETTIME" = "xtrue"; then
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -DSUPPORTS_CLOCK_MONOTONIC"
if test "x$CLOCK_GETTIME_IN_LIBRT" = "xtrue"; then
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -DNEEDS_LIBRT"
fi
fi
# Set some additional per-OS defines.
if test "x$OPENJDK_$1_OS" = xlinux; then
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -DLINUX"
@ -989,7 +1017,7 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
elif test "x$OPENJDK_$1_OS" = xsolaris; then
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -DSOLARIS"
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -template=no%extdef -features=no%split_init \
-D_Crun_inline_placement -library=%none $PICFLAG -mt -features=no%except"
-D_Crun_inline_placement -library=stlport4 $PICFLAG -mt -features=no%except"
elif test "x$OPENJDK_$1_OS" = xmacosx; then
$2COMMON_CCXXFLAGS_JDK="[$]$2COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -D_ALLBSD_SOURCE"

View File

@ -663,8 +663,6 @@ JVM_FEATURES_core
JVM_FEATURES_client
JVM_FEATURES_server
INCLUDE_GRAAL
ELF_LIBS
ELF_CFLAGS
STLPORT_LIB
LIBZIP_CAN_USE_MMAP
LIBDL
@ -790,6 +788,7 @@ ARFLAGS
COMPILER_BINDCMD_FILE_FLAG
COMPILER_COMMAND_FILE_FLAG
COMPILER_TARGET_BITS_FLAG
JIB_JAR
JT_HOME
JTREGEXE
HOTSPOT_TOOLCHAIN_TYPE
@ -1182,6 +1181,7 @@ with_extra_ldflags
with_toolchain_version
with_build_devkit
with_jtreg
with_jib
with_abi_profile
with_macosx_version_max
enable_warnings_as_errors
@ -1219,9 +1219,6 @@ with_lcms
with_dxsdk
with_dxsdk_lib
with_dxsdk_include
with_libelf
with_libelf_include
with_libelf_lib
with_jvm_features
with_jvm_interpreter
enable_jtreg_failure_handler
@ -1351,8 +1348,6 @@ PNG_CFLAGS
PNG_LIBS
LCMS_CFLAGS
LCMS_LIBS
ELF_CFLAGS
ELF_LIBS
ICECC_CMD
ICECC_CREATE_ENV
ICECC_WRAPPER
@ -2130,6 +2125,7 @@ Optional Packages:
dependent]
--with-build-devkit Devkit to use for the build platform toolchain
--with-jtreg Regression Test Harness [probed]
--with-jib Jib dependency management tool [not used]
--with-abi-profile specify ABI profile for ARM builds
(arm-vfp-sflt,arm-vfp-hflt,arm-sflt,
armv5-vfp-sflt,armv6-vfp-hflt,arm64,aarch64)
@ -2186,11 +2182,6 @@ Optional Packages:
compatibility and is ignored
--with-dxsdk-include Deprecated. Option is kept for backwards
compatibility and is ignored
--with-libelf specify prefix directory for the libelf package
(expecting the libraries under PATH/lib and the
headers under PATH/include)
--with-libelf-include specify directory for the libelf include files
--with-libelf-lib specify directory for the libelf library
--with-jvm-features additional JVM features to enable (separated by
comma), use '--help' to show possible values [none]
--with-jvm-interpreter Deprecated. Option is kept for backwards
@ -2327,8 +2318,6 @@ Some influential environment variables:
PNG_LIBS linker flags for PNG, overriding pkg-config
LCMS_CFLAGS C compiler flags for LCMS, overriding pkg-config
LCMS_LIBS linker flags for LCMS, overriding pkg-config
ELF_CFLAGS C compiler flags for ELF, overriding pkg-config
ELF_LIBS linker flags for ELF, overriding pkg-config
ICECC_CMD Override default value for ICECC_CMD
ICECC_CREATE_ENV
Override default value for ICECC_CREATE_ENV
@ -4224,8 +4213,6 @@ apt_help() {
PKGHANDLER_COMMAND="sudo apt-get install ccache" ;;
dtrace)
PKGHANDLER_COMMAND="sudo apt-get install systemtap-sdt-dev" ;;
elf)
PKGHANDLER_COMMAND="sudo apt-get install libelf-dev" ;;
esac
}
@ -4245,8 +4232,6 @@ yum_help() {
PKGHANDLER_COMMAND="sudo yum install libXtst-devel libXt-devel libXrender-devel libXi-devel" ;;
ccache)
PKGHANDLER_COMMAND="sudo yum install ccache" ;;
elf)
PKGHANDLER_COMMAND="sudo yum install elfutils-libelf-devel" ;;
esac
}
@ -4516,7 +4501,7 @@ VALID_JVM_VARIANTS="server client minimal core zero zeroshark custom"
#
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -4793,36 +4778,6 @@ VALID_JVM_VARIANTS="server client minimal core zero zeroshark custom"
################################################################################
#
# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
################################################################################
# Setup libelf (ELF library)
################################################################################
################################################################################
# Determine which libraries are needed for this configuration
@ -4961,7 +4916,7 @@ VALID_JVM_VARIANTS="server client minimal core zero zeroshark custom"
#
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -5092,6 +5047,9 @@ TOOLCHAIN_MINIMUM_VERSION_xlc=""
# Setup the JTReg Regression Test Harness.
# Setup the JIB dependency resolver
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -5193,7 +5151,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1500423205
DATE_WHEN_GENERATED=1503411624
###############################################################################
#
@ -49524,6 +49482,41 @@ $as_echo "$as_me: The path of JT_HOME, which resolves as \"$path\", is invalid."
# Setup Jib dependency tool
# Check whether --with-jib was given.
if test "${with_jib+set}" = set; then :
withval=$with_jib;
fi
if test "x$with_jib" = xno || test "x$with_jib" = x; then
# jib disabled
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for jib" >&5
$as_echo_n "checking for jib... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
elif test "x$with_jib" = xyes; then
as_fn_error $? "Must supply a value to --with-jib" "$LINENO" 5
else
JIB_HOME="${with_jib}"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for jib" >&5
$as_echo_n "checking for jib... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${JIB_HOME}" >&5
$as_echo "${JIB_HOME}" >&6; }
if test ! -d "${JIB_HOME}"; then
as_fn_error $? "--with-jib must be a directory" "$LINENO" 5
fi
JIB_JAR=$(ls ${JIB_HOME}/lib/jib-*.jar)
if test ! -f "${JIB_JAR}"; then
as_fn_error $? "Could not find jib jar file in ${JIB_HOME}" "$LINENO" 5
fi
fi
# COMPILER_TARGET_BITS_FLAG : option for selecting 32- or 64-bit output
# COMPILER_COMMAND_FILE_FLAG : option for passing a command file to the compiler
@ -51066,6 +51059,74 @@ $as_echo "$JDK_ARCH_ABI_PROP_NAME" >&6; }
fi
# Optional POSIX functionality needed by the JVM
#
# Check if clock_gettime is available and in which library. This indicates
# availability of CLOCK_MONOTONIC for hotspot. But we don't need to link, so
# don't let it update LIBS.
save_LIBS="$LIBS"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
$as_echo_n "checking for library containing clock_gettime... " >&6; }
if ${ac_cv_search_clock_gettime+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_func_search_save_LIBS=$LIBS
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
/* Override any GCC internal prototype to avoid an error.
Use char because int might match the return type of a GCC
builtin and then its argument prototype would still apply. */
#ifdef __cplusplus
extern "C"
#endif
char clock_gettime ();
int
main ()
{
return clock_gettime ();
;
return 0;
}
_ACEOF
for ac_lib in '' rt; do
if test -z "$ac_lib"; then
ac_res="none required"
else
ac_res=-l$ac_lib
LIBS="-l$ac_lib $ac_func_search_save_LIBS"
fi
if ac_fn_cxx_try_link "$LINENO"; then :
ac_cv_search_clock_gettime=$ac_res
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext
if ${ac_cv_search_clock_gettime+:} false; then :
break
fi
done
if ${ac_cv_search_clock_gettime+:} false; then :
else
ac_cv_search_clock_gettime=no
fi
rm conftest.$ac_ext
LIBS=$ac_func_search_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
$as_echo "$ac_cv_search_clock_gettime" >&6; }
ac_res=$ac_cv_search_clock_gettime
if test "$ac_res" != no; then :
test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
HAS_CLOCK_GETTIME=true
fi
if test "x$LIBS" = "x-lrt "; then
CLOCK_GETTIME_IN_LIBRT=true
fi
LIBS="$save_LIBS"
# Special extras...
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
if test "x$OPENJDK_TARGET_CPU_ARCH" = "xsparc"; then
@ -51366,7 +51427,7 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
CFLAGS_JDK="$CFLAGS_JDK -xc99=%none -xCC -errshort=tags -Xa -v -mt -W0,-noglobal"
CXXFLAGS_JDK="$CXXFLAGS_JDK -errtags=yes +w -mt -features=no%except -DCC_NOEX -norunpath -xnolib"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
JVM_CFLAGS="$JVM_CFLAGS -D_REENTRANT -D__STDC_FORMAT_MACROS"
JVM_CFLAGS="$JVM_CFLAGS -D_REENTRANT"
CFLAGS_JDK="$CFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
CXXFLAGS_JDK="$CXXFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
@ -51431,6 +51492,11 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
fi
fi
# Always enable optional macros for VM.
JVM_CFLAGS="$JVM_CFLAGS -D__STDC_FORMAT_MACROS"
JVM_CFLAGS="$JVM_CFLAGS -D__STDC_LIMIT_MACROS"
JVM_CFLAGS="$JVM_CFLAGS -D__STDC_CONSTANT_MACROS"
# Setup target OS define. Use OS target name but in upper case.
OPENJDK_TARGET_OS_UPPERCASE=`$ECHO $OPENJDK_TARGET_OS | $TR 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'`
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D$OPENJDK_TARGET_OS_UPPERCASE"
@ -51450,6 +51516,16 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DDEBUG"
fi
# Optional POSIX functionality needed by the VM
if test "x$HAS_CLOCK_GETTIME" = "xtrue"; then
JVM_CFLAGS="$JVM_CFLAGS -DSUPPORTS_CLOCK_MONOTONIC"
if test "x$CLOCK_GETTIME_IN_LIBRT" = "xtrue"; then
JVM_CFLAGS="$JVM_CFLAGS -DNEEDS_LIBRT"
fi
fi
# Set some additional per-OS defines.
if test "x$OPENJDK_TARGET_OS" = xlinux; then
JVM_CFLAGS="$JVM_CFLAGS -DLINUX"
@ -51458,7 +51534,7 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
elif test "x$OPENJDK_TARGET_OS" = xsolaris; then
JVM_CFLAGS="$JVM_CFLAGS -DSOLARIS"
JVM_CFLAGS="$JVM_CFLAGS -template=no%extdef -features=no%split_init \
-D_Crun_inline_placement -library=%none $PICFLAG -mt -features=no%except"
-D_Crun_inline_placement -library=stlport4 $PICFLAG -mt -features=no%except"
elif test "x$OPENJDK_TARGET_OS" = xmacosx; then
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
JVM_CFLAGS="$JVM_CFLAGS -D_ALLBSD_SOURCE"
@ -52230,7 +52306,7 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
OPENJDK_BUILD_CFLAGS_JDK="$OPENJDK_BUILD_CFLAGS_JDK -xc99=%none -xCC -errshort=tags -Xa -v -mt -W0,-noglobal"
OPENJDK_BUILD_CXXFLAGS_JDK="$OPENJDK_BUILD_CXXFLAGS_JDK -errtags=yes +w -mt -features=no%except -DCC_NOEX -norunpath -xnolib"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -D_REENTRANT -D__STDC_FORMAT_MACROS"
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -D_REENTRANT"
OPENJDK_BUILD_CFLAGS_JDK="$OPENJDK_BUILD_CFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
OPENJDK_BUILD_CXXFLAGS_JDK="$OPENJDK_BUILD_CXXFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
@ -52295,6 +52371,11 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
fi
fi
# Always enable optional macros for VM.
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -D__STDC_FORMAT_MACROS"
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -D__STDC_LIMIT_MACROS"
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -D__STDC_CONSTANT_MACROS"
# Setup target OS define. Use OS target name but in upper case.
OPENJDK_BUILD_OS_UPPERCASE=`$ECHO $OPENJDK_BUILD_OS | $TR 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'`
OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK="$OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK -D$OPENJDK_BUILD_OS_UPPERCASE"
@ -52314,6 +52395,16 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK="$OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK -DDEBUG"
fi
# Optional POSIX functionality needed by the VM
if test "x$HAS_CLOCK_GETTIME" = "xtrue"; then
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -DSUPPORTS_CLOCK_MONOTONIC"
if test "x$CLOCK_GETTIME_IN_LIBRT" = "xtrue"; then
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -DNEEDS_LIBRT"
fi
fi
# Set some additional per-OS defines.
if test "x$OPENJDK_BUILD_OS" = xlinux; then
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -DLINUX"
@ -52322,7 +52413,7 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
elif test "x$OPENJDK_BUILD_OS" = xsolaris; then
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -DSOLARIS"
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -template=no%extdef -features=no%split_init \
-D_Crun_inline_placement -library=%none $PICFLAG -mt -features=no%except"
-D_Crun_inline_placement -library=stlport4 $PICFLAG -mt -features=no%except"
elif test "x$OPENJDK_BUILD_OS" = xmacosx; then
OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK="$OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -D_ALLBSD_SOURCE"
@ -53989,8 +54080,8 @@ $as_echo "no, forced" >&6; }
fi
if test "x$ENABLE_AOT" = "xtrue"; then
# Only enable AOT on linux-X64.
if test "x$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU" = "xlinux-x86_64"; then
# Only enable AOT on X64 platforms.
if test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
if test -e "$HOTSPOT_TOPDIR/src/jdk.aot"; then
if test -e "$HOTSPOT_TOPDIR/src/jdk.internal.vm.compiler"; then
ENABLE_AOT="true"
@ -54009,7 +54100,7 @@ $as_echo "no, forced" >&6; }
else
ENABLE_AOT="false"
if test "x$enable_aot" = "xyes"; then
as_fn_error $? "AOT is currently only supported on Linux-x86_64. Remove --enable-aot." "$LINENO" 5
as_fn_error $? "AOT is currently only supported on x86_64. Remove --enable-aot." "$LINENO" 5
fi
fi
fi
@ -62526,7 +62617,7 @@ $as_echo "$FREETYPE_LIB_PATH" >&6; }
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
POTENTIAL_FREETYPE_INCLUDE_PATH="$FREETYPE_BASE_DIR/include"
POTENTIAL_FREETYPE_LIB_PATH="$FREETYPE_BASE_DIR/lib/x86_64-linux-gnu"
POTENTIAL_FREETYPE_LIB_PATH="$FREETYPE_BASE_DIR/lib/$OPENJDK_TARGET_CPU-linux-gnu"
METHOD="well-known location"
# Let's start with an optimistic view of the world :-)
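With the hard-wired x86_64 tuple replaced by $OPENJDK_TARGET_CPU, the well-known-location probe on, say, an aarch64 target now looks in $FREETYPE_BASE_DIR/lib/aarch64-linux-gnu. This assumes the OpenJDK CPU name matches the Debian multiarch tuple, which holds for targets like x86_64 and aarch64 but is not universally true.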
@ -65499,279 +65590,6 @@ $as_echo "no, not found at $STLPORT_LIB" >&6; }
# Check whether --with-libelf was given.
if test "${with_libelf+set}" = set; then :
withval=$with_libelf;
fi
# Check whether --with-libelf-include was given.
if test "${with_libelf_include+set}" = set; then :
withval=$with_libelf_include;
fi
# Check whether --with-libelf-lib was given.
if test "${with_libelf_lib+set}" = set; then :
withval=$with_libelf_lib;
fi
if test "x$ENABLE_AOT" = xfalse; then
if (test "x${with_libelf}" != x && test "x${with_libelf}" != xno) || \
(test "x${with_libelf_include}" != x && test "x${with_libelf_include}" != xno) || \
(test "x${with_libelf_lib}" != x && test "x${with_libelf_lib}" != xno); then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: libelf is not used, so --with-libelf[-*] is ignored" >&5
$as_echo "$as_me: WARNING: libelf is not used, so --with-libelf[-*] is ignored" >&2;}
fi
LIBELF_CFLAGS=
LIBELF_LIBS=
else
LIBELF_FOUND=no
if test "x${with_libelf}" = xno || test "x${with_libelf_include}" = xno || test "x${with_libelf_lib}" = xno; then
ENABLE_AOT="false"
if test "x${enable_aot}" = xyes; then
as_fn_error $? "libelf is explicitly disabled, cannot build AOT. Enable libelf or remove --enable-aot to disable AOT." "$LINENO" 5
fi
else
if test "x${with_libelf}" != x; then
ELF_LIBS="-L${with_libelf}/lib -lelf"
ELF_CFLAGS="-I${with_libelf}/include"
LIBELF_FOUND=yes
fi
if test "x${with_libelf_include}" != x; then
ELF_CFLAGS="-I${with_libelf_include}"
LIBELF_FOUND=yes
fi
if test "x${with_libelf_lib}" != x; then
ELF_LIBS="-L${with_libelf_lib} -lelf"
LIBELF_FOUND=yes
fi
# Do not try pkg-config if we have a sysroot set.
if test "x$SYSROOT" = x; then
if test "x$LIBELF_FOUND" = xno; then
# Figure out ELF_CFLAGS and ELF_LIBS
pkg_failed=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ELF" >&5
$as_echo_n "checking for ELF... " >&6; }
if test -n "$ELF_CFLAGS"; then
pkg_cv_ELF_CFLAGS="$ELF_CFLAGS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libelf\""; } >&5
($PKG_CONFIG --exists --print-errors "libelf") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_ELF_CFLAGS=`$PKG_CONFIG --cflags "libelf" 2>/dev/null`
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test -n "$ELF_LIBS"; then
pkg_cv_ELF_LIBS="$ELF_LIBS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libelf\""; } >&5
($PKG_CONFIG --exists --print-errors "libelf") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_ELF_LIBS=`$PKG_CONFIG --libs "libelf" 2>/dev/null`
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test $pkg_failed = yes; then
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
_pkg_short_errors_supported=yes
else
_pkg_short_errors_supported=no
fi
if test $_pkg_short_errors_supported = yes; then
ELF_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "libelf" 2>&1`
else
ELF_PKG_ERRORS=`$PKG_CONFIG --print-errors "libelf" 2>&1`
fi
# Put the nasty error message in config.log where it belongs
echo "$ELF_PKG_ERRORS" >&5
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
LIBELF_FOUND=no
elif test $pkg_failed = untried; then
LIBELF_FOUND=no
else
ELF_CFLAGS=$pkg_cv_ELF_CFLAGS
ELF_LIBS=$pkg_cv_ELF_LIBS
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
LIBELF_FOUND=yes
fi
fi
fi
if test "x$LIBELF_FOUND" = xno; then
for ac_header in libelf.h
do :
ac_fn_cxx_check_header_mongrel "$LINENO" "libelf.h" "ac_cv_header_libelf_h" "$ac_includes_default"
if test "x$ac_cv_header_libelf_h" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_LIBELF_H 1
_ACEOF
LIBELF_FOUND=yes
ELF_CFLAGS=
ELF_LIBS=-lelf
else
LIBELF_FOUND=no
fi
done
fi
if test "x$LIBELF_FOUND" = xno; then
ENABLE_AOT="false"
# Print a helpful message on how to acquire the necessary build dependency.
# elf is the help tag: freetype, cups, alsa etc
MISSING_DEPENDENCY=elf
if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.cygwin"; then
cygwin_help $MISSING_DEPENDENCY
elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
msys_help $MISSING_DEPENDENCY
else
PKGHANDLER_COMMAND=
case $PKGHANDLER in
apt-get)
apt_help $MISSING_DEPENDENCY ;;
yum)
yum_help $MISSING_DEPENDENCY ;;
brew)
brew_help $MISSING_DEPENDENCY ;;
port)
port_help $MISSING_DEPENDENCY ;;
pkgutil)
pkgutil_help $MISSING_DEPENDENCY ;;
pkgadd)
pkgadd_help $MISSING_DEPENDENCY ;;
esac
if test "x$PKGHANDLER_COMMAND" != x; then
HELP_MSG="You might be able to fix this by running '$PKGHANDLER_COMMAND'."
fi
fi
if test "x${enable_aot}" = xyes; then
as_fn_error $? "libelf not found, cannot build AOT. Remove --enable-aot to disable AOT or: $HELP_MSG" "$LINENO" 5
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: libelf not found, cannot build AOT. $HELP_MSG" >&5
$as_echo "$as_me: WARNING: libelf not found, cannot build AOT. $HELP_MSG" >&2;}
fi
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if libelf works" >&5
$as_echo_n "checking if libelf works... " >&6; }
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu
OLD_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $ELF_CFLAGS"
OLD_LIBS="$LIBS"
LIBS="$LIBS $ELF_LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <libelf.h>
int
main ()
{
elf_version(0);
return 0;
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
LIBELF_WORKS=yes
else
LIBELF_WORKS=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
CFLAGS="$OLD_CFLAGS"
LIBS="$OLD_LIBS"
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBELF_WORKS" >&5
$as_echo "$LIBELF_WORKS" >&6; }
if test "x$LIBELF_WORKS" = xno; then
ENABLE_AOT="false"
# Print a helpful message on how to acquire the necessary build dependency.
# elf is the help tag: freetype, cups, alsa etc
MISSING_DEPENDENCY=elf
if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.cygwin"; then
cygwin_help $MISSING_DEPENDENCY
elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
msys_help $MISSING_DEPENDENCY
else
PKGHANDLER_COMMAND=
case $PKGHANDLER in
apt-get)
apt_help $MISSING_DEPENDENCY ;;
yum)
yum_help $MISSING_DEPENDENCY ;;
brew)
brew_help $MISSING_DEPENDENCY ;;
port)
port_help $MISSING_DEPENDENCY ;;
pkgutil)
pkgutil_help $MISSING_DEPENDENCY ;;
pkgadd)
pkgadd_help $MISSING_DEPENDENCY ;;
esac
if test "x$PKGHANDLER_COMMAND" != x; then
HELP_MSG="You might be able to fix this by running '$PKGHANDLER_COMMAND'."
fi
fi
if test "x$enable_aot" = "xyes"; then
as_fn_error $? "Found libelf but could not link and compile with it. Remove --enable-aot to disable AOT or: $HELP_MSG" "$LINENO" 5
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Found libelf but could not link and compile with it. $HELP_MSG" >&5
$as_echo "$as_me: WARNING: Found libelf but could not link and compile with it. $HELP_MSG" >&2;}
fi
fi
fi
fi
fi
@ -65783,14 +65601,7 @@ $as_echo "$as_me: WARNING: Found libelf but could not link and compile with it.
# Hotspot setup depends on lib checks (AOT needs libelf).
# Hotspot setup depends on lib checks.
# The user can in some cases supply additional jvm features. For the custom

View File

@ -123,8 +123,6 @@ apt_help() {
PKGHANDLER_COMMAND="sudo apt-get install ccache" ;;
dtrace)
PKGHANDLER_COMMAND="sudo apt-get install systemtap-sdt-dev" ;;
elf)
PKGHANDLER_COMMAND="sudo apt-get install libelf-dev" ;;
esac
}
@ -144,8 +142,6 @@ yum_help() {
PKGHANDLER_COMMAND="sudo yum install libXtst-devel libXt-devel libXrender-devel libXi-devel" ;;
ccache)
PKGHANDLER_COMMAND="sudo yum install ccache" ;;
elf)
PKGHANDLER_COMMAND="sudo yum install elfutils-libelf-devel" ;;
esac
}

View File

@ -212,8 +212,8 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_AOT],
fi
if test "x$ENABLE_AOT" = "xtrue"; then
# Only enable AOT on linux-X64.
if test "x$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU" = "xlinux-x86_64"; then
# Only enable AOT on X64 platforms.
if test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
if test -e "$HOTSPOT_TOPDIR/src/jdk.aot"; then
if test -e "$HOTSPOT_TOPDIR/src/jdk.internal.vm.compiler"; then
ENABLE_AOT="true"
@ -232,7 +232,7 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_AOT],
else
ENABLE_AOT="false"
if test "x$enable_aot" = "xyes"; then
AC_MSG_ERROR([AOT is currently only supported on Linux-x86_64. Remove --enable-aot.])
AC_MSG_ERROR([AOT is currently only supported on x86_64. Remove --enable-aot.])
fi
fi
fi
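Note the practical effect of the relaxed test: configure no longer rejects --enable-aot merely because the OS is not Linux; any x86_64 target that ships the jdk.aot and jdk.internal.vm.compiler sources can now attempt it. The Windows devkit change later in this commit, which exports VS120COMNTOOLS so jaot can find the Visual Studio linker, suggests windows-x64 is one intended beneficiary.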

View File

@ -1,129 +0,0 @@
#
# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
################################################################################
# Setup libelf (ELF library)
################################################################################
AC_DEFUN_ONCE([LIB_SETUP_LIBELF],
[
AC_ARG_WITH(libelf, [AS_HELP_STRING([--with-libelf],
[specify prefix directory for the libelf package
(expecting the libraries under PATH/lib and the headers under PATH/include)])])
AC_ARG_WITH(libelf-include, [AS_HELP_STRING([--with-libelf-include],
[specify directory for the libelf include files])])
AC_ARG_WITH(libelf-lib, [AS_HELP_STRING([--with-libelf-lib],
[specify directory for the libelf library])])
if test "x$ENABLE_AOT" = xfalse; then
if (test "x${with_libelf}" != x && test "x${with_libelf}" != xno) || \
(test "x${with_libelf_include}" != x && test "x${with_libelf_include}" != xno) || \
(test "x${with_libelf_lib}" != x && test "x${with_libelf_lib}" != xno); then
AC_MSG_WARN([[libelf is not used, so --with-libelf[-*] is ignored]])
fi
LIBELF_CFLAGS=
LIBELF_LIBS=
else
LIBELF_FOUND=no
if test "x${with_libelf}" = xno || test "x${with_libelf_include}" = xno || test "x${with_libelf_lib}" = xno; then
ENABLE_AOT="false"
if test "x${enable_aot}" = xyes; then
AC_MSG_ERROR([libelf is explicitly disabled, cannot build AOT. Enable libelf or remove --enable-aot to disable AOT.])
fi
else
if test "x${with_libelf}" != x; then
ELF_LIBS="-L${with_libelf}/lib -lelf"
ELF_CFLAGS="-I${with_libelf}/include"
LIBELF_FOUND=yes
fi
if test "x${with_libelf_include}" != x; then
ELF_CFLAGS="-I${with_libelf_include}"
LIBELF_FOUND=yes
fi
if test "x${with_libelf_lib}" != x; then
ELF_LIBS="-L${with_libelf_lib} -lelf"
LIBELF_FOUND=yes
fi
# Do not try pkg-config if we have a sysroot set.
if test "x$SYSROOT" = x; then
if test "x$LIBELF_FOUND" = xno; then
# Figure out ELF_CFLAGS and ELF_LIBS
PKG_CHECK_MODULES([ELF], [libelf], [LIBELF_FOUND=yes], [LIBELF_FOUND=no])
fi
fi
if test "x$LIBELF_FOUND" = xno; then
AC_CHECK_HEADERS([libelf.h],
[
LIBELF_FOUND=yes
ELF_CFLAGS=
ELF_LIBS=-lelf
],
[LIBELF_FOUND=no]
)
fi
if test "x$LIBELF_FOUND" = xno; then
ENABLE_AOT="false"
HELP_MSG_MISSING_DEPENDENCY([elf])
if test "x${enable_aot}" = xyes; then
AC_MSG_ERROR([libelf not found, cannot build AOT. Remove --enable-aot to disable AOT or: $HELP_MSG])
else
AC_MSG_WARN([libelf not found, cannot build AOT. $HELP_MSG])
fi
else
AC_MSG_CHECKING([if libelf works])
AC_LANG_PUSH(C)
OLD_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $ELF_CFLAGS"
OLD_LIBS="$LIBS"
LIBS="$LIBS $ELF_LIBS"
AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <libelf.h>],
[
elf_version(0);
return 0;
])],
[LIBELF_WORKS=yes],
[LIBELF_WORKS=no]
)
CFLAGS="$OLD_CFLAGS"
LIBS="$OLD_LIBS"
AC_LANG_POP(C)
AC_MSG_RESULT([$LIBELF_WORKS])
if test "x$LIBELF_WORKS" = xno; then
ENABLE_AOT="false"
HELP_MSG_MISSING_DEPENDENCY([elf])
if test "x$enable_aot" = "xyes"; then
AC_MSG_ERROR([Found libelf but could not link and compile with it. Remove --enable-aot to disable AOT or: $HELP_MSG])
else
AC_MSG_WARN([Found libelf but could not link and compile with it. $HELP_MSG])
fi
fi
fi
fi
fi
AC_SUBST(ELF_CFLAGS)
AC_SUBST(ELF_LIBS)
])

View File

@ -366,7 +366,7 @@ AC_DEFUN_ONCE([LIB_SETUP_FREETYPE],
if test "x$FOUND_FREETYPE" != xyes; then
FREETYPE_BASE_DIR="$SYSROOT/usr"
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib/x86_64-linux-gnu], [well-known location])
LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib/$OPENJDK_TARGET_CPU-linux-gnu], [well-known location])
else
LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib/i386-linux-gnu], [well-known location])
if test "x$FOUND_FREETYPE" != xyes; then

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,6 @@ m4_include([lib-ffi.m4])
m4_include([lib-freetype.m4])
m4_include([lib-std.m4])
m4_include([lib-x11.m4])
m4_include([lib-elf.m4])
################################################################################
# Determine which libraries are needed for this configuration
@ -91,7 +90,6 @@ AC_DEFUN_ONCE([LIB_SETUP_LIBRARIES],
LIB_SETUP_BUNDLED_LIBS
LIB_SETUP_MISC_LIBS
LIB_SETUP_SOLARIS_STLPORT
LIB_SETUP_LIBELF
])
################################################################################

View File

@ -690,6 +690,7 @@ SETFILE:=@SETFILE@
XATTR:=@XATTR@
JT_HOME:=@JT_HOME@
JTREGEXE:=@JTREGEXE@
JIB_JAR:=@JIB_JAR@
XCODEBUILD=@XCODEBUILD@
DTRACE := @DTRACE@
FIXPATH:=@FIXPATH@
@ -774,9 +775,6 @@ USE_EXTERNAL_LIBPNG:=@USE_EXTERNAL_LIBPNG@
PNG_LIBS:=@PNG_LIBS@
PNG_CFLAGS:=@PNG_CFLAGS@
ELF_CFLAGS:=@ELF_CFLAGS@
ELF_LIBS:=@ELF_LIBS@
####################################################
#
# Misc

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -1005,3 +1005,31 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_JTREG],
AC_SUBST(JT_HOME)
AC_SUBST(JTREGEXE)
])
# Setup the JIB dependency resolver
AC_DEFUN_ONCE([TOOLCHAIN_SETUP_JIB],
[
AC_ARG_WITH(jib, [AS_HELP_STRING([--with-jib],
[Jib dependency management tool @<:@not used@:>@])])
if test "x$with_jib" = xno || test "x$with_jib" = x; then
# jib disabled
AC_MSG_CHECKING([for jib])
AC_MSG_RESULT(no)
elif test "x$with_jib" = xyes; then
AC_MSG_ERROR([Must supply a value to --with-jib])
else
JIB_HOME="${with_jib}"
AC_MSG_CHECKING([for jib])
AC_MSG_RESULT(${JIB_HOME})
if test ! -d "${JIB_HOME}"; then
AC_MSG_ERROR([--with-jib must be a directory])
fi
JIB_JAR=$(ls ${JIB_HOME}/lib/jib-*.jar)
if test ! -f "${JIB_JAR}"; then
AC_MSG_ERROR([Could not find jib jar file in ${JIB_HOME}])
fi
fi
AC_SUBST(JIB_JAR)
])
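As a usage sketch, assuming a Jib distribution unpacked under $HOME/jib with a lib/jib-*.jar inside, this macro is driven by bash configure --with-jib=$HOME/jib; the resolved jar lands in the substituted JIB_JAR, which the make layer picks up via the JIB_JAR:=@JIB_JAR@ line added to the spec file later in this commit.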

View File

@ -237,7 +237,7 @@ var getJibProfilesCommon = function (input, data) {
// These are the base setttings for all the main build profiles.
common.main_profile_base = {
dependencies: ["boot_jdk", "gnumake", "jtreg"],
dependencies: ["boot_jdk", "gnumake", "jtreg", "jib"],
default_make_targets: ["product-bundles", "test-bundles"],
configure_args: concat(["--enable-jtreg-failure-handler"],
versionArgs(input, common))
@ -590,7 +590,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"run-test-jprt": {
target_os: input.build_os,
target_cpu: input.build_cpu,
dependencies: [ "jtreg", "gnumake", "boot_jdk" ],
dependencies: [ "jtreg", "gnumake", "boot_jdk", "devkit", "jib" ],
labels: "test",
environment: {
"JT_JAVA": common.boot_jdk_home
@ -600,7 +600,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"run-test": {
target_os: input.build_os,
target_cpu: input.build_cpu,
dependencies: [ "jtreg", "gnumake", "boot_jdk" ],
dependencies: [ "jtreg", "gnumake", "boot_jdk", "devkit", "jib" ],
labels: "test",
environment: {
"JT_JAVA": common.boot_jdk_home
@ -619,7 +619,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: input.build_os,
target_cpu: input.build_cpu,
src: "src.conf",
dependencies: [ "jtreg", "gnumake", "boot_jdk", testedProfile + ".jdk",
dependencies: [ "jtreg", "gnumake", "boot_jdk", "jib", testedProfile + ".jdk",
testedProfile + ".test", "src.full"
],
work_dir: input.get("src.full", "install_path") + "/test",
@ -974,7 +974,7 @@ var getJibProfilesDependencies = function (input, common) {
jtreg: {
server: "javare",
revision: "4.2",
build_number: "b07",
build_number: "b08",
checksum_file: "MD5_VALUES",
file: "jtreg_bin-4.2.zip",
environment_name: "JT_HOME",
@ -1023,8 +1023,26 @@ var getJibProfilesDependencies = function (input, common) {
configure_args: "PANDOC=" + input.get("pandoc", "install_path") + "/pandoc/pandoc",
environment_path: input.get("pandoc", "install_path") + "/pandoc"
},
// This adds java jib as a dependency for the test artifacts resolver
jib: {
organization: "com.oracle.java.jib",
ext: "zip",
classifier: "distribution",
revision: "3.0-SNAPSHOT",
environment_name: "JIB_JAR",
environment_value: input.get("jib", "install_path")
+ "/jib-3.0-SNAPSHOT-distribution/lib/jib-3.0-SNAPSHOT.jar"
}
};
// Need to add a value for the Visual Studio tools variable to make
// jaot be able to pick up the Visual Studio linker in testing.
if (input.target_os == "windows") {
dependencies.devkit.environment = {
VS120COMNTOOLS: input.get("devkit", "install_path") + "/Common7/Tools"
};
}
return dependencies;
};

View File

@ -58,7 +58,6 @@
<li><a href="#x11">X11</a></li>
<li><a href="#alsa">ALSA</a></li>
<li><a href="#libffi">libffi</a></li>
<li><a href="#libelf">libelf</a></li>
</ul></li>
<li><a href="#other-tooling-requirements">Other Tooling Requirements</a><ul>
<li><a href="#gnu-make">GNU Make</a></li>
@ -469,13 +468,6 @@ tar -xzf freetype-2.5.3.tar.gz</code></pre>
<li>To install on an rpm-based Linux, try running <code>sudo yum install libffi-devel</code>.</li>
</ul>
<p>Use <code>--with-libffi=&lt;path&gt;</code> if <code>configure</code> does not properly locate your libffi files.</p>
<h3 id="libelf">libelf</h3>
<p>libelf from the <a href="http://sourceware.org/elfutils">elfutils project</a> is required when building the AOT feature of Hotspot.</p>
<ul>
<li>To install on an apt-based Linux, try running <code>sudo apt-get install libelf-dev</code>.</li>
<li>To install on an rpm-based Linux, try running <code>sudo yum install elfutils-libelf-devel</code>.</li>
</ul>
<p>Use <code>--with-libelf=&lt;path&gt;</code> if <code>configure</code> does not properly locate your libelf files.</p>
<h2 id="other-tooling-requirements">Other Tooling Requirements</h2>
<h3 id="gnu-make">GNU Make</h3>
<p>OpenJDK requires <a href="http://www.gnu.org/software/make">GNU Make</a>. No other flavors of make are supported.</p>
@ -537,7 +529,6 @@ tar -xzf freetype-2.5.3.tar.gz</code></pre>
<li><code>--with-x=&lt;path&gt;</code> - Set the path to <a href="#x11">X11</a></li>
<li><code>--with-alsa=&lt;path&gt;</code> - Set the path to <a href="#alsa">ALSA</a></li>
<li><code>--with-libffi=&lt;path&gt;</code> - Set the path to <a href="#libffi">libffi</a></li>
<li><code>--with-libelf=&lt;path&gt;</code> - Set the path to <a href="#libelf">libelf</a></li>
<li><code>--with-jtreg=&lt;path&gt;</code> - Set the path to JTReg. See <a href="#running-tests">Running Tests</a></li>
</ul>
<p>Certain third-party libraries used by OpenJDK (libjpeg, giflib, libpng, lcms and zlib) are included in the OpenJDK repository. The default behavior of the OpenJDK build is to use this version of these libraries, but they might be replaced by an external version. To do so, specify <code>system</code> as the <code>&lt;source&gt;</code> option in these arguments. (The default is <code>bundled</code>).</p>

View File

@ -648,19 +648,6 @@ Hotspot.
Use `--with-libffi=<path>` if `configure` does not properly locate your libffi
files.
### libelf
libelf from the [elfutils project](http://sourceware.org/elfutils) is required
when building the AOT feature of Hotspot.
* To install on an apt-based Linux, try running `sudo apt-get install
libelf-dev`.
* To install on an rpm-based Linux, try running `sudo yum install
elfutils-libelf-devel`.
Use `--with-libelf=<path>` if `configure` does not properly locate your libelf
files.
## Other Tooling Requirements
### GNU Make
@ -813,7 +800,6 @@ features, use `bash configure --help=short` instead.)
* `--with-x=<path>` - Set the path to [X11](#x11)
* `--with-alsa=<path>` - Set the path to [ALSA](#alsa)
* `--with-libffi=<path>` - Set the path to [libffi](#libffi)
* `--with-libelf=<path>` - Set the path to [libelf](#libelf)
* `--with-jtreg=<path>` - Set the path to JTReg. See [Running Tests](
#running-tests)

View File

@ -444,3 +444,4 @@ a923b3f30e7bddb4f960059ddfc7978fc63e2e6e jdk-10+18
28488561cfbcfa4d0d9c489e8afe0155f4231360 jdk-10+19
6ce6cb8ff41c71c49f23b15e0f0468aca5d52b17 jdk-9+180
ba71941ad9dba53b8fffb30602ef673eee88696c jdk-9+181
7a54ec280513a33e49e60546c0cf9ca573925a43 jdk-10+20

View File

@ -604,3 +604,4 @@ c9d3317623d48da3327232c81e3f8cfc0d29d888 jdk-10+18
33b74e13c1457f36041addb8b850831f81ca6e9f jdk-10+19
d7baadc223e790c08bc69bf7e553bce65b4e7e40 jdk-9+180
4a443796f6f57842d6a0434ac27ca3d1033ccc20 jdk-9+181
e93ed1a092409351c90b3a76d80b9aa8b44d5e6a jdk-10+20

View File

@ -47,11 +47,10 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
$(SRC_DIR)/org.graalvm.compiler.common/src \
$(SRC_DIR)/org.graalvm.word/src \
$(SRC_DIR)/org.graalvm.compiler.core/src \
$(SRC_DIR)/org.graalvm.compiler.core.common/src \
$(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
$(SRC_DIR)/org.graalvm.compiler.api.collections/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
$(SRC_DIR)/org.graalvm.compiler.asm/src \
$(SRC_DIR)/org.graalvm.compiler.bytecode/src \
@ -68,6 +67,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.phases.common/src \
$(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
$(SRC_DIR)/org.graalvm.compiler.virtual/src \
$(SRC_DIR)/org.graalvm.util/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \
@ -102,6 +102,7 @@ ifeq ($(INCLUDE_GRAAL), true)
SRC := \
$(SRC_DIR)/org.graalvm.compiler.options/src \
$(SRC_DIR)/org.graalvm.compiler.options.processor/src \
$(SRC_DIR)/org.graalvm.util/src \
, \
BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor, \
JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar, \
@ -114,9 +115,8 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
$(SRC_DIR)/org.graalvm.compiler.common/src \
$(SRC_DIR)/org.graalvm.word/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
$(SRC_DIR)/org.graalvm.compiler.api.collections/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
$(SRC_DIR)/org.graalvm.compiler.code/src \
$(SRC_DIR)/org.graalvm.compiler.core.common/src \
@ -125,6 +125,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.nodeinfo/src \
$(SRC_DIR)/org.graalvm.compiler.options/src \
$(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
$(SRC_DIR)/org.graalvm.util/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \

View File

@ -37,7 +37,6 @@ SRC_DIR := $(HOTSPOT_TOPDIR)/src/$(MODULE)/share/classes
PROC_SRC_SUBDIRS := \
org.graalvm.compiler.code \
org.graalvm.compiler.common \
org.graalvm.compiler.core \
org.graalvm.compiler.core.aarch64 \
org.graalvm.compiler.core.amd64 \

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -73,7 +73,7 @@ ifeq ($(call check-jvm-feature, compiler2), true)
OUTPUT_DIR := $(JVM_VARIANT_OUTPUTDIR)/tools/adlc, \
PROGRAM := adlc, \
DEBUG_SYMBOLS := false, \
DISABLED_WARNINGS_clang := parentheses tautological-compare, \
DISABLED_WARNINGS_clang := tautological-compare, \
DISABLED_WARNINGS_solstudio := notemsource, \
))

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,7 @@ ifeq ($(call check-jvm-feature, dtrace), true)
CXX := $(BUILD_CXX), \
LDEXE := $(BUILD_CXX), \
generateJvmOffsets.cpp_CXXFLAGS := $(JVM_CFLAGS) -mt -xnolib -norunpath, \
generateJvmOffsetsMain.c_CFLAGS := -library=%none -mt -m64 -norunpath -z nodefs, \
generateJvmOffsetsMain.c_CFLAGS := -mt -m64 -norunpath -z nodefs, \
LDFLAGS := -m64, \
LIBS := -lc, \
OBJECT_DIR := $(JVM_VARIANT_OUTPUTDIR)/tools/dtrace-gen-offsets/objs, \

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,8 @@ else
$(call create-mapfile)
endif
# Disabling switch warning for clang because of test source.
# Disabling undef, switch, format-nonliteral and tautological-undefined-compare
# warnings for clang because of test source.
# Note: On AIX, the gtest test classes linked into the libjvm.so push the TOC
# size beyond 64k, so we need to link with bigtoc. However, this means that

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -69,6 +69,7 @@ JVM_CFLAGS_TARGET_DEFINES += \
-DTARGET_ARCH_$(HOTSPOT_TARGET_CPU_ARCH) \
-DINCLUDE_SUFFIX_OS=_$(HOTSPOT_TARGET_OS) \
-DINCLUDE_SUFFIX_CPU=_$(HOTSPOT_TARGET_CPU_ARCH) \
-DINCLUDE_SUFFIX_COMPILER=_$(HOTSPOT_TOOLCHAIN_TYPE) \
-DTARGET_COMPILER_$(HOTSPOT_TOOLCHAIN_TYPE) \
-D$(HOTSPOT_TARGET_CPU_DEFINE) \
-DHOTSPOT_LIB_ARCH='"$(OPENJDK_TARGET_CPU_LEGACY_LIB)"' \
@ -217,9 +218,7 @@ $(eval $(call SetupNativeCompilation, BUILD_LIBJVM, \
CFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
CXXFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
DISABLED_WARNINGS_clang := delete-non-virtual-dtor dynamic-class-memaccess \
empty-body format logical-op-parentheses parentheses \
parentheses-equality switch tautological-compare, \
DISABLED_WARNINGS_clang := tautological-compare, \
DISABLED_WARNINGS_xlc := 1540-0216 1540-0198 1540-1090 1540-1639 \
1540-1088 1500-010, \
ASFLAGS := $(JVM_ASFLAGS), \

View File

@ -1,53 +0,0 @@
#
# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
include $(SPEC)
include NativeCompilation.gmk
$(eval $(call IncludeCustomExtension, hotspot, lib/Lib-jdk.aot.gmk))
##############################################################################
# Build libjelfshim only when AOT is enabled.
ifeq ($(ENABLE_AOT), true)
JELFSHIM_NAME := jelfshim
$(eval $(call SetupNativeCompilation, BUILD_LIBJELFSHIM, \
TOOLCHAIN := TOOLCHAIN_DEFAULT, \
OPTIMIZATION := LOW, \
LIBRARY := $(JELFSHIM_NAME), \
OUTPUT_DIR := $(call FindLibDirForModule, $(MODULE)), \
SRC := $(HOTSPOT_TOPDIR)/src/jdk.aot/unix/native/libjelfshim, \
CFLAGS := $(CFLAGS_JDKLIB) $(ELF_CFLAGS) \
-DAOT_VERSION_STRING='"$(VERSION_STRING)"' \
-I$(SUPPORT_OUTPUTDIR)/headers/$(MODULE), \
LDFLAGS := $(LDFLAGS_JDKLIB), \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/lib$(JELFSHIM_NAME), \
LIBS := $(ELF_LIBS) $(LIBS_JDKLIB), \
))
TARGETS += $(BUILD_LIBJELFSHIM)
endif
##############################################################################

View File

@ -35,12 +35,17 @@ include $(SPEC)
include MakeBase.gmk
include TestFilesCompilation.gmk
$(eval $(call IncludeCustomExtension, hotspot, test/JtregNative.gmk))
################################################################################
# Targets for building the native tests themselves.
################################################################################
# Add more directories here when needed.
BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
$(HOTSPOT_TOPDIR)/test/gc/g1/TestJNIWeakG1 \
$(HOTSPOT_TOPDIR)/test/gc/stress/gclocker \
$(HOTSPOT_TOPDIR)/test/gc/cslocker \
$(HOTSPOT_TOPDIR)/test/native_sanity \
$(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
$(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
@ -53,8 +58,10 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
$(HOTSPOT_TOPDIR)/test/runtime/modules/getModuleJNI \
$(HOTSPOT_TOPDIR)/test/runtime/SameObject \
$(HOTSPOT_TOPDIR)/test/runtime/BoolReturn \
$(HOTSPOT_TOPDIR)/test/runtime/noClassDefFoundMsg \
$(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
$(HOTSPOT_TOPDIR)/test/compiler/calls \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetOwnedMonitorInfo \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/IsModifiableModule \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/AddModuleReads \
@ -66,6 +73,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ClassFileLoadHook \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ClassLoadPrepare \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ThreadStart \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/StartPhase/AllowedFunctions \
#
# Add conditional directories here when needed.
@ -85,6 +93,7 @@ endif
ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_liboverflow := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libSimpleClassFileLoadHook := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetOwnedMonitorInfoTest := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetNamedModuleTest := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libIsModifiableModuleTest := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libAddModuleReadsTest := -lc
@ -93,6 +102,7 @@ ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassFileLoadHook := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassLoadPrepare := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAThreadStart := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
endif
ifeq ($(OPENJDK_TARGET_OS), linux)

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -3564,7 +3564,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
}
// Vector ideal reg.
const int Matcher::vector_ideal_reg(int len) {
const uint Matcher::vector_ideal_reg(int len) {
switch(len) {
case 8: return Op_VecD;
case 16: return Op_VecX;
@ -3573,7 +3573,7 @@ const int Matcher::vector_ideal_reg(int len) {
return 0;
}
const int Matcher::vector_shift_count_ideal_reg(int size) {
const uint Matcher::vector_shift_count_ideal_reg(int size) {
return Op_VecX;
}
@ -5218,7 +5218,7 @@ frame %{
// ppc port uses 0 but we definitely need to allow for fixed_slots
// which folds in the space used for monitors
return_addr(STACK - 2 +
round_to((Compile::current()->in_preserve_stack_slots() +
align_up((Compile::current()->in_preserve_stack_slots() +
Compile::current()->fixed_slots()),
stack_alignment_in_slots()));
@ -5343,6 +5343,17 @@ operand immI_M1()
interface(CONST_INTER);
%}
// Shift values for add/sub extension shift
operand immIExt()
%{
predicate(0 <= n->get_int() && (n->get_int() <= 4));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_le_4()
%{
predicate(n->get_int() <= 4);
@ -5423,6 +5434,16 @@ operand immI_56()
interface(CONST_INTER);
%}
operand immI_63()
%{
predicate(n->get_int() == 63);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_64()
%{
predicate(n->get_int() == 64);
@ -5453,20 +5474,10 @@ operand immI_65535()
interface(CONST_INTER);
%}
operand immL_63()
%{
predicate(n->get_int() == 63);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immL_255()
%{
predicate(n->get_int() == 255);
match(ConI);
predicate(n->get_long() == 255L);
match(ConL);
op_cost(0);
format %{ %}
@ -10951,7 +10962,7 @@ instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
// Long Negation
instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
match(Set dst (SubL zero src));
ins_cost(INSN_COST);
@ -11146,7 +11157,7 @@ instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
ins_pipe(ldiv_reg_reg);
%}
instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
match(Set dst (URShiftL (RShiftL src1 div1) div2));
ins_cost(INSN_COST);
format %{ "lsr $dst, $src1, $div1" %}
@ -11156,7 +11167,7 @@ instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
ins_pipe(ialu_reg_shift);
%}
instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
ins_cost(INSN_COST);
format %{ "add $dst, $src, $div1" %}
@ -12789,7 +12800,7 @@ instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
match(Set dst (AddL src1 (ConvI2L src2)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, sxtw $src2" %}
format %{ "add $dst, $src1, $src2, sxtw" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@ -12802,7 +12813,7 @@ instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
match(Set dst (SubL src1 (ConvI2L src2)));
ins_cost(INSN_COST);
format %{ "sub $dst, $src1, sxtw $src2" %}
format %{ "sub $dst, $src1, $src2, sxtw" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
@ -12816,7 +12827,7 @@ instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 l
%{
match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, sxth $src2" %}
format %{ "add $dst, $src1, $src2, sxth" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@ -12829,7 +12840,7 @@ instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 l
%{
match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, sxtb $src2" %}
format %{ "add $dst, $src1, $src2, sxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@ -12842,7 +12853,7 @@ instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 l
%{
match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, uxtb $src2" %}
format %{ "add $dst, $src1, $src2, uxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@ -12855,7 +12866,7 @@ instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, imm
%{
match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, sxth $src2" %}
format %{ "add $dst, $src1, $src2, sxth" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@ -12868,7 +12879,7 @@ instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, imm
%{
match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, sxtw $src2" %}
format %{ "add $dst, $src1, $src2, sxtw" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@ -12881,7 +12892,7 @@ instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, imm
%{
match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, sxtb $src2" %}
format %{ "add $dst, $src1, $src2, sxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@ -12894,7 +12905,7 @@ instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, imm
%{
match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, uxtb $src2" %}
format %{ "add $dst, $src1, $src2, uxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@ -13034,6 +13045,294 @@ instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295
ins_pipe(ialu_reg_reg);
%}
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, sxtb #lshift2" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, sxth #lshift2" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, sxtw #lshift2" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, sxtb #lshift2" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, sxth #lshift2" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, sxtw #lshift2" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, sxtb #lshift2" %}
ins_encode %{
__ addw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, sxth #lshift2" %}
ins_encode %{
__ addw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, sxtb #lshift2" %}
ins_encode %{
__ subw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, sxth #lshift2" %}
ins_encode %{
__ subw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, sxtw #lshift" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, sxtw #lshift" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, uxtb #lshift" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, uxth #lshift" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, uxtw #lshift" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, uxtb #lshift" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, uxth #lshift" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, uxtw #lshift" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, uxtb #lshift" %}
ins_encode %{
__ addw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, uxth #lshift" %}
ins_encode %{
__ addw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, uxtb #lshift" %}
ins_encode %{
__ subw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, uxth #lshift" %}
ins_encode %{
__ subw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// END This section of the file is automatically generated. Do not edit --------------
// ============================================================================
@ -15443,9 +15742,9 @@ instruct ShouldNotReachHere() %{
format %{ "ShouldNotReachHere" %}
ins_encode %{
// TODO
// implement proper trap call here
__ brk(999);
// +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
// return true
__ dpcs1(0xdead + 1);
%}
ins_pipe(pipe_class_default);
@ -15803,6 +16102,16 @@ instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
ins_pipe(pipe_class_memory);
%}
instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
match(Set result (HasNegatives ary1 len));
effect(USE_KILL ary1, USE_KILL len, KILL cr);
format %{ "has negatives byte[] $ary1,$len -> $result" %}
ins_encode %{
__ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
%}
ins_pipe( pipe_slow );
%}
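This is the C2 matcher entry for the new HasNegatives node: it pins the input and result registers and delegates to MacroAssembler::has_negatives, whose inline fast path and stub fallbacks appear later in this commit.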
// fast char[] to byte[] compression
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
@ -16833,6 +17142,48 @@ instruct vmla4I(vecX dst, vecX src1, vecX src2)
ins_pipe(vmla128);
%}
// dst + src1 * src2
instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
predicate(UseFMA && n->as_Vector()->length() == 2);
match(Set dst (FmaVF dst (Binary src1 src2)));
format %{ "fmla $dst,$src1,$src2\t# vector (2S)" %}
ins_cost(INSN_COST);
ins_encode %{
__ fmla(as_FloatRegister($dst$$reg), __ T2S,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(vmuldiv_fp64);
%}
// dst + src1 * src2
instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
predicate(UseFMA && n->as_Vector()->length() == 4);
match(Set dst (FmaVF dst (Binary src1 src2)));
format %{ "fmla $dst,$src1,$src2\t# vector (4S)" %}
ins_cost(INSN_COST);
ins_encode %{
__ fmla(as_FloatRegister($dst$$reg), __ T4S,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(vmuldiv_fp128);
%}
// dst + src1 * src2
instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
predicate(UseFMA && n->as_Vector()->length() == 2);
match(Set dst (FmaVD dst (Binary src1 src2)));
format %{ "fmla $dst,$src1,$src2\t# vector (2D)" %}
ins_cost(INSN_COST);
ins_encode %{
__ fmla(as_FloatRegister($dst$$reg), __ T2D,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(vmuldiv_fp128);
%}
// --------------------------------- MLS --------------------------------------
instruct vmls4S(vecD dst, vecD src1, vecD src2)
@ -16892,6 +17243,51 @@ instruct vmls4I(vecX dst, vecX src1, vecX src2)
ins_pipe(vmla128);
%}
// dst - src1 * src2
instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
predicate(UseFMA && n->as_Vector()->length() == 2);
match(Set dst (FmaVF dst (Binary (NegVF src1) src2)));
match(Set dst (FmaVF dst (Binary src1 (NegVF src2))));
format %{ "fmls $dst,$src1,$src2\t# vector (2S)" %}
ins_cost(INSN_COST);
ins_encode %{
__ fmls(as_FloatRegister($dst$$reg), __ T2S,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(vmuldiv_fp64);
%}
// dst - src1 * src2
instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
predicate(UseFMA && n->as_Vector()->length() == 4);
match(Set dst (FmaVF dst (Binary (NegVF src1) src2)));
match(Set dst (FmaVF dst (Binary src1 (NegVF src2))));
format %{ "fmls $dst,$src1,$src2\t# vector (4S)" %}
ins_cost(INSN_COST);
ins_encode %{
__ fmls(as_FloatRegister($dst$$reg), __ T4S,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(vmuldiv_fp128);
%}
// dst - src1 * src2
instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
predicate(UseFMA && n->as_Vector()->length() == 2);
match(Set dst (FmaVD dst (Binary (NegVD src1) src2)));
match(Set dst (FmaVD dst (Binary src1 (NegVD src2))));
format %{ "fmls $dst,$src1,$src2\t# vector (2D)" %}
ins_cost(INSN_COST);
ins_encode %{
__ fmls(as_FloatRegister($dst$$reg), __ T2D,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(vmuldiv_fp128);
%}
// --------------------------------- DIV --------------------------------------
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
View File
@ -268,21 +268,21 @@ instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
ins_pipe(ialu_reg_reg_vshift);
%}')dnl
define(ROL_INSN, `
instruct $3$1_rReg_Var_C$2(iRegLNoSp dst, iRegL src, iRegI shift, immI$2 c$2, rFlagsReg cr)
instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));
expand %{
$3L_rReg(dst, src, shift, cr);
$3$1_rReg(dst, src, shift, cr);
%}
%}')dnl
define(ROR_INSN, `
instruct $3$1_rReg_Var_C$2(iRegLNoSp dst, iRegL src, iRegI shift, immI$2 c$2, rFlagsReg cr)
instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));
expand %{
$3L_rReg(dst, src, shift, cr);
$3$1_rReg(dst, src, shift, cr);
%}
%}')dnl
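The fix above parameterizes both the register class and the expansion on the mode argument `$1`: previously the macros hardwired iRegLNoSp/iRegL operands and a `$3L_rReg(...)` expansion, so the I-mode variants (parameter values assumed from the usual invocations, e.g. `$1 = I`, `$3 = rol`) would wrongly expand through the long-register rule instead of `rolI_rReg`.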
ROL_EXPAND(L, rol, rorv)
@ -305,7 +305,7 @@ instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2,
%{
match(Set dst ($3$2 src1 (ConvI2L src2)));
ins_cost(INSN_COST);
format %{ "$4 $dst, $src1, $5 $src2" %}
format %{ "$4 $dst, $src1, $src2, $5" %}
ins_encode %{
__ $4(as_Register($dst$$reg), as_Register($src1$$reg),
@ -321,7 +321,7 @@ instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) sr
%{
match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
ins_cost(INSN_COST);
format %{ "$5 $dst, $src1, $6 $src2" %}
format %{ "$5 $dst, $src1, $src2, $6" %}
ins_encode %{
__ $5(as_Register($dst$$reg), as_Register($src1$$reg),
@ -363,5 +363,82 @@ ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
dnl
dnl ADD_SUB_EXTENDED_SHIFT(mode, size, add node, shift node, insn, ext type, full size)
define(`ADD_SUB_EXTENDED_SHIFT', `
instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
%{
match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "$5 $dst, $src1, $src2, $6 #lshift2" %}
ins_encode %{
__ $5(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}')
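As a worked example, the invocation `ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)` below makes `immI_``eval($7-$2)` evaluate to `immI_56`, yielding exactly the `AddExtL_sxtb_shift` instruct in the generated section above: a left/right shift pair by 56 is the sign-extend-byte idiom that a single `add ..., sxtb #lshift2` implements.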
dnl $1 $2 $3 $4 $5 $6 $7
ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Add,RShift,add,sxtw,64)
dnl
ADD_SUB_EXTENDED_SHIFT(L,8,Sub,RShift,sub,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Sub,RShift,sub,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Sub,RShift,sub,sxtw,64)
dnl
ADD_SUB_EXTENDED_SHIFT(I,8,Add,RShift,addw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Add,RShift,addw,sxth,32)
dnl
ADD_SUB_EXTENDED_SHIFT(I,8,Sub,RShift,subw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
dnl
dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
define(`ADD_SUB_CONV_SHIFT', `
instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "$3 $dst, $src1, $src2, $4 #lshift" %}
ins_encode %{
__ $3(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::$4, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}')
dnl
ADD_SUB_CONV_SHIFT(L,Add,add,sxtw)
ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw)
dnl
dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, mask, add node, insn, ext type)
define(`ADD_SUB_ZERO_EXTEND_SHIFT', `
instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "$4 $dst, $src1, $src2, $5 #lshift" %}
ins_encode %{
__ $4(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::$5, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}')
dnl
dnl $1 $2 $3 $4 $5
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Sub,sub,uxtw)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Add,addw,uxth)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
dnl
// END This section of the file is automatically generated. Do not edit --------------
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -28,6 +28,7 @@
#include "oops/constMethod.hpp"
#include "oops/method.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
@ -53,27 +54,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
switch (method_kind(m)) {
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : // fall thru
case Interpreter::java_lang_math_fmaD : // fall thru
case Interpreter::java_lang_math_fmaF :
return false;
default:
return true;
}
}
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
const int entry_size = frame::interpreter_frame_monitor_size();
@ -121,7 +101,7 @@ int AbstractInterpreter::size_activation(int max_stack,
// On AArch64 we always keep the stack pointer 16-aligned, so we
// must round up here.
size = round_to(size, 2);
size = align_up(size, 2);
return size;
}
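Here align_up(size, 2) rounds the activation size up to an even number of words, e.g. align_up(7, 2) == 8; with 8-byte words that keeps the activation a multiple of 16 bytes, matching the stack alignment described in the comment.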
View File
@ -2201,6 +2201,8 @@ public:
INSN(fdiv, 1, 0, 0b111111);
INSN(fmul, 1, 0, 0b110111);
INSN(fsub, 0, 1, 0b110101);
INSN(fmla, 0, 0, 0b110011);
INSN(fmls, 0, 1, 0b110011);
#undef INSN
View File
@ -30,12 +30,6 @@
class Bytes: AllStatic {
public:
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine. For example, this is true for Intel x86, but false for Solaris
// on Sparc.
static inline bool is_Java_byte_ordering_different(){ return true; }
// Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
// (no special code is needed since x86 CPUs can access unaligned data)
static inline u2 get_native_u2(address p) { return *(u2*)p; }
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -23,12 +23,6 @@
*
*/
#include "precompiled.hpp"
#include "c1/c1_FpuStackSim.hpp"
#include "c1/c1_FrameMap.hpp"
#include "utilities/array.hpp"
#include "utilities/ostream.hpp"
//--------------------------------------------------------
// FpuStackSim
//--------------------------------------------------------
View File
@ -2740,8 +2740,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
// set already but no need to check.
__ cbz(rscratch1, next);
__ andr(rscratch1, tmp, TypeEntries::type_unknown);
__ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
__ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
if (TypeEntries::is_type_none(current_klass)) {
__ cbz(rscratch2, none);
@ -2761,8 +2760,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
__ ldr(tmp, mdo_addr);
__ andr(rscratch1, tmp, TypeEntries::type_unknown);
__ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
__ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
}
// different than before. Cannot keep accurate profile.
@ -2812,8 +2810,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
__ ldr(tmp, mdo_addr);
__ andr(rscratch1, tmp, TypeEntries::type_unknown);
__ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
__ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
__ orr(tmp, tmp, TypeEntries::type_unknown);
__ str(tmp, mdo_addr);
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -784,6 +784,8 @@ extern "C" void pm(unsigned long fp, unsigned long bcx) {
frame::frame(void* sp, void* fp, void* pc) {
init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
void frame::pd_ps() {}
#endif
void JavaFrameAnchor::make_walkable(JavaThread* thread) {
View File
@ -154,8 +154,11 @@ define_pd_global(intx, InlineSmallCode, 1000);
product(intx, BlockZeroingLowLimit, 256, \
"Minimum size in bytes when block zeroing will be used") \
range(1, max_jint) \
product(bool, TraceTraps, false, "Trace all traps the signal handler")
product(bool, TraceTraps, false, "Trace all traps the signal handler")\
product(int, SoftwarePrefetchHintDistance, -1, \
"Use prfm hint with specified distance in compiled code." \
"Value -1 means off.") \
range(-1, 32760)
#endif
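As a product flag this should be controllable on the command line in the usual way, e.g. -XX:SoftwarePrefetchHintDistance=256 (the value here is purely illustrative); the default of -1 leaves prfm hints off.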
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -270,7 +270,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
get_constant_pool(result);
// load pointer for resolved_references[] objArray
ldr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
// JNIHandles::resolve(obj);
ldr(result, Address(result, 0));
// Add in the index
@ -278,6 +279,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
Register cpool, Register index, Register klass, Register temp) {
add(temp, cpool, index, LSL, LogBytesPerWord);
ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
add(klass, klass, temp, LSL, LogBytesPerWord);
ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
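In C++ terms the new helper does roughly the following (a sketch only; the accessor spellings are assumptions, while the offsets mirror the loads above):
// u2     rki   = *(u2*)((address)cpool + sizeof(ConstantPool) + index * wordSize); // low 16 bits of cpool slot 'index'
// Klass* klass = cpool->resolved_klasses()->at(rki);                               // Array<Klass*> lookup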
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
@ -682,7 +692,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
}
// Load (object->mark() | 1) into swap_reg
ldr(rscratch1, Address(obj_reg, 0));
ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
orr(swap_reg, rscratch1, 1);
// Save (object->mark() | 1) into BasicLock's displaced header
@ -694,14 +704,14 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
Label fail;
if (PrintBiasedLockingStatistics) {
Label fast;
cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
bind(fast);
atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
rscratch2, rscratch1, tmp);
b(done);
bind(fail);
} else {
cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
}
// Test if the oopMark is an obvious stack pointer, i.e.,
@ -791,7 +801,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
cbz(header_reg, done);
// Atomic swap back the old header
cmpxchgptr(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
// Call the runtime routine for slow case.
str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
@ -1744,8 +1754,7 @@ void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register t
// Load the offset of the area within the MDO used for
// parameters. If it's negative we're not profiling any parameters
ldr(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
cmp(tmp1, 0u);
br(Assembler::LT, profile_continue);
tbnz(tmp1, 63, profile_continue); // i.e. sign bit set
// Compute a pointer to the area for parameters from the offset
// and move the pointer to the slot for the last
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -54,9 +54,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
int number_of_arguments,
bool check_exceptions);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true);
@ -67,6 +64,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void jump_to_entry(address entry);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// Interpreter-specific registers
void save_bcp() {
str(rbcp, Address(rfp, frame::interpreter_frame_bcp_offset * wordSize));
@ -123,6 +123,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
// load cpool->resolved_klass_at(index);
void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp);
void pop_ptr(Register r = r0);
void pop_i(Register r = r0);
void pop_l(Register r = r0);
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -369,7 +369,7 @@ class SlowSignatureHandler
}
public:
SlowSignatureHandler(methodHandle method, address from, intptr_t* to)
SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to)
: NativeSignatureIterator(method)
{
_from = from;
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -47,7 +47,7 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
public:
// Creation
SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
_num_int_args = (method->is_static() ? 1 : 0);
_num_fp_args = 0;
View File
@ -76,8 +76,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
SafepointSynchronize::safepoint_counter_addr(), offset);
Address safepoint_counter_addr(rcounter_addr, offset);
__ ldrw(rcounter, safepoint_counter_addr);
__ andw(rscratch1, rcounter, 1);
__ cbnzw(rscratch1, slow);
__ tbnz(rcounter, 0, slow);
__ eor(robj, c_rarg1, rcounter);
__ eor(robj, robj, rcounter); // obj, since
// robj ^ rcounter ^ rcounter == robj
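// The double eor is a no-op on the value of robj but makes robj
// data-dependent on the loaded safepoint counter, so the field load
// below cannot be reordered ahead of the counter read.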
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
}
}
#endif // ASSERT
Handle obj = HotSpotObjectConstantImpl::object(constant);
Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj());
MacroAssembler::patch_oop(pc, (address)obj());
int oop_index = _oop_recorder->find_index(value);
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -38,6 +38,7 @@
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/node.hpp"
#include "prims/jvm.h"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
@ -515,7 +516,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
andr(swap_reg, swap_reg, rscratch1);
orr(tmp_reg, swap_reg, rthread);
cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
@ -542,7 +543,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
Label here;
load_prototype_header(tmp_reg, obj_reg);
orr(tmp_reg, rthread, tmp_reg);
cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
// If the biasing toward our thread failed, then another thread
// succeeded in biasing it toward itself and we need to revoke that
// bias. The revocation will occur in the runtime in the slow case.
@ -569,7 +570,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
{
Label here, nope;
load_prototype_header(tmp_reg, obj_reg);
cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
bind(here);
// Fall through to the normal CAS-based lock, because no matter what
@ -2011,6 +2012,12 @@ void MacroAssembler::stop(const char* msg) {
hlt(0);
}
void MacroAssembler::unimplemented(const char* what) {
char* b = new char[1024];
jio_snprintf(b, 1024, "unimplemented: %s", what);
stop(b);
}
// If a constant does not fit in an immediate field, generate some
// number of MOV instructions and then perform the operation.
void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
@ -2141,6 +2148,12 @@ void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Reg
b(*fail);
}
void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
Label &succeed, Label *fail) {
assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
}
void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
Label &succeed, Label *fail) {
// oldv holds comparison value
@ -4816,6 +4829,62 @@ void MacroAssembler::string_compare(Register str1, Register str2,
BLOCK_COMMENT("} string_compare");
}
// This method checks whether the provided byte array contains a byte with the highest bit set.
void MacroAssembler::has_negatives(Register ary1, Register len, Register result) {
// Simple and most common case of aligned small array which is not at the
// end of memory page is placed here. All other cases are in stub.
Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
const uint64_t UPPER_BIT_MASK=0x8080808080808080;
assert_different_registers(ary1, len, result);
cmpw(len, 0);
br(LE, SET_RESULT);
cmpw(len, 4 * wordSize);
br(GE, STUB_LONG); // len >= 32, so go to the long stub
int shift = 64 - exact_log2(os::vm_page_size());
lsl(rscratch1, ary1, shift);
mov(rscratch2, (size_t)(4 * wordSize) << shift);
adds(rscratch2, rscratch1, rscratch2); // At end of page?
br(CS, STUB); // at the end of the page, go to stub
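// The lsl moves ary1's in-page offset into the top bits; adding the
// equally shifted read size (4 * wordSize) carries out of bit 63 exactly
// when offset + 32 >= page_size, i.e. when the wide loop might touch the
// next page, so CS conservatively sends such arrays to the stub.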
subs(len, len, wordSize);
br(LT, END);
BIND(LOOP);
ldr(rscratch1, Address(post(ary1, wordSize)));
tst(rscratch1, UPPER_BIT_MASK);
br(NE, SET_RESULT);
subs(len, len, wordSize);
br(GE, LOOP);
cmpw(len, -wordSize);
br(EQ, SET_RESULT);
BIND(END);
ldr(result, Address(ary1));
sub(len, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
lslv(result, result, len);
tst(result, UPPER_BIT_MASK);
b(SET_RESULT);
BIND(STUB);
RuntimeAddress has_neg = RuntimeAddress(StubRoutines::aarch64::has_negatives());
assert(has_neg.target() != NULL, "has_negatives stub has not been generated");
trampoline_call(has_neg);
b(DONE);
BIND(STUB_LONG);
RuntimeAddress has_neg_long = RuntimeAddress(
StubRoutines::aarch64::has_negatives_long());
assert(has_neg_long.target() != NULL, "has_negatives_long stub has not been generated");
trampoline_call(has_neg_long);
b(DONE);
BIND(SET_RESULT);
cset(result, NE); // set true or false
BIND(DONE);
}
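Semantically, the fast path and the stubs compute the following scalar predicate (a reference sketch only, not code from this commit; jbyte is HotSpot's signed 8-bit type):

// Reference semantics of MacroAssembler::has_negatives (sketch):
static bool has_negatives_ref(const jbyte* ary, int len) {
  for (int i = 0; i < len; i++) {
    if (ary[i] < 0) return true;  // the byte's highest bit is set
  }
  return false;
}

The inline path instead tests eight bytes at a time against UPPER_BIT_MASK, and the trailing ldr/lslv shifts the bytes beyond len out of the final word before applying the same test.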
// Compare Strings or char/byte arrays.
// is_string is true iff this is a string comparison.
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -77,12 +77,6 @@ class MacroAssembler: public Assembler {
bool check_exceptions // whether to check for pending exceptions after return
);
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
// Maximum size of class area in Metaspace when compressed
@ -97,6 +91,12 @@ class MacroAssembler: public Assembler {
> (1u << log2_intptr(CompressedClassSpaceSize))));
}
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// Biased locking support
// lock_reg and obj_reg must be loaded up with the appropriate values.
// swap_reg is killed.
@ -169,6 +169,7 @@ class MacroAssembler: public Assembler {
template<class T>
inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
// imm is limited to 12 bits.
inline void cmp(Register Rd, unsigned imm) { subs(zr, Rd, imm); }
inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
@ -941,7 +942,7 @@ public:
void untested() { stop("untested"); }
void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
void unimplemented(const char* what = "");
void should_not_reach_here() { stop("should not reach here"); }
@ -949,8 +950,8 @@ public:
void bang_stack_with_offset(int offset) {
// stack grows down, caller passes positive offset
assert(offset > 0, "must bang with negative offset");
mov(rscratch2, -offset);
str(zr, Address(sp, rscratch2));
sub(rscratch2, sp, offset);
str(zr, Address(rscratch2));
}
// Writes to stack successive pages until offset reached to check for
@ -974,6 +975,8 @@ public:
// Various forms of CAS
void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
Label &succeed, Label *fail);
void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
Label &succeed, Label *fail);
@ -1207,6 +1210,8 @@ public:
Register tmp1,
FloatRegister vtmp, FloatRegister vtmpZ, int ae);
void has_negatives(Register ary1, Register len, Register result);
void arrays_equals(Register a1, Register a2,
Register result, Register cnt1,
int elem_size, bool is_string);
View File
@ -1,126 +0,0 @@
/*
* Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtbl_list_size' original Klass objects.
#define __ masm->
extern "C" {
void aarch64_prolog(void);
}
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
#ifdef BUILTIN_SIM
// Write a dummy word to the writable shared metaspace.
// MetaspaceShared::initialize_shared_spaces will fill it with the
// address of aarch64_prolog().
address *prolog_ptr = (address*)*md_top;
*(intptr_t *)(*md_top) = (intptr_t)0;
(*md_top) += sizeof(intptr_t);
#endif
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
Label common_code;
for (int i = 0; i < vtbl_list_size; ++i) {
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
// We're called directly from C code.
#ifdef BUILTIN_SIM
__ c_stub_prolog(8, 0, MacroAssembler::ret_type_integral, prolog_ptr);
#endif
// Load rscratch1 with a value indicating vtable/offset pair.
// -- bits[ 7..0] (8 bits) which virtual method in table?
// -- bits[12..8] (5 bits) which virtual method table?
__ mov(rscratch1, (i << 8) + j);
__ b(common_code);
}
}
__ bind(common_code);
Register tmp0 = r10, tmp1 = r11; // AAPCS64 temporary registers
__ enter();
__ lsr(tmp0, rscratch1, 8); // isolate vtable identifier.
__ mov(tmp1, (address)vtbl_list); // address of list of vtable pointers.
__ ldr(tmp1, Address(tmp1, tmp0, Address::lsl(LogBytesPerWord))); // get correct vtable pointer.
__ str(tmp1, Address(c_rarg0)); // update vtable pointer in obj.
__ add(rscratch1, tmp1, rscratch1, ext::uxtb, LogBytesPerWord); // address of real method pointer.
__ ldr(rscratch1, Address(rscratch1)); // get real method pointer.
__ blrt(rscratch1, 8, 0, 1); // jump to the real method.
__ leave();
__ ret(lr);
*mc_top = (char*)__ pc();
}
#ifdef BUILTIN_SIM
void MetaspaceShared::relocate_vtbl_list(char **buffer) {
void **sim_entry = (void**)*buffer;
*sim_entry = (void*)aarch64_prolog;
*buffer += sizeof(intptr_t);
}
#endif
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -63,7 +63,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Klass* klass = SystemDictionary::well_known_klass(klass_id);
Register temp = rscratch2;
Register temp2 = rscratch1; // used by MacroAssembler::cmpptr
Label L_ok, L_bad;
@ -137,8 +137,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
__ verify_oop(method_temp);
// the following assumes that a Method* is normally compressed in the vmtarget field:
__ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
__ verify_oop(method_temp);
__ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
@ -282,7 +283,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
Address vmtarget_method( rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
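// Note the extra indirection introduced here: MemberName.method holds a
// ResolvedMethodName oop, and the Method* is loaded from its vmtarget
// field, hence the load_heap_oop/ldr pairs in the dispatch cases below.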
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
@ -335,14 +337,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
__ ldr(rmethod, member_vmtarget);
__ load_heap_oop(rmethod, member_vmtarget);
__ ldr(rmethod, vmtarget_method);
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
__ ldr(rmethod, member_vmtarget);
__ load_heap_oop(rmethod, member_vmtarget);
__ ldr(rmethod, vmtarget_method);
break;
case vmIntrinsics::_linkToVirtual:
View File
@ -36,6 +36,7 @@
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@ -123,7 +124,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
int frame_size_in_bytes = align_up(additional_frame_words*wordSize +
reg_save_size*BytesPerInt, 16);
// OopMap frame size is in compiler stack slots (jint's) not bytes or words
int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
@ -190,7 +191,7 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
__ ldr(r0, Address(sp, r0_offset_in_bytes()));
// Pop all of the register save area off the stack
__ add(sp, sp, round_to(return_offset_in_bytes(), 16));
__ add(sp, sp, align_up(return_offset_in_bytes(), 16));
}
// Is vector's size (in bytes) bigger than a size saved by default?
@ -317,7 +318,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
}
return round_to(stk_args, 2);
return align_up(stk_args, 2);
}
// Patch the callers callsite with entry to compiled code if it exists.
@ -375,7 +376,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ mov(r13, sp);
// stack is aligned, keep it that way
extraspace = round_to(extraspace, 2*wordSize);
extraspace = align_up(extraspace, 2*wordSize);
if (extraspace)
__ sub(sp, sp, extraspace);
@ -547,7 +548,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
}
// Cut-out for having no stack args.
int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
if (comp_args_on_stack) {
__ sub(rscratch1, sp, comp_words_on_stack * wordSize);
__ andr(sp, rscratch1, -16);
@ -1206,7 +1207,7 @@ static void rt_call(MacroAssembler* masm, address dest, int gpargs, int fpargs,
}
static void verify_oop_args(MacroAssembler* masm,
methodHandle method,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = r19; // not part of any compiled calling seq
@ -1228,7 +1229,7 @@ static void verify_oop_args(MacroAssembler* masm,
}
static void gen_special_dispatch(MacroAssembler* masm,
methodHandle method,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
@ -1486,7 +1487,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
total_save_slots = double_slots * 2 + single_slots;
// align the save area
if (double_slots != 0) {
stack_slots = round_to(stack_slots, 2);
stack_slots = align_up(stack_slots, 2);
}
}
@ -1543,7 +1544,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now compute actual number of stack words we need rounding to make
// stack properly aligned.
stack_slots = round_to(stack_slots, StackAlignmentInSlots);
stack_slots = align_up(stack_slots, StackAlignmentInSlots);
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
@ -1842,7 +1843,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
}
// Load (object->mark() | 1) into swap_reg %r0
__ ldr(rscratch1, Address(obj_reg, 0));
__ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ orr(swap_reg, rscratch1, 1);
// Save (object->mark() | 1) into BasicLock's displaced header
@ -1850,7 +1851,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// src -> dest iff dest == r0 else r0 <- dest
{ Label here;
__ cmpxchgptr(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
__ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
}
// Hmm should this move to the slow path code area???
@ -2029,7 +2030,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Atomic swap old header if oop still contains the stack lock
Label succeed;
__ cmpxchgptr(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
__ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
__ bind(succeed);
// slow path re-enters here
@ -2293,7 +2294,7 @@ int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals)
return 0; // No adjustment for negative locals
int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
// diff is counted in stack words
return round_to(diff, 2);
return align_up(diff, 2);
}
View File
@ -39,6 +39,7 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
@ -619,19 +620,21 @@ class StubGenerator: public StubCodeGenerator {
// Generate code for an array write pre barrier
//
// addr - starting address
// count - element count
// tmp - scratch register
// addr - starting address
// count - element count
// tmp - scratch register
// saved_regs - registers to be saved before calling static_write_ref_array_pre
//
// Destroy no registers except rscratch1 and rscratch2
// Callers must specify which registers to preserve in saved_regs.
// Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
//
void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized, RegSet saved_regs) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1SATBCTLogging:
// With G1, don't generate the call if we statically know that the target in uninitialized
if (!dest_uninitialized) {
__ push_call_clobbered_registers();
__ push(saved_regs, sp);
if (count == c_rarg0) {
if (addr == c_rarg1) {
// exactly backwards!!
@ -647,7 +650,7 @@ class StubGenerator: public StubCodeGenerator {
__ mov(c_rarg1, count);
}
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
__ pop_call_clobbered_registers();
__ pop(saved_regs, sp);
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
@ -664,20 +667,23 @@ class StubGenerator: public StubCodeGenerator {
// Generate code for an array write post barrier
//
// Input:
// start - register containing starting address of destination array
// end - register containing ending address of destination array
// scratch - scratch register
// start - register containing starting address of destination array
// end - register containing ending address of destination array
// scratch - scratch register
// saved_regs - registers to be saved before calling static_write_ref_array_post
//
// The input registers are overwritten.
// The ending address is inclusive.
void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
// Callers must specify which registers to preserve in saved_regs.
// Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch, RegSet saved_regs) {
assert_different_registers(start, end, scratch);
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1SATBCTLogging:
{
__ push_call_clobbered_registers();
__ push(saved_regs, sp);
// must compute element count unless barrier set interface is changed (other platforms supply count)
assert_different_registers(start, end, scratch);
__ lea(scratch, Address(end, BytesPerHeapOop));
@ -686,7 +692,7 @@ class StubGenerator: public StubCodeGenerator {
__ mov(c_rarg0, start);
__ mov(c_rarg1, scratch);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
__ pop_call_clobbered_registers();
__ pop(saved_regs, sp);
}
break;
case BarrierSet::CardTableForRS:
@ -758,7 +764,7 @@ class StubGenerator: public StubCodeGenerator {
// alignment.
Label small;
int low_limit = MAX2(zva_length * 2, (int)BlockZeroingLowLimit);
__ cmp(cnt, low_limit >> 3);
__ subs(rscratch1, cnt, low_limit >> 3);
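// subs through rscratch1 is used instead of cmp presumably because cmp
// only accepts a 12-bit unsigned immediate (see the "imm is limited to
// 12 bits" comment added to MacroAssembler in this commit) and
// low_limit >> 3 can exceed that range for large BlockZeroingLowLimit
// or zva_length values.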
__ br(Assembler::LT, small);
__ zero_dcache_blocks(base, cnt);
__ bind(small);
@ -821,7 +827,7 @@ class StubGenerator: public StubCodeGenerator {
Label again, drain;
const char *stub_name;
if (direction == copy_forwards)
stub_name = "foward_copy_longs";
stub_name = "forward_copy_longs";
else
stub_name = "backward_copy_longs";
StubCodeMark mark(this, "StubRoutines", stub_name);
@ -1438,6 +1444,7 @@ class StubGenerator: public StubCodeGenerator {
address generate_disjoint_copy(size_t size, bool aligned, bool is_oop, address *entry,
const char *name, bool dest_uninitialized = false) {
Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
RegSet saved_reg = RegSet::of(s, d, count);
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
@ -1450,9 +1457,9 @@ class StubGenerator: public StubCodeGenerator {
}
if (is_oop) {
gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_reg);
// save regs before copy_memory
__ push(RegSet::of(d, count), sp);
}
copy_memory(aligned, s, d, count, rscratch1, size);
if (is_oop) {
@ -1461,7 +1468,7 @@ class StubGenerator: public StubCodeGenerator {
verify_oop_array(size, d, count, r16);
__ sub(count, count, 1); // make an inclusive end pointer
__ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
}
__ leave();
__ mov(r0, zr); // return 0
@ -1494,7 +1501,7 @@ class StubGenerator: public StubCodeGenerator {
address *entry, const char *name,
bool dest_uninitialized = false) {
Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
RegSet saved_regs = RegSet::of(s, d, count);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
__ enter();
@ -1511,9 +1518,9 @@ class StubGenerator: public StubCodeGenerator {
__ br(Assembler::HS, nooverlap_target);
if (is_oop) {
gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_regs);
// save regs before copy_memory
__ push(RegSet::of(d, count), sp);
}
copy_memory(aligned, s, d, count, rscratch1, -size);
if (is_oop) {
@ -1522,7 +1529,7 @@ class StubGenerator: public StubCodeGenerator {
verify_oop_array(size, d, count, r16);
__ sub(count, count, 1); // make an inclusive end pointer
__ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
}
__ leave();
__ mov(r0, zr); // return 0
@ -1804,6 +1811,9 @@ class StubGenerator: public StubCodeGenerator {
const Register ckoff = c_rarg3; // super_check_offset
const Register ckval = c_rarg4; // super_klass
RegSet wb_pre_saved_regs = RegSet::range(c_rarg0, c_rarg4);
RegSet wb_post_saved_regs = RegSet::of(count);
// Registers used as temps (r18, r19, r20 are save-on-entry)
const Register count_save = r21; // orig element count
const Register start_to = r20; // destination array start address
@ -1861,7 +1871,7 @@ class StubGenerator: public StubCodeGenerator {
}
#endif //ASSERT
gen_write_ref_array_pre_barrier(to, count, dest_uninitialized, wb_pre_saved_regs);
// save the original count
__ mov(count_save, count);
@ -1905,7 +1915,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_do_card_marks);
__ add(to, to, -heapOopSize); // make an inclusive end pointer
gen_write_ref_array_post_barrier(start_to, to, rscratch1, wb_post_saved_regs);
__ bind(L_done_pop);
__ pop(RegSet::of(r18, r19, r20, r21), sp);
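A RegSet is in essence a bitmask over the general registers, which is what lets each caller name exactly the live values a barrier call must preserve. A simplified model of the push/pop pairing used above (illustrative only, not HotSpot's actual RegSet):

#include <cstdint>

// One bit per general-purpose register r0..r30.
struct RegSetModel {
  uint32_t bits = 0;
  static RegSetModel of(int r) { RegSetModel s; s.bits = 1u << r; return s; }
  static RegSetModel range(int lo, int hi) {
    RegSetModel s;
    for (int r = lo; r <= hi; r++) s.bits |= 1u << r;
    return s;
  }
};

// checkcast_copy keeps c_rarg0..c_rarg4 live across the pre barrier, so it
// passes RegSet::range(c_rarg0, c_rarg4); the plain copy stubs pass RegSet()
// to the post barrier because they no longer need s, d or count afterwards.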
@ -3660,6 +3670,167 @@ class StubGenerator: public StubCodeGenerator {
__ eor(result, __ T16B, lo, t0);
}
address generate_has_negatives(address &has_negatives_long) {
StubCodeMark mark(this, "StubRoutines", "has_negatives");
const int large_loop_size = 64;
const uint64_t UPPER_BIT_MASK=0x8080808080808080;
int dcache_line = VM_Version::dcache_line_size();
Register ary1 = r1, len = r2, result = r0;
__ align(CodeEntryAlignment);
address entry = __ pc();
__ enter();
Label RET_TRUE, RET_TRUE_NO_POP, RET_FALSE, ALIGNED, LOOP16, CHECK_16, DONE,
LARGE_LOOP, POST_LOOP16, LEN_OVER_15, LEN_OVER_8, POST_LOOP16_LOAD_TAIL;
__ cmp(len, 15);
__ br(Assembler::GT, LEN_OVER_15);
// The only case when execution falls into this code is when the pointer is
// near the end of a memory page and we have to avoid reading the next page
__ add(ary1, ary1, len);
__ subs(len, len, 8);
__ br(Assembler::GT, LEN_OVER_8);
__ ldr(rscratch2, Address(ary1, -8));
__ sub(rscratch1, zr, len, __ LSL, 3); // LSL 3 is to get bits from bytes.
__ lsrv(rscratch2, rscratch2, rscratch1);
__ tst(rscratch2, UPPER_BIT_MASK);
__ cset(result, Assembler::NE);
__ leave();
__ ret(lr);
__ bind(LEN_OVER_8);
__ ldp(rscratch1, rscratch2, Address(ary1, -16));
__ sub(len, len, 8); // no data dep., then sub can be executed while loading
__ tst(rscratch2, UPPER_BIT_MASK);
__ br(Assembler::NE, RET_TRUE_NO_POP);
__ sub(rscratch2, zr, len, __ LSL, 3); // LSL 3 is to get bits from bytes
__ lsrv(rscratch1, rscratch1, rscratch2);
__ tst(rscratch1, UPPER_BIT_MASK);
__ cset(result, Assembler::NE);
__ leave();
__ ret(lr);
Register tmp1 = r3, tmp2 = r4, tmp3 = r5, tmp4 = r6, tmp5 = r7, tmp6 = r10;
const RegSet spilled_regs = RegSet::range(tmp1, tmp5) + tmp6;
has_negatives_long = __ pc(); // 2nd entry point
__ enter();
__ bind(LEN_OVER_15);
__ push(spilled_regs, sp);
__ andr(rscratch2, ary1, 15); // check pointer for 16-byte alignment
__ cbz(rscratch2, ALIGNED);
__ ldp(tmp6, tmp1, Address(ary1));
__ mov(tmp5, 16);
__ sub(rscratch1, tmp5, rscratch2); // amount of bytes until aligned address
__ add(ary1, ary1, rscratch1);
__ sub(len, len, rscratch1);
__ orr(tmp6, tmp6, tmp1);
__ tst(tmp6, UPPER_BIT_MASK);
__ br(Assembler::NE, RET_TRUE);
__ bind(ALIGNED);
__ cmp(len, large_loop_size);
__ br(Assembler::LT, CHECK_16);
// Perform a 16-byte load with early return in the pre-loop to handle the
// situation where an initially aligned large array has negative values in its
// starting bytes; otherwise LARGE_LOOP would do 4 reads instead of 1 in the
// worst case, which is slower. Cases with negative bytes further ahead are not
// affected much; in fact, they get faster due to the early loads and the fewer
// instructions and branches in LARGE_LOOP.
__ ldp(tmp6, tmp1, Address(__ post(ary1, 16)));
__ sub(len, len, 16);
__ orr(tmp6, tmp6, tmp1);
__ tst(tmp6, UPPER_BIT_MASK);
__ br(Assembler::NE, RET_TRUE);
__ cmp(len, large_loop_size);
__ br(Assembler::LT, CHECK_16);
if (SoftwarePrefetchHintDistance >= 0
&& SoftwarePrefetchHintDistance >= dcache_line) {
// initial prefetch
__ prfm(Address(ary1, SoftwarePrefetchHintDistance - dcache_line));
}
__ bind(LARGE_LOOP);
if (SoftwarePrefetchHintDistance >= 0) {
__ prfm(Address(ary1, SoftwarePrefetchHintDistance));
}
// Issue the load instructions first, since that can save a few CPU/MEM cycles.
// Also, instead of 4 triples of "orr(...); andr(...); cbnz(...)" (one per
// ldp), it is better to generate 7 orr(...) + 1 andr(...) + 1 cbnz(...), which
// saves instructions and has fewer branches; the trade-off is that early
// return is disabled, so all 64 bytes are loaded and checked every time.
__ ldp(tmp2, tmp3, Address(ary1));
__ ldp(tmp4, tmp5, Address(ary1, 16));
__ ldp(rscratch1, rscratch2, Address(ary1, 32));
__ ldp(tmp6, tmp1, Address(ary1, 48));
__ add(ary1, ary1, large_loop_size);
__ sub(len, len, large_loop_size);
__ orr(tmp2, tmp2, tmp3);
__ orr(tmp4, tmp4, tmp5);
__ orr(rscratch1, rscratch1, rscratch2);
__ orr(tmp6, tmp6, tmp1);
__ orr(tmp2, tmp2, tmp4);
__ orr(rscratch1, rscratch1, tmp6);
__ orr(tmp2, tmp2, rscratch1);
__ tst(tmp2, UPPER_BIT_MASK);
__ br(Assembler::NE, RET_TRUE);
__ cmp(len, large_loop_size);
__ br(Assembler::GE, LARGE_LOOP);
__ bind(CHECK_16); // small 16-byte load pre-loop
__ cmp(len, 16);
__ br(Assembler::LT, POST_LOOP16);
__ bind(LOOP16); // small 16-byte load loop
__ ldp(tmp2, tmp3, Address(__ post(ary1, 16)));
__ sub(len, len, 16);
__ orr(tmp2, tmp2, tmp3);
__ tst(tmp2, UPPER_BIT_MASK);
__ br(Assembler::NE, RET_TRUE);
__ cmp(len, 16);
__ br(Assembler::GE, LOOP16); // 16-byte load loop end
__ bind(POST_LOOP16); // 16-byte aligned, so we can read unconditionally
__ cmp(len, 8);
__ br(Assembler::LE, POST_LOOP16_LOAD_TAIL);
__ ldr(tmp3, Address(__ post(ary1, 8)));
__ sub(len, len, 8);
__ tst(tmp3, UPPER_BIT_MASK);
__ br(Assembler::NE, RET_TRUE);
__ bind(POST_LOOP16_LOAD_TAIL);
__ cbz(len, RET_FALSE); // Can't shift left by 64 when len==0
__ ldr(tmp1, Address(ary1));
__ mov(tmp2, 64);
__ sub(tmp4, tmp2, len, __ LSL, 3);
__ lslv(tmp1, tmp1, tmp4);
__ tst(tmp1, UPPER_BIT_MASK);
__ br(Assembler::NE, RET_TRUE);
// Fallthrough
__ bind(RET_FALSE);
__ pop(spilled_regs, sp);
__ leave();
__ mov(result, zr);
__ ret(lr);
__ bind(RET_TRUE);
__ pop(spilled_regs, sp);
__ bind(RET_TRUE_NO_POP);
__ leave();
__ mov(result, 1);
__ ret(lr);
__ bind(DONE);
__ pop(spilled_regs, sp);
__ leave();
__ ret(lr);
return entry;
}
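For reference, the stub's core idea in plain C++: OR 8-byte words together and test the accumulated sign bits against UPPER_BIT_MASK, and for short arrays read the 8 bytes that end at ary + len, shifting out the leading junk so the next page is never touched. A little-endian sketch, not the VM's code:

#include <cstdint>
#include <cstring>

static const uint64_t UPPER_BIT_MASK = 0x8080808080808080ULL;

// Tail trick for len in 1..8: the 8-byte window ends exactly at ary + len, so
// it may begin before ary but never extends past the data. (The stub proves
// the window is readable by page arithmetic; in portable C++ this read is
// only a model.) Little-endian: lower addresses sit in lower bits, so the
// right shift discards the bytes that precede ary.
static bool short_has_negatives(const uint8_t* ary, size_t len) {
  uint64_t w;
  std::memcpy(&w, ary + len - 8, 8); // the stub uses a raw ldr here
  w >>= (8 - len) * 8;
  return (w & UPPER_BIT_MASK) != 0;
}

static bool has_negatives_ref(const uint8_t* ary, size_t len) {
  size_t i = 0;
  for (; i + 8 <= len; i += 8) {     // the stub unrolls this 64 bytes at a time
    uint64_t w;
    std::memcpy(&w, ary + i, 8);
    if (w & UPPER_BIT_MASK) return true;
  }
  return i < len && short_has_negatives(ary + i, len - i);
}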
/**
* Arguments:
*
@ -4676,6 +4847,7 @@ class StubGenerator: public StubCodeGenerator {
// }
};
// Initialization
void generate_initial() {
// Generates initial stubs and initializes the entry points
@ -4734,6 +4906,9 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
// has negatives stub for large arrays.
StubRoutines::aarch64::_has_negatives = generate_has_negatives(StubRoutines::aarch64::_has_negatives_long);
if (UseMultiplyToLenIntrinsic) {
StubRoutines::_multiplyToLen = generate_multiplyToLen();
}


@ -44,6 +44,8 @@ address StubRoutines::aarch64::_float_sign_flip = NULL;
address StubRoutines::aarch64::_double_sign_mask = NULL;
address StubRoutines::aarch64::_double_sign_flip = NULL;
address StubRoutines::aarch64::_zero_blocks = NULL;
address StubRoutines::aarch64::_has_negatives = NULL;
address StubRoutines::aarch64::_has_negatives_long = NULL;
bool StubRoutines::aarch64::_completed = false;
/**


@ -62,6 +62,9 @@ class aarch64 {
static address _double_sign_flip;
static address _zero_blocks;
static address _has_negatives;
static address _has_negatives_long;
static bool _completed;
public:
@ -120,6 +123,14 @@ class aarch64 {
return _zero_blocks;
}
static address has_negatives() {
return _has_negatives;
}
static address has_negatives_long() {
return _has_negatives_long;
}
static bool complete() {
return _completed;
}


@ -402,14 +402,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
return entry;
}
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
// NULL last_sp until next java call
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
__ dispatch_next(state);
return entry;
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();
@ -444,6 +436,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ notify(Assembler::method_reentry);
}
#endif
__ check_and_handle_popframe(rthread);
__ check_and_handle_earlyret(rthread);
__ get_dispatch();
__ dispatch_next(state, step);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -246,8 +246,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
assert(load_bc_into_bc_reg, "we use bc_reg as temp");
__ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
__ movw(bc_reg, bc);
__ cbzw(temp_reg, L_patch_done); // don't patch
}
break;
default:
@ -3418,8 +3417,7 @@ void TemplateTable::_new() {
__ br(Assembler::NE, slow_case);
// get InstanceKlass
__ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
@ -3572,8 +3570,7 @@ void TemplateTable::checkcast()
// Get superklass in r0 and subklass in r3
__ bind(quicked);
__ mov(r3, r0); // Save object in r3; r0 needed for subtype check
__ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
__ bind(resolved);
__ load_klass(r19, r3);
@ -3629,8 +3626,7 @@ void TemplateTable::instanceof() {
// Get superklass in r0 and subklass in r3
__ bind(quicked);
__ load_klass(r3, r0);
__ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);
__ bind(resolved);


@ -137,6 +137,8 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 3*dcache_line);
if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes))
FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 3*dcache_line);
if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance))
FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, 3*dcache_line);
if (PrefetchCopyIntervalInBytes != -1 &&
((PrefetchCopyIntervalInBytes & 7) || (PrefetchCopyIntervalInBytes >= 32768))) {
@ -146,6 +148,12 @@ void VM_Version::get_processor_features() {
PrefetchCopyIntervalInBytes = 32760;
}
if (SoftwarePrefetchHintDistance != -1 &&
(SoftwarePrefetchHintDistance & 7)) {
warning("SoftwarePrefetchHintDistance must be -1, or a multiple of 8");
SoftwarePrefetchHintDistance &= ~7;
}
unsigned long auxv = getauxval(AT_HWCAP);
char buf[512];
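A note on the SoftwarePrefetchHintDistance handling added above: x & 7 tests for 8-byte alignment and x &= ~7 rounds down to the nearest multiple of 8. A tiny standalone illustration (values are examples):

#include <cassert>

int main() {
  int d = 133;              // a user-supplied, misaligned distance
  if (d != -1 && (d & 7)) { // not -1 and not a multiple of 8
    d &= ~7;                // align down
  }
  assert(d == 128);
  return 0;
}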


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
int AbstractInterpreter::BasicType_as_index(BasicType type) {
@ -68,23 +69,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
switch (method_kind(m)) {
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt :
return false;
default:
return true;
}
}
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
const int stub_code = AARCH64_ONLY(24) NOT_AARCH64(12); // see generate_call_stub
@ -125,7 +109,7 @@ int AbstractInterpreter::size_activation(int max_stack,
tempcount*Interpreter::stackElementWords + extra_args;
#ifdef AARCH64
size = align_up(size, StackAlignmentInBytes/BytesPerWord);
#endif // AARCH64
return size;
@ -206,7 +190,7 @@ void AbstractInterpreter::layout_activation(Method* method,
}
if (caller->is_interpreted_frame()) {
intptr_t* locals_base = (locals - method->max_locals()*Interpreter::stackElementWords + 1);
locals_base = align_down(locals_base, StackAlignmentInBytes);
assert(interpreter_frame->sender_sp() <= locals_base, "interpreter-to-interpreter frame chaining");
} else if (caller->is_compiled_frame()) {
@ -234,10 +218,17 @@ void AbstractInterpreter::layout_activation(Method* method,
#ifdef AARCH64
interpreter_frame->interpreter_frame_set_stack_top(stack_top);
// We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
// none of which are in use at the same time, so we just need to make sure there is enough room
// for the biggest user:
// -reserved slot for exception handler
// -reserved slots for JSR292. Method::extra_stack_entries() is the size.
// -3 reserved slots so get_method_counters() can save some registers before call_VM().
int max_stack = method->constMethod()->max_stack() + MAX2(3, Method::extra_stack_entries());
intptr_t* extended_sp = (intptr_t*) monbot -
(max_stack * Interpreter::stackElementWords) -
popframe_extra_args;
extended_sp = align_down(extended_sp, StackAlignmentInBytes);
interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);
#else
interpreter_frame->interpreter_frame_set_last_sp(stack_top);
@ -249,7 +240,7 @@ void AbstractInterpreter::layout_activation(Method* method,
#ifdef AARCH64
if (caller->is_interpreted_frame()) {
intptr_t* sender_sp = align_down(caller->interpreter_frame_tos_address(), StackAlignmentInBytes);
interpreter_frame->set_interpreter_frame_sender_sp(sender_sp);
} else {
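The round_to/round_down to align_up/align_down switch in this file (and throughout the change) is a rename onto the utilities/align.hpp helpers; the underlying arithmetic is the usual power-of-two masking. A sketch of what the helpers compute, assuming power-of-two alignments (not the real header):

#include <cassert>
#include <cstdint>

static inline uintptr_t align_up_(uintptr_t x, uintptr_t a)   { return (x + a - 1) & ~(a - 1); }
static inline uintptr_t align_down_(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }

int main() {
  assert(align_up_(13, 8) == 16);
  assert(align_down_(13, 8) == 8);
  // The pointer overloads in the real header round the address the same way.
  return 0;
}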


@ -1,5 +1,5 @@
//
// Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -1122,7 +1122,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg corresponding to specified size in bytes
const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize >= size, "");
switch(size) {
case 8: return Op_VecD;
@ -1132,7 +1132,7 @@ const int Matcher::vector_ideal_reg(int size) {
return 0;
}
const uint Matcher::vector_shift_count_ideal_reg(int size) {
return vector_ideal_reg(size);
}
@ -1881,7 +1881,7 @@ frame %{
// Ret Addr is on stack in slot 0 if no locks or verification or alignment.
// Otherwise, it is above the locks and verification slot and alignment word
return_addr(STACK - 1*VMRegImpl::slots_per_word +
align_up((Compile::current()->in_preserve_stack_slots() +
Compile::current()->fixed_slots()),
stack_alignment_in_slots()));
@ -11968,9 +11968,13 @@ instruct ShouldNotReachHere( )
size(4);
// Use the following format syntax
format %{ "breakpoint ; ShouldNotReachHere" %}
format %{ "ShouldNotReachHere" %}
ins_encode %{
#ifdef AARCH64
__ dpcs1(0xdead);
#else
__ udf(0xdead);
#endif
%}
ins_pipe(tail_call);
%}


@ -578,6 +578,11 @@ class Assembler : public AbstractAssembler {
F(bl, 0xb)
#undef F
void udf(int imm_16) {
assert((imm_16 >> 16) == 0, "encoding constraint");
emit_int32(0xe7f000f0 | (imm_16 & 0xfff0) << 8 | (imm_16 & 0xf));
}
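A worked example of the udf encoding above: the high 12 bits of imm_16 land in instruction bits [23:12] and the low nibble in bits [3:0], wrapped around ARM's permanently-undefined pattern. Standalone check:

#include <cassert>
#include <cstdint>

static uint32_t encode_udf(int imm_16) { // mirrors the member function above
  assert((imm_16 >> 16) == 0);
  return 0xe7f000f0 | (imm_16 & 0xfff0) << 8 | (imm_16 & 0xf);
}

int main() {
  assert(encode_udf(0xdead) == 0xe7fea0fd); // the word arm.ad now emits
  return 0;
}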
// ARMv7 instructions
#define F(mnemonic, wt) \


@ -1083,6 +1083,7 @@ class Assembler : public AbstractAssembler {
F(brk, 0b001, 0b000, 0b00)
F(hlt, 0b010, 0b000, 0b00)
F(dpcs1, 0b101, 0b000, 0b01)
#undef F
enum SystemRegister { // o0<1> op1<3> CRn<4> CRm<4> op2<3>

View File

@ -35,12 +35,6 @@
class Bytes: AllStatic {
public:
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine.
static inline bool is_Java_byte_ordering_different() {
return VM_LITTLE_ENDIAN != 0;
}
static inline u2 get_Java_u2(address p) {
return (u2(p[0]) << 8) | u2(p[1]);
}
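get_Java_u2 survives the cleanup above because it still does real work: class-file data is big-endian on every host, so the bytes are assembled explicitly instead of through a native 16-bit load. Equivalent standalone code:

#include <cassert>
#include <cstdint>

static inline uint16_t get_java_u2(const uint8_t* p) {
  return static_cast<uint16_t>((p[0] << 8) | p[1]); // big-endian, any host
}

int main() {
  const uint8_t buf[] = { 0x12, 0x34 };
  assert(get_java_u2(buf) == 0x1234);
  return 0;
}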


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,10 +22,4 @@
*
*/
#include "precompiled.hpp"
#include "c1/c1_FpuStackSim.hpp"
#include "c1/c1_FrameMap.hpp"
#include "utilities/array.hpp"
#include "utilities/ostream.hpp"
// Nothing needed here


@ -37,6 +37,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
@ -250,7 +251,7 @@ static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers
__ sub(SP, SP, (reg_save_size - 2) * wordSize);
for (int i = 0; i < align_down((int)number_of_saved_gprs, 2); i += 2) {
__ stp(as_Register(i), as_Register(i+1), Address(SP, (R0_offset + i) * wordSize));
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -621,6 +621,8 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
frame::frame(void* sp, void* fp, void* pc) {
init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
void frame::pd_ps() {}
#endif
intptr_t *frame::initial_deoptimization_info() {


@ -298,7 +298,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register cache = result;
// load pointer for resolved_references[] objArray
ldr(cache, Address(result, ConstantPool::cache_offset_in_bytes()));
ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
// JNIHandles::resolve(result)
ldr(cache, Address(cache, 0));
// Add in the index
@ -308,6 +309,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
load_heap_oop(result, Address(cache, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
Register Rcpool, Register Rindex, Register Rklass) {
add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
ldrh(Rtemp, Address(Rtemp, sizeof(ConstantPool))); // Rtemp = resolved_klass_index
ldr(Rklass, Address(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes())); // Rklass = cpool->_resolved_klasses
add(Rklass, Rklass, AsmOperand(Rtemp, lsl, LogBytesPerWord));
ldr(Rklass, Address(Rklass, Array<Klass*>::base_offset_in_bytes()));
}
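The five instructions above chase the new indirection: a constant-pool slot no longer holds the Klass* directly but a 16-bit index into the pool's _resolved_klasses array. A toy model of the lookup with plain data (illustrative types, not the VM's):

#include <cstdint>

struct PoolModel {
  uint16_t slot_index[32]; // stands in for the cp slots after the header
  void*    resolved[32];   // stands in for _resolved_klasses
};

static void* load_resolved_klass(const PoolModel* cp, int index) {
  uint16_t rki = cp->slot_index[index]; // ldrh: fetch resolved_klass_index
  return cp->resolved[rki];             // base + rki * wordSize, then load
}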
// Generate a subtype check: branch to not_subtype if sub_klass is
// not a subtype of super_klass.
// Profiling code for the subtype check failure (profile_typecheck_failed)
@ -2016,75 +2026,42 @@ void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
void InterpreterMacroAssembler::get_method_counters(Register method,
Register Rcounters,
Label& skip) {
Label& skip,
bool saveRegs,
Register reg1,
Register reg2,
Register reg3) {
const Address method_counters(method, Method::method_counters_offset());
Label has_counters;
ldr(Rcounters, method_counters);
cbnz(Rcounters, has_counters);
if (saveRegs) {
// Save and restore in use caller-saved registers since they will be trashed by call_VM
assert(reg1 != noreg, "must specify reg1");
assert(reg2 != noreg, "must specify reg2");
#ifdef AARCH64
assert(reg3 != noreg, "must specify reg3");
stp(reg1, reg2, Address(Rstack_top, -2*wordSize, pre_indexed));
stp(reg3, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
#else
assert(reg3 == noreg, "must not specify reg3");
push(RegisterSet(reg1) | RegisterSet(reg2));
#endif
}
mov(R1, method);
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);
if (saveRegs) {
#ifdef AARCH64
ldp(reg3, ZR, Address(Rstack_top, 2*wordSize, post_indexed));
ldp(reg1, reg2, Address(Rstack_top, 2*wordSize, post_indexed));
#else
pop(RegisterSet(reg1) | RegisterSet(reg2));
#endif
}
ldr(Rcounters, method_counters);
cbz(Rcounters, skip); // No MethodCounters created, OutOfMemory
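About the save/restore shapes above: a pre-indexed stp decrements the base register before storing and a post-indexed ldp increments it after loading, which together form a push/pop pair on the interpreter stack. A pseudo-C model of the two addressing modes (sketch):

#include <cstdint>

// stp reg1, reg2, [Rstack_top, -16]!  (pre-indexed store pair)
static void push_pair(uint64_t*& top, uint64_t a, uint64_t b) {
  top -= 2;     // base is updated first
  top[0] = a;
  top[1] = b;
}

// ldp reg1, reg2, [Rstack_top], 16    (post-indexed load pair)
static void pop_pair(uint64_t*& top, uint64_t& a, uint64_t& b) {
  a = top[0];
  b = top[1];
  top += 2;     // base is updated afterwards
}
// Pushes go (reg1, reg2) then (reg3, ZR); the code above pops in the reverse
// order, (reg3, ZR) first, so each ldp exactly mirrors its stp.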


@ -53,9 +53,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Template interpreter specific version of call_VM_helper
virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
virtual void check_and_handle_popframe();
virtual void check_and_handle_earlyret();
// base routine for all dispatches
typedef enum { DispatchDefault, DispatchNormal } DispatchTableMode;
void dispatch_base(TosState state, DispatchTableMode table_mode, bool verifyoop = true);
@ -63,6 +60,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
public:
InterpreterMacroAssembler(CodeBuffer* code);
virtual void check_and_handle_popframe();
virtual void check_and_handle_earlyret();
// Interpreter-specific registers
#if defined(AARCH64) && defined(ASSERT)
@ -141,6 +141,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Load object from cpool->resolved_references(*bcp+1)
void load_resolved_reference_at_index(Register result, Register tmp);
// load cpool->resolved_klass_at(index); Rtemp is corrupted upon return
void load_resolved_klass_at_offset(Register Rcpool, Register Rindex, Register Rklass);
void store_check_part1(Register card_table_base); // Sets card_table_base register.
void store_check_part2(Register obj, Register card_table_base, Register tmp);
@ -328,7 +331,13 @@ class InterpreterMacroAssembler: public MacroAssembler {
void trace_state(const char* msg) PRODUCT_RETURN;
void get_method_counters(Register method, Register Rcounters, Label& skip);
void get_method_counters(Register method,
Register Rcounters,
Label& skip,
bool saveRegs = false,
Register reg1 = noreg,
Register reg2 = noreg,
Register reg3 = noreg);
};
#endif // CPU_ARM_VM_INTERP_MASM_ARM_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -422,7 +422,7 @@ class SlowSignatureHandler: public NativeSignatureIterator {
#endif // !__ABI_HARD__
public:
SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to) :
NativeSignatureIterator(method) {
_from = from;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,7 +56,7 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
#endif
public:
// Creation
SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
_abi_offset = 0;
_ireg = is_static() ? 2 : 1;


@ -206,6 +206,9 @@ protected:
// may customize this version by overriding it for its purposes (e.g., to save/restore
// additional registers when doing a VM call).
virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
@ -213,10 +216,6 @@ protected:
virtual void check_and_handle_popframe() {}
virtual void check_and_handle_earlyret() {}
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// By default, we do not need relocation information for non
// patchable absolute addresses. However, when needed by some
// extensions, ignore_non_patchable_relocations can be modified,


@ -1,99 +0,0 @@
/*
* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "assembler_arm.inline.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
for (int i = 0; i < vtbl_list_size; ++i) {
Label common_code;
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*) __ pc();
__ mov(Rtemp, j); // Rtemp contains an index of a virtual method in the table
__ b(common_code);
}
InlinedAddress vtable_address((address)&vtbl_list[i]);
__ bind(common_code);
const Register tmp2 = AARCH64_ONLY(Rtemp2) NOT_AARCH64(R4);
assert_different_registers(Rtemp, tmp2);
#ifndef AARCH64
__ push(tmp2);
#endif // !AARCH64
// Do not use ldr_global since the code must be portable across all ARM architectures
__ ldr_literal(tmp2, vtable_address);
__ ldr(tmp2, Address(tmp2)); // get correct vtable address
__ ldr(Rtemp, Address::indexed_ptr(tmp2, Rtemp)); // get real method pointer
__ str(tmp2, Address(R0)); // update vtable. R0 = "this"
#ifndef AARCH64
__ pop(tmp2);
#endif // !AARCH64
__ jump(Rtemp);
__ bind_literal(vtable_address);
}
__ flush();
*mc_top = (char*) __ pc();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvm.h"
#include "prims/methodHandles.hpp"
#define __ _masm->
@ -67,7 +68,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, Register temp1, Register temp2, SystemDictionary::WKID klass_id,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
Klass* klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj);
@ -157,8 +158,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ load_heap_oop(tmp, Address(tmp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
__ verify_oop(tmp);
// the following assumes that a Method* is normally compressed in the vmtarget field:
__ load_heap_oop(Rmethod, Address(tmp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
__ verify_oop(Rmethod);
__ ldr(Rmethod, Address(Rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
@ -320,7 +322,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
Address vmtarget_method(Rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
@ -375,14 +378,17 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
__ load_heap_oop(Rmethod, member_vmtarget);
__ ldr(Rmethod, vmtarget_method);
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
__ load_heap_oop(Rmethod, member_vmtarget);
__ ldr(Rmethod, vmtarget_method);
break;
case vmIntrinsics::_linkToVirtual:


@ -34,6 +34,7 @@
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@ -747,7 +748,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
assert_different_registers(tmp, R0, R1, R2, R3, R4, R5, R6, R7, Rsender_sp, Rparams);
if (comp_args_on_stack) {
__ sub_slow(SP, SP, align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, StackAlignmentInBytes));
}
for (int i = 0; i < total_args_passed; i++) {
@ -870,7 +871,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
#ifdef AARCH64
int extraspace = align_up(total_args_passed * Interpreter::stackElementSize, StackAlignmentInBytes);
if (extraspace) {
__ sub(SP, SP, extraspace);
}
@ -1023,7 +1024,7 @@ static int reg2offset_out(VMReg r) {
static void verify_oop_args(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = Rmethod; // not part of any compiled calling seq
@ -1044,7 +1045,7 @@ static void verify_oop_args(MacroAssembler* masm,
}
static void gen_special_dispatch(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
@ -1181,7 +1182,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
stack_slots += 2 * VMRegImpl::slots_per_word;
// Calculate the final stack size taking account of alignment
stack_slots = align_up(stack_slots, StackAlignmentInBytes / VMRegImpl::stack_slot_size);
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
int lock_slot_fp_offset = stack_size - 2 * wordSize -
lock_slot_offset * VMRegImpl::stack_slot_size;
@ -1851,7 +1852,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
int extra_locals_size = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
#ifdef AARCH64
extra_locals_size = align_up(extra_locals_size, StackAlignmentInBytes/BytesPerWord);
#endif // AARCH64
return extra_locals_size;
}


@ -37,6 +37,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
@ -2876,7 +2877,7 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("PreBarrier");
#ifdef AARCH64
callee_saved_regs = align_up(callee_saved_regs, 2);
for (int i = 0; i < callee_saved_regs; i += 2) {
__ raw_push(as_Register(i), as_Register(i+1));
}


@ -45,6 +45,7 @@
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
@ -270,12 +271,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry;
}
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
// Not used.
STOP("generate_continuation_for");
return NULL;
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();
@ -310,6 +305,9 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ convert_retval_to_tos(state);
#endif // !AARCH64
__ check_and_handle_popframe();
__ check_and_handle_earlyret();
__ dispatch_next(state, step);
return entry;
@ -678,7 +676,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// Rstack_top & RextendedSP
__ sub(Rstack_top, SP, 10*wordSize);
if (native_call) {
__ sub(RextendedSP, Rstack_top, align_up(wordSize, StackAlignmentInBytes)); // reserve 1 slot for exception handling
} else {
__ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
__ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes);
@ -1098,7 +1096,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Allocate more stack space to accommodate all arguments passed on GP and FP registers:
// 8 * wordSize for GPRs
// 8 * wordSize for FPRs
int reg_arguments = align_up(8*wordSize + 8*wordSize, StackAlignmentInBytes);
#else
// C functions need aligned stack
@ -1111,7 +1109,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Allocate more stack space to accommodate all GP as well as FP registers:
// 4 * wordSize
// 8 * BytesPerLong
int reg_arguments = align_up((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
#else
// Reserve at least 4 words on the stack for loading
// of parameters passed on registers (R0-R3).
@ -1401,7 +1399,13 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
#ifdef AARCH64
// setup RmaxStack
__ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
// We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
// none of which are in use at the same time, so we just need to make sure there is enough room
// for the biggest user:
// -reserved slot for exception handler
// -reserved slots for JSR292. Method::extra_stack_entries() is the size.
// -3 reserved slots so get_method_counters() can save some registers before call_VM().
__ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries()));
#endif // AARCH64
// see if we've got enough room on the stack for locals plus overhead.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2286,13 +2286,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
}
__ bind(no_mdo);
// Increment backedge counter in MethodCounters*
// Note: Rbumped_taken_count is a callee-saved register on ARM32, but caller-saved on ARM64
__ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
Rdisp, R3_bytecode,
AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
Rcnt, R4_tmp, eq, &backedge_counter_overflow);
} else {
// Increment backedge counter in MethodCounters*
__ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
Rdisp, R3_bytecode,
AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
__ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter
__ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter
__ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter
@ -4367,10 +4372,9 @@ void TemplateTable::_new() {
#endif // AARCH64
// get InstanceKlass
__ cmp(Rtemp, JVM_CONSTANT_Class);
__ b(slow_case, ne);
__ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
@ -4642,8 +4646,7 @@ void TemplateTable::checkcast() {
// Get superklass in Rsuper and subklass in Rsub
__ bind(quicked);
__ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
__ bind(resolved);
__ load_klass(Rsub, Robj);
@ -4716,8 +4719,7 @@ void TemplateTable::instanceof() {
// Get superklass in Rsuper and subklass in Rsub
__ bind(quicked);
__ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
__ bind(resolved);
__ load_klass(Rsub, Robj);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
@ -256,7 +257,9 @@ void VM_Version::initialize() {
}
}
if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128);
}
#ifdef COMPILER2
FLAG_SET_DEFAULT(UseFPUForSpilling, true);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
@ -201,7 +202,9 @@ void VM_Version::initialize() {
}
}
if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128);
}
#ifdef COMPILER2
FLAG_SET_DEFAULT(UseFPUForSpilling, true);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -51,27 +51,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
switch (method_kind(m)) {
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : // fall thru
case Interpreter::java_lang_math_fmaD : // fall thru
case Interpreter::java_lang_math_fmaF :
return false;
default:
return true;
}
}
// How much stack a method activation needs in stack slots.
// We must calc this exactly like in generate_fixed_frame.
// Note: This returns the conservative size assuming maximum alignment.


@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -376,10 +376,12 @@ class Assembler : public AbstractAssembler {
STWX_OPCODE = (31u << OPCODE_SHIFT | 151u << 1),
STWU_OPCODE = (37u << OPCODE_SHIFT),
STWUX_OPCODE = (31u << OPCODE_SHIFT | 183u << 1),
STWBRX_OPCODE = (31u << OPCODE_SHIFT | 662u << 1),
STH_OPCODE = (44u << OPCODE_SHIFT),
STHX_OPCODE = (31u << OPCODE_SHIFT | 407u << 1),
STHU_OPCODE = (45u << OPCODE_SHIFT),
STHBRX_OPCODE = (31u << OPCODE_SHIFT | 918u << 1),
STB_OPCODE = (38u << OPCODE_SHIFT),
STBX_OPCODE = (31u << OPCODE_SHIFT | 215u << 1),
@ -401,11 +403,13 @@ class Assembler : public AbstractAssembler {
LD_OPCODE = (58u << OPCODE_SHIFT | 0u << XO_30_31_SHIFT), // DS-FORM
LDU_OPCODE = (58u << OPCODE_SHIFT | 1u << XO_30_31_SHIFT), // DS-FORM
LDX_OPCODE = (31u << OPCODE_SHIFT | 21u << XO_21_30_SHIFT), // X-FORM
LDBRX_OPCODE = (31u << OPCODE_SHIFT | 532u << 1), // X-FORM
STD_OPCODE = (62u << OPCODE_SHIFT | 0u << XO_30_31_SHIFT), // DS-FORM
STDU_OPCODE = (62u << OPCODE_SHIFT | 1u << XO_30_31_SHIFT), // DS-FORM
STDUX_OPCODE = (31u << OPCODE_SHIFT | 181u << 1), // X-FORM
STDX_OPCODE = (31u << OPCODE_SHIFT | 149u << XO_21_30_SHIFT), // X-FORM
STDBRX_OPCODE = (31u << OPCODE_SHIFT | 660u << 1), // X-FORM
RLDICR_OPCODE = (30u << OPCODE_SHIFT | 1u << XO_27_29_SHIFT), // MD-FORM
RLDICL_OPCODE = (30u << OPCODE_SHIFT | 0u << XO_27_29_SHIFT), // MD-FORM
@ -506,7 +510,13 @@ class Assembler : public AbstractAssembler {
LXVD2X_OPCODE = (31u << OPCODE_SHIFT | 844u << 1),
STXVD2X_OPCODE = (31u << OPCODE_SHIFT | 972u << 1),
MTVSRD_OPCODE = (31u << OPCODE_SHIFT | 179u << 1),
MTVSRWZ_OPCODE = (31u << OPCODE_SHIFT | 243u << 1),
MFVSRD_OPCODE = (31u << OPCODE_SHIFT | 51u << 1),
MTVSRWA_OPCODE = (31u << OPCODE_SHIFT | 211u << 1),
MFVSRWZ_OPCODE = (31u << OPCODE_SHIFT | 115u << 1),
XXPERMDI_OPCODE= (60u << OPCODE_SHIFT | 10u << 3),
XXMRGHW_OPCODE = (60u << OPCODE_SHIFT | 18u << 3),
XXMRGLW_OPCODE = (60u << OPCODE_SHIFT | 50u << 3),
// Vector Permute and Formatting
VPKPX_OPCODE = (4u << OPCODE_SHIFT | 782u ),
@ -556,6 +566,7 @@ class Assembler : public AbstractAssembler {
VADDUBM_OPCODE = (4u << OPCODE_SHIFT | 0u ),
VADDUWM_OPCODE = (4u << OPCODE_SHIFT | 128u ),
VADDUHM_OPCODE = (4u << OPCODE_SHIFT | 64u ),
VADDUDM_OPCODE = (4u << OPCODE_SHIFT | 192u ),
VADDUBS_OPCODE = (4u << OPCODE_SHIFT | 512u ),
VADDUWS_OPCODE = (4u << OPCODE_SHIFT | 640u ),
VADDUHS_OPCODE = (4u << OPCODE_SHIFT | 576u ),
@ -1094,16 +1105,19 @@ class Assembler : public AbstractAssembler {
static int vrs( VectorRegister r) { return vrs(r->encoding());}
static int vrt( VectorRegister r) { return vrt(r->encoding());}
// Only used on SHA sigma instructions (VX-form)
static int vst( int x) { return opp_u_field(x, 16, 16); }
static int vsix( int x) { return opp_u_field(x, 20, 17); }
// Support Vector-Scalar (VSX) instructions.
static int vsra( int x) { return opp_u_field(x & 0x1F, 15, 11) | opp_u_field((x & 0x20) >> 5, 29, 29); }
static int vsrb( int x) { return opp_u_field(x & 0x1F, 20, 16) | opp_u_field((x & 0x20) >> 5, 30, 30); }
static int vsrs( int x) { return opp_u_field(x & 0x1F, 10, 6) | opp_u_field((x & 0x20) >> 5, 31, 31); }
static int vsrt( int x) { return vsrs(x); }
static int vsdm( int x) { return opp_u_field(x, 23, 22); }
static int vsra( VectorSRegister r) { return vsra(r->encoding());}
static int vsrb( VectorSRegister r) { return vsrb(r->encoding());}
static int vsrs( VectorSRegister r) { return vsrs(r->encoding());}
static int vsrt( VectorSRegister r) { return vsrt(r->encoding());}
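The redefinitions above are what allow all 64 VSX registers to be encoded: a 6-bit VSR number splits into the traditional 5-bit field plus one extension bit parked at instruction bit 29, 30 or 31 depending on the operand position. A sketch of the arithmetic, approximating opp_u_field for IBM bit numbering:

#include <cassert>
#include <cstdint>

// IBM numbering: bit 0 is the MSB of the 32-bit word, so a field whose last
// bit is 'hi' is shifted left by (31 - hi).
static uint32_t opp_u_field_(uint32_t x, int hi) { return x << (31 - hi); }

static uint32_t vsrt_(int x) { // 6-bit target register, as in vsrs/vsrt above
  return opp_u_field_(x & 0x1F, 10) | opp_u_field_((x & 0x20) >> 5, 31);
}

int main() {
  // VSR37 = 0b100101: the low five bits go in the T field, the TX bit is set.
  assert(vsrt_(37) == ((0x05u << 21) | 0x1u));
  return 0;
}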
@ -1552,6 +1566,9 @@ class Assembler : public AbstractAssembler {
inline void ld( Register d, int si16, Register s1);
inline void ldu( Register d, int si16, Register s1);
// 8 bytes reversed
inline void ldbrx( Register d, Register s1, Register s2);
// For convenience. Load pointer into d from b+s1.
inline void ld_ptr(Register d, int b, Register s1);
DEBUG_ONLY(inline void ld_ptr(Register d, ByteSize b, Register s1);)
@ -1560,10 +1577,12 @@ class Assembler : public AbstractAssembler {
inline void stwx( Register d, Register s1, Register s2);
inline void stw( Register d, int si16, Register s1);
inline void stwu( Register d, int si16, Register s1);
inline void stwbrx( Register d, Register s1, Register s2);
inline void sthx( Register d, Register s1, Register s2);
inline void sth( Register d, int si16, Register s1);
inline void sthu( Register d, int si16, Register s1);
inline void sthbrx( Register d, Register s1, Register s2);
inline void stbx( Register d, Register s1, Register s2);
inline void stb( Register d, int si16, Register s1);
@ -1573,6 +1592,7 @@ class Assembler : public AbstractAssembler {
inline void std( Register d, int si16, Register s1);
inline void stdu( Register d, int si16, Register s1);
inline void stdux(Register s, Register a, Register b);
inline void stdbrx( Register d, Register s1, Register s2);
inline void st_ptr(Register d, int si16, Register s1);
DEBUG_ONLY(inline void st_ptr(Register d, ByteSize b, Register s1);)
@ -2016,7 +2036,7 @@ class Assembler : public AbstractAssembler {
inline void vperm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
inline void vsel( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
inline void vsl( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vsldoi( VectorRegister d, VectorRegister a, VectorRegister b, int ui4);
inline void vslo( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vsr( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vsro( VectorRegister d, VectorRegister a, VectorRegister b);
@ -2027,6 +2047,7 @@ class Assembler : public AbstractAssembler {
inline void vaddubm( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vadduwm( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vadduhm( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vaddudm( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vaddubs( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vadduws( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vadduhs( VectorRegister d, VectorRegister a, VectorRegister b);
@ -2102,6 +2123,7 @@ class Assembler : public AbstractAssembler {
inline void vandc( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vnor( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vor( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vmr( VectorRegister d, VectorRegister a);
inline void vxor( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vrld( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vrlb( VectorRegister d, VectorRegister a, VectorRegister b);
@ -2125,8 +2147,24 @@ class Assembler : public AbstractAssembler {
inline void lxvd2x( VectorSRegister d, Register a, Register b);
inline void stxvd2x( VectorSRegister d, Register a);
inline void stxvd2x( VectorSRegister d, Register a, Register b);
inline void mtvrwz( VectorRegister d, Register a);
inline void mfvrwz( Register a, VectorRegister d);
inline void mtvrd( VectorRegister d, Register a);
inline void mfvrd( Register a, VectorRegister d);
inline void xxpermdi( VectorSRegister d, VectorSRegister a, VectorSRegister b, int dm);
inline void xxmrghw( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b);
// VSX Extended Mnemonics
inline void xxspltd( VectorSRegister d, VectorSRegister a, int x);
inline void xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xxmrgld( VectorSRegister d, VectorSRegister a, VectorSRegister b);
inline void xxswapd( VectorSRegister d, VectorSRegister a);
// Vector-Scalar (VSX) instructions.
inline void mtfprd( FloatRegister d, Register a);
inline void mtfprwa( FloatRegister d, Register a);
inline void mffprd( Register a, FloatRegister d);
// AES (introduced with Power 8)
inline void vcipher( VectorRegister d, VectorRegister a, VectorRegister b);
@ -2182,14 +2220,18 @@ class Assembler : public AbstractAssembler {
inline void lbz( Register d, int si16);
inline void ldx( Register d, Register s2);
inline void ld( Register d, int si16);
inline void ldbrx(Register d, Register s2);
inline void stwx( Register d, Register s2);
inline void stw( Register d, int si16);
inline void stwbrx( Register d, Register s2);
inline void sthx( Register d, Register s2);
inline void sth( Register d, int si16);
inline void sthbrx( Register d, Register s2);
inline void stbx( Register d, Register s2);
inline void stb( Register d, int si16);
inline void stdx( Register d, Register s2);
inline void std( Register d, int si16);
inline void stdbrx( Register d, Register s2);
// PPC 2, section 3.2.1 Instruction Cache Instructions
inline void icbi( Register s2);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -327,6 +327,7 @@ inline void Assembler::lbzu( Register d, int si16, Register s1) { assert(d !=
inline void Assembler::ld( Register d, int si16, Register s1) { emit_int32(LD_OPCODE | rt(d) | ds(si16) | ra0mem(s1));}
inline void Assembler::ldx( Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::ldu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
inline void Assembler::ldbrx( Register d, Register s1, Register s2) { emit_int32(LDBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::ld_ptr(Register d, int b, Register s1) { ld(d, b, s1); }
DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) { ld(d, in_bytes(b), s1); })
@ -335,10 +336,12 @@ DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) {
inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stw( Register d, int si16, Register s1) { emit_int32(STW_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::stwu( Register d, int si16, Register s1) { emit_int32(STWU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::stwbrx( Register d, Register s1, Register s2) { emit_int32(STWBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::sthx( Register d, Register s1, Register s2) { emit_int32(STHX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::sth( Register d, int si16, Register s1) { emit_int32(STH_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::sthu( Register d, int si16, Register s1) { emit_int32(STHU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::sthbrx( Register d, Register s1, Register s2) { emit_int32(STHBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stbx( Register d, Register s1, Register s2) { emit_int32(STBX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stb( Register d, int si16, Register s1) { emit_int32(STB_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
@ -348,6 +351,7 @@ inline void Assembler::std( Register d, int si16, Register s1) { emit_int32(
inline void Assembler::stdx( Register d, Register s1, Register s2) { emit_int32(STDX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stdu( Register d, int si16, Register s1) { emit_int32(STDU_OPCODE | rs(d) | ds(si16) | rta0mem(s1));}
inline void Assembler::stdux(Register s, Register a, Register b) { emit_int32(STDUX_OPCODE| rs(s) | rta0mem(a) | rb(b));}
inline void Assembler::stdbrx( Register d, Register s1, Register s2) { emit_int32(STDBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::st_ptr(Register d, int b, Register s1) { std(d, b, s1); }
DEBUG_ONLY(inline void Assembler::st_ptr(Register d, ByteSize b, Register s1) { std(d, in_bytes(b), s1); })
@ -754,12 +758,28 @@ inline void Assembler::lvsl( VectorRegister d, Register s1, Register s2) { emit
inline void Assembler::lvsr( VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
// Vector-Scalar (VSX) instructions.
inline void Assembler::lxvd2x (VectorSRegister d, Register s1) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
inline void Assembler::lxvd2x (VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stxvd2x(VectorSRegister d, Register s1) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
inline void Assembler::stxvd2x(VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::mtvrd( VectorRegister d, Register a) { emit_int32( MTVSRD_OPCODE | vrt(d) | ra(a) | 1u); } // 1u: d is treated as Vector (VMX/Altivec).
inline void Assembler::mfvrd( Register a, VectorRegister d) { emit_int32( MFVSRD_OPCODE | vrt(d) | ra(a) | 1u); } // 1u: d is treated as Vector (VMX/Altivec).
inline void Assembler::lxvd2x( VectorSRegister d, Register s1) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
inline void Assembler::lxvd2x( VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stxvd2x( VectorSRegister d, Register s1) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
inline void Assembler::stxvd2x( VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::mtvrd( VectorRegister d, Register a) { emit_int32( MTVSRD_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
inline void Assembler::mfvrd( Register a, VectorRegister d) { emit_int32( MFVSRD_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
inline void Assembler::mtvrwz( VectorRegister d, Register a) { emit_int32( MTVSRWZ_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
inline void Assembler::mfvrwz( Register a, VectorRegister d) { emit_int32( MFVSRWZ_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
inline void Assembler::xxpermdi(VectorSRegister d, VectorSRegister a, VectorSRegister b, int dm) { emit_int32( XXPERMDI_OPCODE | vsrt(d) | vsra(a) | vsrb(b) | vsdm(dm)); }
inline void Assembler::xxmrghw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGHW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
inline void Assembler::xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGLW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
// VSX Extended Mnemonics
inline void Assembler::xxspltd( VectorSRegister d, VectorSRegister a, int x) { xxpermdi(d, a, a, x ? 3 : 0); }
inline void Assembler::xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 0); }
inline void Assembler::xxmrgld( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 3); }
inline void Assembler::xxswapd( VectorSRegister d, VectorSRegister a) { xxpermdi(d, a, a, 2); }
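// A sketch of xxpermdi's dm semantics (drawn from the Power ISA, an assumption
// rather than code in this file): the 2-bit dm picks one doubleword from each
// source operand:
//   result.dw0 = (dm & 2) ? a.dw1 : a.dw0;
//   result.dw1 = (dm & 1) ? b.dw1 : b.dw0;
// Hence dm=0 merges the high doublewords (xxmrghd), dm=3 the low ones (xxmrgld),
// and dm=2 with a==b swaps the two halves (xxswapd); xxspltd splats doubleword x.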
// Vector-Scalar (VSX) instructions.
inline void Assembler::mtfprd( FloatRegister d, Register a) { emit_int32( MTVSRD_OPCODE | frt(d) | ra(a)); }
inline void Assembler::mtfprwa( FloatRegister d, Register a) { emit_int32( MTVSRWA_OPCODE | frt(d) | ra(a)); }
inline void Assembler::mffprd( Register a, FloatRegister d) { emit_int32( MFVSRD_OPCODE | frt(d) | ra(a)); }
inline void Assembler::vpkpx( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKPX_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkshss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@ -791,7 +811,7 @@ inline void Assembler::vspltisw(VectorRegister d, int si5)
inline void Assembler::vperm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VPERM_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
inline void Assembler::vsel( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VSEL_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
inline void Assembler::vsl( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSL_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsldoi( VectorRegister d, VectorRegister a, VectorRegister b, int si4) { emit_int32( VSLDOI_OPCODE| vrt(d) | vra(a) | vrb(b) | vsldoi_shb(simm(si4,4))); }
inline void Assembler::vsldoi( VectorRegister d, VectorRegister a, VectorRegister b, int ui4) { emit_int32( VSLDOI_OPCODE| vrt(d) | vra(a) | vrb(b) | vsldoi_shb(uimm(ui4,4))); }
inline void Assembler::vslo( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLO_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsr( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsro( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRO_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@ -802,6 +822,7 @@ inline void Assembler::vaddsws( VectorRegister d, VectorRegister a, VectorRegist
inline void Assembler::vaddubm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddudm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUDM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddubs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@ -878,6 +899,7 @@ inline void Assembler::vand( VectorRegister d, VectorRegister a, VectorRegist
inline void Assembler::vandc( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VANDC_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vnor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmr( VectorRegister d, VectorRegister a) { emit_int32( VOR_OPCODE | vrt(d) | vra(a) | vrb(a)); }
inline void Assembler::vxor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VXOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrld( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLD_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@ -944,14 +966,18 @@ inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE
inline void Assembler::lbz( Register d, int si16 ) { emit_int32( LBZ_OPCODE | rt(d) | d1(si16));}
inline void Assembler::ld( Register d, int si16 ) { emit_int32( LD_OPCODE | rt(d) | ds(si16));}
inline void Assembler::ldx( Register d, Register s2) { emit_int32( LDX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::ldbrx(Register d, Register s2) { emit_int32( LDBRX_OPCODE| rt(d) | rb(s2));}
inline void Assembler::stwx( Register d, Register s2) { emit_int32( STWX_OPCODE | rs(d) | rb(s2));}
inline void Assembler::stw( Register d, int si16 ) { emit_int32( STW_OPCODE | rs(d) | d1(si16));}
inline void Assembler::stwbrx(Register d, Register s2){ emit_int32(STWBRX_OPCODE| rs(d) | rb(s2));}
inline void Assembler::sthx( Register d, Register s2) { emit_int32( STHX_OPCODE | rs(d) | rb(s2));}
inline void Assembler::sth( Register d, int si16 ) { emit_int32( STH_OPCODE | rs(d) | d1(si16));}
inline void Assembler::sthbrx(Register d, Register s2){ emit_int32(STHBRX_OPCODE| rs(d) | rb(s2));}
inline void Assembler::stbx( Register d, Register s2) { emit_int32( STBX_OPCODE | rs(d) | rb(s2));}
inline void Assembler::stb( Register d, int si16 ) { emit_int32( STB_OPCODE | rs(d) | d1(si16));}
inline void Assembler::std( Register d, int si16 ) { emit_int32( STD_OPCODE | rs(d) | ds(si16));}
inline void Assembler::stdx( Register d, Register s2) { emit_int32( STDX_OPCODE | rs(d) | rb(s2));}
inline void Assembler::stdbrx(Register d, Register s2){ emit_int32(STDBRX_OPCODE| rs(d) | rb(s2));}
// ra0 version
inline void Assembler::icbi( Register s2) { emit_int32( ICBI_OPCODE | rb(s2) ); }

View File

@ -37,10 +37,6 @@ class Bytes: AllStatic {
#if defined(VM_LITTLE_ENDIAN)
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine. For example, true for Intel x86, false for Solaris on SPARC.
static inline bool is_Java_byte_ordering_different() { return true; }
// Forward declarations of the compiler-dependent implementation
static inline u2 swap_u2(u2 x);
static inline u4 swap_u4(u4 x);
@ -155,10 +151,6 @@ class Bytes: AllStatic {
#else // !defined(VM_LITTLE_ENDIAN)
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine. For example, true for Intel x86, false for Solaris on SPARC.
static inline bool is_Java_byte_ordering_different() { return false; }
// Thus, a swap between native and Java ordering is always a no-op:
static inline u2 swap_u2(u2 x) { return x; }
static inline u4 swap_u4(u4 x) { return x; }
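// A hedged sketch of a compiler-dependent implementation (assumption, not the
// actual platform code): on a little-endian machine Java's big-endian u4
// 0x11223344 is stored as bytes 44 33 22 11, so the swap reverses them, e.g.:
//   static inline u4 swap_u4(u4 x) { return __builtin_bswap32(x); } // GCC/Clang builtin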

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -514,25 +514,48 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
}
case Bytecodes::_i2d:
case Bytecodes::_l2d: {
__ fcfid(dst->as_double_reg(), src->as_double_reg()); // via mem
bool src_in_memory = !VM_Version::has_mtfprd();
FloatRegister rdst = dst->as_double_reg();
FloatRegister rsrc;
if (src_in_memory) {
rsrc = src->as_double_reg(); // via mem
} else {
// move src to dst register
if (code == Bytecodes::_i2d) {
__ mtfprwa(rdst, src->as_register());
} else {
__ mtfprd(rdst, src->as_register_lo());
}
rsrc = rdst;
}
__ fcfid(rdst, rsrc);
break;
}
case Bytecodes::_i2f: {
case Bytecodes::_i2f:
case Bytecodes::_l2f: {
bool src_in_memory = !VM_Version::has_mtfprd();
FloatRegister rdst = dst->as_float_reg();
FloatRegister rsrc = src->as_double_reg(); // via mem
FloatRegister rsrc;
if (src_in_memory) {
rsrc = src->as_double_reg(); // via mem
} else {
// move src to dst register
if (code == Bytecodes::_i2f) {
__ mtfprwa(rdst, src->as_register());
} else {
__ mtfprd(rdst, src->as_register_lo());
}
rsrc = rdst;
}
if (VM_Version::has_fcfids()) {
__ fcfids(rdst, rsrc);
} else {
assert(code == Bytecodes::_i2f, "fcfid+frsp needs fixup code to avoid rounding incompatibility");
__ fcfid(rdst, rsrc);
__ frsp(rdst, rdst);
}
break;
}
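// Rationale for the assert above (sketch): every int is exactly representable
// as a double, so fcfid is exact for _i2f and frsp performs the single required
// rounding; a long may already round in fcfid, and the second rounding by frsp
// can disagree with a directly rounded long->float result. That is why _l2f
// without fcfids is routed to SharedRuntime::l2f by the LIRGenerator instead.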
case Bytecodes::_l2f: { // >= Power7
assert(VM_Version::has_fcfids(), "fcfid+frsp needs fixup code to avoid rounding incompatibility");
__ fcfids(dst->as_float_reg(), src->as_double_reg()); // via mem
break;
}
case Bytecodes::_f2d: {
__ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
break;
@ -543,31 +566,49 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
}
case Bytecodes::_d2i:
case Bytecodes::_f2i: {
bool dst_in_memory = !VM_Version::has_mtfprd();
FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
Address addr = frame_map()->address_for_slot(dst->double_stack_ix());
Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
Label L;
// Result must be 0 if value is NaN; test by comparing value to itself.
__ fcmpu(CCR0, rsrc, rsrc);
__ li(R0, 0); // 0 in case of NAN
__ std(R0, addr.disp(), addr.base());
if (dst_in_memory) {
__ li(R0, 0); // 0 in case of NAN
__ std(R0, addr.disp(), addr.base());
} else {
__ li(dst->as_register(), 0);
}
__ bso(CCR0, L);
__ fctiwz(rsrc, rsrc); // USE_KILL
__ stfd(rsrc, addr.disp(), addr.base());
if (dst_in_memory) {
__ stfd(rsrc, addr.disp(), addr.base());
} else {
__ mffprd(dst->as_register(), rsrc);
}
__ bind(L);
break;
}
case Bytecodes::_d2l:
case Bytecodes::_f2l: {
bool dst_in_memory = !VM_Version::has_mtfprd();
FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
Address addr = frame_map()->address_for_slot(dst->double_stack_ix());
Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
Label L;
// Result must be 0 if value is NaN; test by comparing value to itself.
__ fcmpu(CCR0, rsrc, rsrc);
__ li(R0, 0); // 0 in case of NAN
__ std(R0, addr.disp(), addr.base());
if (dst_in_memory) {
__ li(R0, 0); // 0 in case of NAN
__ std(R0, addr.disp(), addr.base());
} else {
__ li(dst->as_register_lo(), 0);
}
__ bso(CCR0, L);
__ fctidz(rsrc, rsrc); // USE_KILL
__ stfd(rsrc, addr.disp(), addr.base());
if (dst_in_memory) {
__ stfd(rsrc, addr.disp(), addr.base());
} else {
__ mffprd(dst->as_register_lo(), rsrc);
}
__ bind(L);
break;
}
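// Why the fcmpu(rsrc, rsrc) self-compare works (sketch): NaN is the only value
// that compares unequal to itself, and Java requires (int)NaN == 0 and
// (long)NaN == 0L, so the result is preloaded with 0 and the bso branch skips
// the fctiwz/fctidz conversion when NaN is detected:
//   bool is_nan(double v) { return v != v; }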
@ -3177,9 +3218,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
assert_different_registers(val, crc, res);
__ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
__ nand(crc, crc, crc); // ~crc
__ update_byte_crc32(crc, val, res);
__ nand(res, crc, crc); // ~crc
__ kernel_crc32_singleByteReg(crc, val, res, true);
__ mr(res, crc);
}
#undef __

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,18 +63,6 @@ void LIRItem::load_nonconstant() {
}
inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
LIR_Opr r = li.value()->operand();
if (r->is_register()) {
LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
} else {
// Constants or memory get loaded with sign extend on this platform.
ll->move(li.result(), dst);
}
}
//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------
@ -883,81 +871,91 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
switch (x->op()) {
if (!VM_Version::has_mtfprd()) {
switch (x->op()) {
// int -> float: force spill
case Bytecodes::_l2f: {
if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
// fcfid+frsp needs fixup code to avoid rounding incompatibility.
address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
set_result(x, result);
// int -> float: force spill
case Bytecodes::_l2f: {
if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
// fcfid+frsp needs fixup code to avoid rounding incompatibility.
address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
set_result(x, result);
return;
} // else fallthru
}
case Bytecodes::_l2d: {
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.load_item();
LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
__ convert(x->op(), tmp, reg);
return;
}
case Bytecodes::_i2f:
case Bytecodes::_i2d: {
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.load_item();
// Convert i2l first.
LIR_Opr tmp1 = new_register(T_LONG);
__ convert(Bytecodes::_i2l, value.result(), tmp1);
LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
__ convert(x->op(), tmp2, reg);
return;
}
// float -> int: result will be stored
case Bytecodes::_f2l:
case Bytecodes::_d2l: {
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.set_destroys_register(); // USE_KILL
value.load_item();
set_vreg_flag(reg, must_start_in_memory);
__ convert(x->op(), value.result(), reg);
return;
}
case Bytecodes::_f2i:
case Bytecodes::_d2i: {
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.set_destroys_register(); // USE_KILL
value.load_item();
// Convert l2i afterwards.
LIR_Opr tmp1 = new_register(T_LONG);
set_vreg_flag(tmp1, must_start_in_memory);
__ convert(x->op(), value.result(), tmp1);
__ convert(Bytecodes::_l2i, tmp1, reg);
return;
}
// Within same category: just register conversions.
case Bytecodes::_i2b:
case Bytecodes::_i2c:
case Bytecodes::_i2s:
case Bytecodes::_i2l:
case Bytecodes::_l2i:
case Bytecodes::_f2d:
case Bytecodes::_d2f:
break;
} // else fallthru
}
case Bytecodes::_l2d: {
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.load_item();
LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
__ convert(x->op(), tmp, reg);
break;
}
case Bytecodes::_i2f:
case Bytecodes::_i2d: {
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.load_item();
// Convert i2l first.
LIR_Opr tmp1 = new_register(T_LONG);
__ convert(Bytecodes::_i2l, value.result(), tmp1);
LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
__ convert(x->op(), tmp2, reg);
break;
}
// float -> int: result will be stored
case Bytecodes::_f2l:
case Bytecodes::_d2l: {
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.set_destroys_register(); // USE_KILL
value.load_item();
set_vreg_flag(reg, must_start_in_memory);
__ convert(x->op(), value.result(), reg);
break;
default: ShouldNotReachHere();
}
case Bytecodes::_f2i:
case Bytecodes::_d2i: {
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.set_destroys_register(); // USE_KILL
value.load_item();
// Convert l2i afterwards.
LIR_Opr tmp1 = new_register(T_LONG);
set_vreg_flag(tmp1, must_start_in_memory);
__ convert(x->op(), value.result(), tmp1);
__ convert(Bytecodes::_l2i, tmp1, reg);
break;
}
// Within same category: just register conversions.
case Bytecodes::_i2b:
case Bytecodes::_i2c:
case Bytecodes::_i2s:
case Bytecodes::_i2l:
case Bytecodes::_l2i:
case Bytecodes::_f2d:
case Bytecodes::_d2f: {
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.load_item();
__ convert(x->op(), value.result(), reg);
break;
}
default: ShouldNotReachHere();
}
// Register conversion.
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.load_item();
switch (x->op()) {
case Bytecodes::_f2l:
case Bytecodes::_d2l:
case Bytecodes::_f2i:
case Bytecodes::_d2i: value.set_destroys_register(); break; // USE_KILL
default: break;
}
__ convert(x->op(), value.result(), reg);
}
@ -1426,10 +1424,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
arg2 = cc->at(1),
arg3 = cc->at(2);
// CCallingConventionRequiresIntsAsLongs
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
load_int_as_long(gen()->lir(), len, arg3);
len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
@ -1441,6 +1438,76 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}
}
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
assert(UseCRC32CIntrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);
switch (x->id()) {
case vmIntrinsics::_updateBytesCRC32C:
case vmIntrinsics::_updateDirectByteBufferCRC32C: {
bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
LIRItem crc(x->argument_at(0), this);
LIRItem buf(x->argument_at(1), this);
LIRItem off(x->argument_at(2), this);
LIRItem end(x->argument_at(3), this);
buf.load_item();
off.load_nonconstant();
end.load_nonconstant();
// len = end - off
LIR_Opr len = end.result();
LIR_Opr tmpA = new_register(T_INT);
LIR_Opr tmpB = new_register(T_INT);
__ move(end.result(), tmpA);
__ move(off.result(), tmpB);
__ sub(tmpA, tmpB, tmpA);
len = tmpA;
LIR_Opr index = off.result();
int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
if (off.result()->is_constant()) {
index = LIR_OprFact::illegalOpr;
offset += off.result()->as_jint();
}
LIR_Opr base_op = buf.result();
LIR_Address* a = NULL;
if (index->is_valid()) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index, tmp);
index = tmp;
__ add(index, LIR_OprFact::intptrConst(offset), index);
a = new LIR_Address(base_op, index, T_BYTE);
} else {
a = new LIR_Address(base_op, offset, T_BYTE);
}
BasicTypeList signature(3);
signature.append(T_INT);
signature.append(T_ADDRESS);
signature.append(T_INT);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
const LIR_Opr result_reg = result_register_for(x->type());
LIR_Opr arg1 = cc->at(0),
arg2 = cc->at(1),
arg3 = cc->at(2);
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
__ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
break;
}
default: {
ShouldNotReachHere();
}
}
}
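// Net effect of the LIR built above, as a C++ sketch using this function's
// names (updateBytesCRC32C_stub is a hypothetical stand-in for the stub call):
//   int len   = end - off;                                          // tmpA - tmpB
//   int base  = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
//   void* addr = buf + base + off;
//   result = updateBytesCRC32C_stub(crc, addr, len);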
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
assert(x->number_of_arguments() == 3, "wrong type");
assert(UseFMA, "Needs FMA instructions support.");
@ -1467,7 +1534,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
Unimplemented();
}

View File

@ -36,6 +36,7 @@
#include "runtime/os.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
@ -340,7 +341,7 @@ void C1_MacroAssembler::allocate_array(
// Check for negative or excessive length.
size_t max_length = max_array_allocation_length >> log2_elt_size;
if (UseTLAB) {
size_t max_tlab = align_size_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
size_t max_tlab = align_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
if (max_tlab < max_length) { max_length = max_tlab; }
}
load_const_optimized(t1, max_length);

View File

@ -36,6 +36,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
@ -251,7 +252,7 @@ void Runtime1::initialize_pd() {
fpu_reg_save_offsets[i] = sp_offset;
sp_offset += BytesPerWord;
}
frame_size_in_bytes = align_size_up(sp_offset, frame::alignment_in_bytes);
frame_size_in_bytes = align_up(sp_offset, frame::alignment_in_bytes);
}
@ -275,7 +276,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm, address target,
int stack_parms) {
// Make a frame and preserve the caller's caller-save registers.
const int parm_size_in_bytes = align_size_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);
@ -287,6 +288,7 @@ static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm,
__ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
case 1:
__ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
case 0:
call_offset = __ call_RT(noreg, noreg, target);
break;
default: Unimplemented(); break;
@ -325,7 +327,7 @@ OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, ad
static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register result, address target,
int stack_parms, bool do_return = true) {
// Make a frame and preserve the caller's caller-save registers.
const int parm_size_in_bytes = align_size_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);
@ -337,6 +339,7 @@ static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register resul
__ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
case 1:
__ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
case 0:
call_offset = __ call_RT(result, noreg, target);
break;
default: Unimplemented(); break;

View File

@ -244,4 +244,6 @@ intptr_t *frame::initial_deoptimization_info() {
frame::frame(void* sp, void* fp, void* pc) : _sp((intptr_t*)sp), _unextended_sp((intptr_t*)sp) {
find_codeblob_and_set_pc_and_deopt_state((address)pc); // also sets _fp and adjusts _unextended_sp
}
void frame::pd_ps() {}
#endif

View File

@ -82,13 +82,7 @@
public:
// C frame layout
enum {
// stack alignment
alignment_in_bytes = 16,
// log_2(16*8 bits) = 7.
log_2_of_alignment_in_bits = 7
};
static const int alignment_in_bytes = 16;
// ABI_MINFRAME:
struct abi_minframe {

View File

@ -28,6 +28,7 @@
#include "code/codeCache.hpp"
#include "code/vmreg.inline.hpp"
#include "utilities/align.hpp"
// Inline functions for ppc64 frames:
@ -193,7 +194,7 @@ inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
inline int frame::interpreter_frame_monitor_size() {
// Number of stack slots for a monitor.
return round_to(BasicObjectLock::size(), // number of stack slots
return align_up(BasicObjectLock::size(), // number of stack slots
WordsPerLong); // number of stack slots for a Java long
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -164,7 +164,7 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
product(bool, ZapMemory, false, "Write 0x0101... to empty memory." \
" Use this to ease debugging.") \
\
/* Use Restricted Transactional Memory for lock eliding */ \
/* Use Restricted Transactional Memory for lock elision */ \
product(bool, UseRTMLocking, false, \
"Enable RTM lock eliding for inflated locks in compiled code") \
\
@ -174,24 +174,31 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
product(bool, UseRTMDeopt, false, \
"Perform deopt and recompilation based on RTM abort ratio") \
\
product(uintx, RTMRetryCount, 5, \
product(int, RTMRetryCount, 5, \
"Number of RTM retries on lock abort or busy") \
range(0, max_jint) \
\
experimental(intx, RTMSpinLoopCount, 100, \
experimental(int, RTMSpinLoopCount, 100, \
"Spin count for lock to become free before RTM retry") \
range(0, 32767) /* immediate operand limit on ppc */ \
\
experimental(intx, RTMAbortThreshold, 1000, \
experimental(int, RTMAbortThreshold, 1000, \
"Calculate abort ratio after this number of aborts") \
range(0, max_jint) \
\
experimental(intx, RTMLockingThreshold, 10000, \
experimental(int, RTMLockingThreshold, 10000, \
"Lock count at which to do RTM lock eliding without " \
"abort ratio calculation") \
range(0, max_jint) \
\
experimental(intx, RTMAbortRatio, 50, \
experimental(int, RTMAbortRatio, 50, \
"Lock abort ratio at which to stop use RTM lock eliding") \
range(0, 100) /* natural range */ \
\
experimental(intx, RTMTotalCountIncrRate, 64, \
experimental(int, RTMTotalCountIncrRate, 64, \
"Increment total RTM attempted lock count once every n times") \
range(1, 32767) /* immediate operand limit on ppc */ \
constraint(RTMTotalCountIncrRateConstraintFunc,AfterErgo) \
\
experimental(intx, RTMLockingCalculationDelay, 0, \
"Number of milliseconds to wait before start calculating aborts " \

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,8 +45,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg);
// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);
@ -79,6 +79,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Load object from cpool->resolved_references(index).
void load_resolved_reference_at_index(Register result, Register index, Label *is_null = NULL);
// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);
void load_receiver(Register Rparam_count, Register Rrecv_dst);
// helpers for expression stack
@ -96,8 +99,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void push_2ptrs(Register first, Register second);
void push_l_pop_d(Register l = R17_tos, FloatRegister d = F15_ftos);
void push_d_pop_l(FloatRegister d = F15_ftos, Register l = R17_tos);
void move_l_to_d(Register l = R17_tos, FloatRegister d = F15_ftos);
void move_d_to_l(FloatRegister d = F15_ftos, Register l = R17_tos);
void pop (TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -284,14 +284,22 @@ void InterpreterMacroAssembler::push_2ptrs(Register first, Register second) {
addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
}
void InterpreterMacroAssembler::push_l_pop_d(Register l, FloatRegister d) {
std(l, 0, R15_esp);
lfd(d, 0, R15_esp);
void InterpreterMacroAssembler::move_l_to_d(Register l, FloatRegister d) {
if (VM_Version::has_mtfprd()) {
mtfprd(d, l);
} else {
std(l, 0, R15_esp);
lfd(d, 0, R15_esp);
}
}
void InterpreterMacroAssembler::push_d_pop_l(FloatRegister d, Register l) {
stfd(d, 0, R15_esp);
ld(l, 0, R15_esp);
void InterpreterMacroAssembler::move_d_to_l(FloatRegister d, Register l) {
if (VM_Version::has_mtfprd()) {
mffprd(l, d);
} else {
stfd(d, 0, R15_esp);
ld(l, 0, R15_esp);
}
}
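// On processors with mtfprd/mffprd (has_mtfprd), the GPR<->FPR move above is a
// single instruction; otherwise the value takes a round trip through the stack
// slot at R15_esp (store from one register file, reload into the other).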
void InterpreterMacroAssembler::push(TosState state) {
@ -454,7 +462,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
Register tmp = index; // reuse
sldi(tmp, index, LogBytesPerHeapOop);
// Load pointer for resolved_references[] objArray.
ld(result, ConstantPool::resolved_references_offset_in_bytes(), result);
ld(result, ConstantPool::cache_offset_in_bytes(), result);
ld(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
ld(result, 0, result);
#ifdef ASSERT
@ -471,6 +480,25 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result, is_null);
}
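// The load chain above reflects the resolved_references array having moved
// from ConstantPool to ConstantPoolCache; as a sketch:
//   result = cpool->_cache->_resolved_references;   // a handle
//   result = JNIHandles::resolve(result);           // -> objArrayOop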
// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass) {
// int value = *(Rcpool->int_at_addr(which));
// int resolved_klass_index = extract_low_short_from_int(value);
add(Roffset, Rcpool, Roffset);
#if defined(VM_LITTLE_ENDIAN)
lhz(Roffset, sizeof(ConstantPool), Roffset); // Roffset = resolved_klass_index
#else
lhz(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
#endif
ld(Rklass, ConstantPool::resolved_klasses_offset_in_bytes(), Rcpool); // Rklass = Rcpool->_resolved_klasses
sldi(Roffset, Roffset, LogBytesPerWord);
addi(Roffset, Roffset, Array<Klass*>::base_offset_in_bytes());
isync(); // Order load of instance Klass wrt. tags.
ldx(Rklass, Rklass, Roffset);
}
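// Equivalent C++ for the assembly above (a sketch; cf. the comment at the top
// of the function):
//   int value = *cpool->int_at_addr(which);
//   int resolved_klass_index = extract_low_short_from_int(value); // low 16 bits
//   Klass* k = cpool->resolved_klasses()->at(resolved_klass_index);
// The +2 byte offset on big-endian selects the same low halfword that
// little-endian reads at offset 0.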
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -45,7 +45,7 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
public:
// Creation
SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
_num_used_fp_arg_regs = 0;
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2498,14 +2498,20 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
// All transactions = total_count * RTMTotalCountIncrRate
// Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
ld(R0, RTMLockingCounters::abort_count_offset(), rtm_counters_Reg);
cmpdi(CCR0, R0, RTMAbortThreshold);
blt(CCR0, L_check_always_rtm2);
if (is_simm(RTMAbortThreshold, 16)) { // cmpdi can handle 16bit immediate only.
cmpdi(CCR0, R0, RTMAbortThreshold);
blt(CCR0, L_check_always_rtm2); // reload of rtm_counters_Reg not necessary
} else {
load_const_optimized(rtm_counters_Reg, RTMAbortThreshold);
cmpd(CCR0, R0, rtm_counters_Reg);
blt(CCR0, L_check_always_rtm1); // reload of rtm_counters_Reg required
}
mulli(R0, R0, 100);
const Register tmpReg = rtm_counters_Reg;
ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
mulli(tmpReg, tmpReg, RTMTotalCountIncrRate);
mulli(tmpReg, tmpReg, RTMAbortRatio);
mulli(tmpReg, tmpReg, RTMTotalCountIncrRate); // allowable range: int16
mulli(tmpReg, tmpReg, RTMAbortRatio); // allowable range: int16
cmpd(CCR0, R0, tmpReg);
blt(CCR0, L_check_always_rtm1); // jump to reload
if (method_data != NULL) {
@ -2521,7 +2527,13 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload
bind(L_check_always_rtm2);
ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
cmpdi(CCR0, tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
int64_t thresholdValue = RTMLockingThreshold / RTMTotalCountIncrRate;
if (is_simm(thresholdValue, 16)) { // cmpdi can handle 16bit immediate only.
cmpdi(CCR0, tmpReg, thresholdValue);
} else {
load_const_optimized(R0, thresholdValue);
cmpd(CCR0, tmpReg, R0);
}
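// is_simm(x, 16) checks that x fits cmpdi's signed 16-bit immediate field; as
// a sketch:
//   bool fits_simm16 = (-32768 <= x) && (x <= 32767);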
blt(CCR0, L_done);
if (method_data != NULL) {
// Set rtm_state to "always rtm" in MDO.
@ -2620,7 +2632,7 @@ void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
Label L_noincrement;
if (RTMTotalCountIncrRate > 1) {
branch_on_random_using_tb(tmp, (int)RTMTotalCountIncrRate, L_noincrement);
branch_on_random_using_tb(tmp, RTMTotalCountIncrRate, L_noincrement);
}
assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
load_const_optimized(tmp, (address)stack_rtm_counters->total_count_addr(), R0);
@ -2687,7 +2699,7 @@ void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
Label L_noincrement;
if (RTMTotalCountIncrRate > 1) {
branch_on_random_using_tb(R0, (int)RTMTotalCountIncrRate, L_noincrement);
branch_on_random_using_tb(R0, RTMTotalCountIncrRate, L_noincrement);
}
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
load_const(R0, (address)rtm_counters->total_count_addr(), tmpReg);
@ -4120,7 +4132,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
* @param table register pointing to CRC table
*/
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool loopAlignment, bool invertCRC) {
Register data, bool loopAlignment) {
assert_different_registers(crc, buf, len, table, data);
Label L_mainLoop, L_done;
@ -4131,10 +4143,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
clrldi_(len, len, 32); // Enforce 32 bit. Anything to do?
beq(CCR0, L_done);
if (invertCRC) {
nand(crc, crc, crc); // ~c
}
mtctr(len);
align(mainLoop_alignment);
BIND(L_mainLoop);
@ -4143,10 +4151,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
update_byte_crc32(crc, data, table);
bdnz(L_mainLoop); // Iterate.
if (invertCRC) {
nand(crc, crc, crc); // ~c
}
bind(L_done);
}
@ -4203,7 +4207,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
*/
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3) {
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@ -4217,14 +4222,16 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
const int complexThreshold = 2*mainLoop_stepping;
// Don't test for len <= 0 here. This pathological case should not occur anyway.
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
// for all well-behaved cases. The situation itself is detected and handled correctly
// within update_byteLoop_crc32.
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
BLOCK_COMMENT("kernel_crc32_2word {");
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
// Check for short (<mainLoop_stepping) buffer.
cmpdi(CCR0, len, complexThreshold);
@ -4245,7 +4252,7 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
}
update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
}
srdi(tmp2, len, log_stepping); // #iterations for mainLoop
@ -4281,9 +4288,11 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
// Process last few (<complexThreshold) bytes of buffer.
BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false, false);
update_byteLoop_crc32(crc, buf, len, table, data, false);
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_2word");
}
@ -4297,7 +4306,8 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
*/
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3) {
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@ -4311,14 +4321,16 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
const int complexThreshold = 2*mainLoop_stepping;
// Don't test for len <= 0 here. This pathological case should not occur anyway.
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
// for all well-behaved cases. The situation itself is detected and handled correctly
// within update_byteLoop_crc32.
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
BLOCK_COMMENT("kernel_crc32_1word {");
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
// Check for short (<mainLoop_stepping) buffer.
cmpdi(CCR0, len, complexThreshold);
@ -4339,7 +4351,7 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
}
update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
}
srdi(tmp2, len, log_stepping); // #iterations for mainLoop
@ -4374,9 +4386,11 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
// Process last few (<complexThreshold) bytes of buffer.
BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false, false);
update_byteLoop_crc32(crc, buf, len, table, data, false);
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_1word");
}
@ -4389,16 +4403,24 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
* Uses R7_ARG5, R8_ARG6 as work registers.
*/
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) {
Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Register data = t0; // Holds the current byte to be folded into crc.
BLOCK_COMMENT("kernel_crc32_1byte {");
// Process all bytes in a single-byte loop.
update_byteLoop_crc32(crc, buf, len, table, data, true, true);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
// Process all bytes in a single-byte loop.
update_byteLoop_crc32(crc, buf, len, table, data, true);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_1byte");
}
@ -4416,7 +4438,8 @@ void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len
*/
void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4) {
Register t0, Register t1, Register t2, Register t3, Register t4,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_alignedHead, L_tail, L_alignTail, L_start, L_end;
@ -4434,13 +4457,15 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
Register tc0 = t4;
Register tc1 = constants;
Register tc2 = barretConstants;
kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table);
kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
b(L_end);
BIND(L_start);
// 2. ~c
nand(crc, crc, crc);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
// 3. calculate from 0 to first 128bit-aligned address
clrldi_(prealign, buf, 57);
@ -4449,7 +4474,7 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
subfic(prealign, prealign, 128);
subf(len, prealign, len);
update_byteLoop_crc32(crc, buf, prealign, table, t2, false, false);
update_byteLoop_crc32(crc, buf, prealign, table, t2, false);
// 4. calculate from first 128bit-aligned address to last 128bit-aligned address
BIND(L_alignedHead);
@ -4464,12 +4489,14 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
cmpdi(CCR0, postalign, 0);
beq(CCR0, L_tail);
update_byteLoop_crc32(crc, buf, postalign, table, t2, false, false);
update_byteLoop_crc32(crc, buf, postalign, table, t2, false);
BIND(L_tail);
// 6. ~c
nand(crc, crc, crc);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BIND(L_end);
@ -4549,12 +4576,12 @@ void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Regi
vspltisw(VR0, -1);
vsldoi(mask_32bit, zeroes, VR0, 4);
vsldoi(mask_64bit, zeroes, VR0, -8);
vsldoi(mask_64bit, zeroes, VR0, 8);
// Get the initial value into v8
vxor(VR8, VR8, VR8);
mtvrd(VR8, crc);
vsldoi(VR8, zeroes, VR8, -8); // shift into bottom 32 bits
vsldoi(VR8, zeroes, VR8, 8); // shift into bottom 32 bits
li (rLoaded, 0);
@ -4903,7 +4930,7 @@ void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Regi
addi(barretConstants, barretConstants, 16);
lvx(const2, barretConstants);
vsldoi(VR1, VR0, VR0, -8);
vsldoi(VR1, VR0, VR0, 8);
vxor(VR0, VR0, VR1); // xor two 64 bit results together
// shift left one bit
@ -4961,16 +4988,35 @@ void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Regi
offsetInt -= 8; ld(R31, offsetInt, R1_SP);
}
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp, bool invertCRC) {
assert_different_registers(crc, buf, /* len, not used!! */ table, tmp);
BLOCK_COMMENT("kernel_crc32_singleByte:");
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
lbz(tmp, 0, buf); // Byte from buffer, zero-extended.
lbz(tmp, 0, buf); // Byte from buffer, zero-extended.
update_byte_crc32(crc, tmp, table);
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
}
void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table, bool invertCRC) {
assert_different_registers(crc, val, table);
BLOCK_COMMENT("kernel_crc32_singleByteReg:");
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
update_byte_crc32(crc, val, table);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
}
// dest_lo += src1 + src2

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -819,33 +819,47 @@ class MacroAssembler: public Assembler {
Register tmp6, Register tmp7, Register tmp8, Register tmp9, Register tmp10,
Register tmp11, Register tmp12, Register tmp13);
// CRC32 Intrinsics.
// Emitters for CRC32 calculation.
// A note on invertCRC:
// Unfortunately, internal representation of crc differs between CRC32 and CRC32C.
// CRC32 holds its current crc value in the externally visible representation.
// CRC32C holds its current crc value in internal format, ready for updating.
// Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
// In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
// The bool invertCRC parameter indicates whether bit-flipping is required before updates.
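// The resulting update pattern, as a minimal sketch (cf.
// kernel_crc32_singleByteReg below):
//   if (invertCRC) { crc = ~crc; }        // to internal, update-ready form
//   update_byte_crc32(crc, val, table);   // table-driven update step
//   if (invertCRC) { crc = ~crc; }        // back to the caller's representation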
void load_reverse_32(Register dst, Register src);
int crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3);
void fold_byte_crc32(Register crc, Register val, Register table, Register tmp);
void fold_8bit_crc32(Register crc, Register table, Register tmp);
void update_byte_crc32(Register crc, Register val, Register table);
void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool loopAlignment, bool invertCRC);
Register data, bool loopAlignment);
void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3);
void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3);
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC);
void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3);
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC);
void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3);
Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
void kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4);
Register t0, Register t1, Register t2, Register t3, Register t4,
bool invertCRC);
void kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC);
void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
bool invertCRC);
//
// Debugging

View File

@@ -1,78 +0,0 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->klass_part()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there need
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtbl_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
// There are more general problems with CDS on ppc, so I cannot
// really test this. But having this instead of Unimplemented() allows
// us to pass TestOptionsWithRanges.java.
__ unimplemented();
}

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvm.h"
#include "prims/methodHandles.hpp"
#define __ _masm->
@@ -71,7 +72,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register temp_reg, Register temp2_reg,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Klass* klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj_reg);
@@ -174,8 +175,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp, temp2);
__ verify_oop(method_temp);
// The following assumes that a Method* is normally compressed in the vmtarget field:
__ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);
__ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), method_temp);
__ verify_oop(method_temp);
__ ld(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), method_temp);
if (VerifyMethodHandles && !for_compiler_entry) {
// Make sure recv is already on stack.
@@ -361,14 +363,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
}
__ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
__ load_heap_oop(R19_method, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), member_reg);
__ ld(R19_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), R19_method);
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
}
__ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
__ load_heap_oop(R19_method, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), member_reg);
__ ld(R19_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), R19_method);
break;
case vmIntrinsics::_linkToVirtual:

View File

@@ -1,6 +1,6 @@
//
// Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2012, 2016 SAP SE. All rights reserved.
// Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2012, 2017 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -2053,12 +2053,12 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg.
const int Matcher::vector_ideal_reg(int size) {
const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8 && size == 8, "");
return Op_RegL;
}
const int Matcher::vector_shift_count_ideal_reg(int size) {
const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported");
return Node::NotAMachineReg;
}
@@ -3079,6 +3079,17 @@ encode %{
__ bind(done);
%}
enc_class enc_cmove_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
// TODO: PPC port $archOpcode(ppc64Opcode_cmove);
MacroAssembler _masm(&cbuf);
Label done;
__ bso($crx$$CondRegister, done);
__ mffprd($dst$$Register, $src$$FloatRegister);
// TODO PPC port __ endgroup_if_needed(_size == 12);
__ bind(done);
%}
enc_class enc_bc(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
// TODO: PPC port $archOpcode(ppc64Opcode_bc);
@@ -5842,6 +5853,16 @@ instruct loadConN_lo(iRegNdst dst, iRegNsrc src1, immN src2) %{
ins_pipe(pipe_class_default);
%}
instruct rldicl(iRegLdst dst, iRegLsrc src, immI16 shift, immI16 mask_begin) %{
effect(DEF dst, USE src, USE shift, USE mask_begin);
size(4);
ins_encode %{
__ rldicl($dst$$Register, $src$$Register, $shift$$constant, $mask_begin$$constant);
%}
ins_pipe(pipe_class_default);
%}
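A C++ sketch of the rldicl semantics this rule relies on (PPC numbers bits from the most significant side, so bit 0 is the MSB; shift and mask_begin are assumed to be in 0..63):

#include <cstdint>

// rldicl dst, src, shift, mask_begin: rotate left by 'shift', then keep
// only PPC bits mask_begin..63 (i.e. clear the high mask_begin bits).
uint64_t rldicl(uint64_t src, unsigned shift, unsigned mask_begin) {
  shift &= 63;
  uint64_t rot = shift ? (src << shift) | (src >> (64 - shift)) : src;
  return rot & (~0ULL >> mask_begin);
}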
// Needed to postalloc expand loadConN: ConN is loaded as ConI
// leaving the upper 32 bits with sign-extension bits.
// This clears these bits: dst = src & 0xFFFFFFFF.
@@ -9306,6 +9327,44 @@ instruct shrP_convP2X_reg_imm6(iRegLdst dst, iRegP_N2P src1, uimmI6 src2) %{
ins_pipe(pipe_class_default);
%}
// Bitfield Extract: URShiftI + AndI
instruct andI_urShiftI_regI_immI_immIpow2minus1(iRegIdst dst, iRegIsrc src1, immI src2, immIpow2minus1 src3) %{
match(Set dst (AndI (URShiftI src1 src2) src3));
format %{ "EXTRDI $dst, $src1, shift=$src2, mask=$src3 \t// int bitfield extract" %}
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
int rshift = ($src2$$constant) & 0x1f;
int length = log2_long(((jlong) $src3$$constant) + 1);
if (rshift + length > 32) {
// if necessary, adjust mask to omit rotated bits.
length = 32 - rshift;
}
__ extrdi($dst$$Register, $src1$$Register, length, 64 - (rshift + length));
%}
ins_pipe(pipe_class_default);
%}
// Bitfield Extract: URShiftL + AndL
instruct andL_urShiftL_regL_immI_immLpow2minus1(iRegLdst dst, iRegLsrc src1, immI src2, immLpow2minus1 src3) %{
match(Set dst (AndL (URShiftL src1 src2) src3));
format %{ "EXTRDI $dst, $src1, shift=$src2, mask=$src3 \t// long bitfield extract" %}
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
int rshift = ($src2$$constant) & 0x3f;
int length = log2_long(((jlong) $src3$$constant) + 1);
if (rshift + length > 64) {
// if necessary, adjust mask to omit rotated bits.
length = 64 - rshift;
}
__ extrdi($dst$$Register, $src1$$Register, length, 64 - (rshift + length));
%}
ins_pipe(pipe_class_default);
%}
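Stated as plain C++ (a sketch; shown for the 64-bit rule, the int rule is the same with 32 in place of 64): (src >> rshift) & (2^n - 1) is an n-bit bitfield extract, and n is clamped when the shifted field would run past the word.

#include <cstdint>

uint64_t bitfield_extract64(uint64_t src, int rshift, uint64_t pow2minus1_mask) {
  int length = 0;
  while (length < 64 && (pow2minus1_mask >> length) != 0) length++;  // log2(mask + 1)
  rshift &= 0x3f;
  if (rshift + length > 64) length = 64 - rshift;                    // omit rotated bits
  uint64_t mask = (length == 64) ? ~0ULL : ((1ULL << length) - 1);
  return (src >> rshift) & mask;
}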
instruct sxtI_reg(iRegIdst dst, iRegIsrc src) %{
match(Set dst (ConvL2I (ConvI2L src)));
@@ -10078,9 +10137,36 @@ instruct andcL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
// float intBitsToFloat(int bits)
//
// Notes on the implementation on ppc64:
// We only provide rules which move between a register and a stack-location,
// because we always have to go through memory when moving between a float
// register and an integer register.
// For Power7 and earlier, the rules are limited to those which move between a
// register and a stack-location, because we always have to go through memory
// when moving between a float register and an integer register.
// This restriction is removed in Power8 with the introduction of the mtfprd
// and mffprd instructions.
instruct moveL2D_reg(regD dst, iRegLsrc src) %{
match(Set dst (MoveL2D src));
predicate(VM_Version::has_mtfprd());
format %{ "MTFPRD $dst, $src" %}
size(4);
ins_encode %{
__ mtfprd($dst$$FloatRegister, $src$$Register);
%}
ins_pipe(pipe_class_default);
%}
instruct moveI2D_reg(regD dst, iRegIsrc src) %{
// no match-rule, false predicate
effect(DEF dst, USE src);
predicate(false);
format %{ "MTFPRWA $dst, $src" %}
size(4);
ins_encode %{
__ mtfprwa($dst$$FloatRegister, $src$$Register);
%}
ins_pipe(pipe_class_default);
%}
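At the Java level MoveL2D is a pure bit move, not a numeric conversion; a minimal sketch (standard library only). Pre-Power8 the port had to bounce this through a stack slot (store long, load double); mtfprd performs the same move GPR-to-FPR without touching memory.

#include <cstdint>
#include <cstring>

// Reinterpret the 64 bits of a long as a double.
double move_l2d(int64_t bits) {
  double d;
  std::memcpy(&d, &bits, sizeof d);  // bit-preserving copy, no conversion
  return d;
}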
//---------- Chain stack slots between similar types --------
@@ -10519,6 +10605,16 @@ instruct convB2I_reg(iRegIdst dst, iRegIsrc src, immI_24 amount) %{
ins_pipe(pipe_class_default);
%}
instruct extsh(iRegIdst dst, iRegIsrc src) %{
effect(DEF dst, USE src);
size(4);
ins_encode %{
__ extsh($dst$$Register, $src$$Register);
%}
ins_pipe(pipe_class_default);
%}
// LShiftI 16 + RShiftI 16 converts short to int.
instruct convS2I_reg(iRegIdst dst, iRegIsrc src, immI_16 amount) %{
match(Set dst (RShiftI (LShiftI src amount) amount));
@@ -10583,6 +10679,20 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_pipe(pipe_class_default);
%}
instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE src);
predicate(false);
ins_variable_size_depending_on_alignment(true);
format %{ "cmovI $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(false /* TODO: PPC PORT(InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling())*/ ? 12 : 8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
%}
instruct cmovI_bso_stackSlotL_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, stackSlotL mem) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE mem);
@@ -10637,9 +10747,64 @@ instruct cmovI_bso_stackSlotL_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, stack
%}
%}
instruct cmovI_bso_reg_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, regD src) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE src);
predicate(false);
format %{ "CmovI $dst, $crx, $src \t// postalloc expanded" %}
postalloc_expand %{
//
// replaces
//
// region dst crx src
// \ | | /
// dst=cmovI_bso_reg_conLvalue0
//
// with
//
// region dst
// \ /
// dst=loadConI16(0)
// |
// ^ region dst crx src
// | \ | | /
// dst=cmovI_bso_reg
//
// Create new nodes.
MachNode *m1 = new loadConI16Node();
MachNode *m2 = new cmovI_bso_regNode();
// inputs for new nodes
m1->add_req(n_region);
m2->add_req(n_region, n_crx, n_src);
// precedences for new nodes
m2->add_prec(m1);
// operands for new nodes
m1->_opnds[0] = op_dst;
m1->_opnds[1] = new immI16Oper(0);
m2->_opnds[0] = op_dst;
m2->_opnds[1] = op_crx;
m2->_opnds[2] = op_src;
// registers for new nodes
ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
// Insert new nodes.
nodes->push(m1);
nodes->push(m2);
%}
%}
// Double to Int conversion, NaN is mapped to 0.
instruct convD2I_reg_ExEx(iRegIdst dst, regD src) %{
match(Set dst (ConvD2I src));
predicate(!VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10653,6 +10818,21 @@ instruct convD2I_reg_ExEx(iRegIdst dst, regD src) %{
%}
%}
// Double to Int conversion, NaN is mapped to 0. Special version for Power8.
instruct convD2I_reg_mffprd_ExEx(iRegIdst dst, regD src) %{
match(Set dst (ConvD2I src));
predicate(VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
regD tmpD;
flagsReg crx;
cmpDUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
convD2IRaw_regD(tmpD, src); // Convert float to int (speculated).
cmovI_bso_reg_conLvalue0_Ex(dst, crx, tmpD); // Cmove based on NaN check.
%}
%}
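The expand above implements Java's (int) cast from double; a sketch of the intended semantics (the unordered self-compare is exactly the NaN check, and the cmove selects the preloaded 0):

#include <cmath>
#include <cstdint>

int32_t conv_d2i(double src) {
  if (std::isnan(src)) return 0;               // cmovI_bso_* path: keep the 0
  if (src >= 2147483647.0) return INT32_MAX;   // fctiwz saturates out-of-range
  if (src <= -2147483648.0) return INT32_MIN;
  return static_cast<int32_t>(src);
}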
instruct convF2IRaw_regF(regF dst, regF src) %{
// no match-rule, false predicate
effect(DEF dst, USE src);
@@ -10670,6 +10850,7 @@ instruct convF2IRaw_regF(regF dst, regF src) %{
// Float to Int conversion, NaN is mapped to 0.
instruct convF2I_regF_ExEx(iRegIdst dst, regF src) %{
match(Set dst (ConvF2I src));
predicate(!VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10683,6 +10864,21 @@ instruct convF2I_regF_ExEx(iRegIdst dst, regF src) %{
%}
%}
// Float to Int conversion, NaN is mapped to 0. Special version for Power8.
instruct convF2I_regF_mffprd_ExEx(iRegIdst dst, regF src) %{
match(Set dst (ConvF2I src));
predicate(VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
regF tmpF;
flagsReg crx;
cmpFUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
convF2IRaw_regF(tmpF, src); // Convert float to int (speculated).
cmovI_bso_reg_conLvalue0_Ex(dst, crx, tmpF); // Cmove based on NaN check.
%}
%}
// Convert to Long
instruct convI2L_reg(iRegLdst dst, iRegIsrc src) %{
@@ -10752,6 +10948,20 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_pipe(pipe_class_default);
%}
instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE src);
predicate(false);
ins_variable_size_depending_on_alignment(true);
format %{ "cmovL $crx, $dst, $src" %}
// Worst case is branch + move + stop, no stop without scheduler.
size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
ins_encode( enc_cmove_bso_reg(dst, crx, src) );
ins_pipe(pipe_class_default);
%}
instruct cmovL_bso_stackSlotL_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, stackSlotL mem) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE mem);
@@ -10803,9 +11013,61 @@ instruct cmovL_bso_stackSlotL_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, stack
%}
%}
instruct cmovL_bso_reg_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, regD src) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE src);
predicate(false);
format %{ "CmovL $dst, $crx, $src \t// postalloc expanded" %}
postalloc_expand %{
//
// replaces
//
// region dst crx src
// \ | | /
// dst=cmovL_bso_reg_conLvalue0
//
// with
//
// region dst
// \ /
// dst=loadConL16(0)
// |
// ^ region dst crx src
// | \ | | /
// dst=cmovL_bso_reg
//
// Create new nodes.
MachNode *m1 = new loadConL16Node();
MachNode *m2 = new cmovL_bso_regNode();
// inputs for new nodes
m1->add_req(n_region);
m2->add_req(n_region, n_crx, n_src);
m2->add_prec(m1);
// operands for new nodes
m1->_opnds[0] = op_dst;
m1->_opnds[1] = new immL16Oper(0);
m2->_opnds[0] = op_dst;
m2->_opnds[1] = op_crx;
m2->_opnds[2] = op_src;
// registers for new nodes
ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
// Insert new nodes.
nodes->push(m1);
nodes->push(m2);
%}
%}
// Float to Long conversion, NaN is mapped to 0.
instruct convF2L_reg_ExEx(iRegLdst dst, regF src) %{
match(Set dst (ConvF2L src));
predicate(!VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10819,6 +11081,21 @@ instruct convF2L_reg_ExEx(iRegLdst dst, regF src) %{
%}
%}
// Float to Long conversion, NaN is mapped to 0. Special version for Power8.
instruct convF2L_reg_mffprd_ExEx(iRegLdst dst, regF src) %{
match(Set dst (ConvF2L src));
predicate(VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
regF tmpF;
flagsReg crx;
cmpFUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
convF2LRaw_regF(tmpF, src); // Convert float to long (speculated).
cmovL_bso_reg_conLvalue0_Ex(dst, crx, tmpF); // Cmove based on NaN check.
%}
%}
instruct convD2LRaw_regD(regD dst, regD src) %{
// no match-rule, false predicate
effect(DEF dst, USE src);
@@ -10836,6 +11113,7 @@ instruct convD2LRaw_regD(regD dst, regD src) %{
// Double to Long conversion, NaN is mapped to 0.
instruct convD2L_reg_ExEx(iRegLdst dst, regD src) %{
match(Set dst (ConvD2L src));
predicate(!VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10849,6 +11127,21 @@ instruct convD2L_reg_ExEx(iRegLdst dst, regD src) %{
%}
%}
// Double to Long conversion, NaN is mapped to 0. Special version for Power8.
instruct convD2L_reg_mffprd_ExEx(iRegLdst dst, regD src) %{
match(Set dst (ConvD2L src));
predicate(VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
regD tmpD;
flagsReg crx;
cmpDUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
convD2LRaw_regD(tmpD, src); // Convert float to long (speculated).
cmovL_bso_reg_conLvalue0_Ex(dst, crx, tmpD); // Cmove based on NaN check.
%}
%}
// Convert to Float
// Placed here as needed in expand.
@@ -10914,7 +11207,7 @@ instruct convL2FRaw_regF(regF dst, regD src) %{
// Integer to Float conversion. Special version for Power7.
instruct convI2F_ireg_fcfids_Ex(regF dst, iRegIsrc src) %{
match(Set dst (ConvI2F src));
predicate(VM_Version::has_fcfids());
predicate(VM_Version::has_fcfids() && !VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10928,10 +11221,23 @@ instruct convI2F_ireg_fcfids_Ex(regF dst, iRegIsrc src) %{
%}
%}
// Integer to Float conversion. Special version for Power8.
instruct convI2F_ireg_mtfprd_Ex(regF dst, iRegIsrc src) %{
match(Set dst (ConvI2F src));
predicate(VM_Version::has_fcfids() && VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
regD tmpD;
moveI2D_reg(tmpD, src);
convL2FRaw_regF(dst, tmpD); // Convert to float.
%}
%}
// L2F to avoid runtime call.
instruct convL2F_ireg_fcfids_Ex(regF dst, iRegLsrc src) %{
match(Set dst (ConvL2F src));
predicate(VM_Version::has_fcfids());
predicate(VM_Version::has_fcfids() && !VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10943,6 +11249,19 @@ instruct convL2F_ireg_fcfids_Ex(regF dst, iRegLsrc src) %{
%}
%}
// L2F to avoid runtime call. Special version for Power8.
instruct convL2F_ireg_mtfprd_Ex(regF dst, iRegLsrc src) %{
match(Set dst (ConvL2F src));
predicate(VM_Version::has_fcfids() && VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
regD tmpD;
moveL2D_reg(tmpD, src);
convL2FRaw_regF(dst, tmpD); // Convert to float.
%}
%}
// Moved up as used in expand.
//instruct convD2F_reg(regF dst, regD src) %{%}
@@ -10951,6 +11270,7 @@ instruct convL2F_ireg_fcfids_Ex(regF dst, iRegLsrc src) %{
// Integer to Double conversion.
instruct convI2D_reg_Ex(regD dst, iRegIsrc src) %{
match(Set dst (ConvI2D src));
predicate(!VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10964,6 +11284,19 @@ instruct convI2D_reg_Ex(regD dst, iRegIsrc src) %{
%}
%}
// Integer to Double conversion. Special version for Power8.
instruct convI2D_reg_mtfprd_Ex(regD dst, iRegIsrc src) %{
match(Set dst (ConvI2D src));
predicate(VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
regD tmpD;
moveI2D_reg(tmpD, src);
convL2DRaw_regD(dst, tmpD); // Convert to double.
%}
%}
// Long to Double conversion
instruct convL2D_reg_Ex(regD dst, stackSlotL src) %{
match(Set dst (ConvL2D src));
@@ -10976,6 +11309,19 @@ instruct convL2D_reg_Ex(regD dst, stackSlotL src) %{
%}
%}
// Long to Double conversion. Special version for Power8.
instruct convL2D_reg_mtfprd_Ex(regD dst, iRegLsrc src) %{
match(Set dst (ConvL2D src));
predicate(VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
regD tmpD;
moveL2D_reg(tmpD, src);
convL2DRaw_regD(dst, tmpD); // Convert to double.
%}
%}
instruct convF2D_reg(regD dst, regF src) %{
match(Set dst (ConvF2D src));
format %{ "FMR $dst, $src \t// float->double" %}
@@ -12705,8 +13051,7 @@ instruct insrwi(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
// Just slightly faster than the Java implementation.
instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
match(Set dst (ReverseBytesI src));
predicate(UseCountLeadingZerosInstructionsPPC64);
ins_cost(DEFAULT_COST);
ins_cost(7*DEFAULT_COST);
expand %{
immI16 imm24 %{ (int) 24 %}
@@ -12728,6 +13073,172 @@ instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
%}
%}
instruct bytes_reverse_long_Ex(iRegLdst dst, iRegLsrc src) %{
match(Set dst (ReverseBytesL src));
ins_cost(15*DEFAULT_COST);
expand %{
immI16 imm56 %{ (int) 56 %}
immI16 imm48 %{ (int) 48 %}
immI16 imm40 %{ (int) 40 %}
immI16 imm32 %{ (int) 32 %}
immI16 imm24 %{ (int) 24 %}
immI16 imm16 %{ (int) 16 %}
immI16 imm8 %{ (int) 8 %}
immI16 imm0 %{ (int) 0 %}
iRegLdst tmpL1;
iRegLdst tmpL2;
iRegLdst tmpL3;
iRegLdst tmpL4;
iRegLdst tmpL5;
iRegLdst tmpL6;
// src : |a|b|c|d|e|f|g|h|
rldicl(tmpL1, src, imm8, imm24); // tmpL1 : | | | |e|f|g|h|a|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |a| | | |e|
rldicl(tmpL3, tmpL2, imm32, imm0); // tmpL3 : | | | |e| | | |a|
rldicl(tmpL1, src, imm16, imm24); // tmpL1 : | | | |f|g|h|a|b|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |b| | | |f|
rldicl(tmpL4, tmpL2, imm40, imm0); // tmpL4 : | | |f| | | |b| |
orL_reg_reg(tmpL5, tmpL3, tmpL4); // tmpL5 : | | |f|e| | |b|a|
rldicl(tmpL1, src, imm24, imm24); // tmpL1 : | | | |g|h|a|b|c|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |c| | | |g|
rldicl(tmpL3, tmpL2, imm48, imm0); // tmpL3 : | |g| | | |c| | |
rldicl(tmpL1, src, imm32, imm24); // tmpL1 : | | | |h|a|b|c|d|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |d| | | |h|
rldicl(tmpL4, tmpL2, imm56, imm0); // tmpL4 : |h| | | |d| | | |
orL_reg_reg(tmpL6, tmpL3, tmpL4); // tmpL6 : |h|g| | |d|c| | |
orL_reg_reg(dst, tmpL5, tmpL6); // dst : |h|g|f|e|d|c|b|a|
%}
%}
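Stated as ordinary C++ (a sketch), the rldicl/or sequence above is a full 64-bit byte swap:

#include <cstdint>

uint64_t reverse_bytes_long(uint64_t x) {
  x = ((x & 0x00ff00ff00ff00ffULL) << 8)  | ((x >> 8)  & 0x00ff00ff00ff00ffULL);  // swap bytes
  x = ((x & 0x0000ffff0000ffffULL) << 16) | ((x >> 16) & 0x0000ffff0000ffffULL);  // swap halfwords
  return (x << 32) | (x >> 32);                                                   // swap words
}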
instruct bytes_reverse_ushort_Ex(iRegIdst dst, iRegIsrc src) %{
match(Set dst (ReverseBytesUS src));
ins_cost(2*DEFAULT_COST);
expand %{
immI16 imm16 %{ (int) 16 %}
immI16 imm8 %{ (int) 8 %}
urShiftI_reg_imm(dst, src, imm8);
insrwi(dst, src, imm16, imm8);
%}
%}
instruct bytes_reverse_short_Ex(iRegIdst dst, iRegIsrc src) %{
match(Set dst (ReverseBytesS src));
ins_cost(3*DEFAULT_COST);
expand %{
immI16 imm16 %{ (int) 16 %}
immI16 imm8 %{ (int) 8 %}
iRegLdst tmpI1;
urShiftI_reg_imm(tmpI1, src, imm8);
insrwi(tmpI1, src, imm16, imm8);
extsh(dst, tmpI1);
%}
%}
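The unsigned and signed variants differ only in the final sign extension (the extsh above); as a sketch:

#include <cstdint>

uint32_t reverse_bytes_ushort(uint32_t x) {          // zero-extended result
  return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}
int32_t reverse_bytes_short(uint32_t x) {            // sign-extended result
  return (int16_t)reverse_bytes_ushort(x);
}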
// Load Integer reversed byte order
instruct loadI_reversed(iRegIdst dst, indirect mem) %{
match(Set dst (ReverseBytesI (LoadI mem)));
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ lwbrx($dst$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Load Long - aligned and reversed
instruct loadL_reversed(iRegLdst dst, indirect mem) %{
match(Set dst (ReverseBytesL (LoadL mem)));
predicate(VM_Version::has_ldbrx());
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ ldbrx($dst$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Load unsigned short / char reversed byte order
instruct loadUS_reversed(iRegIdst dst, indirect mem) %{
match(Set dst (ReverseBytesUS (LoadUS mem)));
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ lhbrx($dst$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Load short reversed byte order
instruct loadS_reversed(iRegIdst dst, indirect mem) %{
match(Set dst (ReverseBytesS (LoadS mem)));
ins_cost(MEMORY_REF_COST + DEFAULT_COST);
size(8);
ins_encode %{
__ lhbrx($dst$$Register, $mem$$Register);
__ extsh($dst$$Register, $dst$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Store Integer reversed byte order
instruct storeI_reversed(iRegIsrc src, indirect mem) %{
match(Set mem (StoreI mem (ReverseBytesI src)));
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ stwbrx($src$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Store Long reversed byte order
instruct storeL_reversed(iRegLsrc src, indirect mem) %{
match(Set mem (StoreL mem (ReverseBytesL src)));
predicate(VM_Version::has_stdbrx());
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ stdbrx($src$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Store unsigned short / char reversed byte order
instruct storeUS_reversed(iRegIsrc src, indirect mem) %{
match(Set mem (StoreC mem (ReverseBytesUS src)));
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ sthbrx($src$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Store short reversed byte order
instruct storeS_reversed(iRegIsrc src, indirect mem) %{
match(Set mem (StoreC mem (ReverseBytesS src)));
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ sthbrx($src$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
//---------- Replicate Vector Instructions ------------------------------------
// Insrdi does replicate if src == dst.

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -81,8 +81,17 @@ const char* VectorSRegisterImpl::name() const {
"VSR0", "VSR1", "VSR2", "VSR3", "VSR4", "VSR5", "VSR6", "VSR7",
"VSR8", "VSR9", "VSR10", "VSR11", "VSR12", "VSR13", "VSR14", "VSR15",
"VSR16", "VSR17", "VSR18", "VSR19", "VSR20", "VSR21", "VSR22", "VSR23",
"VSR24", "VSR25", "VSR26", "VSR27", "VSR28", "VSR29", "VSR30", "VSR31"
"VSR24", "VSR25", "VSR26", "VSR27", "VSR28", "VSR29", "VSR30", "VSR31",
"VSR32", "VSR33", "VSR34", "VSR35", "VSR36", "VSR37", "VSR38", "VSR39",
"VSR40", "VSR41", "VSR42", "VSR43", "VSR44", "VSR45", "VSR46", "VSR47",
"VSR48", "VSR49", "VSR50", "VSR51", "VSR52", "VSR53", "VSR54", "VSR55",
"VSR56", "VSR57", "VSR58", "VSR59", "VSR60", "VSR61", "VSR62", "VSR63"
};
return is_valid() ? names[encoding()] : "vsnoreg";
}
// Method to convert a VectorRegister to a Vector-Scalar Register (VectorSRegister)
VectorSRegister VectorRegisterImpl::to_vsr() const {
if (this == vnoreg) { return vsnoregi; }
return as_VectorSRegister(encoding() + 32);
}
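The +32 reflects the VSX register-file overlay: VSR0..VSR31 alias the floating-point registers, while VSR32..VSR63 alias the vector registers VR0..VR31. A one-line sketch of the mapping to_vsr() applies:

// Map a vector register encoding to its VSX alias.
int vr_to_vsr_encoding(int vr_encoding) {
  return vr_encoding + 32;  // VR0..VR31 occupy the upper half, VSR32..VSR63
}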

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -398,6 +398,11 @@ inline VectorRegister as_VectorRegister(int encoding) {
return (VectorRegister)(intptr_t)encoding;
}
// Forward declaration
// Use VectorSRegister as a shortcut.
class VectorSRegisterImpl;
typedef VectorSRegisterImpl* VectorSRegister;
// The implementation of vector registers for the Power architecture
class VectorRegisterImpl: public AbstractRegisterImpl {
public:
@@ -415,6 +420,9 @@ class VectorRegisterImpl: public AbstractRegisterImpl {
bool is_valid() const { return 0 <= value() && value() < number_of_registers; }
const char* name() const;
// convert to VSR
VectorSRegister to_vsr() const;
};
// The Vector registers of the Power architecture
@@ -491,10 +499,6 @@ CONSTANT_REGISTER_DECLARATION(VectorRegister, VR31, (31));
#endif // DONT_USE_REGISTER_DEFINES
// Use VectorSRegister as a shortcut.
class VectorSRegisterImpl;
typedef VectorSRegisterImpl* VectorSRegister;
inline VectorSRegister as_VectorSRegister(int encoding) {
return (VectorSRegister)(intptr_t)encoding;
}
@@ -503,7 +507,7 @@ inline VectorSRegister as_VectorSRegister(int encoding) {
class VectorSRegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 32
number_of_registers = 64
};
// construction
@@ -554,6 +558,38 @@ CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR28, (28));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR29, (29));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR30, (30));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR31, (31));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR32, (32));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR33, (33));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR34, (34));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR35, (35));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR36, (36));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR37, (37));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR38, (38));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR39, (39));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR40, (40));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR41, (41));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR42, (42));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR43, (43));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR44, (44));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR45, (45));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR46, (46));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR47, (47));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR48, (48));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR49, (49));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR50, (50));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR51, (51));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR52, (52));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR53, (53));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR54, (54));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR55, (55));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR56, (56));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR57, (57));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR58, (58));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR59, (59));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR60, (60));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR61, (61));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR62, (62));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR63, (63));
#ifndef DONT_USE_REGISTER_DEFINES
#define vsnoregi ((VectorSRegister)(vsnoreg_VectorSRegisterEnumValue))
@@ -589,6 +625,38 @@ CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR31, (31));
#define VSR29 ((VectorSRegister)( VSR29_VectorSRegisterEnumValue))
#define VSR30 ((VectorSRegister)( VSR30_VectorSRegisterEnumValue))
#define VSR31 ((VectorSRegister)( VSR31_VectorSRegisterEnumValue))
#define VSR32 ((VectorSRegister)( VSR32_VectorSRegisterEnumValue))
#define VSR33 ((VectorSRegister)( VSR33_VectorSRegisterEnumValue))
#define VSR34 ((VectorSRegister)( VSR34_VectorSRegisterEnumValue))
#define VSR35 ((VectorSRegister)( VSR35_VectorSRegisterEnumValue))
#define VSR36 ((VectorSRegister)( VSR36_VectorSRegisterEnumValue))
#define VSR37 ((VectorSRegister)( VSR37_VectorSRegisterEnumValue))
#define VSR38 ((VectorSRegister)( VSR38_VectorSRegisterEnumValue))
#define VSR39 ((VectorSRegister)( VSR39_VectorSRegisterEnumValue))
#define VSR40 ((VectorSRegister)( VSR40_VectorSRegisterEnumValue))
#define VSR41 ((VectorSRegister)( VSR41_VectorSRegisterEnumValue))
#define VSR42 ((VectorSRegister)( VSR42_VectorSRegisterEnumValue))
#define VSR43 ((VectorSRegister)( VSR43_VectorSRegisterEnumValue))
#define VSR44 ((VectorSRegister)( VSR44_VectorSRegisterEnumValue))
#define VSR45 ((VectorSRegister)( VSR45_VectorSRegisterEnumValue))
#define VSR46 ((VectorSRegister)( VSR46_VectorSRegisterEnumValue))
#define VSR47 ((VectorSRegister)( VSR47_VectorSRegisterEnumValue))
#define VSR48 ((VectorSRegister)( VSR48_VectorSRegisterEnumValue))
#define VSR49 ((VectorSRegister)( VSR49_VectorSRegisterEnumValue))
#define VSR50 ((VectorSRegister)( VSR50_VectorSRegisterEnumValue))
#define VSR51 ((VectorSRegister)( VSR51_VectorSRegisterEnumValue))
#define VSR52 ((VectorSRegister)( VSR52_VectorSRegisterEnumValue))
#define VSR53 ((VectorSRegister)( VSR53_VectorSRegisterEnumValue))
#define VSR54 ((VectorSRegister)( VSR54_VectorSRegisterEnumValue))
#define VSR55 ((VectorSRegister)( VSR55_VectorSRegisterEnumValue))
#define VSR56 ((VectorSRegister)( VSR56_VectorSRegisterEnumValue))
#define VSR57 ((VectorSRegister)( VSR57_VectorSRegisterEnumValue))
#define VSR58 ((VectorSRegister)( VSR58_VectorSRegisterEnumValue))
#define VSR59 ((VectorSRegister)( VSR59_VectorSRegisterEnumValue))
#define VSR60 ((VectorSRegister)( VSR60_VectorSRegisterEnumValue))
#define VSR61 ((VectorSRegister)( VSR61_VectorSRegisterEnumValue))
#define VSR62 ((VectorSRegister)( VSR62_VectorSRegisterEnumValue))
#define VSR63 ((VectorSRegister)( VSR63_VectorSRegisterEnumValue))
#endif // DONT_USE_REGISTER_DEFINES
// Maximum number of incoming arguments that can be passed in i registers.
@@ -609,7 +677,7 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
* 2 // register halves
+ ConditionRegisterImpl::number_of_registers // condition code registers
+ SpecialRegisterImpl::number_of_registers // special registers
+ VectorRegisterImpl::number_of_registers // vector registers
+ VectorRegisterImpl::number_of_registers // VSX registers
};
static const int max_gpr;

View File

@@ -35,6 +35,7 @@
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@@ -221,7 +222,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
sizeof(RegisterSaver::LiveRegType);
const int register_save_size = regstosave_num * reg_size;
const int frame_size_in_bytes = round_to(register_save_size, frame::alignment_in_bytes)
const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes)
+ frame::abi_reg_args_size;
*out_frame_size_in_bytes = frame_size_in_bytes;
const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
@@ -658,7 +659,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
ShouldNotReachHere();
}
}
return round_to(stk, 2);
return align_up(stk, 2);
}
#if defined(COMPILER1) || defined(COMPILER2)
@@ -845,7 +846,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
}
}
return round_to(stk, 2);
return align_up(stk, 2);
}
#endif // COMPILER2
@@ -873,7 +874,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
// Adapter needs TOP_IJAVA_FRAME_ABI.
const int adapter_size = frame::top_ijava_frame_abi_size +
round_to(total_args_passed * wordSize, frame::alignment_in_bytes);
align_up(total_args_passed * wordSize, frame::alignment_in_bytes);
// regular (verified) c2i entry point
c2i_entrypoint = __ pc();
@@ -1022,9 +1023,9 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// number (all values in registers) or the maximum stack slot accessed.
// Convert 4-byte c2 stack slots to words.
comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
// Round up to minimum stack alignment, in wordSize.
comp_words_on_stack = round_to(comp_words_on_stack, 2);
comp_words_on_stack = align_up(comp_words_on_stack, 2);
__ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
}
@@ -1609,7 +1610,7 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
}
static void verify_oop_args(MacroAssembler* masm,
methodHandle method,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = R19_method; // not part of any compiled calling seq
@@ -1631,7 +1632,7 @@ static void verify_oop_args(MacroAssembler* masm,
}
static void gen_special_dispatch(MacroAssembler* masm,
methodHandle method,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
@@ -1918,7 +1919,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
}
}
}
total_save_slots = double_slots * 2 + round_to(single_slots, 2); // round to even
total_save_slots = double_slots * 2 + align_up(single_slots, 2); // round to even
}
int oop_handle_slot_offset = stack_slots;
@@ -1945,7 +1946,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Now compute actual number of stack words we need.
// Rounding to make stack properly aligned.
stack_slots = round_to(stack_slots, // 7)
stack_slots = align_up(stack_slots, // 7)
frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
@@ -2203,8 +2204,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// disallows any pending_exception.
// Save argument registers and leave room for C-compatible ABI_REG_ARGS.
int frame_size = frame::abi_reg_args_size +
round_to(total_c_args * wordSize, frame::alignment_in_bytes);
int frame_size = frame::abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes);
__ mr(R11_scratch1, R1_SP);
RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);
@@ -2570,7 +2570,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// This function returns the adjust size (in number of words) to a c2i adapter
// activation for use during deoptimization.
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
return round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
}
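The round_to -> align_up replacements above are a rename; for a power-of-two alignment the operation itself is the usual mask trick (a sketch, not the utilities/align.hpp source):

#include <cstdint>

static inline intptr_t align_up_sketch(intptr_t size, intptr_t alignment) {
  // 'alignment' must be a power of two.
  return (size + alignment - 1) & ~(alignment - 1);
}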
uint SharedRuntime::out_preserve_stack_slots() {

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#define __ _masm->
@@ -626,7 +627,7 @@ class StubGenerator: public StubCodeGenerator {
int spill_slots = 3;
if (preserve1 != noreg) { spill_slots++; }
if (preserve2 != noreg) { spill_slots++; }
const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
Label filtered;
// Is marking active?
@@ -687,7 +688,7 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCTLogging:
{
int spill_slots = (preserve != noreg) ? 1 : 0;
const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
__ save_LR_CR(R0);
__ push_frame(frame_size, R0);
@@ -2728,7 +2729,7 @@ class StubGenerator: public StubCodeGenerator {
__ vspltisb (vTmp2, -16);
__ vrld (keyPerm, keyPerm, vTmp2);
__ vrld (keyPerm, keyPerm, vTmp2);
__ vsldoi (keyPerm, keyPerm, keyPerm, -8);
__ vsldoi (keyPerm, keyPerm, keyPerm, 8);
// load the 1st round key to vKey1
__ li (keypos, 0);
@@ -2928,7 +2929,7 @@ class StubGenerator: public StubCodeGenerator {
__ vspltisb (vTmp2, -16);
__ vrld (keyPerm, keyPerm, vTmp2);
__ vrld (keyPerm, keyPerm, vTmp2);
__ vsldoi (keyPerm, keyPerm, keyPerm, -8);
__ vsldoi (keyPerm, keyPerm, keyPerm, 8);
__ cmpwi (CCR0, keylen, 44);
__ beq (CCR0, L_do44);
@@ -3276,6 +3277,36 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Compute CRC32/CRC32C function.
void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process
const Register t0 = R2;
const Register t1 = R7;
const Register t2 = R8;
const Register t3 = R9;
const Register tc0 = R10;
const Register tc1 = R11;
const Register tc2 = R12;
BLOCK_COMMENT("Stub body {");
assert_different_registers(crc, data, dataLen, table);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr();
BLOCK_COMMENT("} Stub body");
}
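How invertCRC specializes this shared stub, sketched in C++ (the table contents differ between the CRC32 and CRC32C polynomials, but the byte-update loop is identical):

#include <cstddef>
#include <cstdint>

uint32_t crc_update_bytes(uint32_t crc, const uint8_t* data, size_t len,
                          const uint32_t* table, bool invertCRC) {
  if (invertCRC) crc = ~crc;          // CRC32: external -> internal form
  while (len--) crc = (crc >> 8) ^ table[(crc ^ *data++) & 0xff];
  if (invertCRC) crc = ~crc;          // CRC32: internal -> external form
  return crc;                         // CRC32C passes invertCRC == false
}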
/**
* Arguments:
*
@@ -3296,14 +3327,14 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); // Remember stub start address (is rtn value).
const Register table = R6; // crc table address
#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process
const Register table = R6; // crc table address
#ifdef VM_LITTLE_ENDIAN
if (VM_Version::has_vpmsumb()) {
const Register constants = R2; // constants address
const Register bconstants = R8; // barret table address
@@ -3321,7 +3352,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);
__ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4);
__ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
@@ -3331,31 +3362,79 @@ class StubGenerator: public StubCodeGenerator {
} else
#endif
{
const Register t0 = R2;
const Register t1 = R7;
const Register t2 = R8;
const Register t3 = R9;
const Register tc0 = R10;
const Register tc1 = R11;
const Register tc2 = R12;
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, true);
}
return start;
}
/**
* Arguments:
*
* Inputs:
* R3_ARG1 - int crc
* R4_ARG2 - byte* buf
* R5_ARG3 - int length (of buffer)
*
* scratch:
* R2, R6-R12
*
* Output:
* R3_RET - int crc result
*/
// Compute CRC32C function.
address generate_CRC32C_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); // Remember stub start address (is rtn value).
const Register table = R6; // crc table address
#if 0 // no vector support yet for CRC32C
#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process
if (VM_Version::has_vpmsumb()) {
const Register constants = R2; // constants address
const Register bconstants = R8; // barret table address
const Register t0 = R9;
const Register t1 = R10;
const Register t2 = R11;
const Register t3 = R12;
const Register t4 = R7;
BLOCK_COMMENT("Stub body {");
assert_different_registers(crc, data, dataLen, table);
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table);
__ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr();
BLOCK_COMMENT("} Stub body");
} else
#endif
#endif
{
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, false);
}
return start;
}
// Initialization
void generate_initial() {
// Generates all stubs and initializes the entry points
@@ -3383,6 +3462,12 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_crc_table_adr = (address)StubRoutines::ppc64::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
}
// CRC32C Intrinsics.
if (UseCRC32CIntrinsics) {
StubRoutines::_crc32c_table_addr = (address)StubRoutines::ppc64::_crc32c_table;
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
}
}
void generate_all() {

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,13 +55,16 @@ class ppc64 {
// CRC32 Intrinsics.
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint* _constants;
static juint* _barret_constants;
public:
// CRC32 Intrinsics.
static void generate_load_table_addr(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_constants_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_barret_constants_addr(MacroAssembler* masm, Register table);
static juint* generate_crc_constants();

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017 SAP SE. All rights reserved.
* Copyright (c) 2015, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -643,12 +643,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry;
}
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
__ unimplemented("generate_continuation_for");
return entry;
}
// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
@@ -692,6 +686,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
#endif
__ sldi(size, size, Interpreter::logStackElementSize);
__ add(R15_esp, R15_esp, size);
__ check_and_handle_popframe(R11_scratch1);
__ check_and_handle_earlyret(R11_scratch1);
__ dispatch_next(state, step);
return entry;
}
@@ -1894,7 +1892,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ lwz(crc, 2*wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
__ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);
__ kernel_crc32_singleByte(crc, data, dataLen, table, tmp, true);
// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@@ -1910,7 +1908,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
return NULL;
}
// CRC32 Intrinsics.
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
@@ -1986,7 +1984,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// Performance measurements show the 1word and 2word variants to be almost equivalent,
// with very light advantages for the 1word variant. We chose the 1word variant for
// code compactness.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, true);
// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@@ -2002,8 +2000,88 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
return NULL;
}
// Not supported
/**
* Method entry for intrinsic-candidate (non-native) methods:
* int java.util.zip.CRC32C.updateBytes( int crc, byte[] b, int off, int end)
* int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
* Unlike CRC32, CRC32C does not have any methods marked as native.
* CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
**/
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32CIntrinsics) {
address start = __ pc(); // Remember stub start address (is rtn value).
// We don't generate a local frame and don't align the stack because
// we don't even call stub code (we generate the code inline)
// and there is no safepoint on this path.
// Load parameters.
// R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
const Register argP = R15_esp;
const Register crc = R3_ARG1; // crc value
const Register data = R4_ARG2; // address of java byte array
const Register dataLen = R5_ARG3; // source data len
const Register table = R6_ARG4; // address of crc32c table
const Register t0 = R9; // scratch registers for crc calculation
const Register t1 = R10;
const Register t2 = R11;
const Register t3 = R12;
const Register tc0 = R2; // registers to hold pre-calculated column addresses
const Register tc1 = R7;
const Register tc2 = R8;
const Register tc3 = table; // table address is reconstructed at the end of kernel_crc32_* emitters
const Register tmp = t0; // Only used very locally to calculate byte buffer address.
// Arguments are reversed on java expression stack.
// Calculate address of start element.
if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateDirectByteBuffer".
BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
// crc @ (SP + 5W) (32bit)
// buf @ (SP + 3W) (64bit ptr to long array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off
__ ld( data, 3*wordSize, argP); // start of byte buffer
__ lwa( tmp, 2*wordSize, argP); // byte buffer offset
__ lwa( dataLen, 1*wordSize, argP); // #bytes to process
__ lwz( crc, 5*wordSize, argP); // current crc state
__ add( data, data, tmp); // Add byte buffer offset.
__ sub( dataLen, dataLen, tmp); // (end_index - offset)
} else { // Used for "updateBytes".
BLOCK_COMMENT("CRC32C_updateBytes {");
// crc @ (SP + 4W) (32bit)
// buf @ (SP + 3W) (64bit ptr to byte array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off + base_offset
__ ld( data, 3*wordSize, argP); // start of byte buffer
__ lwa( tmp, 2*wordSize, argP); // byte buffer offset
__ lwa( dataLen, 1*wordSize, argP); // #bytes to process
__ add( data, data, tmp); // add byte buffer offset
__ sub( dataLen, dataLen, tmp); // (end_index - offset)
__ lwz( crc, 4*wordSize, argP); // current crc state
__ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
// Performance measurements show the 1word and 2word variants to be almost equivalent,
// with very light advantages for the 1word variant. We chose the 1word variant for
// code compactness.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, false);
// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
__ blr();
BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
return start;
}
return NULL;
}
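A sketch of the argument arithmetic above (struct and names are illustrative): because CRC32C passes an end index rather than a length, the byte count is end - off.

#include <cstddef>
#include <cstdint>

struct Crc32cArgs { uint32_t crc; const uint8_t* buf; int32_t off; int32_t end; };

const uint8_t* start_of_data(const Crc32cArgs& a, size_t base_offset) {
  return a.buf + a.off + base_offset;  // base_offset is 0 for direct buffers
}
int32_t bytes_to_process(const Crc32cArgs& a) {
  return a.end - a.off;                // end index, not a length
}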

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 SAP SE. All rights reserved.
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1472,13 +1472,13 @@ void TemplateTable::convert() {
case Bytecodes::_i2d:
__ extsw(R17_tos, R17_tos);
case Bytecodes::_l2d:
__ push_l_pop_d();
__ move_l_to_d();
__ fcfid(F15_ftos, F15_ftos);
break;
case Bytecodes::_i2f:
__ extsw(R17_tos, R17_tos);
__ push_l_pop_d();
__ move_l_to_d();
if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
// Comment: alternatively, load with sign extend could be done by lfiwax.
__ fcfids(F15_ftos, F15_ftos);
@@ -1490,7 +1490,7 @@ void TemplateTable::convert() {
case Bytecodes::_l2f:
if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
__ push_l_pop_d();
__ move_l_to_d();
__ fcfids(F15_ftos, F15_ftos);
} else {
// Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
@@ -1514,7 +1514,7 @@ void TemplateTable::convert() {
__ li(R17_tos, 0); // 0 in case of NAN
__ bso(CCR0, done);
__ fctiwz(F15_ftos, F15_ftos);
__ push_d_pop_l();
__ move_d_to_l();
break;
case Bytecodes::_d2l:
@@ -1523,7 +1523,7 @@ void TemplateTable::convert() {
__ li(R17_tos, 0); // 0 in case of NAN
__ bso(CCR0, done);
__ fctidz(F15_ftos, F15_ftos);
__ push_d_pop_l();
__ move_d_to_l();
break;
default: ShouldNotReachHere();
@@ -3660,11 +3660,9 @@ void TemplateTable::_new() {
__ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
__ bne(CCR0, Lslow_case);
// Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
// Get instanceKlass
__ sldi(Roffset, Rindex, LogBytesPerWord);
__ addi(Rscratch, Rcpool, sizeof(ConstantPool));
__ isync(); // Order load of instance Klass wrt. tags.
__ ldx(RinstanceKlass, Roffset, Rscratch);
__ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);
// Make sure klass is fully initialized and get instance_size.
__ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
@@ -3722,7 +3720,7 @@ void TemplateTable::_new() {
__ bge(CCR0, Lslow_case);
// Increment waste limit to prevent getting stuck on this slow path.
__ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
__ add_const_optimized(RtlabWasteLimitValue, RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment());
__ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
}
// else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
@@ -3875,9 +3873,7 @@ void TemplateTable::checkcast() {
// Extract target class from constant pool.
__ bind(Lquicked);
__ sldi(Roffset, Roffset, LogBytesPerWord);
__ addi(Rcpool, Rcpool, sizeof(ConstantPool));
__ isync(); // Order load of specified Klass wrt. tags.
__ ldx(RspecifiedKlass, Rcpool, Roffset);
__ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);
// Do the checkcast.
__ bind(Lresolved);
@ -3939,9 +3935,7 @@ void TemplateTable::instanceof() {
// Extract target class from constant pool.
__ bind(Lquicked);
__ sldi(Roffset, Roffset, LogBytesPerWord);
__ addi(Rcpool, Rcpool, sizeof(ConstantPool));
__ isync(); // Order load of specified Klass wrt. tags.
__ ldx(RspecifiedKlass, Rcpool, Roffset);
__ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);
// Do the checkcast.
__ bind(Lresolved);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,9 +28,11 @@
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/align.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"
@ -79,7 +81,7 @@ void VM_Version::initialize() {
UINTX_FORMAT " on this machine", PowerArchitecturePPC64);
// Power 8: Configure Data Stream Control Register.
if (has_mfdscr()) {
if (PowerArchitecturePPC64 >= 8 && has_mfdscr()) {
config_dscr();
}
@ -111,7 +113,7 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
"ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
"ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_fsqrt() ? " fsqrt" : ""),
(has_isel() ? " isel" : ""),
(has_lxarxeh() ? " lxarxeh" : ""),
@ -126,7 +128,9 @@ void VM_Version::initialize() {
(has_vpmsumb() ? " vpmsumb" : ""),
(has_tcheck() ? " tcheck" : ""),
(has_mfdscr() ? " mfdscr" : ""),
(has_vsx() ? " vsx" : "")
(has_vsx() ? " vsx" : ""),
(has_ldbrx() ? " ldbrx" : ""),
(has_stdbrx() ? " stdbrx" : "")
// Make sure number of %s matches num_features!
);
_features_string = os::strdup(buf);
@ -172,18 +176,27 @@ void VM_Version::initialize() {
assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be non-negative");
// Implementation does not use any of the vector instructions
// available with Power8. Their exploitation is still pending.
// If defined(VM_LITTLE_ENDIAN) and running on Power8 or newer hardware,
// the implementation uses the vector instructions available with Power8.
// In all other cases, the implementation uses only generally available instructions.
if (!UseCRC32Intrinsics) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
}
}
if (UseCRC32CIntrinsics) {
if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
warning("CRC32C intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
// Implementation does not use any of the vector instructions available with Power8.
// Their exploitation is still pending (aka "work in progress").
if (!UseCRC32CIntrinsics) {
if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
}
}
// TODO: Provide implementation.
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
// The AES intrinsic stubs require AES instruction support.
@ -245,11 +258,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true;
}
@ -319,18 +327,6 @@ void VM_Version::initialize() {
// high lock contention. For now we do not use it by default.
vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
}
if (!is_power_of_2(RTMTotalCountIncrRate)) {
warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
}
if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
FLAG_SET_DEFAULT(RTMAbortRatio, 50);
}
if (RTMSpinLoopCount < 0) {
warning("RTMSpinLoopCount must not be a negative value, resetting it to 0");
FLAG_SET_DEFAULT(RTMSpinLoopCount, 0);
}
#else
// Only C2 does RTM locking optimization.
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
@ -659,6 +655,8 @@ void VM_Version::determine_features() {
a->tcheck(0); // code[12] -> tcheck
a->mfdscr(R0); // code[13] -> mfdscr
a->lxvd2x(VSR0, R3_ARG1); // code[14] -> vsx
a->ldbrx(R7, R3_ARG1, R4_ARG2); // code[15] -> ldbrx
a->stdbrx(R7, R3_ARG1, R4_ARG2); // code[16] -> stdbrx
a->blr();
// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@ -688,7 +686,7 @@ void VM_Version::determine_features() {
// Execute code. Illegal instructions will be replaced by 0 in the signal handler.
VM_Version::_is_determine_features_test_running = true;
// We must align the first argument to 16 bytes because of the lqarx check.
(*test)((address)align_size_up((intptr_t)mid_of_test_area, 16), (uint64_t)0);
(*test)(align_up((address)mid_of_test_area, 16), 0);
VM_Version::_is_determine_features_test_running = false;
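
The detection scheme above executes a buffer of candidate instructions and relies on the SIGILL handler to neutralize the ones the CPU does not implement, then inspects which slots ran. A stripped-down sketch of the same trap-and-probe idea in portable C++ — HotSpot instead patches the offending instruction to 0 in the signal handler and resumes; instruction_supported and the other names here are ours:

#include <csetjmp>
#include <csignal>

static sigjmp_buf g_probe_env;

static void on_sigill(int) {
  siglongjmp(g_probe_env, 1);  // unwind back to the probe: instruction trapped
}

// Returns true if running 'candidate' does not raise SIGILL.
// 'candidate' would wrap one inline-asm instruction, e.g. ldbrx on PPC64.
static bool instruction_supported(void (*candidate)()) {
  struct sigaction sa, old_sa;
  sa.sa_handler = on_sigill;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  sigaction(SIGILL, &sa, &old_sa);
  volatile bool supported = false;
  if (sigsetjmp(g_probe_env, 1) == 0) {
    candidate();
    supported = true;            // reached only if the instruction executed
  }
  sigaction(SIGILL, &old_sa, nullptr);
  return supported;
}
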
// Determine which instructions are legal.
@ -708,6 +706,8 @@ void VM_Version::determine_features() {
if (code[feature_cntr++]) features |= tcheck_m;
if (code[feature_cntr++]) features |= mfdscr_m;
if (code[feature_cntr++]) features |= vsx_m;
if (code[feature_cntr++]) features |= ldbrx_m;
if (code[feature_cntr++]) features |= stdbrx_m;
// Print the detection code.
if (PrintAssembly) {

Some files were not shown because too many files have changed in this diff.