This commit is contained in:
J. Duke 2017-07-05 21:29:38 +02:00
commit 8a546668ec
179 changed files with 6569 additions and 2321 deletions

View File

@ -353,3 +353,4 @@ be58b02c11f90b88c67e4d0e2cb5e4cf2d9b3c57 jdk-9+105
c7be2a78c31b3b6132f2f5e9e4b3d3bb1c20245c jdk-9+108 c7be2a78c31b3b6132f2f5e9e4b3d3bb1c20245c jdk-9+108
1787bdaabb2b6f4193406e25a50cb0419ea8e8f3 jdk-9+109 1787bdaabb2b6f4193406e25a50cb0419ea8e8f3 jdk-9+109
925be13b3740d07a5958ccb5ab3c0ae1baba7055 jdk-9+110 925be13b3740d07a5958ccb5ab3c0ae1baba7055 jdk-9+110
f900d5afd9c83a0df8f36161c27c5e4c86a66f4c jdk-9+111

View File

@ -427,6 +427,9 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
elif test "x$TOOLCHAIN_TYPE" = xxlc; then elif test "x$TOOLCHAIN_TYPE" = xxlc; then
CFLAGS_DEBUG_SYMBOLS="-g" CFLAGS_DEBUG_SYMBOLS="-g"
CXXFLAGS_DEBUG_SYMBOLS="-g" CXXFLAGS_DEBUG_SYMBOLS="-g"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CFLAGS_DEBUG_SYMBOLS="-Zi"
CXXFLAGS_DEBUG_SYMBOLS="-Zi"
fi fi
AC_SUBST(CFLAGS_DEBUG_SYMBOLS) AC_SUBST(CFLAGS_DEBUG_SYMBOLS)
AC_SUBST(CXXFLAGS_DEBUG_SYMBOLS) AC_SUBST(CXXFLAGS_DEBUG_SYMBOLS)
@ -585,6 +588,12 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
elif test "x$TOOLCHAIN_TYPE" = xxlc; then elif test "x$TOOLCHAIN_TYPE" = xxlc; then
CFLAGS_JDK="${CFLAGS_JDK} -qchars=signed -qfullpath -qsaveopt" CFLAGS_JDK="${CFLAGS_JDK} -qchars=signed -qfullpath -qsaveopt"
CXXFLAGS_JDK="${CXXFLAGS_JDK} -qchars=signed -qfullpath -qsaveopt" CXXFLAGS_JDK="${CXXFLAGS_JDK} -qchars=signed -qfullpath -qsaveopt"
elif test "x$TOOLCHAIN_TYPE" = xgcc; then
CXXSTD_CXXFLAG="-std=gnu++98"
FLAGS_CXX_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [$CXXSTD_CXXFLAG -Werror],
IF_FALSE: [CXXSTD_CXXFLAG=""])
CXXFLAGS_JDK="${CXXFLAGS_JDK} ${CXXSTD_CXXFLAG}"
AC_SUBST([CXXSTD_CXXFLAG])
fi fi
CFLAGS_JDK="${CFLAGS_JDK} $EXTRA_CFLAGS" CFLAGS_JDK="${CFLAGS_JDK} $EXTRA_CFLAGS"
@ -622,6 +631,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing" CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
;; ;;
esac esac
TOOLCHAIN_CHECK_COMPILER_VERSION(VERSION: 6, IF_AT_LEAST: FLAGS_SETUP_GCC6_COMPILER_FLAGS)
elif test "x$TOOLCHAIN_TYPE" = xclang; then elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$OPENJDK_TARGET_OS" = xlinux; then if test "x$OPENJDK_TARGET_OS" = xlinux; then
if test "x$OPENJDK_TARGET_CPU" = xx86; then if test "x$OPENJDK_TARGET_CPU" = xx86; then
@ -654,7 +664,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
CXXFLAGS_JDK="$CXXFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC" CXXFLAGS_JDK="$CXXFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK \ COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK \
-Zi -MD -Zc:wchar_t- -W3 -wd4800 \ -MD -Zc:wchar_t- -W3 -wd4800 \
-DWIN32_LEAN_AND_MEAN \ -DWIN32_LEAN_AND_MEAN \
-D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE \ -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE \
-D_WINSOCK_DEPRECATED_NO_WARNINGS \ -D_WINSOCK_DEPRECATED_NO_WARNINGS \
@ -821,9 +831,6 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
LDFLAGS_SAFESH="-safeseh" LDFLAGS_SAFESH="-safeseh"
LDFLAGS_JDK="$LDFLAGS_JDK $LDFLAGS_SAFESH" LDFLAGS_JDK="$LDFLAGS_JDK $LDFLAGS_SAFESH"
fi fi
# TODO: make -debug optional "--disable-full-debug-symbols"
LDFLAGS_MICROSOFT_DEBUG="-debug"
LDFLAGS_JDK="$LDFLAGS_JDK $LDFLAGS_MICROSOFT_DEBUG"
elif test "x$TOOLCHAIN_TYPE" = xgcc; then elif test "x$TOOLCHAIN_TYPE" = xgcc; then
# If this is a --hash-style=gnu system, use --hash-style=both, why? # If this is a --hash-style=gnu system, use --hash-style=both, why?
# We have previously set HAS_GNU_HASH if this is the case # We have previously set HAS_GNU_HASH if this is the case
@ -950,14 +957,14 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
AC_SUBST(LDFLAGS_TESTEXE) AC_SUBST(LDFLAGS_TESTEXE)
]) ])
# FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [ARGUMENT], IF_TRUE: [RUN-IF-TRUE], # FLAGS_C_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [ARGUMENT], IF_TRUE: [RUN-IF-TRUE],
# IF_FALSE: [RUN-IF-FALSE]) # IF_FALSE: [RUN-IF-FALSE])
# ------------------------------------------------------------ # ------------------------------------------------------------
# Check that the c and c++ compilers support an argument # Check that the C compiler supports an argument
BASIC_DEFUN_NAMED([FLAGS_COMPILER_CHECK_ARGUMENTS], BASIC_DEFUN_NAMED([FLAGS_C_COMPILER_CHECK_ARGUMENTS],
[*ARGUMENT IF_TRUE IF_FALSE], [$@], [*ARGUMENT IF_TRUE IF_FALSE], [$@],
[ [
AC_MSG_CHECKING([if compiler supports "ARG_ARGUMENT"]) AC_MSG_CHECKING([if the C compiler supports "ARG_ARGUMENT"])
supports=yes supports=yes
saved_cflags="$CFLAGS" saved_cflags="$CFLAGS"
@ -968,6 +975,26 @@ BASIC_DEFUN_NAMED([FLAGS_COMPILER_CHECK_ARGUMENTS],
AC_LANG_POP([C]) AC_LANG_POP([C])
CFLAGS="$saved_cflags" CFLAGS="$saved_cflags"
AC_MSG_RESULT([$supports])
if test "x$supports" = "xyes" ; then
:
ARG_IF_TRUE
else
:
ARG_IF_FALSE
fi
])
# FLAGS_CXX_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [ARGUMENT], IF_TRUE: [RUN-IF-TRUE],
# IF_FALSE: [RUN-IF-FALSE])
# ------------------------------------------------------------
# Check that the C++ compiler supports an argument
BASIC_DEFUN_NAMED([FLAGS_CXX_COMPILER_CHECK_ARGUMENTS],
[*ARGUMENT IF_TRUE IF_FALSE], [$@],
[
AC_MSG_CHECKING([if the C++ compiler supports "ARG_ARGUMENT"])
supports=yes
saved_cxxflags="$CXXFLAGS" saved_cxxflags="$CXXFLAGS"
CXXFLAGS="$CXXFLAG ARG_ARGUMENT" CXXFLAGS="$CXXFLAG ARG_ARGUMENT"
AC_LANG_PUSH([C++]) AC_LANG_PUSH([C++])
@ -986,6 +1013,34 @@ BASIC_DEFUN_NAMED([FLAGS_COMPILER_CHECK_ARGUMENTS],
fi fi
]) ])
# FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [ARGUMENT], IF_TRUE: [RUN-IF-TRUE],
# IF_FALSE: [RUN-IF-FALSE])
# ------------------------------------------------------------
# Check that the C and C++ compilers support an argument
BASIC_DEFUN_NAMED([FLAGS_COMPILER_CHECK_ARGUMENTS],
[*ARGUMENT IF_TRUE IF_FALSE], [$@],
[
FLAGS_C_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [ARG_ARGUMENT],
IF_TRUE: [C_COMP_SUPPORTS="yes"],
IF_FALSE: [C_COMP_SUPPORTS="no"])
FLAGS_CXX_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [ARG_ARGUMENT],
IF_TRUE: [CXX_COMP_SUPPORTS="yes"],
IF_FALSE: [CXX_COMP_SUPPORTS="no"])
AC_MSG_CHECKING([if both compilers support "ARG_ARGUMENT"])
supports=no
if test "x$C_COMP_SUPPORTS" = "xyes" -a "x$CXX_COMP_SUPPORTS" = "xyes"; then supports=yes; fi
AC_MSG_RESULT([$supports])
if test "x$supports" = "xyes" ; then
:
ARG_IF_TRUE
else
:
ARG_IF_FALSE
fi
])
# FLAGS_LINKER_CHECK_ARGUMENTS(ARGUMENT: [ARGUMENT], IF_TRUE: [RUN-IF-TRUE], # FLAGS_LINKER_CHECK_ARGUMENTS(ARGUMENT: [ARGUMENT], IF_TRUE: [RUN-IF-TRUE],
# IF_FALSE: [RUN-IF-FALSE]) # IF_FALSE: [RUN-IF-FALSE])
# ------------------------------------------------------------ # ------------------------------------------------------------
@ -1110,3 +1165,20 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_MISC],
AC_SUBST(BUILD_CC_DISABLE_WARNING_PREFIX) AC_SUBST(BUILD_CC_DISABLE_WARNING_PREFIX)
AC_SUBST(CFLAGS_WARNINGS_ARE_ERRORS) AC_SUBST(CFLAGS_WARNINGS_ARE_ERRORS)
]) ])
AC_DEFUN_ONCE([FLAGS_SETUP_GCC6_COMPILER_FLAGS],
[
# These flags are required for GCC 6 builds as undefined behaviour in OpenJDK code
# runs afoul of the more aggressive versions of these optimisations.
# Notably, value range propagation now assumes that the this pointer of C++
# member functions is non-null.
NO_NULL_POINTER_CHECK_CFLAG="-fno-delete-null-pointer-checks"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [$NO_NULL_POINTER_CHECK_CFLAG -Werror],
IF_FALSE: [NO_NULL_POINTER_CHECK_CFLAG=""])
AC_SUBST([NO_NULL_POINTER_CHECK_CFLAG])
NO_LIFETIME_DSE_CFLAG="-fno-lifetime-dse"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [$NO_LIFETIME_DSE_CFLAG -Werror],
IF_FALSE: [NO_LIFETIME_DSE_CFLAG=""])
CFLAGS_JDK="${CFLAGS_JDK} ${NO_NULL_POINTER_CHECK_CFLAG} ${NO_LIFETIME_DSE_CFLAG}"
AC_SUBST([NO_LIFETIME_DSE_CFLAG])
])

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -110,7 +110,8 @@ LD:=@HOTSPOT_LD@
MT:=@HOTSPOT_MT@ MT:=@HOTSPOT_MT@
RC:=@HOTSPOT_RC@ RC:=@HOTSPOT_RC@
EXTRA_CFLAGS=@LEGACY_EXTRA_CFLAGS@ $(CFLAGS_CCACHE) EXTRA_CFLAGS=@LEGACY_EXTRA_CFLAGS@ $(CFLAGS_CCACHE) $(NO_NULL_POINTER_CHECK_FLAG) \
$(NO_LIFETIME_DSE_CFLAG) $(CXXSTD_CXXFLAG)
EXTRA_CXXFLAGS=@LEGACY_EXTRA_CXXFLAGS@ $(CFLAGS_CCACHE) EXTRA_CXXFLAGS=@LEGACY_EXTRA_CXXFLAGS@ $(CFLAGS_CCACHE)
EXTRA_LDFLAGS=@LEGACY_EXTRA_LDFLAGS@ EXTRA_LDFLAGS=@LEGACY_EXTRA_LDFLAGS@

View File

@ -157,10 +157,10 @@ AC_DEFUN_ONCE([LIB_SETUP_ZLIB],
AC_MSG_CHECKING([for which zlib to use]) AC_MSG_CHECKING([for which zlib to use])
DEFAULT_ZLIB=bundled DEFAULT_ZLIB=system
if test "x$OPENJDK_TARGET_OS" = xmacosx; then if test "x$OPENJDK_TARGET_OS" = xwindows; then
# On macosx default is system...on others default is bundled # On windows default is bundled...on others default is system
DEFAULT_ZLIB=system DEFAULT_ZLIB=bundled
fi fi
if test "x${ZLIB_FOUND}" != "xyes"; then if test "x${ZLIB_FOUND}" != "xyes"; then

View File

@ -355,6 +355,9 @@ CFLAGS_WARNINGS_ARE_ERRORS:=@CFLAGS_WARNINGS_ARE_ERRORS@
WARNINGS_AS_ERRORS := @WARNINGS_AS_ERRORS@ WARNINGS_AS_ERRORS := @WARNINGS_AS_ERRORS@
CFLAGS_CCACHE:=@CFLAGS_CCACHE@ CFLAGS_CCACHE:=@CFLAGS_CCACHE@
NO_NULL_POINTER_CHECK_FLAG=@NO_NULL_POINTER_CHECK_CFLAG@
NO_LIFETIME_DSE_CFLAG=@NO_LIFETIME_DSE_CFLAG@
CXXSTD_CXXFLAG=@CXXSTD_CXXFLAG@
# Tools that potentially need to be cross compilation aware. # Tools that potentially need to be cross compilation aware.
CC:=@FIXPATH@ @CCACHE@ @ICECC@ @CC@ CC:=@FIXPATH@ @CCACHE@ @ICECC@ @CC@

View File

@ -92,11 +92,11 @@ BASIC_DEFUN_NAMED([TOOLCHAIN_CHECK_COMPILER_VERSION],
REFERENCE_VERSION=ARG_VERSION REFERENCE_VERSION=ARG_VERSION
if [ [[ "$REFERENCE_VERSION" =~ (.*\.){3} ]] ]; then if [ [[ "$REFERENCE_VERSION" =~ (.*\.){3} ]] ]; then
AC_MSG_ERROR([Internal errror: Cannot compare to ARG_VERSION, only three parts (X.Y.Z) is supported]) AC_MSG_ERROR([Internal error: Cannot compare to ARG_VERSION, only three parts (X.Y.Z) is supported])
fi fi
if [ [[ "$REFERENCE_VERSION" =~ [0-9]{6} ]] ]; then if [ [[ "$REFERENCE_VERSION" =~ [0-9]{6} ]] ]; then
AC_MSG_ERROR([Internal errror: Cannot compare to ARG_VERSION, only parts < 99999 is supported]) AC_MSG_ERROR([Internal error: Cannot compare to ARG_VERSION, only parts < 99999 is supported])
fi fi
# Version comparison method inspired by http://stackoverflow.com/a/24067243 # Version comparison method inspired by http://stackoverflow.com/a/24067243
@ -403,7 +403,7 @@ AC_DEFUN([TOOLCHAIN_EXTRACT_COMPILER_VERSION],
COMPILER_VERSION_STRING=`$ECHO $COMPILER_VERSION_OUTPUT | \ COMPILER_VERSION_STRING=`$ECHO $COMPILER_VERSION_OUTPUT | \
$SED -e 's/ *Copyright .*//'` $SED -e 's/ *Copyright .*//'`
COMPILER_VERSION_NUMBER=`$ECHO $COMPILER_VERSION_OUTPUT | \ COMPILER_VERSION_NUMBER=`$ECHO $COMPILER_VERSION_OUTPUT | \
$SED -e 's/^.* \(@<:@1-9@:>@\.@<:@0-9.@:>@*\) .*$/\1/'` $SED -e 's/^.* \(@<:@1-9@:>@\.@<:@0-9.@:>@*\)@<:@^0-9.@:>@.*$/\1/'`
elif test "x$TOOLCHAIN_TYPE" = xclang; then elif test "x$TOOLCHAIN_TYPE" = xclang; then
# clang --version output typically looks like # clang --version output typically looks like
# Apple LLVM version 5.0 (clang-500.2.79) (based on LLVM 3.3svn) # Apple LLVM version 5.0 (clang-500.2.79) (based on LLVM 3.3svn)

View File

@ -217,6 +217,7 @@ var getJibProfilesCommon = function (input) {
configure_args: ["--with-default-make-target=all"], configure_args: ["--with-default-make-target=all"],
configure_args_32bit: ["--with-target-bits=32", "--with-jvm-variants=client,server"], configure_args_32bit: ["--with-target-bits=32", "--with-jvm-variants=client,server"],
configure_args_debug: ["--enable-debug"], configure_args_debug: ["--enable-debug"],
configure_args_slowdebug: ["--with-debug-level=slowdebug"],
organization: "jpg.infra.builddeps" organization: "jpg.infra.builddeps"
}; };
@ -241,6 +242,7 @@ var getJibProfilesProfiles = function (input, common) {
target_cpu: "x64", target_cpu: "x64",
dependencies: concat(common.dependencies, "devkit"), dependencies: concat(common.dependencies, "devkit"),
configure_args: common.configure_args, configure_args: common.configure_args,
configure_args: concat(common.configure_args, "--with-zlib=system"),
make_args: common.make_args make_args: common.make_args
}, },
@ -250,6 +252,7 @@ var getJibProfilesProfiles = function (input, common) {
build_cpu: "x64", build_cpu: "x64",
dependencies: concat(common.dependencies, "devkit"), dependencies: concat(common.dependencies, "devkit"),
configure_args: concat(common.configure_args, common.configure_args_32bit), configure_args: concat(common.configure_args, common.configure_args_32bit),
configure_args: concat(common.configure_args, "--with-zlib=system"),
make_args: common.make_args make_args: common.make_args
}, },
@ -258,6 +261,7 @@ var getJibProfilesProfiles = function (input, common) {
target_cpu: "x64", target_cpu: "x64",
dependencies: concat(common.dependencies, "devkit"), dependencies: concat(common.dependencies, "devkit"),
configure_args: common.configure_args, configure_args: common.configure_args,
configure_args: concat(common.configure_args, "--with-zlib=system"),
make_args: common.make_args make_args: common.make_args
}, },
@ -266,6 +270,7 @@ var getJibProfilesProfiles = function (input, common) {
target_cpu: "x64", target_cpu: "x64",
dependencies: concat(common.dependencies, "devkit", "cups"), dependencies: concat(common.dependencies, "devkit", "cups"),
configure_args: common.configure_args, configure_args: common.configure_args,
configure_args: concat(common.configure_args, "--with-zlib=system"),
make_args: common.make_args make_args: common.make_args
}, },
@ -274,6 +279,7 @@ var getJibProfilesProfiles = function (input, common) {
target_cpu: "sparcv9", target_cpu: "sparcv9",
dependencies: concat(common.dependencies, "devkit", "cups"), dependencies: concat(common.dependencies, "devkit", "cups"),
configure_args: common.configure_args, configure_args: common.configure_args,
configure_args: concat(common.configure_args, "--with-zlib=system"),
make_args: common.make_args make_args: common.make_args
}, },
@ -297,53 +303,28 @@ var getJibProfilesProfiles = function (input, common) {
profiles = concatObjects(profiles, mainProfiles); profiles = concatObjects(profiles, mainProfiles);
// Generate debug versions of all the main profiles // Generate debug versions of all the main profiles
profiles = concatObjects(profiles, generateDebugProfiles(common, mainProfiles)); profiles = concatObjects(profiles, generateDebugProfiles(common, mainProfiles));
// Generate slowdebug versions of all the main profiles
profiles = concatObjects(profiles, generateSlowdebugProfiles(common, mainProfiles));
// Specific open profiles needed for JPRT testing // Generate open only profiles for all the main profiles for JPRT and reference
var jprtOpenProfiles = { // implementation builds.
var openOnlyProfiles = generateOpenOnlyProfiles(common, mainProfiles);
// The open only profiles on linux are used for reference builds and should
// produce the compact profile images by default.
var openOnlyProfilesExtra = {
"linux-x64-open": { "linux-x64-open": {
target_os: mainProfiles["linux-x64"].target_os, configure_args: ["--with-default-make-target=all profiles"],
target_cpu: mainProfiles["linux-x64"].target_cpu,
dependencies: mainProfiles["linux-x64"].dependencies,
configure_args: concat(mainProfiles["linux-x64"].configure_args,
"--enable-openjdk-only"),
make_args: mainProfiles["linux-x64"].make_args,
labels: [ "open" ]
}, },
"linux-x86-open": { "linux-x86-open": {
target_os: mainProfiles["linux-x86"].target_os, configure_args: ["--with-default-make-target=all profiles"],
target_cpu: mainProfiles["linux-x86"].target_cpu,
dependencies: mainProfiles["linux-x86"].dependencies,
configure_args: concat(mainProfiles["linux-x86"].configure_args,
"--enable-openjdk-only"),
make_args: mainProfiles["linux-x86"].make_args,
labels: [ "open" ]
},
"solaris-x64-open": {
target_os: mainProfiles["solaris-x64"].target_os,
target_cpu: mainProfiles["solaris-x64"].target_cpu,
dependencies: mainProfiles["solaris-x64"].dependencies,
configure_args: concat(mainProfiles["solaris-x64"].configure_args,
"--enable-openjdk-only"),
make_args: mainProfiles["solaris-x64"].make_args,
labels: [ "open" ]
},
"windows-x86-open": {
target_os: mainProfiles["windows-x86"].target_os,
target_cpu: mainProfiles["windows-x86"].target_cpu,
dependencies: mainProfiles["windows-x86"].dependencies,
configure_args: concat(mainProfiles["windows-x86"].configure_args,
"--enable-openjdk-only"),
make_args: mainProfiles["windows-x86"].make_args,
labels: [ "open" ]
} }
}; };
profiles = concatObjects(profiles, jprtOpenProfiles); var openOnlyProfiles = concatObjects(openOnlyProfiles, openOnlyProfilesExtra);
profiles = concatObjects(profiles, openOnlyProfiles);
// Generate debug profiles for the open jprt profiles // Generate debug profiles for the open jprt profiles
profiles = concatObjects(profiles, generateDebugProfiles(common, jprtOpenProfiles)); profiles = concatObjects(profiles, generateDebugProfiles(common, openOnlyProfiles));
// Profiles used to run tests. Used in JPRT. // Profiles used to run tests. Used in JPRT.
var testOnlyProfiles = { var testOnlyProfiles = {
@ -501,6 +482,51 @@ var generateDebugProfiles = function (common, profiles) {
return newProfiles; return newProfiles;
}; };
/**
* Generates slowdebug versions of profiles. Clones the given profiles and adds
* debug metadata.
*
* @param common Common values
* @param profiles Profiles map to generate debug profiles for
* @returns {{}} New map of profiles containing debug profiles
*/
var generateSlowdebugProfiles = function (common, profiles) {
var newProfiles = {};
for (var profile in profiles) {
var debugProfile = profile + "-slowdebug";
newProfiles[debugProfile] = clone(profiles[profile]);
newProfiles[debugProfile].debug_level = "slowdebug";
newProfiles[debugProfile].labels
= concat(newProfiles[debugProfile].labels || [], "slowdebug"),
newProfiles[debugProfile].configure_args
= concat(newProfiles[debugProfile].configure_args,
common.configure_args_slowdebug);
}
return newProfiles;
};
/**
* Generates open only versions of profiles. Clones the given profiles and adds
* open metadata.
*
* @param common Common values
* @param profiles Profiles map to generate open only profiles for
* @returns {{}} New map of profiles containing open only profiles
*/
var generateOpenOnlyProfiles = function (common, profiles) {
var newProfiles = {};
for (var profile in profiles) {
var openProfile = profile + "-open";
newProfiles[openProfile] = clone(profiles[profile]);
newProfiles[openProfile].labels
= concat(newProfiles[openProfile].labels || [], "open"),
newProfiles[openProfile].configure_args
= concat(newProfiles[openProfile].configure_args,
"--enable-openjdk-only");
}
return newProfiles;
};
/** /**
* Deep clones an object tree. * Deep clones an object tree.
* *

View File

@ -513,3 +513,4 @@ c5146d4da417f76edfc43097d2e2ced042a65b4e jdk-9+107
934f6793f5f7dca44f69b4559d525fa64b31840d jdk-9+108 934f6793f5f7dca44f69b4559d525fa64b31840d jdk-9+108
7e7e50ac4faf19899fc811569e32cfa478759ebb jdk-9+109 7e7e50ac4faf19899fc811569e32cfa478759ebb jdk-9+109
2f5d1578b24060ea06bd1f340a124db95d1475b2 jdk-9+110 2f5d1578b24060ea06bd1f340a124db95d1475b2 jdk-9+110
c558850fac5750d8ca98a45180121980f57cdd28 jdk-9+111

View File

@ -91,7 +91,7 @@ else ifeq ($(OPENJDK_TARGET_OS), windows)
ifeq ($(OPENJDK_TARGET_CPU), x86_64) ifeq ($(OPENJDK_TARGET_CPU), x86_64)
SA_CXXFLAGS += -DWIN64 SA_CXXFLAGS += -DWIN64
else else
SA_CXXFLAGS += -RTC1 -ZI SA_CXXFLAGS += -RTC1
SA_LDFLAGS += -SAFESEH SA_LDFLAGS += -SAFESEH
endif endif
endif endif

View File

@ -3425,9 +3425,6 @@ const bool Matcher::misaligned_vectors_ok() {
// false => size gets scaled to BytesPerLong, ok. // false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false; const bool Matcher::init_array_count_is_in_bytes = false;
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 18 * BytesPerLong;
// Use conditional move (CMOVL) // Use conditional move (CMOVL)
const int Matcher::long_cmove_cost() { const int Matcher::long_cmove_cost() {
// long cmoves are no more expensive than int cmoves // long cmoves are no more expensive than int cmoves
@ -4135,14 +4132,14 @@ encode %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
&Assembler::ldxr, &MacroAssembler::cmp, &Assembler::stlxr); Assembler::xword, /*acquire*/ false, /*release*/ true);
%} %}
enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
&Assembler::ldxrw, &MacroAssembler::cmpw, &Assembler::stlxrw); Assembler::word, /*acquire*/ false, /*release*/ true);
%} %}
@ -4154,14 +4151,14 @@ encode %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
&Assembler::ldaxr, &MacroAssembler::cmp, &Assembler::stlxr); Assembler::xword, /*acquire*/ true, /*release*/ true);
%} %}
enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
&Assembler::ldaxrw, &MacroAssembler::cmpw, &Assembler::stlxrw); Assembler::word, /*acquire*/ true, /*release*/ true);
%} %}
@ -4679,8 +4676,14 @@ encode %{
// Compare object markOop with mark and if equal exchange scratch1 // Compare object markOop with mark and if equal exchange scratch1
// with object markOop. // with object markOop.
{ if (UseLSE) {
__ mov(tmp, disp_hdr);
__ casal(Assembler::xword, tmp, box, oop);
__ cmp(tmp, disp_hdr);
__ br(Assembler::EQ, cont);
} else {
Label retry_load; Label retry_load;
__ prfm(Address(oop), PSTL1STRM);
__ bind(retry_load); __ bind(retry_load);
__ ldaxr(tmp, oop); __ ldaxr(tmp, oop);
__ cmp(tmp, disp_hdr); __ cmp(tmp, disp_hdr);
@ -4729,8 +4732,13 @@ encode %{
__ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value)); __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
__ mov(disp_hdr, zr); __ mov(disp_hdr, zr);
{ if (UseLSE) {
__ mov(rscratch1, disp_hdr);
__ casal(Assembler::xword, rscratch1, rthread, tmp);
__ cmp(rscratch1, disp_hdr);
} else {
Label retry_load, fail; Label retry_load, fail;
__ prfm(Address(tmp), PSTL1STRM);
__ bind(retry_load); __ bind(retry_load);
__ ldaxr(rscratch1, tmp); __ ldaxr(rscratch1, tmp);
__ cmp(disp_hdr, rscratch1); __ cmp(disp_hdr, rscratch1);
@ -4818,8 +4826,13 @@ encode %{
// see the stack address of the basicLock in the markOop of the // see the stack address of the basicLock in the markOop of the
// object. // object.
{ if (UseLSE) {
__ mov(tmp, box);
__ casl(Assembler::xword, tmp, disp_hdr, oop);
__ cmp(tmp, box);
} else {
Label retry_load; Label retry_load;
__ prfm(Address(oop), PSTL1STRM);
__ bind(retry_load); __ bind(retry_load);
__ ldxr(tmp, oop); __ ldxr(tmp, oop);
__ cmp(box, tmp); __ cmp(box, tmp);

View File

@ -972,7 +972,7 @@ public:
// System // System
void system(int op0, int op1, int CRn, int CRm, int op2, void system(int op0, int op1, int CRn, int CRm, int op2,
Register rt = (Register)0b11111) Register rt = dummy_reg)
{ {
starti; starti;
f(0b11010101000, 31, 21); f(0b11010101000, 31, 21);
@ -1082,7 +1082,7 @@ public:
#define INSN(NAME, opc) \ #define INSN(NAME, opc) \
void NAME() { \ void NAME() { \
branch_reg((Register)0b11111, opc); \ branch_reg(dummy_reg, opc); \
} }
INSN(eret, 0b0100); INSN(eret, 0b0100);
@ -1094,10 +1094,22 @@ public:
enum operand_size { byte, halfword, word, xword }; enum operand_size { byte, halfword, word, xword };
void load_store_exclusive(Register Rs, Register Rt1, Register Rt2, void load_store_exclusive(Register Rs, Register Rt1, Register Rt2,
Register Rn, enum operand_size sz, int op, int o0) { Register Rn, enum operand_size sz, int op, bool ordered) {
starti; starti;
f(sz, 31, 30), f(0b001000, 29, 24), f(op, 23, 21); f(sz, 31, 30), f(0b001000, 29, 24), f(op, 23, 21);
rf(Rs, 16), f(o0, 15), rf(Rt2, 10), rf(Rn, 5), rf(Rt1, 0); rf(Rs, 16), f(ordered, 15), rf(Rt2, 10), rf(Rn, 5), rf(Rt1, 0);
}
void load_exclusive(Register dst, Register addr,
enum operand_size sz, bool ordered) {
load_store_exclusive(dummy_reg, dst, dummy_reg, addr,
sz, 0b010, ordered);
}
void store_exclusive(Register status, Register new_val, Register addr,
enum operand_size sz, bool ordered) {
load_store_exclusive(status, new_val, dummy_reg, addr,
sz, 0b000, ordered);
} }
#define INSN4(NAME, sz, op, o0) /* Four registers */ \ #define INSN4(NAME, sz, op, o0) /* Four registers */ \
@ -1109,19 +1121,19 @@ public:
#define INSN3(NAME, sz, op, o0) /* Three registers */ \ #define INSN3(NAME, sz, op, o0) /* Three registers */ \
void NAME(Register Rs, Register Rt, Register Rn) { \ void NAME(Register Rs, Register Rt, Register Rn) { \
guarantee(Rs != Rn && Rs != Rt, "unpredictable instruction"); \ guarantee(Rs != Rn && Rs != Rt, "unpredictable instruction"); \
load_store_exclusive(Rs, Rt, (Register)0b11111, Rn, sz, op, o0); \ load_store_exclusive(Rs, Rt, dummy_reg, Rn, sz, op, o0); \
} }
#define INSN2(NAME, sz, op, o0) /* Two registers */ \ #define INSN2(NAME, sz, op, o0) /* Two registers */ \
void NAME(Register Rt, Register Rn) { \ void NAME(Register Rt, Register Rn) { \
load_store_exclusive((Register)0b11111, Rt, (Register)0b11111, \ load_store_exclusive(dummy_reg, Rt, dummy_reg, \
Rn, sz, op, o0); \ Rn, sz, op, o0); \
} }
#define INSN_FOO(NAME, sz, op, o0) /* Three registers, encoded differently */ \ #define INSN_FOO(NAME, sz, op, o0) /* Three registers, encoded differently */ \
void NAME(Register Rt1, Register Rt2, Register Rn) { \ void NAME(Register Rt1, Register Rt2, Register Rn) { \
guarantee(Rt1 != Rt2, "unpredictable instruction"); \ guarantee(Rt1 != Rt2, "unpredictable instruction"); \
load_store_exclusive((Register)0b11111, Rt1, Rt2, Rn, sz, op, o0); \ load_store_exclusive(dummy_reg, Rt1, Rt2, Rn, sz, op, o0); \
} }
// bytes // bytes
@ -1169,6 +1181,46 @@ public:
#undef INSN4 #undef INSN4
#undef INSN_FOO #undef INSN_FOO
// 8.1 Compare and swap extensions
void lse_cas(Register Rs, Register Rt, Register Rn,
enum operand_size sz, bool a, bool r, bool not_pair) {
starti;
if (! not_pair) { // Pair
assert(sz == word || sz == xword, "invalid size");
/* The size bit is in bit 30, not 31 */
sz = (operand_size)(sz == word ? 0b00:0b01);
}
f(sz, 31, 30), f(0b001000, 29, 24), f(1, 23), f(a, 22), f(1, 21);
rf(Rs, 16), f(r, 15), f(0b11111, 14, 10), rf(Rn, 5), rf(Rt, 0);
}
// CAS
#define INSN(NAME, a, r) \
void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) { \
assert(Rs != Rn && Rs != Rt, "unpredictable instruction"); \
lse_cas(Rs, Rt, Rn, sz, a, r, true); \
}
INSN(cas, false, false)
INSN(casa, true, false)
INSN(casl, false, true)
INSN(casal, true, true)
#undef INSN
// CASP
#define INSN(NAME, a, r) \
void NAME(operand_size sz, Register Rs, Register Rs1, \
Register Rt, Register Rt1, Register Rn) { \
assert((Rs->encoding() & 1) == 0 && (Rt->encoding() & 1) == 0 && \
Rs->successor() == Rs1 && Rt->successor() == Rt1 && \
Rs != Rn && Rs1 != Rn && Rs != Rt, "invalid registers"); \
lse_cas(Rs, Rt, Rn, sz, a, r, false); \
}
INSN(casp, false, false)
INSN(caspa, true, false)
INSN(caspl, false, true)
INSN(caspal, true, true)
#undef INSN
// Load register (literal) // Load register (literal)
#define INSN(NAME, opc, V) \ #define INSN(NAME, opc, V) \
void NAME(Register Rt, address dest) { \ void NAME(Register Rt, address dest) { \

View File

@ -1556,38 +1556,54 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
} }
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) { void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
Label retry_load, nope; if (UseLSE) {
// flush and load exclusive from the memory location __ mov(rscratch1, cmpval);
// and fail if it is not what we expect __ casal(Assembler::word, rscratch1, newval, addr);
__ bind(retry_load); __ cmpw(rscratch1, cmpval);
__ ldaxrw(rscratch1, addr); __ cset(rscratch1, Assembler::NE);
__ cmpw(rscratch1, cmpval); } else {
__ cset(rscratch1, Assembler::NE); Label retry_load, nope;
__ br(Assembler::NE, nope); // flush and load exclusive from the memory location
// if we store+flush with no intervening write rscratch1 wil be zero // and fail if it is not what we expect
__ stlxrw(rscratch1, newval, addr); __ prfm(Address(addr), PSTL1STRM);
// retry so we only ever return after a load fails to compare __ bind(retry_load);
// ensures we don't return a stale value after a failed write. __ ldaxrw(rscratch1, addr);
__ cbnzw(rscratch1, retry_load); __ cmpw(rscratch1, cmpval);
__ bind(nope); __ cset(rscratch1, Assembler::NE);
__ br(Assembler::NE, nope);
// if we store+flush with no intervening write rscratch1 wil be zero
__ stlxrw(rscratch1, newval, addr);
// retry so we only ever return after a load fails to compare
// ensures we don't return a stale value after a failed write.
__ cbnzw(rscratch1, retry_load);
__ bind(nope);
}
__ membar(__ AnyAny); __ membar(__ AnyAny);
} }
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) { void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
Label retry_load, nope; if (UseLSE) {
// flush and load exclusive from the memory location __ mov(rscratch1, cmpval);
// and fail if it is not what we expect __ casal(Assembler::xword, rscratch1, newval, addr);
__ bind(retry_load); __ cmp(rscratch1, cmpval);
__ ldaxr(rscratch1, addr); __ cset(rscratch1, Assembler::NE);
__ cmp(rscratch1, cmpval); } else {
__ cset(rscratch1, Assembler::NE); Label retry_load, nope;
__ br(Assembler::NE, nope); // flush and load exclusive from the memory location
// if we store+flush with no intervening write rscratch1 wil be zero // and fail if it is not what we expect
__ stlxr(rscratch1, newval, addr); __ prfm(Address(addr), PSTL1STRM);
// retry so we only ever return after a load fails to compare __ bind(retry_load);
// ensures we don't return a stale value after a failed write. __ ldaxr(rscratch1, addr);
__ cbnz(rscratch1, retry_load); __ cmp(rscratch1, cmpval);
__ bind(nope); __ cset(rscratch1, Assembler::NE);
__ br(Assembler::NE, nope);
// if we store+flush with no intervening write rscratch1 wil be zero
__ stlxr(rscratch1, newval, addr);
// retry so we only ever return after a load fails to compare
// ensures we don't return a stale value after a failed write.
__ cbnz(rscratch1, retry_load);
__ bind(nope);
}
__ membar(__ AnyAny); __ membar(__ AnyAny);
} }
@ -3156,6 +3172,7 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
} }
Label again; Label again;
__ lea(tmp, addr); __ lea(tmp, addr);
__ prfm(Address(tmp), PSTL1STRM);
__ bind(again); __ bind(again);
(_masm->*lda)(dst, tmp); (_masm->*lda)(dst, tmp);
(_masm->*add)(rscratch1, dst, inc); (_masm->*add)(rscratch1, dst, inc);
@ -3175,6 +3192,7 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
assert_different_registers(obj, addr.base(), tmp, rscratch2, dst); assert_different_registers(obj, addr.base(), tmp, rscratch2, dst);
Label again; Label again;
__ lea(tmp, addr); __ lea(tmp, addr);
__ prfm(Address(tmp), PSTL1STRM);
__ bind(again); __ bind(again);
(_masm->*lda)(dst, tmp); (_masm->*lda)(dst, tmp);
(_masm->*stl)(rscratch2, obj, tmp); (_masm->*stl)(rscratch2, obj, tmp);

View File

@ -76,6 +76,8 @@ define_pd_global(bool, CompactStrings, false);
// avoid biased locking while we are bootstrapping the aarch64 build // avoid biased locking while we are bootstrapping the aarch64 build
define_pd_global(bool, UseBiasedLocking, false); define_pd_global(bool, UseBiasedLocking, false);
define_pd_global(intx, InitArrayShortSize, 18*BytesPerLong);
#if defined(COMPILER1) || defined(COMPILER2) #if defined(COMPILER1) || defined(COMPILER2)
define_pd_global(intx, InlineSmallCode, 1000); define_pd_global(intx, InlineSmallCode, 1000);
#endif #endif
@ -101,9 +103,13 @@ define_pd_global(intx, InlineSmallCode, 1000);
\ \
product(bool, UseCRC32, false, \ product(bool, UseCRC32, false, \
"Use CRC32 instructions for CRC32 computation") \ "Use CRC32 instructions for CRC32 computation") \
\
product(bool, UseLSE, false, \
"Use LSE instructions") \
// Don't attempt to use Neon on builtin sim until builtin sim supports it // Don't attempt to use Neon on builtin sim until builtin sim supports it
#define UseCRC32 false #define UseCRC32 false
#define UseSIMDForMemoryOps false
#else #else
#define UseBuiltinSim false #define UseBuiltinSim false
@ -121,6 +127,10 @@ define_pd_global(intx, InlineSmallCode, 1000);
"Use Neon for CRC32 computation") \ "Use Neon for CRC32 computation") \
product(bool, UseCRC32, false, \ product(bool, UseCRC32, false, \
"Use CRC32 instructions for CRC32 computation") \ "Use CRC32 instructions for CRC32 computation") \
product(bool, UseSIMDForMemoryOps, false, \
"Use SIMD instructions in generated memory move code") \
product(bool, UseLSE, false, \
"Use LSE instructions") \
product(bool, TraceTraps, false, "Trace all traps the signal handler") product(bool, TraceTraps, false, "Trace all traps the signal handler")
#endif #endif

View File

@ -1638,6 +1638,7 @@ Address MacroAssembler::form_address(Register Rd, Register base, long byte_offse
void MacroAssembler::atomic_incw(Register counter_addr, Register tmp, Register tmp2) { void MacroAssembler::atomic_incw(Register counter_addr, Register tmp, Register tmp2) {
Label retry_load; Label retry_load;
prfm(Address(counter_addr), PSTL1STRM);
bind(retry_load); bind(retry_load);
// flush and load exclusive from the memory location // flush and load exclusive from the memory location
ldxrw(tmp, counter_addr); ldxrw(tmp, counter_addr);
@ -2070,25 +2071,32 @@ void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Reg
// oldv holds comparison value // oldv holds comparison value
// newv holds value to write in exchange // newv holds value to write in exchange
// addr identifies memory word to compare against/update // addr identifies memory word to compare against/update
// tmp returns 0/1 for success/failure if (UseLSE) {
Label retry_load, nope; mov(tmp, oldv);
casal(Assembler::xword, oldv, newv, addr);
bind(retry_load); cmp(tmp, oldv);
// flush and load exclusive from the memory location br(Assembler::EQ, succeed);
// and fail if it is not what we expect membar(AnyAny);
ldaxr(tmp, addr); } else {
cmp(tmp, oldv); Label retry_load, nope;
br(Assembler::NE, nope); prfm(Address(addr), PSTL1STRM);
// if we store+flush with no intervening write tmp wil be zero bind(retry_load);
stlxr(tmp, newv, addr); // flush and load exclusive from the memory location
cbzw(tmp, succeed); // and fail if it is not what we expect
// retry so we only ever return after a load fails to compare ldaxr(tmp, addr);
// ensures we don't return a stale value after a failed write. cmp(tmp, oldv);
b(retry_load); br(Assembler::NE, nope);
// if the memory word differs we return it in oldv and signal a fail // if we store+flush with no intervening write tmp wil be zero
bind(nope); stlxr(tmp, newv, addr);
membar(AnyAny); cbzw(tmp, succeed);
mov(oldv, tmp); // retry so we only ever return after a load fails to compare
// ensures we don't return a stale value after a failed write.
b(retry_load);
// if the memory word differs we return it in oldv and signal a fail
bind(nope);
membar(AnyAny);
mov(oldv, tmp);
}
if (fail) if (fail)
b(*fail); b(*fail);
} }
@ -2099,28 +2107,64 @@ void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Regis
// newv holds value to write in exchange // newv holds value to write in exchange
// addr identifies memory word to compare against/update // addr identifies memory word to compare against/update
// tmp returns 0/1 for success/failure // tmp returns 0/1 for success/failure
Label retry_load, nope; if (UseLSE) {
mov(tmp, oldv);
bind(retry_load); casal(Assembler::word, oldv, newv, addr);
// flush and load exclusive from the memory location cmp(tmp, oldv);
// and fail if it is not what we expect br(Assembler::EQ, succeed);
ldaxrw(tmp, addr); membar(AnyAny);
cmp(tmp, oldv); } else {
br(Assembler::NE, nope); Label retry_load, nope;
// if we store+flush with no intervening write tmp wil be zero prfm(Address(addr), PSTL1STRM);
stlxrw(tmp, newv, addr); bind(retry_load);
cbzw(tmp, succeed); // flush and load exclusive from the memory location
// retry so we only ever return after a load fails to compare // and fail if it is not what we expect
// ensures we don't return a stale value after a failed write. ldaxrw(tmp, addr);
b(retry_load); cmp(tmp, oldv);
// if the memory word differs we return it in oldv and signal a fail br(Assembler::NE, nope);
bind(nope); // if we store+flush with no intervening write tmp wil be zero
membar(AnyAny); stlxrw(tmp, newv, addr);
mov(oldv, tmp); cbzw(tmp, succeed);
// retry so we only ever return after a load fails to compare
// ensures we don't return a stale value after a failed write.
b(retry_load);
// if the memory word differs we return it in oldv and signal a fail
bind(nope);
membar(AnyAny);
mov(oldv, tmp);
}
if (fail) if (fail)
b(*fail); b(*fail);
} }
// A generic CAS; success or failure is in the EQ flag.
void MacroAssembler::cmpxchg(Register addr, Register expected,
Register new_val,
enum operand_size size,
bool acquire, bool release,
Register tmp) {
if (UseLSE) {
mov(tmp, expected);
lse_cas(tmp, new_val, addr, size, acquire, release, /*not_pair*/ true);
cmp(tmp, expected);
} else {
BLOCK_COMMENT("cmpxchg {");
Label retry_load, done;
prfm(Address(addr), PSTL1STRM);
bind(retry_load);
load_exclusive(tmp, addr, size, acquire);
if (size == xword)
cmp(tmp, expected);
else
cmpw(tmp, expected);
br(Assembler::NE, done);
store_exclusive(tmp, new_val, addr, size, release);
cbnzw(tmp, retry_load);
bind(done);
BLOCK_COMMENT("} cmpxchg");
}
}
static bool different(Register a, RegisterOrConstant b, Register c) { static bool different(Register a, RegisterOrConstant b, Register c) {
if (b.is_constant()) if (b.is_constant())
return a != c; return a != c;
@ -2135,6 +2179,7 @@ void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Registe
result = different(prev, incr, addr) ? prev : rscratch2; \ result = different(prev, incr, addr) ? prev : rscratch2; \
\ \
Label retry_load; \ Label retry_load; \
prfm(Address(addr), PSTL1STRM); \
bind(retry_load); \ bind(retry_load); \
LDXR(result, addr); \ LDXR(result, addr); \
OP(rscratch1, result, incr); \ OP(rscratch1, result, incr); \
@ -2157,6 +2202,7 @@ void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) {
result = different(prev, newv, addr) ? prev : rscratch2; \ result = different(prev, newv, addr) ? prev : rscratch2; \
\ \
Label retry_load; \ Label retry_load; \
prfm(Address(addr), PSTL1STRM); \
bind(retry_load); \ bind(retry_load); \
LDXR(result, addr); \ LDXR(result, addr); \
STXR(rscratch1, newv, addr); \ STXR(rscratch1, newv, addr); \

View File

@ -971,21 +971,10 @@ public:
} }
// A generic CAS; success or failure is in the EQ flag. // A generic CAS; success or failure is in the EQ flag.
template <typename T1, typename T2>
void cmpxchg(Register addr, Register expected, Register new_val, void cmpxchg(Register addr, Register expected, Register new_val,
T1 load_insn, enum operand_size size,
void (MacroAssembler::*cmp_insn)(Register, Register), bool acquire, bool release,
T2 store_insn, Register tmp = rscratch1);
Register tmp = rscratch1) {
Label retry_load, done;
bind(retry_load);
(this->*load_insn)(tmp, addr);
(this->*cmp_insn)(tmp, expected);
br(Assembler::NE, done);
(this->*store_insn)(tmp, new_val, addr);
cbnzw(tmp, retry_load);
bind(done);
}
// Calls // Calls

View File

@ -107,6 +107,9 @@ CONSTANT_REGISTER_DECLARATION(Register, r31_sp, (31));
CONSTANT_REGISTER_DECLARATION(Register, zr, (32)); CONSTANT_REGISTER_DECLARATION(Register, zr, (32));
CONSTANT_REGISTER_DECLARATION(Register, sp, (33)); CONSTANT_REGISTER_DECLARATION(Register, sp, (33));
// Used as a filler in instructions where a register field is unused.
const Register dummy_reg = r31_sp;
// Use FloatRegister as shortcut // Use FloatRegister as shortcut
class FloatRegisterImpl; class FloatRegisterImpl;
typedef FloatRegisterImpl* FloatRegister; typedef FloatRegisterImpl* FloatRegister;

View File

@ -729,7 +729,7 @@ class StubGenerator: public StubCodeGenerator {
// //
// count is a count of words. // count is a count of words.
// //
// Precondition: count >= 2 // Precondition: count >= 8
// //
// Postconditions: // Postconditions:
// //
@ -741,6 +741,7 @@ class StubGenerator: public StubCodeGenerator {
void generate_copy_longs(Label &start, Register s, Register d, Register count, void generate_copy_longs(Label &start, Register s, Register d, Register count,
copy_direction direction) { copy_direction direction) {
int unit = wordSize * direction; int unit = wordSize * direction;
int bias = (UseSIMDForMemoryOps ? 4:2) * wordSize;
int offset; int offset;
const Register t0 = r3, t1 = r4, t2 = r5, t3 = r6, const Register t0 = r3, t1 = r4, t2 = r5, t3 = r6,
@ -750,7 +751,7 @@ class StubGenerator: public StubCodeGenerator {
assert_different_registers(rscratch1, t0, t1, t2, t3, t4, t5, t6, t7); assert_different_registers(rscratch1, t0, t1, t2, t3, t4, t5, t6, t7);
assert_different_registers(s, d, count, rscratch1); assert_different_registers(s, d, count, rscratch1);
Label again, large, small; Label again, drain;
const char *stub_name; const char *stub_name;
if (direction == copy_forwards) if (direction == copy_forwards)
stub_name = "foward_copy_longs"; stub_name = "foward_copy_longs";
@ -759,57 +760,35 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", stub_name); StubCodeMark mark(this, "StubRoutines", stub_name);
__ align(CodeEntryAlignment); __ align(CodeEntryAlignment);
__ bind(start); __ bind(start);
__ cmp(count, 8);
__ br(Assembler::LO, small);
if (direction == copy_forwards) { if (direction == copy_forwards) {
__ sub(s, s, 2 * wordSize); __ sub(s, s, bias);
__ sub(d, d, 2 * wordSize); __ sub(d, d, bias);
}
__ subs(count, count, 16);
__ br(Assembler::GE, large);
// 8 <= count < 16 words. Copy 8.
__ ldp(t0, t1, Address(s, 2 * unit));
__ ldp(t2, t3, Address(s, 4 * unit));
__ ldp(t4, t5, Address(s, 6 * unit));
__ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
__ stp(t0, t1, Address(d, 2 * unit));
__ stp(t2, t3, Address(d, 4 * unit));
__ stp(t4, t5, Address(d, 6 * unit));
__ stp(t6, t7, Address(__ pre(d, 8 * unit)));
if (direction == copy_forwards) {
__ add(s, s, 2 * wordSize);
__ add(d, d, 2 * wordSize);
} }
#ifdef ASSERT
// Make sure we are never given < 8 words
{ {
Label L1, L2; Label L;
__ bind(small); __ cmp(count, 8);
__ tbz(count, exact_log2(4), L1); __ br(Assembler::GE, L);
__ ldp(t0, t1, Address(__ adjust(s, 2 * unit, direction == copy_backwards))); __ stop("genrate_copy_longs called with < 8 words");
__ ldp(t2, t3, Address(__ adjust(s, 2 * unit, direction == copy_backwards))); __ bind(L);
__ stp(t0, t1, Address(__ adjust(d, 2 * unit, direction == copy_backwards)));
__ stp(t2, t3, Address(__ adjust(d, 2 * unit, direction == copy_backwards)));
__ bind(L1);
__ tbz(count, 1, L2);
__ ldp(t0, t1, Address(__ adjust(s, 2 * unit, direction == copy_backwards)));
__ stp(t0, t1, Address(__ adjust(d, 2 * unit, direction == copy_backwards)));
__ bind(L2);
} }
#endif
__ ret(lr);
__ align(CodeEntryAlignment);
__ bind(large);
// Fill 8 registers // Fill 8 registers
__ ldp(t0, t1, Address(s, 2 * unit)); if (UseSIMDForMemoryOps) {
__ ldp(t2, t3, Address(s, 4 * unit)); __ ldpq(v0, v1, Address(s, 4 * unit));
__ ldp(t4, t5, Address(s, 6 * unit)); __ ldpq(v2, v3, Address(__ pre(s, 8 * unit)));
__ ldp(t6, t7, Address(__ pre(s, 8 * unit))); } else {
__ ldp(t0, t1, Address(s, 2 * unit));
__ ldp(t2, t3, Address(s, 4 * unit));
__ ldp(t4, t5, Address(s, 6 * unit));
__ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
}
__ subs(count, count, 16);
__ br(Assembler::LO, drain);
int prefetch = PrefetchCopyIntervalInBytes; int prefetch = PrefetchCopyIntervalInBytes;
bool use_stride = false; bool use_stride = false;
@ -824,38 +803,56 @@ class StubGenerator: public StubCodeGenerator {
if (PrefetchCopyIntervalInBytes > 0) if (PrefetchCopyIntervalInBytes > 0)
__ prfm(use_stride ? Address(s, stride) : Address(s, prefetch), PLDL1KEEP); __ prfm(use_stride ? Address(s, stride) : Address(s, prefetch), PLDL1KEEP);
__ stp(t0, t1, Address(d, 2 * unit)); if (UseSIMDForMemoryOps) {
__ ldp(t0, t1, Address(s, 2 * unit)); __ stpq(v0, v1, Address(d, 4 * unit));
__ stp(t2, t3, Address(d, 4 * unit)); __ ldpq(v0, v1, Address(s, 4 * unit));
__ ldp(t2, t3, Address(s, 4 * unit)); __ stpq(v2, v3, Address(__ pre(d, 8 * unit)));
__ stp(t4, t5, Address(d, 6 * unit)); __ ldpq(v2, v3, Address(__ pre(s, 8 * unit)));
__ ldp(t4, t5, Address(s, 6 * unit)); } else {
__ stp(t6, t7, Address(__ pre(d, 8 * unit))); __ stp(t0, t1, Address(d, 2 * unit));
__ ldp(t6, t7, Address(__ pre(s, 8 * unit))); __ ldp(t0, t1, Address(s, 2 * unit));
__ stp(t2, t3, Address(d, 4 * unit));
__ ldp(t2, t3, Address(s, 4 * unit));
__ stp(t4, t5, Address(d, 6 * unit));
__ ldp(t4, t5, Address(s, 6 * unit));
__ stp(t6, t7, Address(__ pre(d, 8 * unit)));
__ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
}
__ subs(count, count, 8); __ subs(count, count, 8);
__ br(Assembler::HS, again); __ br(Assembler::HS, again);
// Drain // Drain
__ stp(t0, t1, Address(d, 2 * unit)); __ bind(drain);
__ stp(t2, t3, Address(d, 4 * unit)); if (UseSIMDForMemoryOps) {
__ stp(t4, t5, Address(d, 6 * unit)); __ stpq(v0, v1, Address(d, 4 * unit));
__ stp(t6, t7, Address(__ pre(d, 8 * unit))); __ stpq(v2, v3, Address(__ pre(d, 8 * unit)));
} else {
if (direction == copy_forwards) { __ stp(t0, t1, Address(d, 2 * unit));
__ add(s, s, 2 * wordSize); __ stp(t2, t3, Address(d, 4 * unit));
__ add(d, d, 2 * wordSize); __ stp(t4, t5, Address(d, 6 * unit));
__ stp(t6, t7, Address(__ pre(d, 8 * unit)));
} }
{ {
Label L1, L2; Label L1, L2;
__ tbz(count, exact_log2(4), L1); __ tbz(count, exact_log2(4), L1);
__ ldp(t0, t1, Address(__ adjust(s, 2 * unit, direction == copy_backwards))); if (UseSIMDForMemoryOps) {
__ ldp(t2, t3, Address(__ adjust(s, 2 * unit, direction == copy_backwards))); __ ldpq(v0, v1, Address(__ pre(s, 4 * unit)));
__ stp(t0, t1, Address(__ adjust(d, 2 * unit, direction == copy_backwards))); __ stpq(v0, v1, Address(__ pre(d, 4 * unit)));
__ stp(t2, t3, Address(__ adjust(d, 2 * unit, direction == copy_backwards))); } else {
__ ldp(t0, t1, Address(s, 2 * unit));
__ ldp(t2, t3, Address(__ pre(s, 4 * unit)));
__ stp(t0, t1, Address(d, 2 * unit));
__ stp(t2, t3, Address(__ pre(d, 4 * unit)));
}
__ bind(L1); __ bind(L1);
if (direction == copy_forwards) {
__ add(s, s, bias);
__ add(d, d, bias);
}
__ tbz(count, 1, L2); __ tbz(count, 1, L2);
__ ldp(t0, t1, Address(__ adjust(s, 2 * unit, direction == copy_backwards))); __ ldp(t0, t1, Address(__ adjust(s, 2 * unit, direction == copy_backwards)));
__ stp(t0, t1, Address(__ adjust(d, 2 * unit, direction == copy_backwards))); __ stp(t0, t1, Address(__ adjust(d, 2 * unit, direction == copy_backwards)));
@ -931,16 +928,135 @@ class StubGenerator: public StubCodeGenerator {
int granularity = uabs(step); int granularity = uabs(step);
const Register t0 = r3, t1 = r4; const Register t0 = r3, t1 = r4;
// <= 96 bytes do inline. Direction doesn't matter because we always
// load all the data before writing anything
Label copy4, copy8, copy16, copy32, copy80, copy128, copy_big, finish;
const Register t2 = r5, t3 = r6, t4 = r7, t5 = r8;
const Register t6 = r9, t7 = r10, t8 = r11, t9 = r12;
const Register send = r17, dend = r18;
if (PrefetchCopyIntervalInBytes > 0)
__ prfm(Address(s, 0), PLDL1KEEP);
__ cmp(count, (UseSIMDForMemoryOps ? 96:80)/granularity);
__ br(Assembler::HI, copy_big);
__ lea(send, Address(s, count, Address::lsl(exact_log2(granularity))));
__ lea(dend, Address(d, count, Address::lsl(exact_log2(granularity))));
__ cmp(count, 16/granularity);
__ br(Assembler::LS, copy16);
__ cmp(count, 64/granularity);
__ br(Assembler::HI, copy80);
__ cmp(count, 32/granularity);
__ br(Assembler::LS, copy32);
// 33..64 bytes
if (UseSIMDForMemoryOps) {
__ ldpq(v0, v1, Address(s, 0));
__ ldpq(v2, v3, Address(send, -32));
__ stpq(v0, v1, Address(d, 0));
__ stpq(v2, v3, Address(dend, -32));
} else {
__ ldp(t0, t1, Address(s, 0));
__ ldp(t2, t3, Address(s, 16));
__ ldp(t4, t5, Address(send, -32));
__ ldp(t6, t7, Address(send, -16));
__ stp(t0, t1, Address(d, 0));
__ stp(t2, t3, Address(d, 16));
__ stp(t4, t5, Address(dend, -32));
__ stp(t6, t7, Address(dend, -16));
}
__ b(finish);
// 17..32 bytes
__ bind(copy32);
__ ldp(t0, t1, Address(s, 0));
__ ldp(t2, t3, Address(send, -16));
__ stp(t0, t1, Address(d, 0));
__ stp(t2, t3, Address(dend, -16));
__ b(finish);
// 65..80/96 bytes
// (96 bytes if SIMD because we do 32 byes per instruction)
__ bind(copy80);
if (UseSIMDForMemoryOps) {
__ ldpq(v0, v1, Address(s, 0));
__ ldpq(v2, v3, Address(s, 32));
__ ldpq(v4, v5, Address(send, -32));
__ stpq(v0, v1, Address(d, 0));
__ stpq(v2, v3, Address(d, 32));
__ stpq(v4, v5, Address(dend, -32));
} else {
__ ldp(t0, t1, Address(s, 0));
__ ldp(t2, t3, Address(s, 16));
__ ldp(t4, t5, Address(s, 32));
__ ldp(t6, t7, Address(s, 48));
__ ldp(t8, t9, Address(send, -16));
__ stp(t0, t1, Address(d, 0));
__ stp(t2, t3, Address(d, 16));
__ stp(t4, t5, Address(d, 32));
__ stp(t6, t7, Address(d, 48));
__ stp(t8, t9, Address(dend, -16));
}
__ b(finish);
// 0..16 bytes
__ bind(copy16);
__ cmp(count, 8/granularity);
__ br(Assembler::LO, copy8);
// 8..16 bytes
__ ldr(t0, Address(s, 0));
__ ldr(t1, Address(send, -8));
__ str(t0, Address(d, 0));
__ str(t1, Address(dend, -8));
__ b(finish);
if (granularity < 8) {
// 4..7 bytes
__ bind(copy8);
__ tbz(count, 2 - exact_log2(granularity), copy4);
__ ldrw(t0, Address(s, 0));
__ ldrw(t1, Address(send, -4));
__ strw(t0, Address(d, 0));
__ strw(t1, Address(dend, -4));
__ b(finish);
if (granularity < 4) {
// 0..3 bytes
__ bind(copy4);
__ cbz(count, finish); // get rid of 0 case
if (granularity == 2) {
__ ldrh(t0, Address(s, 0));
__ strh(t0, Address(d, 0));
} else { // granularity == 1
// Now 1..3 bytes. Handle the 1 and 2 byte case by copying
// the first and last byte.
// Handle the 3 byte case by loading and storing base + count/2
// (count == 1 (s+0)->(d+0), count == 2,3 (s+1) -> (d+1))
// This does means in the 1 byte case we load/store the same
// byte 3 times.
__ lsr(count, count, 1);
__ ldrb(t0, Address(s, 0));
__ ldrb(t1, Address(send, -1));
__ ldrb(t2, Address(s, count));
__ strb(t0, Address(d, 0));
__ strb(t1, Address(dend, -1));
__ strb(t2, Address(d, count));
}
__ b(finish);
}
}
__ bind(copy_big);
if (is_backwards) { if (is_backwards) {
__ lea(s, Address(s, count, Address::lsl(exact_log2(-step)))); __ lea(s, Address(s, count, Address::lsl(exact_log2(-step))));
__ lea(d, Address(d, count, Address::lsl(exact_log2(-step)))); __ lea(d, Address(d, count, Address::lsl(exact_log2(-step))));
} }
Label tail;
__ cmp(count, 16/granularity);
__ br(Assembler::LO, tail);
// Now we've got the small case out of the way we can align the // Now we've got the small case out of the way we can align the
// source address on a 2-word boundary. // source address on a 2-word boundary.
@ -986,8 +1102,6 @@ class StubGenerator: public StubCodeGenerator {
#endif #endif
} }
__ cmp(count, 16/granularity);
__ br(Assembler::LT, tail);
__ bind(aligned); __ bind(aligned);
// s is now 2-word-aligned. // s is now 2-word-aligned.
@ -1001,9 +1115,11 @@ class StubGenerator: public StubCodeGenerator {
__ bl(copy_b); __ bl(copy_b);
// And the tail. // And the tail.
__ bind(tail);
copy_memory_small(s, d, count, tmp, step); copy_memory_small(s, d, count, tmp, step);
if (granularity >= 8) __ bind(copy8);
if (granularity >= 4) __ bind(copy4);
__ bind(finish);
} }

View File

@ -1984,6 +1984,7 @@ void TemplateInterpreterGenerator::count_bytecode() {
__ push(rscratch3); __ push(rscratch3);
Label L; Label L;
__ mov(rscratch2, (address) &BytecodeCounter::_counter_value); __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
__ prfm(Address(rscratch2), PSTL1STRM);
__ bind(L); __ bind(L);
__ ldxr(rscratch1, rscratch2); __ ldxr(rscratch1, rscratch2);
__ add(rscratch1, rscratch1, 1); __ add(rscratch1, rscratch1, 1);

View File

@ -61,6 +61,10 @@
#define HWCAP_CRC32 (1<<7) #define HWCAP_CRC32 (1<<7)
#endif #endif
#ifndef HWCAP_ATOMICS
#define HWCAP_ATOMICS (1<<8)
#endif
int VM_Version::_cpu; int VM_Version::_cpu;
int VM_Version::_model; int VM_Version::_model;
int VM_Version::_model2; int VM_Version::_model2;
@ -172,6 +176,7 @@ void VM_Version::get_processor_features() {
if (auxv & HWCAP_AES) strcat(buf, ", aes"); if (auxv & HWCAP_AES) strcat(buf, ", aes");
if (auxv & HWCAP_SHA1) strcat(buf, ", sha1"); if (auxv & HWCAP_SHA1) strcat(buf, ", sha1");
if (auxv & HWCAP_SHA2) strcat(buf, ", sha256"); if (auxv & HWCAP_SHA2) strcat(buf, ", sha256");
if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse");
_features_string = os::strdup(buf); _features_string = os::strdup(buf);
@ -191,6 +196,15 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false); FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
} }
if (auxv & HWCAP_ATOMICS) {
if (FLAG_IS_DEFAULT(UseLSE))
FLAG_SET_DEFAULT(UseLSE, true);
} else {
if (UseLSE) {
warning("UseLSE specified, but not supported on this CPU");
}
}
if (auxv & HWCAP_AES) { if (auxv & HWCAP_AES) {
UseAES = UseAES || FLAG_IS_DEFAULT(UseAES); UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
UseAESIntrinsics = UseAESIntrinsics =

View File

@ -47,7 +47,7 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
// The expected size in bytes of a cache line, used to pad data structures. // The expected size in bytes of a cache line, used to pad data structures.
#define DEFAULT_CACHE_LINE_SIZE 128 #define DEFAULT_CACHE_LINE_SIZE 128
#if defined(COMPILER2) && defined(AIX) #if defined(COMPILER2) && (defined(AIX) || defined(linux))
// Include Transactional Memory lock eliding optimization // Include Transactional Memory lock eliding optimization
#define INCLUDE_RTM_OPT 1 #define INCLUDE_RTM_OPT 1
#endif #endif

View File

@ -76,6 +76,8 @@ define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true); define_pd_global(bool, CompactStrings, true);
define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
// Platform dependent flag handling: flags only defined on this platform. // Platform dependent flag handling: flags only defined on this platform.
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \ #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
\ \

View File

@ -2137,8 +2137,6 @@ MachTypeNode *Matcher::make_decode_node() {
return decode; return decode;
} }
*/ */
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// false => size gets scaled to BytesPerLong, ok. // false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false; const bool Matcher::init_array_count_is_in_bytes = false;

View File

@ -255,7 +255,16 @@ void VM_Version::initialize() {
} }
#endif #endif
#ifdef linux #ifdef linux
// TODO: check kernel version (we currently have too old versions only) // At least Linux kernel 4.2, as the problematic behavior of syscalls
// being called in the middle of a transaction has been addressed.
// Please, refer to commit b4b56f9ecab40f3b4ef53e130c9f6663be491894
// in Linux kernel source tree: https://goo.gl/Kc5i7A
if (os::Linux::os_version_is_known()) {
if (os::Linux::os_version() >= 0x040200)
os_too_old = false;
} else {
vm_exit_during_initialization("RTM can not be enabled: kernel version is unknown.");
}
#endif #endif
if (os_too_old) { if (os_too_old) {
vm_exit_during_initialization("RTM is not supported on this OS version."); vm_exit_during_initialization("RTM is not supported on this OS version.");

View File

@ -90,6 +90,8 @@ define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true); define_pd_global(bool, CompactStrings, true);
define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \ #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
\ \
product(intx, UseVIS, 99, \ product(intx, UseVIS, 99, \

View File

@ -1980,9 +1980,6 @@ const bool Matcher::isSimpleConstant64(jlong value) {
// No scaling for the parameter the ClearArray node. // No scaling for the parameter the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true; const bool Matcher::init_array_count_is_in_bytes = true;
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// No additional cost for CMOVL. // No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; } const int Matcher::long_cmove_cost() { return 0; }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -777,6 +777,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x6E: // movd case 0x6E: // movd
case 0x7E: // movd case 0x7E: // movd
case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
case 0xFE: // paddd
debug_only(has_disp32 = true); debug_only(has_disp32 = true);
break; break;
@ -926,6 +927,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
ip++; // skip P2, move to opcode ip++; // skip P2, move to opcode
// To find the end of instruction (which == end_pc_operand). // To find the end of instruction (which == end_pc_operand).
switch (0xFF & *ip) { switch (0xFF & *ip) {
case 0x22: // pinsrd r, r/a, #8
case 0x61: // pcmpestri r, r/a, #8 case 0x61: // pcmpestri r, r/a, #8
case 0x70: // pshufd r, r/a, #8 case 0x70: // pshufd r, r/a, #8
case 0x73: // psrldq r, #8 case 0x73: // psrldq r, #8
@ -3953,6 +3955,83 @@ void Assembler::setb(Condition cc, Register dst) {
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
} }
void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_ssse3(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8((unsigned char)0x0F);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8(imm8);
}
void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8((unsigned char)0x0E);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8(imm8);
}
void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sha(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
emit_int8((unsigned char)0xCC);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8((unsigned char)imm8);
}
void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_sha(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xC8);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_sha(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xC9);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_sha(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xCA);
emit_int8((unsigned char)(0xC0 | encode));
}
// xmm0 is implicit additional source to this instruction.
void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_sha(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xCB);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_sha(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xCC);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_sha(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
emit_int8((unsigned char)0xCD);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::shll(Register dst, int imm8) { void Assembler::shll(Register dst, int imm8) {
assert(isShiftCount(imm8), "illegal shift count"); assert(isShiftCount(imm8), "illegal shift count");
int encode = prefix_and_encode(dst->encoding()); int encode = prefix_and_encode(dst->encoding());
@ -4931,6 +5010,15 @@ void Assembler::paddd(XMMRegister dst, XMMRegister src) {
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
} }
// Emit PADDD xmm, m128: packed 32-bit integer add from memory
// (encoding: 66 0F FE /r; SSE2 required on 32-bit VMs).
void Assembler::paddd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark mark(this);
  // 128-bit operation; uses_vl so EVEX vector-length encoding is honored.
  InstructionAttr attr(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attr);
  emit_int8((unsigned char)0xFE);                // opcode byte
  emit_operand(dst, src);                        // ModRM/SIB/disp for the memory operand
}
void Assembler::paddq(XMMRegister dst, XMMRegister src) { void Assembler::paddq(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), "")); NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
@ -5611,8 +5699,9 @@ void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_
} }
void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx(), ""); assert(VM_Version::supports_avx(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int nds_enc = nds->is_valid() ? nds->encoding() : 0; int nds_enc = nds->is_valid() ? nds->encoding() : 0;
@ -5621,11 +5710,12 @@ void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src)
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
// 0x00 - insert into lower 128 bits // 0x00 - insert into lower 128 bits
// 0x01 - insert into upper 128 bits // 0x01 - insert into upper 128 bits
emit_int8(0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) { void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_evex(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int nds_enc = nds->is_valid() ? nds->encoding() : 0; int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -5633,26 +5723,29 @@ void Assembler::vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src,
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
// 0x00 - insert into lower 256 bits // 0x00 - insert into lower 256 bits
// 0x01 - insert into upper 256 bits // 0x01 - insert into upper 256 bits
emit_int8(value & 0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vinsertf64x4h(XMMRegister dst, Address src, int value) { void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_evex(), "");
assert(dst != xnoreg, "sanity"); assert(dst != xnoreg, "sanity");
assert(imm8 <= 0x01, "imm8: %u", imm8);
InstructionMark im(this); InstructionMark im(this);
InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit); attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
// swap src<->dst for encoding // swap src<->dst for encoding
vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x1A); emit_int8(0x1A);
emit_operand(dst, src); emit_operand(dst, src);
// 0x00 - insert into lower 256 bits // 0x00 - insert into lower 256 bits
// 0x01 - insert into upper 128 bits // 0x01 - insert into upper 256 bits
emit_int8(value & 0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vinsertf32x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) { void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_evex(), "");
assert(imm8 <= 0x03, "imm8: %u", imm8);
InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int nds_enc = nds->is_valid() ? nds->encoding() : 0; int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -5662,57 +5755,64 @@ void Assembler::vinsertf32x4h(XMMRegister dst, XMMRegister nds, XMMRegister src,
// 0x01 - insert into q1 128 bits (128..255) // 0x01 - insert into q1 128 bits (128..255)
// 0x02 - insert into q2 128 bits (256..383) // 0x02 - insert into q2 128 bits (256..383)
// 0x03 - insert into q3 128 bits (384..511) // 0x03 - insert into q3 128 bits (384..511)
emit_int8(value & 0x3); emit_int8(imm8 & 0x03);
} }
void Assembler::vinsertf32x4h(XMMRegister dst, Address src, int value) { void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
assert(VM_Version::supports_avx(), ""); assert(VM_Version::supports_avx(), "");
assert(dst != xnoreg, "sanity"); assert(dst != xnoreg, "sanity");
assert(imm8 <= 0x03, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
InstructionMark im(this); InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
// swap src<->dst for encoding // swap src<->dst for encoding
vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x18); emit_int8(0x18);
emit_operand(dst, src); emit_operand(dst, src);
// 0x00 - insert into q0 128 bits (0..127) // 0x00 - insert into q0 128 bits (0..127)
// 0x01 - insert into q1 128 bits (128..255) // 0x01 - insert into q1 128 bits (128..255)
// 0x02 - insert into q2 128 bits (256..383) // 0x02 - insert into q2 128 bits (256..383)
// 0x03 - insert into q3 128 bits (384..511) // 0x03 - insert into q3 128 bits (384..511)
emit_int8(value & 0x3); emit_int8(imm8 & 0x03);
} }
void Assembler::vinsertf128h(XMMRegister dst, Address src) { void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
assert(VM_Version::supports_avx(), ""); assert(VM_Version::supports_avx(), "");
assert(dst != xnoreg, "sanity"); assert(dst != xnoreg, "sanity");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
InstructionMark im(this); InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
// swap src<->dst for encoding // swap src<->dst for encoding
vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x18); emit_int8(0x18);
emit_operand(dst, src); emit_operand(dst, src);
// 0x00 - insert into lower 128 bits
// 0x01 - insert into upper 128 bits // 0x01 - insert into upper 128 bits
emit_int8(0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vextractf128h(XMMRegister dst, XMMRegister src) { void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx(), ""); assert(VM_Version::supports_avx(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x19); emit_int8(0x19);
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
// 0x00 - insert into lower 128 bits // 0x00 - extract from lower 128 bits
// 0x01 - insert into upper 128 bits // 0x01 - extract from upper 128 bits
emit_int8(0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vextractf128h(Address dst, XMMRegister src) { void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx(), ""); assert(VM_Version::supports_avx(), "");
assert(src != xnoreg, "sanity"); assert(src != xnoreg, "sanity");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
InstructionMark im(this); InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
@ -5720,12 +5820,14 @@ void Assembler::vextractf128h(Address dst, XMMRegister src) {
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x19); emit_int8(0x19);
emit_operand(src, dst); emit_operand(src, dst);
// 0x00 - extract from lower 128 bits
// 0x01 - extract from upper 128 bits // 0x01 - extract from upper 128 bits
emit_int8(0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx2(), ""); assert(VM_Version::supports_avx2(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int nds_enc = nds->is_valid() ? nds->encoding() : 0; int nds_enc = nds->is_valid() ? nds->encoding() : 0;
@ -5734,11 +5836,12 @@ void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src)
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
// 0x00 - insert into lower 128 bits // 0x00 - insert into lower 128 bits
// 0x01 - insert into upper 128 bits // 0x01 - insert into upper 128 bits
emit_int8(0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) { void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_evex(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int nds_enc = nds->is_valid() ? nds->encoding() : 0; int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -5746,39 +5849,44 @@ void Assembler::vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src,
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
// 0x00 - insert into lower 256 bits // 0x00 - insert into lower 256 bits
// 0x01 - insert into upper 256 bits // 0x01 - insert into upper 256 bits
emit_int8(value & 0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vinserti128h(XMMRegister dst, Address src) { void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
assert(VM_Version::supports_avx2(), ""); assert(VM_Version::supports_avx2(), "");
assert(dst != xnoreg, "sanity"); assert(dst != xnoreg, "sanity");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
InstructionMark im(this); InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
// swap src<->dst for encoding // swap src<->dst for encoding
vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x38); emit_int8(0x38);
emit_operand(dst, src); emit_operand(dst, src);
// 0x00 - insert into lower 128 bits
// 0x01 - insert into upper 128 bits // 0x01 - insert into upper 128 bits
emit_int8(0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vextracti128h(XMMRegister dst, XMMRegister src) { void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx(), ""); assert(VM_Version::supports_avx(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x39); emit_int8(0x39);
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
// 0x00 - insert into lower 128 bits // 0x00 - extract from lower 128 bits
// 0x01 - insert into upper 128 bits // 0x01 - extract from upper 128 bits
emit_int8(0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vextracti128h(Address dst, XMMRegister src) { void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx2(), ""); assert(VM_Version::supports_avx2(), "");
assert(src != xnoreg, "sanity"); assert(src != xnoreg, "sanity");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
InstructionMark im(this); InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
@ -5786,47 +5894,53 @@ void Assembler::vextracti128h(Address dst, XMMRegister src) {
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x39); emit_int8(0x39);
emit_operand(src, dst); emit_operand(src, dst);
// 0x00 - extract from lower 128 bits
// 0x01 - extract from upper 128 bits // 0x01 - extract from upper 128 bits
emit_int8(0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vextracti64x4h(XMMRegister dst, XMMRegister src, int value) { void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_evex(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x3B); emit_int8(0x3B);
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
// 0x00 - extract from lower 256 bits // 0x00 - extract from lower 256 bits
// 0x01 - extract from upper 256 bits // 0x01 - extract from upper 256 bits
emit_int8(value & 0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vextracti64x2h(XMMRegister dst, XMMRegister src, int value) { void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_evex(), "");
assert(imm8 <= 0x03, "imm8: %u", imm8);
InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x39); emit_int8(0x39);
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
// 0x00 - extract from bits 127:0
// 0x01 - extract from bits 255:128 // 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256 // 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384 // 0x03 - extract from bits 511:384
emit_int8(value & 0x3); emit_int8(imm8 & 0x03);
} }
void Assembler::vextractf64x4h(XMMRegister dst, XMMRegister src, int value) { void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_evex(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x1B); emit_int8(0x1B);
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
// 0x00 - extract from lower 256 bits // 0x00 - extract from lower 256 bits
// 0x01 - extract from upper 256 bits // 0x01 - extract from upper 256 bits
emit_int8(value & 0x1); emit_int8(imm8 & 0x01);
} }
void Assembler::vextractf64x4h(Address dst, XMMRegister src, int value) { void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_evex(), "");
assert(src != xnoreg, "sanity"); assert(src != xnoreg, "sanity");
assert(imm8 <= 0x01, "imm8: %u", imm8);
InstructionMark im(this); InstructionMark im(this);
InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit); attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit);
@ -5835,11 +5949,12 @@ void Assembler::vextractf64x4h(Address dst, XMMRegister src, int value) {
emit_operand(src, dst); emit_operand(src, dst);
// 0x00 - extract from lower 256 bits // 0x00 - extract from lower 256 bits
// 0x01 - extract from upper 256 bits // 0x01 - extract from upper 256 bits
emit_int8(value & 0x01); emit_int8(imm8 & 0x01);
} }
void Assembler::vextractf32x4h(XMMRegister dst, XMMRegister src, int value) { void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx(), ""); assert(VM_Version::supports_avx(), "");
assert(imm8 <= 0x03, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -5849,12 +5964,13 @@ void Assembler::vextractf32x4h(XMMRegister dst, XMMRegister src, int value) {
// 0x01 - extract from bits 255:128 // 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256 // 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384 // 0x03 - extract from bits 511:384
emit_int8(value & 0x3); emit_int8(imm8 & 0x03);
} }
void Assembler::vextractf32x4h(Address dst, XMMRegister src, int value) { void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_evex(), "");
assert(src != xnoreg, "sanity"); assert(src != xnoreg, "sanity");
assert(imm8 <= 0x03, "imm8: %u", imm8);
InstructionMark im(this); InstructionMark im(this);
InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
@ -5865,19 +5981,21 @@ void Assembler::vextractf32x4h(Address dst, XMMRegister src, int value) {
// 0x01 - extract from bits 255:128 // 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256 // 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384 // 0x03 - extract from bits 511:384
emit_int8(value & 0x3); emit_int8(imm8 & 0x03);
} }
void Assembler::vextractf64x2h(XMMRegister dst, XMMRegister src, int value) { void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_evex(), "");
assert(imm8 <= 0x03, "imm8: %u", imm8);
InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x19); emit_int8(0x19);
emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)(0xC0 | encode));
// 0x00 - extract from bits 127:0
// 0x01 - extract from bits 255:128 // 0x01 - extract from bits 255:128
// 0x02 - extract from bits 383:256 // 0x02 - extract from bits 383:256
// 0x03 - extract from bits 511:384 // 0x03 - extract from bits 511:384
emit_int8(value & 0x3); emit_int8(imm8 & 0x03);
} }
// duplicate 4-bytes integer data from src into 8 locations in dest // duplicate 4-bytes integer data from src into 8 locations in dest

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1672,6 +1672,18 @@ private:
void setb(Condition cc, Register dst); void setb(Condition cc, Register dst);
void palignr(XMMRegister dst, XMMRegister src, int imm8);
void pblendw(XMMRegister dst, XMMRegister src, int imm8);
void sha1rnds4(XMMRegister dst, XMMRegister src, int imm8);
void sha1nexte(XMMRegister dst, XMMRegister src);
void sha1msg1(XMMRegister dst, XMMRegister src);
void sha1msg2(XMMRegister dst, XMMRegister src);
// xmm0 is implicit additional source to the following instruction.
void sha256rnds2(XMMRegister dst, XMMRegister src);
void sha256msg1(XMMRegister dst, XMMRegister src);
void sha256msg2(XMMRegister dst, XMMRegister src);
void shldl(Register dst, Register src); void shldl(Register dst, Register src);
void shldl(Register dst, Register src, int8_t imm8); void shldl(Register dst, Register src, int8_t imm8);
@ -1868,6 +1880,7 @@ private:
void paddb(XMMRegister dst, XMMRegister src); void paddb(XMMRegister dst, XMMRegister src);
void paddw(XMMRegister dst, XMMRegister src); void paddw(XMMRegister dst, XMMRegister src);
void paddd(XMMRegister dst, XMMRegister src); void paddd(XMMRegister dst, XMMRegister src);
void paddd(XMMRegister dst, Address src);
void paddq(XMMRegister dst, XMMRegister src); void paddq(XMMRegister dst, XMMRegister src);
void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
@ -1958,33 +1971,31 @@ private:
void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len); void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
// Copy low 128bit into high 128bit of YMM registers. // 128bit copy from/to 256bit (YMM) vector registers
void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src); void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src); void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
void vextractf128h(XMMRegister dst, XMMRegister src); void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vextracti128h(XMMRegister dst, XMMRegister src); void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
void vextractf128(Address dst, XMMRegister src, uint8_t imm8);
void vextracti128(Address dst, XMMRegister src, uint8_t imm8);
// Load/store high 128bit of YMM registers which does not destroy other half. // 256bit copy from/to 512bit (ZMM) vector registers
void vinsertf128h(XMMRegister dst, Address src); void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
void vinserti128h(XMMRegister dst, Address src); void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
void vextractf128h(Address dst, XMMRegister src); void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vextracti128h(Address dst, XMMRegister src); void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8);
void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
// Copy low 256bit into high 256bit of ZMM registers. // 128bit copy from/to 256bit (YMM) or 512bit (ZMM) vector registers
void vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value); void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value); void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vextracti64x4h(XMMRegister dst, XMMRegister src, int value); void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
void vextractf64x4h(XMMRegister dst, XMMRegister src, int value); void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8);
void vextractf64x4h(Address dst, XMMRegister src, int value); void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
void vinsertf64x4h(XMMRegister dst, Address src, int value); void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
// Copy targeted 128bit segments of the ZMM registers
void vextracti64x2h(XMMRegister dst, XMMRegister src, int value);
void vextractf64x2h(XMMRegister dst, XMMRegister src, int value);
void vextractf32x4h(XMMRegister dst, XMMRegister src, int value);
void vextractf32x4h(Address dst, XMMRegister src, int value);
void vinsertf32x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value);
void vinsertf32x4h(XMMRegister dst, Address src, int value);
// duplicate 4-bytes integer data from src into 8 locations in dest // duplicate 4-bytes integer data from src into 8 locations in dest
void vpbroadcastd(XMMRegister dst, XMMRegister src); void vpbroadcastd(XMMRegister dst, XMMRegister src);

View File

@ -97,6 +97,8 @@ define_pd_global(bool, CompactStrings, true);
define_pd_global(bool, PreserveFramePointer, false); define_pd_global(bool, PreserveFramePointer, false);
define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \ #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint) \
\ \
develop(bool, IEEEPrecision, true, \ develop(bool, IEEEPrecision, true, \

View File

@ -3445,7 +3445,7 @@ void MacroAssembler::movptr(Address dst, Register src) {
void MacroAssembler::movdqu(Address dst, XMMRegister src) { void MacroAssembler::movdqu(Address dst, XMMRegister src) {
if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (src->encoding() > 15)) { if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (src->encoding() > 15)) {
Assembler::vextractf32x4h(dst, src, 0); Assembler::vextractf32x4(dst, src, 0);
} else { } else {
Assembler::movdqu(dst, src); Assembler::movdqu(dst, src);
} }
@ -3453,7 +3453,7 @@ void MacroAssembler::movdqu(Address dst, XMMRegister src) {
void MacroAssembler::movdqu(XMMRegister dst, Address src) { void MacroAssembler::movdqu(XMMRegister dst, Address src) {
if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (dst->encoding() > 15)) { if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (dst->encoding() > 15)) {
Assembler::vinsertf32x4h(dst, src, 0); Assembler::vinsertf32x4(dst, dst, src, 0);
} else { } else {
Assembler::movdqu(dst, src); Assembler::movdqu(dst, src);
} }
@ -3478,7 +3478,7 @@ void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (src->encoding() > 15)) { if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (src->encoding() > 15)) {
Assembler::vextractf64x4h(dst, src, 0); vextractf64x4_low(dst, src);
} else { } else {
Assembler::vmovdqu(dst, src); Assembler::vmovdqu(dst, src);
} }
@ -3486,7 +3486,7 @@ void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
void MacroAssembler::vmovdqu(XMMRegister dst, Address src) { void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (dst->encoding() > 15)) { if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (dst->encoding() > 15)) {
Assembler::vinsertf64x4h(dst, src, 0); vinsertf64x4_low(dst, src);
} else { } else {
Assembler::vmovdqu(dst, src); Assembler::vmovdqu(dst, src);
} }
@ -5649,14 +5649,14 @@ void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int
// Save upper half of ZMM registers // Save upper half of ZMM registers
subptr(rsp, 32*num_xmm_regs); subptr(rsp, 32*num_xmm_regs);
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
vextractf64x4h(Address(rsp, n*32), as_XMMRegister(n), 1); vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
} }
} }
assert(UseAVX > 0, "256 bit vectors are supported only with AVX"); assert(UseAVX > 0, "256 bit vectors are supported only with AVX");
// Save upper half of YMM registers // Save upper half of YMM registers
subptr(rsp, 16*num_xmm_regs); subptr(rsp, 16*num_xmm_regs);
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
vextractf128h(Address(rsp, n*16), as_XMMRegister(n)); vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
} }
} }
#endif #endif
@ -5665,7 +5665,7 @@ void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int
#ifdef _LP64 #ifdef _LP64
if (VM_Version::supports_evex()) { if (VM_Version::supports_evex()) {
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
vextractf32x4h(Address(rsp, n*16), as_XMMRegister(n), 0); vextractf32x4(Address(rsp, n*16), as_XMMRegister(n), 0);
} }
} else { } else {
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
@ -5753,7 +5753,7 @@ void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int
#ifdef _LP64 #ifdef _LP64
if (VM_Version::supports_evex()) { if (VM_Version::supports_evex()) {
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
vinsertf32x4h(as_XMMRegister(n), Address(rsp, n*16), 0); vinsertf32x4(as_XMMRegister(n), as_XMMRegister(n), Address(rsp, n*16), 0);
} }
} else { } else {
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
@ -5771,12 +5771,12 @@ void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int
if (MaxVectorSize > 16) { if (MaxVectorSize > 16) {
// Restore upper half of YMM registers. // Restore upper half of YMM registers.
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
vinsertf128h(as_XMMRegister(n), Address(rsp, n*16)); vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
} }
addptr(rsp, 16*num_xmm_regs); addptr(rsp, 16*num_xmm_regs);
if(UseAVX > 2) { if(UseAVX > 2) {
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
vinsertf64x4h(as_XMMRegister(n), Address(rsp, n*32), 1); vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
} }
addptr(rsp, 32*num_xmm_regs); addptr(rsp, 32*num_xmm_regs);
} }
@ -7198,21 +7198,50 @@ void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_
} }
void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp) { void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, bool is_large) {
// cnt - number of qwords (8-byte words). // cnt - number of qwords (8-byte words).
// base - start address, qword aligned. // base - start address, qword aligned.
// is_large - if optimizers know cnt is larger than InitArrayShortSize
assert(base==rdi, "base register must be edi for rep stos"); assert(base==rdi, "base register must be edi for rep stos");
assert(tmp==rax, "tmp register must be eax for rep stos"); assert(tmp==rax, "tmp register must be eax for rep stos");
assert(cnt==rcx, "cnt register must be ecx for rep stos"); assert(cnt==rcx, "cnt register must be ecx for rep stos");
assert(InitArrayShortSize % BytesPerLong == 0,
"InitArrayShortSize should be the multiple of BytesPerLong");
Label DONE;
xorptr(tmp, tmp); xorptr(tmp, tmp);
if (!is_large) {
Label LOOP, LONG;
cmpptr(cnt, InitArrayShortSize/BytesPerLong);
jccb(Assembler::greater, LONG);
NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
decrement(cnt);
jccb(Assembler::negative, DONE); // Zero length
// Use individual pointer-sized stores for small counts:
BIND(LOOP);
movptr(Address(base, cnt, Address::times_ptr), tmp);
decrement(cnt);
jccb(Assembler::greaterEqual, LOOP);
jmpb(DONE);
BIND(LONG);
}
// Use longer rep-prefixed ops for non-small counts:
if (UseFastStosb) { if (UseFastStosb) {
shlptr(cnt,3); // convert to number of bytes shlptr(cnt, 3); // convert to number of bytes
rep_stosb(); rep_stosb();
} else { } else {
NOT_LP64(shlptr(cnt,1);) // convert to number of dwords for 32-bit VM NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
rep_stos(); rep_stos();
} }
BIND(DONE);
} }
#ifdef COMPILER2 #ifdef COMPILER2

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,6 @@ class MacroAssembler: public Assembler {
// This is the base routine called by the different versions of call_VM_leaf. The interpreter // This is the base routine called by the different versions of call_VM_leaf. The interpreter
// may customize this version by overriding it for its purposes (e.g., to save/restore // may customize this version by overriding it for its purposes (e.g., to save/restore
// additional registers when doing a VM call). // additional registers when doing a VM call).
#define COMMA ,
virtual void call_VM_leaf_base( virtual void call_VM_leaf_base(
address entry_point, // the entry point address entry_point, // the entry point
@ -903,35 +902,66 @@ class MacroAssembler: public Assembler {
void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
void ldmxcsr(AddressLiteral src); void ldmxcsr(AddressLiteral src);
void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
Register buf, Register state, Register ofs, Register limit, Register rsp,
bool multi_block);
#ifdef _LP64
void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
Register buf, Register state, Register ofs, Register limit, Register rsp,
bool multi_block, XMMRegister shuf_mask);
#else
void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
Register buf, Register state, Register ofs, Register limit, Register rsp,
bool multi_block);
#endif
void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
Register rax, Register rcx, Register rdx, Register tmp); Register rax, Register rcx, Register rdx, Register tmp);
#ifdef _LP64
void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
Register rax, Register rcx, Register rdx, Register tmp1 LP64_ONLY(COMMA Register tmp2)); Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);
void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
Register rdx NOT_LP64(COMMA Register tmp) LP64_ONLY(COMMA Register tmp1) Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);
LP64_ONLY(COMMA Register tmp2) LP64_ONLY(COMMA Register tmp3) LP64_ONLY(COMMA Register tmp4));
void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
Register rax, Register rbx LP64_ONLY(COMMA Register rcx), Register rdx Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
LP64_ONLY(COMMA Register tmp1) LP64_ONLY(COMMA Register tmp2) Register tmp3, Register tmp4);
LP64_ONLY(COMMA Register tmp3) LP64_ONLY(COMMA Register tmp4));
void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
Register rax, Register rcx, Register rdx NOT_LP64(COMMA Register tmp) Register rax, Register rcx, Register rdx, Register tmp1,
LP64_ONLY(COMMA Register r8) LP64_ONLY(COMMA Register r9) Register tmp2, Register tmp3, Register tmp4);
LP64_ONLY(COMMA Register r10) LP64_ONLY(COMMA Register r11)); #else
void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
Register rax, Register rcx, Register rdx, Register tmp1);
void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
Register rdx, Register tmp);
void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
Register rax, Register rbx, Register rdx);
void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
Register rax, Register rcx, Register rdx, Register tmp);
#ifndef _LP64
void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx, void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
Register edx, Register ebx, Register esi, Register edi, Register edx, Register ebx, Register esi, Register edi,
Register ebp, Register esp); Register ebp, Register esp);
void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx, void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
Register esi, Register edi, Register ebp, Register esp); Register esi, Register edi, Register ebp, Register esp);
#endif #endif
@ -1185,14 +1215,131 @@ public:
void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); } void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); } void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
// Move packed integer values from low 128 bit to hign 128 bit in 256 bit vector. void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { if (UseAVX > 1) { // vinserti128 is available only in AVX2
if (UseAVX > 1) // vinserti128h is available only in AVX2 Assembler::vinserti128(dst, nds, src, imm8);
Assembler::vinserti128h(dst, nds, src); } else {
else Assembler::vinsertf128(dst, nds, src, imm8);
Assembler::vinsertf128h(dst, nds, src); }
} }
void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
if (UseAVX > 1) { // vinserti128 is available only in AVX2
Assembler::vinserti128(dst, nds, src, imm8);
} else {
Assembler::vinsertf128(dst, nds, src, imm8);
}
}
void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
if (UseAVX > 1) { // vextracti128 is available only in AVX2
Assembler::vextracti128(dst, src, imm8);
} else {
Assembler::vextractf128(dst, src, imm8);
}
}
void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
if (UseAVX > 1) { // vextracti128 is available only in AVX2
Assembler::vextracti128(dst, src, imm8);
} else {
Assembler::vextractf128(dst, src, imm8);
}
}
// 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
void vinserti128_high(XMMRegister dst, XMMRegister src) {
vinserti128(dst, dst, src, 1);
}
void vinserti128_high(XMMRegister dst, Address src) {
vinserti128(dst, dst, src, 1);
}
void vextracti128_high(XMMRegister dst, XMMRegister src) {
vextracti128(dst, src, 1);
}
void vextracti128_high(Address dst, XMMRegister src) {
vextracti128(dst, src, 1);
}
void vinsertf128_high(XMMRegister dst, XMMRegister src) {
vinsertf128(dst, dst, src, 1);
}
void vinsertf128_high(XMMRegister dst, Address src) {
vinsertf128(dst, dst, src, 1);
}
void vextractf128_high(XMMRegister dst, XMMRegister src) {
vextractf128(dst, src, 1);
}
void vextractf128_high(Address dst, XMMRegister src) {
vextractf128(dst, src, 1);
}
// 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
vinserti64x4(dst, dst, src, 1);
}
void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
vinsertf64x4(dst, dst, src, 1);
}
void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
vextracti64x4(dst, src, 1);
}
void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
vextractf64x4(dst, src, 1);
}
void vextractf64x4_high(Address dst, XMMRegister src) {
vextractf64x4(dst, src, 1);
}
void vinsertf64x4_high(XMMRegister dst, Address src) {
vinsertf64x4(dst, dst, src, 1);
}
// 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
void vinserti128_low(XMMRegister dst, XMMRegister src) {
vinserti128(dst, dst, src, 0);
}
void vinserti128_low(XMMRegister dst, Address src) {
vinserti128(dst, dst, src, 0);
}
void vextracti128_low(XMMRegister dst, XMMRegister src) {
vextracti128(dst, src, 0);
}
void vextracti128_low(Address dst, XMMRegister src) {
vextracti128(dst, src, 0);
}
void vinsertf128_low(XMMRegister dst, XMMRegister src) {
vinsertf128(dst, dst, src, 0);
}
void vinsertf128_low(XMMRegister dst, Address src) {
vinsertf128(dst, dst, src, 0);
}
void vextractf128_low(XMMRegister dst, XMMRegister src) {
vextractf128(dst, src, 0);
}
void vextractf128_low(Address dst, XMMRegister src) {
vextractf128(dst, src, 0);
}
// 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
vinserti64x4(dst, dst, src, 0);
}
void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
vinsertf64x4(dst, dst, src, 0);
}
void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
vextracti64x4(dst, src, 0);
}
void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
vextractf64x4(dst, src, 0);
}
void vextractf64x4_low(Address dst, XMMRegister src) {
vextractf64x4(dst, src, 0);
}
void vinsertf64x4_low(XMMRegister dst, Address src) {
vinsertf64x4(dst, dst, src, 0);
}
// Carry-Less Multiplication Quadword // Carry-Less Multiplication Quadword
void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
// 0x00 - multiply lower 64 bits [0:63] // 0x00 - multiply lower 64 bits [0:63]
@ -1284,8 +1431,9 @@ public:
// C2 compiled method's prolog code. // C2 compiled method's prolog code.
void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b); void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);
// clear memory of size 'cnt' qwords, starting at 'base'. // clear memory of size 'cnt' qwords, starting at 'base';
void clear_mem(Register base, Register cnt, Register rtmp); // if 'is_large' is set, do not try to produce short loop
void clear_mem(Register base, Register cnt, Register rtmp, bool is_large);
#ifdef COMPILER2 #ifdef COMPILER2
void string_indexof_char(Register str1, Register cnt1, Register ch, Register result, void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,

View File

@ -0,0 +1,495 @@
/*
* Copyright (c) 2016, Intel Corporation.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "runtime/stubRoutines.hpp"
#include "macroAssembler_x86.hpp"
// ofs and limit are used for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
//
// SHA-1 block compression using the Intel SHA instruction-set extensions
// (sha1rnds4 / sha1nexte / sha1msg1 / sha1msg2).
//
// Register roles:
//   abcd        - packed working variables a,b,c,d
//   e0, e1      - alternate holders of the e working variable / round input
//   msg0..msg3  - the four 16-byte message-schedule blocks
//   shuf_mask   - scratch for the two constant masks loaded below
//   buf         - pointer to the input data block(s)
//   state       - pointer to the SHA-1 state: 16 bytes a,b,c,d then e at offset 16
//   ofs, limit  - current offset / end offset, used only when multi_block is true
//   rsp         - pointer to a 32-byte scratch area used to save the state
//                 for the final feed-forward addition
// When multi_block is true, the code loops over consecutive 64-byte blocks
// until ofs > limit and leaves the updated ofs in rax as the return value.
void MacroAssembler::fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block) {
Label start, done_hash, loop0;
address upper_word_mask = StubRoutines::x86::upper_word_mask_addr();
address shuffle_byte_flip_mask = StubRoutines::x86::shuffle_byte_flip_mask_addr();
bind(start);
// Load a,b,c,d and place e in the high dword of e0, masking the rest to zero.
movdqu(abcd, Address(state, 0));
pinsrd(e0, Address(state, 16), 3);
movdqu(shuf_mask, ExternalAddress(upper_word_mask)); // 0xFFFFFFFF000000000000000000000000
pand(e0, shuf_mask);
// Reverse the dword order of a,b,c,d for the SHA instructions.
pshufd(abcd, abcd, 0x1B);
// shuf_mask is reused below (pshufb) to byte-swap each loaded message dword.
movdqu(shuf_mask, ExternalAddress(shuffle_byte_flip_mask)); //0x000102030405060708090a0b0c0d0e0f
bind(loop0);
// Save hash values for addition after rounds
movdqu(Address(rsp, 0), e0);
movdqu(Address(rsp, 16), abcd);
// Rounds 0 - 3
movdqu(msg0, Address(buf, 0));
pshufb(msg0, shuf_mask);
paddd(e0, msg0);
movdqa(e1, abcd);
sha1rnds4(abcd, e0, 0);
// Rounds 4 - 7
movdqu(msg1, Address(buf, 16));
pshufb(msg1, shuf_mask);
sha1nexte(e1, msg1);
movdqa(e0, abcd);
sha1rnds4(abcd, e1, 0);
sha1msg1(msg0, msg1);
// Rounds 8 - 11
movdqu(msg2, Address(buf, 32));
pshufb(msg2, shuf_mask);
sha1nexte(e0, msg2);
movdqa(e1, abcd);
sha1rnds4(abcd, e0, 0);
sha1msg1(msg1, msg2);
pxor(msg0, msg2);
// Rounds 12 - 15
movdqu(msg3, Address(buf, 48));
pshufb(msg3, shuf_mask);
sha1nexte(e1, msg3);
movdqa(e0, abcd);
sha1msg2(msg0, msg3);
sha1rnds4(abcd, e1, 0);
sha1msg1(msg2, msg3);
pxor(msg1, msg3);
// From here on the message schedule is extended in-register with
// sha1msg1/sha1msg2/pxor; the last immediate of sha1rnds4 selects the
// round-group constant/function (0: rounds 0-19, 1: 20-39, 2: 40-59, 3: 60-79).
// Rounds 16 - 19
sha1nexte(e0, msg0);
movdqa(e1, abcd);
sha1msg2(msg1, msg0);
sha1rnds4(abcd, e0, 0);
sha1msg1(msg3, msg0);
pxor(msg2, msg0);
// Rounds 20 - 23
sha1nexte(e1, msg1);
movdqa(e0, abcd);
sha1msg2(msg2, msg1);
sha1rnds4(abcd, e1, 1);
sha1msg1(msg0, msg1);
pxor(msg3, msg1);
// Rounds 24 - 27
sha1nexte(e0, msg2);
movdqa(e1, abcd);
sha1msg2(msg3, msg2);
sha1rnds4(abcd, e0, 1);
sha1msg1(msg1, msg2);
pxor(msg0, msg2);
// Rounds 28 - 31
sha1nexte(e1, msg3);
movdqa(e0, abcd);
sha1msg2(msg0, msg3);
sha1rnds4(abcd, e1, 1);
sha1msg1(msg2, msg3);
pxor(msg1, msg3);
// Rounds 32 - 35
sha1nexte(e0, msg0);
movdqa(e1, abcd);
sha1msg2(msg1, msg0);
sha1rnds4(abcd, e0, 1);
sha1msg1(msg3, msg0);
pxor(msg2, msg0);
// Rounds 36 - 39
sha1nexte(e1, msg1);
movdqa(e0, abcd);
sha1msg2(msg2, msg1);
sha1rnds4(abcd, e1, 1);
sha1msg1(msg0, msg1);
pxor(msg3, msg1);
// Rounds 40 - 43
sha1nexte(e0, msg2);
movdqa(e1, abcd);
sha1msg2(msg3, msg2);
sha1rnds4(abcd, e0, 2);
sha1msg1(msg1, msg2);
pxor(msg0, msg2);
// Rounds 44 - 47
sha1nexte(e1, msg3);
movdqa(e0, abcd);
sha1msg2(msg0, msg3);
sha1rnds4(abcd, e1, 2);
sha1msg1(msg2, msg3);
pxor(msg1, msg3);
// Rounds 48 - 51
sha1nexte(e0, msg0);
movdqa(e1, abcd);
sha1msg2(msg1, msg0);
sha1rnds4(abcd, e0, 2);
sha1msg1(msg3, msg0);
pxor(msg2, msg0);
// Rounds 52 - 55
sha1nexte(e1, msg1);
movdqa(e0, abcd);
sha1msg2(msg2, msg1);
sha1rnds4(abcd, e1, 2);
sha1msg1(msg0, msg1);
pxor(msg3, msg1);
// Rounds 56 - 59
sha1nexte(e0, msg2);
movdqa(e1, abcd);
sha1msg2(msg3, msg2);
sha1rnds4(abcd, e0, 2);
sha1msg1(msg1, msg2);
pxor(msg0, msg2);
// Rounds 60 - 63
sha1nexte(e1, msg3);
movdqa(e0, abcd);
sha1msg2(msg0, msg3);
sha1rnds4(abcd, e1, 3);
sha1msg1(msg2, msg3);
pxor(msg1, msg3);
// Rounds 64 - 67
sha1nexte(e0, msg0);
movdqa(e1, abcd);
sha1msg2(msg1, msg0);
sha1rnds4(abcd, e0, 3);
sha1msg1(msg3, msg0);
pxor(msg2, msg0);
// Rounds 68 - 71
sha1nexte(e1, msg1);
movdqa(e0, abcd);
sha1msg2(msg2, msg1);
sha1rnds4(abcd, e1, 3);
pxor(msg3, msg1);
// Rounds 72 - 75
sha1nexte(e0, msg2);
movdqa(e1, abcd);
sha1msg2(msg3, msg2);
sha1rnds4(abcd, e0, 3);
// Rounds 76 - 79
sha1nexte(e1, msg3);
movdqa(e0, abcd);
sha1rnds4(abcd, e1, 3);
// add current hash values with previously saved
movdqu(msg0, Address(rsp, 0));
sha1nexte(e0, msg0);
movdqu(msg0, Address(rsp, 16));
paddd(abcd, msg0);
if (multi_block) {
// increment data pointer and loop if more to process
addptr(buf, 64);
addptr(ofs, 64);
cmpptr(ofs, limit);
jcc(Assembler::belowEqual, loop0);
movptr(rax, ofs); //return ofs
}
// write hash values back in the correct order
pshufd(abcd, abcd, 0x1b);
movdqu(Address(state, 0), abcd);
// Store e from the high dword of e0 back to state+16.
pextrd(Address(state, 16), e0, 3);
bind(done_hash);
}
// xmm0 (msg) is used as an implicit argument to sh256rnds2
// and state0 and state1 can never use xmm0 register.
// ofs and limit are used for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
//
// SHA-256 block compression using the Intel SHA instruction-set extensions
// (sha256rnds2 / sha256msg1 / sha256msg2).
//
// Register roles:
//   msg            - current message/round-constant input (must be xmm0, see above)
//   state0, state1 - the eight 32-bit working variables, re-ordered for sha256rnds2
//   msgtmp0..3     - the four 16-byte message-schedule blocks
//   msgtmp4        - scratch used for palignr-based schedule combination
//   buf            - pointer to the input data block(s)
//   state          - pointer to the 32-byte SHA-256 state
//   ofs, limit     - current offset / end offset, used only when multi_block is true
//   rsp            - pointer to a 32-byte scratch area for the feed-forward save
//   shuf_mask      - (LP64 only) holds the byte-flip mask; on 32-bit the mask is
//                    referenced from memory in each pshufb instead
// When multi_block is true, the code loops over consecutive 64-byte blocks
// until ofs > limit and leaves the updated ofs in rax as the return value.
// NOTE: rax is also used below as the base pointer to the K256 round-constant
// table, so it is reloaded (lea) before each pass would need it again.
#ifdef _LP64
void MacroAssembler::fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
Register buf, Register state, Register ofs, Register limit, Register rsp,
bool multi_block, XMMRegister shuf_mask) {
#else
void MacroAssembler::fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
Register buf, Register state, Register ofs, Register limit, Register rsp,
bool multi_block) {
#endif
Label start, done_hash, loop0;
address K256 = StubRoutines::x86::k256_addr();
address pshuffle_byte_flip_mask = StubRoutines::x86::pshuffle_byte_flip_mask_addr();
bind(start);
movdqu(state0, Address(state, 0));
movdqu(state1, Address(state, 16));
// Re-order the eight state words into the two-register lane layout
// expected by sha256rnds2 (see Intel SHA extensions documentation).
pshufd(state0, state0, 0xB1);
pshufd(state1, state1, 0x1B);
movdqa(msgtmp4, state0);
palignr(state0, state1, 8);
pblendw(state1, msgtmp4, 0xF0);
#ifdef _LP64
movdqu(shuf_mask, ExternalAddress(pshuffle_byte_flip_mask));
#endif
// rax = base of the K256 round-constant table; each round group below
// adds Address(rax, 16*i) to the message words.
lea(rax, ExternalAddress(K256));
bind(loop0);
// Save current state for the feed-forward addition after the 64 rounds.
movdqu(Address(rsp, 0), state0);
movdqu(Address(rsp, 16), state1);
// Rounds 0-3
movdqu(msg, Address(buf, 0));
#ifdef _LP64
pshufb(msg, shuf_mask);
#else
pshufb(msg, ExternalAddress(pshuffle_byte_flip_mask));
#endif
movdqa(msgtmp0, msg);
paddd(msg, Address(rax, 0));
sha256rnds2(state1, state0);
// Move the upper two message words into the low qword for the next
// two rounds (sha256rnds2 consumes two rounds per invocation).
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
// Rounds 4-7
movdqu(msg, Address(buf, 16));
#ifdef _LP64
pshufb(msg, shuf_mask);
#else
pshufb(msg, ExternalAddress(pshuffle_byte_flip_mask));
#endif
movdqa(msgtmp1, msg);
paddd(msg, Address(rax, 16));
sha256rnds2(state1, state0);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp0, msgtmp1);
// Rounds 8-11
movdqu(msg, Address(buf, 32));
#ifdef _LP64
pshufb(msg, shuf_mask);
#else
pshufb(msg, ExternalAddress(pshuffle_byte_flip_mask));
#endif
movdqa(msgtmp2, msg);
paddd(msg, Address(rax, 32));
sha256rnds2(state1, state0);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp1, msgtmp2);
// Rounds 12-15
movdqu(msg, Address(buf, 48));
#ifdef _LP64
pshufb(msg, shuf_mask);
#else
pshufb(msg, ExternalAddress(pshuffle_byte_flip_mask));
#endif
movdqa(msgtmp3, msg);
paddd(msg, Address(rax, 48));
sha256rnds2(state1, state0);
// Message-schedule extension: palignr combines adjacent schedule blocks,
// then sha256msg1/sha256msg2 complete the sigma computations.
movdqa(msgtmp4, msgtmp3);
palignr(msgtmp4, msgtmp2, 4);
paddd(msgtmp0, msgtmp4);
sha256msg2(msgtmp0, msgtmp3);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp2, msgtmp3);
// Rounds 16-19
movdqa(msg, msgtmp0);
paddd(msg, Address(rax, 64));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp0);
palignr(msgtmp4, msgtmp3, 4);
paddd(msgtmp1, msgtmp4);
sha256msg2(msgtmp1, msgtmp0);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp3, msgtmp0);
// Rounds 20-23
movdqa(msg, msgtmp1);
paddd(msg, Address(rax, 80));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp1);
palignr(msgtmp4, msgtmp0, 4);
paddd(msgtmp2, msgtmp4);
sha256msg2(msgtmp2, msgtmp1);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp0, msgtmp1);
// Rounds 24-27
movdqa(msg, msgtmp2);
paddd(msg, Address(rax, 96));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp2);
palignr(msgtmp4, msgtmp1, 4);
paddd(msgtmp3, msgtmp4);
sha256msg2(msgtmp3, msgtmp2);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp1, msgtmp2);
// Rounds 28-31
movdqa(msg, msgtmp3);
paddd(msg, Address(rax, 112));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp3);
palignr(msgtmp4, msgtmp2, 4);
paddd(msgtmp0, msgtmp4);
sha256msg2(msgtmp0, msgtmp3);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp2, msgtmp3);
// Rounds 32-35
movdqa(msg, msgtmp0);
paddd(msg, Address(rax, 128));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp0);
palignr(msgtmp4, msgtmp3, 4);
paddd(msgtmp1, msgtmp4);
sha256msg2(msgtmp1, msgtmp0);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp3, msgtmp0);
// Rounds 36-39
movdqa(msg, msgtmp1);
paddd(msg, Address(rax, 144));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp1);
palignr(msgtmp4, msgtmp0, 4);
paddd(msgtmp2, msgtmp4);
sha256msg2(msgtmp2, msgtmp1);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp0, msgtmp1);
// Rounds 40-43
movdqa(msg, msgtmp2);
paddd(msg, Address(rax, 160));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp2);
palignr(msgtmp4, msgtmp1, 4);
paddd(msgtmp3, msgtmp4);
sha256msg2(msgtmp3, msgtmp2);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp1, msgtmp2);
// Rounds 44-47
movdqa(msg, msgtmp3);
paddd(msg, Address(rax, 176));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp3);
palignr(msgtmp4, msgtmp2, 4);
paddd(msgtmp0, msgtmp4);
sha256msg2(msgtmp0, msgtmp3);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp2, msgtmp3);
// Rounds 48-51
movdqa(msg, msgtmp0);
paddd(msg, Address(rax, 192));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp0);
palignr(msgtmp4, msgtmp3, 4);
paddd(msgtmp1, msgtmp4);
sha256msg2(msgtmp1, msgtmp0);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
sha256msg1(msgtmp3, msgtmp0);
// Rounds 52-55
movdqa(msg, msgtmp1);
paddd(msg, Address(rax, 208));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp1);
palignr(msgtmp4, msgtmp0, 4);
paddd(msgtmp2, msgtmp4);
sha256msg2(msgtmp2, msgtmp1);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
// Rounds 56-59
movdqa(msg, msgtmp2);
paddd(msg, Address(rax, 224));
sha256rnds2(state1, state0);
movdqa(msgtmp4, msgtmp2);
palignr(msgtmp4, msgtmp1, 4);
paddd(msgtmp3, msgtmp4);
sha256msg2(msgtmp3, msgtmp2);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
// Rounds 60-63
movdqa(msg, msgtmp3);
paddd(msg, Address(rax, 240));
sha256rnds2(state1, state0);
pshufd(msg, msg, 0x0E);
sha256rnds2(state0, state1);
// Feed-forward: add the state saved before the 64 rounds.
movdqu(msg, Address(rsp, 0));
paddd(state0, msg);
movdqu(msg, Address(rsp, 16));
paddd(state1, msg);
if (multi_block) {
// increment data pointer and loop if more to process
addptr(buf, 64);
addptr(ofs, 64);
cmpptr(ofs, limit);
jcc(Assembler::belowEqual, loop0);
movptr(rax, ofs); //return ofs
}
// Undo the lane re-ordering done at entry and store the state back.
pshufd(state0, state0, 0x1B);
pshufd(state1, state1, 0xB1);
movdqa(msgtmp4, state0);
pblendw(state0, state1, 0xF0);
palignr(state1, msgtmp4, 8);
movdqu(Address(state, 0), state0);
movdqu(Address(state, 16), state1);
bind(done_hash);
}

View File

@ -208,13 +208,13 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
__ subptr(rsp, ymm_bytes); __ subptr(rsp, ymm_bytes);
// Save upper half of YMM registers // Save upper half of YMM registers
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
__ vextractf128h(Address(rsp, n*16), as_XMMRegister(n)); __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
} }
if (UseAVX > 2) { if (UseAVX > 2) {
__ subptr(rsp, zmm_bytes); __ subptr(rsp, zmm_bytes);
// Save upper half of ZMM registers // Save upper half of ZMM registers
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
__ vextractf64x4h(Address(rsp, n*32), as_XMMRegister(n), 1); __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
} }
} }
} }
@ -304,13 +304,13 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
if (UseAVX > 2) { if (UseAVX > 2) {
// Restore upper half of ZMM registers. // Restore upper half of ZMM registers.
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
__ vinsertf64x4h(as_XMMRegister(n), Address(rsp, n*32), 1); __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32));
} }
__ addptr(rsp, zmm_bytes); __ addptr(rsp, zmm_bytes);
} }
// Restore upper half of YMM registers. // Restore upper half of YMM registers.
for (int n = 0; n < num_xmm_regs; n++) { for (int n = 0; n < num_xmm_regs; n++) {
__ vinsertf128h(as_XMMRegister(n), Address(rsp, n*16)); __ vinsertf128_high(as_XMMRegister(n), Address(rsp, n*16));
} }
__ addptr(rsp, ymm_bytes); __ addptr(rsp, ymm_bytes);
} }

View File

@ -179,13 +179,13 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
// Save upper half of YMM registers(0..15) // Save upper half of YMM registers(0..15)
int base_addr = XSAVE_AREA_YMM_BEGIN; int base_addr = XSAVE_AREA_YMM_BEGIN;
for (int n = 0; n < 16; n++) { for (int n = 0; n < 16; n++) {
__ vextractf128h(Address(rsp, base_addr+n*16), as_XMMRegister(n)); __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
} }
if (VM_Version::supports_evex()) { if (VM_Version::supports_evex()) {
// Save upper half of ZMM registers(0..15) // Save upper half of ZMM registers(0..15)
base_addr = XSAVE_AREA_ZMM_BEGIN; base_addr = XSAVE_AREA_ZMM_BEGIN;
for (int n = 0; n < 16; n++) { for (int n = 0; n < 16; n++) {
__ vextractf64x4h(Address(rsp, base_addr+n*32), as_XMMRegister(n), 1); __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
} }
// Save full ZMM registers(16..num_xmm_regs) // Save full ZMM registers(16..num_xmm_regs)
base_addr = XSAVE_AREA_UPPERBANK; base_addr = XSAVE_AREA_UPPERBANK;
@ -333,13 +333,13 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
// Restore upper half of YMM registers (0..15) // Restore upper half of YMM registers (0..15)
int base_addr = XSAVE_AREA_YMM_BEGIN; int base_addr = XSAVE_AREA_YMM_BEGIN;
for (int n = 0; n < 16; n++) { for (int n = 0; n < 16; n++) {
__ vinsertf128h(as_XMMRegister(n), Address(rsp, base_addr+n*16)); __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
} }
if (VM_Version::supports_evex()) { if (VM_Version::supports_evex()) {
// Restore upper half of ZMM registers (0..15) // Restore upper half of ZMM registers (0..15)
base_addr = XSAVE_AREA_ZMM_BEGIN; base_addr = XSAVE_AREA_ZMM_BEGIN;
for (int n = 0; n < 16; n++) { for (int n = 0; n < 16; n++) {
__ vinsertf64x4h(as_XMMRegister(n), Address(rsp, base_addr+n*32), 1); __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
} }
// Restore full ZMM registers(16..num_xmm_regs) // Restore full ZMM registers(16..num_xmm_regs)
base_addr = XSAVE_AREA_UPPERBANK; base_addr = XSAVE_AREA_UPPERBANK;

View File

@ -3068,6 +3068,136 @@ class StubGenerator: public StubCodeGenerator {
return start; return start;
} }
address generate_upper_word_mask() {
__ align(64);
StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
address start = __ pc();
__ emit_data(0x00000000, relocInfo::none, 0);
__ emit_data(0x00000000, relocInfo::none, 0);
__ emit_data(0x00000000, relocInfo::none, 0);
__ emit_data(0xFFFFFFFF, relocInfo::none, 0);
return start;
}
address generate_shuffle_byte_flip_mask() {
__ align(64);
StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
address start = __ pc();
__ emit_data(0x0c0d0e0f, relocInfo::none, 0);
__ emit_data(0x08090a0b, relocInfo::none, 0);
__ emit_data(0x04050607, relocInfo::none, 0);
__ emit_data(0x00010203, relocInfo::none, 0);
return start;
}
// ofs and limit are use for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
address generate_sha1_implCompress(bool multi_block, const char *name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
Register buf = rax;
Register state = rdx;
Register ofs = rcx;
Register limit = rdi;
const Address buf_param(rbp, 8 + 0);
const Address state_param(rbp, 8 + 4);
const Address ofs_param(rbp, 8 + 8);
const Address limit_param(rbp, 8 + 12);
const XMMRegister abcd = xmm0;
const XMMRegister e0 = xmm1;
const XMMRegister e1 = xmm2;
const XMMRegister msg0 = xmm3;
const XMMRegister msg1 = xmm4;
const XMMRegister msg2 = xmm5;
const XMMRegister msg3 = xmm6;
const XMMRegister shuf_mask = xmm7;
__ enter();
__ subptr(rsp, 8 * wordSize);
if (multi_block) {
__ push(limit);
}
__ movptr(buf, buf_param);
__ movptr(state, state_param);
if (multi_block) {
__ movptr(ofs, ofs_param);
__ movptr(limit, limit_param);
}
__ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
buf, state, ofs, limit, rsp, multi_block);
if (multi_block) {
__ pop(limit);
}
__ addptr(rsp, 8 * wordSize);
__ leave();
__ ret(0);
return start;
}
address generate_pshuffle_byte_flip_mask() {
__ align(64);
StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
address start = __ pc();
__ emit_data(0x00010203, relocInfo::none, 0);
__ emit_data(0x04050607, relocInfo::none, 0);
__ emit_data(0x08090a0b, relocInfo::none, 0);
__ emit_data(0x0c0d0e0f, relocInfo::none, 0);
return start;
}
// ofs and limit are use for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
address generate_sha256_implCompress(bool multi_block, const char *name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
Register buf = rbx;
Register state = rsi;
Register ofs = rdx;
Register limit = rcx;
const Address buf_param(rbp, 8 + 0);
const Address state_param(rbp, 8 + 4);
const Address ofs_param(rbp, 8 + 8);
const Address limit_param(rbp, 8 + 12);
const XMMRegister msg = xmm0;
const XMMRegister state0 = xmm1;
const XMMRegister state1 = xmm2;
const XMMRegister msgtmp0 = xmm3;
const XMMRegister msgtmp1 = xmm4;
const XMMRegister msgtmp2 = xmm5;
const XMMRegister msgtmp3 = xmm6;
const XMMRegister msgtmp4 = xmm7;
__ enter();
__ subptr(rsp, 8 * wordSize);
handleSOERegisters(true /*saving*/);
__ movptr(buf, buf_param);
__ movptr(state, state_param);
if (multi_block) {
__ movptr(ofs, ofs_param);
__ movptr(limit, limit_param);
}
__ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
buf, state, ofs, limit, rsp, multi_block);
handleSOERegisters(false);
__ addptr(rsp, 8 * wordSize);
__ leave();
__ ret(0);
return start;
}
// byte swap x86 long // byte swap x86 long
address generate_ghash_long_swap_mask() { address generate_ghash_long_swap_mask() {
@ -3772,6 +3902,19 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel(); StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
} }
if (UseSHA1Intrinsics) {
StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
}
if (UseSHA256Intrinsics) {
StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
}
// Generate GHASH intrinsics code // Generate GHASH intrinsics code
if (UseGHASHIntrinsics) { if (UseGHASHIntrinsics) {
StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask(); StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();

View File

@ -275,7 +275,7 @@ class StubGenerator: public StubCodeGenerator {
} }
if (VM_Version::supports_evex()) { if (VM_Version::supports_evex()) {
for (int i = xmm_save_first; i <= last_reg; i++) { for (int i = xmm_save_first; i <= last_reg; i++) {
__ vextractf32x4h(xmm_save(i), as_XMMRegister(i), 0); __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
} }
} else { } else {
for (int i = xmm_save_first; i <= last_reg; i++) { for (int i = xmm_save_first; i <= last_reg; i++) {
@ -393,7 +393,7 @@ class StubGenerator: public StubCodeGenerator {
// emit the restores for xmm regs // emit the restores for xmm regs
if (VM_Version::supports_evex()) { if (VM_Version::supports_evex()) {
for (int i = xmm_save_first; i <= last_reg; i++) { for (int i = xmm_save_first; i <= last_reg; i++) {
__ vinsertf32x4h(as_XMMRegister(i), xmm_save(i), 0); __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
} }
} else { } else {
for (int i = xmm_save_first; i <= last_reg; i++) { for (int i = xmm_save_first; i <= last_reg; i++) {
@ -3695,6 +3695,133 @@ class StubGenerator: public StubCodeGenerator {
return start; return start;
} }
address generate_upper_word_mask() {
__ align(64);
StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
address start = __ pc();
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
return start;
}
address generate_shuffle_byte_flip_mask() {
__ align(64);
StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
address start = __ pc();
__ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
__ emit_data64(0x0001020304050607, relocInfo::none);
return start;
}
// ofs and limit are use for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
address generate_sha1_implCompress(bool multi_block, const char *name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
Register buf = c_rarg0;
Register state = c_rarg1;
Register ofs = c_rarg2;
Register limit = c_rarg3;
const XMMRegister abcd = xmm0;
const XMMRegister e0 = xmm1;
const XMMRegister e1 = xmm2;
const XMMRegister msg0 = xmm3;
const XMMRegister msg1 = xmm4;
const XMMRegister msg2 = xmm5;
const XMMRegister msg3 = xmm6;
const XMMRegister shuf_mask = xmm7;
__ enter();
#ifdef _WIN64
// save the xmm registers which must be preserved 6-7
__ subptr(rsp, 4 * wordSize);
__ movdqu(Address(rsp, 0), xmm6);
__ movdqu(Address(rsp, 2 * wordSize), xmm7);
#endif
__ subptr(rsp, 4 * wordSize);
__ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
buf, state, ofs, limit, rsp, multi_block);
__ addptr(rsp, 4 * wordSize);
#ifdef _WIN64
// restore xmm regs belonging to calling function
__ movdqu(xmm6, Address(rsp, 0));
__ movdqu(xmm7, Address(rsp, 2 * wordSize));
__ addptr(rsp, 4 * wordSize);
#endif
__ leave();
__ ret(0);
return start;
}
address generate_pshuffle_byte_flip_mask() {
__ align(64);
StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
address start = __ pc();
__ emit_data64(0x0405060700010203, relocInfo::none);
__ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
return start;
}
// ofs and limit are use for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
address generate_sha256_implCompress(bool multi_block, const char *name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
Register buf = c_rarg0;
Register state = c_rarg1;
Register ofs = c_rarg2;
Register limit = c_rarg3;
const XMMRegister msg = xmm0;
const XMMRegister state0 = xmm1;
const XMMRegister state1 = xmm2;
const XMMRegister msgtmp0 = xmm3;
const XMMRegister msgtmp1 = xmm4;
const XMMRegister msgtmp2 = xmm5;
const XMMRegister msgtmp3 = xmm6;
const XMMRegister msgtmp4 = xmm7;
const XMMRegister shuf_mask = xmm8;
__ enter();
#ifdef _WIN64
// save the xmm registers which must be preserved 6-7
__ subptr(rsp, 6 * wordSize);
__ movdqu(Address(rsp, 0), xmm6);
__ movdqu(Address(rsp, 2 * wordSize), xmm7);
__ movdqu(Address(rsp, 4 * wordSize), xmm8);
#endif
__ subptr(rsp, 4 * wordSize);
__ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
buf, state, ofs, limit, rsp, multi_block, shuf_mask);
__ addptr(rsp, 4 * wordSize);
#ifdef _WIN64
// restore xmm regs belonging to calling function
__ movdqu(xmm6, Address(rsp, 0));
__ movdqu(xmm7, Address(rsp, 2 * wordSize));
__ movdqu(xmm8, Address(rsp, 4 * wordSize));
__ addptr(rsp, 6 * wordSize);
#endif
__ leave();
__ ret(0);
return start;
}
// This is a version of CTR/AES crypt which does 6 blocks in a loop at a time // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
// to hide instruction latency // to hide instruction latency
// //
@ -4974,6 +5101,19 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel(); StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
} }
if (UseSHA1Intrinsics) {
StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
}
if (UseSHA256Intrinsics) {
StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
}
// Generate GHASH intrinsics code // Generate GHASH intrinsics code
if (UseGHASHIntrinsics) { if (UseGHASHIntrinsics) {
StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask(); StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();

View File

@ -29,6 +29,12 @@
#include "runtime/thread.inline.hpp" #include "runtime/thread.inline.hpp"
#include "crc32c.h" #include "crc32c.h"
#ifdef _MSC_VER
#define ALIGNED_(x) __declspec(align(x))
#else
#define ALIGNED_(x) __attribute__ ((aligned(x)))
#endif
// Implementation of the platform-specific part of StubRoutines - for // Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file. // a description of how to extend it, see the stubRoutines.hpp file.
@ -37,6 +43,10 @@ address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
address StubRoutines::x86::_counter_shuffle_mask_addr = NULL; address StubRoutines::x86::_counter_shuffle_mask_addr = NULL;
address StubRoutines::x86::_ghash_long_swap_mask_addr = NULL; address StubRoutines::x86::_ghash_long_swap_mask_addr = NULL;
address StubRoutines::x86::_ghash_byte_swap_mask_addr = NULL; address StubRoutines::x86::_ghash_byte_swap_mask_addr = NULL;
address StubRoutines::x86::_upper_word_mask_addr = NULL;
address StubRoutines::x86::_shuffle_byte_flip_mask_addr = NULL;
address StubRoutines::x86::_k256_adr = NULL;
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL;
uint64_t StubRoutines::x86::_crc_by128_masks[] = uint64_t StubRoutines::x86::_crc_by128_masks[] =
{ {
@ -236,3 +246,23 @@ void StubRoutines::x86::generate_CRC32C_table(bool is_pclmulqdq_table_supported)
_crc32c_table = (juint*)pclmulqdq_table; _crc32c_table = (juint*)pclmulqdq_table;
} }
} }
ALIGNED_(64) juint StubRoutines::x86::_k256[] =
{
0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
};

View File

@ -46,6 +46,17 @@
static address _ghash_long_swap_mask_addr; static address _ghash_long_swap_mask_addr;
static address _ghash_byte_swap_mask_addr; static address _ghash_byte_swap_mask_addr;
// upper word mask for sha1
static address _upper_word_mask_addr;
// byte flip mask for sha1
static address _shuffle_byte_flip_mask_addr;
//k256 table for sha256
static juint _k256[];
static address _k256_adr;
// byte flip mask for sha256
static address _pshuffle_byte_flip_mask_addr;
public: public:
static address verify_mxcsr_entry() { return _verify_mxcsr_entry; } static address verify_mxcsr_entry() { return _verify_mxcsr_entry; }
static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; } static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
@ -53,5 +64,9 @@
static address crc_by128_masks_addr() { return (address)_crc_by128_masks; } static address crc_by128_masks_addr() { return (address)_crc_by128_masks; }
static address ghash_long_swap_mask_addr() { return _ghash_long_swap_mask_addr; } static address ghash_long_swap_mask_addr() { return _ghash_long_swap_mask_addr; }
static address ghash_byte_swap_mask_addr() { return _ghash_byte_swap_mask_addr; } static address ghash_byte_swap_mask_addr() { return _ghash_byte_swap_mask_addr; }
static address upper_word_mask_addr() { return _upper_word_mask_addr; }
static address shuffle_byte_flip_mask_addr() { return _shuffle_byte_flip_mask_addr; }
static address k256_addr() { return _k256_adr; }
static address pshuffle_byte_flip_mask_addr() { return _pshuffle_byte_flip_mask_addr; }
static void generate_CRC32C_table(bool is_pclmulqdq_supported); static void generate_CRC32C_table(bool is_pclmulqdq_supported);
#endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP #endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP

View File

@ -68,10 +68,11 @@
declare_constant(VM_Version::CPU_AVX512DQ) \ declare_constant(VM_Version::CPU_AVX512DQ) \
declare_constant(VM_Version::CPU_AVX512PF) \ declare_constant(VM_Version::CPU_AVX512PF) \
declare_constant(VM_Version::CPU_AVX512ER) \ declare_constant(VM_Version::CPU_AVX512ER) \
declare_constant(VM_Version::CPU_AVX512CD) \ declare_constant(VM_Version::CPU_AVX512CD)
declare_constant(VM_Version::CPU_AVX512BW)
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \ #define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
declare_preprocessor_constant("VM_Version::CPU_AVX512VL", CPU_AVX512VL) declare_preprocessor_constant("VM_Version::CPU_AVX512BW", CPU_AVX512BW) \
declare_preprocessor_constant("VM_Version::CPU_AVX512VL", CPU_AVX512VL) \
declare_preprocessor_constant("VM_Version::CPU_SHA", CPU_SHA)
#endif // CPU_X86_VM_VMSTRUCTS_X86_HPP #endif // CPU_X86_VM_VMSTRUCTS_X86_HPP

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -385,7 +385,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movdl(xmm0, rcx); __ movdl(xmm0, rcx);
__ pshufd(xmm0, xmm0, 0x00); __ pshufd(xmm0, xmm0, 0x00);
__ vinsertf128h(xmm0, xmm0, xmm0); __ vinsertf128_high(xmm0, xmm0);
__ vmovdqu(xmm7, xmm0); __ vmovdqu(xmm7, xmm0);
#ifdef _LP64 #ifdef _LP64
__ vmovdqu(xmm8, xmm0); __ vmovdqu(xmm8, xmm0);
@ -577,7 +577,7 @@ void VM_Version::get_processor_features() {
} }
char buf[256]; char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(), cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping, cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""), (supports_cmov() ? ", cmov" : ""),
@ -608,7 +608,8 @@ void VM_Version::get_processor_features() {
(supports_bmi1() ? ", bmi1" : ""), (supports_bmi1() ? ", bmi1" : ""),
(supports_bmi2() ? ", bmi2" : ""), (supports_bmi2() ? ", bmi2" : ""),
(supports_adx() ? ", adx" : ""), (supports_adx() ? ", adx" : ""),
(supports_evex() ? ", evex" : "")); (supports_evex() ? ", evex" : ""),
(supports_sha() ? ", sha" : ""));
_features_string = os::strdup(buf); _features_string = os::strdup(buf);
// UseSSE is set to the smaller of what hardware supports and what // UseSSE is set to the smaller of what hardware supports and what
@ -730,17 +731,29 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false); FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
} }
if (UseSHA) { if (supports_sha()) {
if (FLAG_IS_DEFAULT(UseSHA)) {
UseSHA = true;
}
} else if (UseSHA) {
warning("SHA instructions are not available on this CPU"); warning("SHA instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseSHA, false); FLAG_SET_DEFAULT(UseSHA, false);
} }
if (UseSHA1Intrinsics) { if (UseSHA) {
if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
}
} else if (UseSHA1Intrinsics) {
warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU."); warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
} }
if (UseSHA256Intrinsics) { if (UseSHA) {
if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
}
} else if (UseSHA256Intrinsics) {
warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU."); warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
} }
@ -750,6 +763,10 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
} }
if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA, false);
}
if (UseAdler32Intrinsics) { if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU."); warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false); FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);

View File

@ -221,7 +221,7 @@ class VM_Version : public Abstract_VM_Version {
avx512pf : 1, avx512pf : 1,
avx512er : 1, avx512er : 1,
avx512cd : 1, avx512cd : 1,
: 1, sha : 1,
avx512bw : 1, avx512bw : 1,
avx512vl : 1; avx512vl : 1;
} bits; } bits;
@ -282,11 +282,13 @@ protected:
CPU_AVX512DQ = (1 << 27), CPU_AVX512DQ = (1 << 27),
CPU_AVX512PF = (1 << 28), CPU_AVX512PF = (1 << 28),
CPU_AVX512ER = (1 << 29), CPU_AVX512ER = (1 << 29),
CPU_AVX512CD = (1 << 30), CPU_AVX512CD = (1 << 30)
CPU_AVX512BW = (1 << 31) // Keeping sign bit 31 unassigned.
}; };
#define CPU_AVX512VL UCONST64(0x100000000) // EVEX instructions with smaller vector length : enums are limited to 32bit #define CPU_AVX512BW ((uint64_t)UCONST64(0x100000000)) // enums are limited to 31 bit
#define CPU_AVX512VL ((uint64_t)UCONST64(0x200000000)) // EVEX instructions with smaller vector length
#define CPU_SHA ((uint64_t)UCONST64(0x400000000)) // SHA instructions
enum Extended_Family { enum Extended_Family {
// AMD // AMD
@ -516,6 +518,8 @@ protected:
result |= CPU_ADX; result |= CPU_ADX;
if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0) if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
result |= CPU_BMI2; result |= CPU_BMI2;
if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
result |= CPU_SHA;
if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0) if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
result |= CPU_LZCNT; result |= CPU_LZCNT;
// for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw // for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
@ -721,6 +725,7 @@ public:
static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); } static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); }
static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); } static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
static bool supports_avxonly() { return ((supports_avx2() || supports_avx()) && !supports_evex()); } static bool supports_avxonly() { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
static bool supports_sha() { return (_features & CPU_SHA) != 0; }
// Intel features // Intel features
static bool is_intel_family_core() { return is_intel() && static bool is_intel_family_core() { return is_intel() &&
extended_cpu_family() == CPU_FAMILY_INTEL_CORE; } extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }

View File

@ -3179,13 +3179,13 @@ instruct Repl32B(vecY dst, rRegI src) %{
"punpcklbw $dst,$dst\n\t" "punpcklbw $dst,$dst\n\t"
"pshuflw $dst,$dst,0x00\n\t" "pshuflw $dst,$dst,0x00\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate32B" %} "vinserti128_high $dst,$dst\t! replicate32B" %}
ins_encode %{ ins_encode %{
__ movdl($dst$$XMMRegister, $src$$Register); __ movdl($dst$$XMMRegister, $src$$Register);
__ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3196,12 +3196,12 @@ instruct Repl32B_mem(vecY dst, memory mem) %{
format %{ "punpcklbw $dst,$mem\n\t" format %{ "punpcklbw $dst,$mem\n\t"
"pshuflw $dst,$dst,0x00\n\t" "pshuflw $dst,$dst,0x00\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate32B" %} "vinserti128_high $dst,$dst\t! replicate32B" %}
ins_encode %{ ins_encode %{
__ punpcklbw($dst$$XMMRegister, $mem$$Address); __ punpcklbw($dst$$XMMRegister, $mem$$Address);
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3223,11 +3223,11 @@ instruct Repl32B_imm(vecY dst, immI con) %{
match(Set dst (ReplicateB con)); match(Set dst (ReplicateB con));
format %{ "movq $dst,[$constantaddress]\n\t" format %{ "movq $dst,[$constantaddress]\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst\t! lreplicate32B($con)" %} "vinserti128_high $dst,$dst\t! lreplicate32B($con)" %}
ins_encode %{ ins_encode %{
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3298,12 +3298,12 @@ instruct Repl16S(vecY dst, rRegI src) %{
format %{ "movd $dst,$src\n\t" format %{ "movd $dst,$src\n\t"
"pshuflw $dst,$dst,0x00\n\t" "pshuflw $dst,$dst,0x00\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate16S" %} "vinserti128_high $dst,$dst\t! replicate16S" %}
ins_encode %{ ins_encode %{
__ movdl($dst$$XMMRegister, $src$$Register); __ movdl($dst$$XMMRegister, $src$$Register);
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3313,11 +3313,11 @@ instruct Repl16S_mem(vecY dst, memory mem) %{
match(Set dst (ReplicateS (LoadS mem))); match(Set dst (ReplicateS (LoadS mem)));
format %{ "pshuflw $dst,$mem,0x00\n\t" format %{ "pshuflw $dst,$mem,0x00\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate16S" %} "vinserti128_high $dst,$dst\t! replicate16S" %}
ins_encode %{ ins_encode %{
__ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00); __ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00);
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3327,11 +3327,11 @@ instruct Repl16S_imm(vecY dst, immI con) %{
match(Set dst (ReplicateS con)); match(Set dst (ReplicateS con));
format %{ "movq $dst,[$constantaddress]\n\t" format %{ "movq $dst,[$constantaddress]\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate16S($con)" %} "vinserti128_high $dst,$dst\t! replicate16S($con)" %}
ins_encode %{ ins_encode %{
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3363,11 +3363,11 @@ instruct Repl8I(vecY dst, rRegI src) %{
match(Set dst (ReplicateI src)); match(Set dst (ReplicateI src));
format %{ "movd $dst,$src\n\t" format %{ "movd $dst,$src\n\t"
"pshufd $dst,$dst,0x00\n\t" "pshufd $dst,$dst,0x00\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate8I" %} "vinserti128_high $dst,$dst\t! replicate8I" %}
ins_encode %{ ins_encode %{
__ movdl($dst$$XMMRegister, $src$$Register); __ movdl($dst$$XMMRegister, $src$$Register);
__ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3376,10 +3376,10 @@ instruct Repl8I_mem(vecY dst, memory mem) %{
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
match(Set dst (ReplicateI (LoadI mem))); match(Set dst (ReplicateI (LoadI mem)));
format %{ "pshufd $dst,$mem,0x00\n\t" format %{ "pshufd $dst,$mem,0x00\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate8I" %} "vinserti128_high $dst,$dst\t! replicate8I" %}
ins_encode %{ ins_encode %{
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3401,11 +3401,11 @@ instruct Repl8I_imm(vecY dst, immI con) %{
match(Set dst (ReplicateI con)); match(Set dst (ReplicateI con));
format %{ "movq $dst,[$constantaddress]\t! replicate8I($con)\n\t" format %{ "movq $dst,[$constantaddress]\t! replicate8I($con)\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst" %} "vinserti128_high $dst,$dst" %}
ins_encode %{ ins_encode %{
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3430,11 +3430,11 @@ instruct Repl4L(vecY dst, rRegL src) %{
match(Set dst (ReplicateL src)); match(Set dst (ReplicateL src));
format %{ "movdq $dst,$src\n\t" format %{ "movdq $dst,$src\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate4L" %} "vinserti128_high $dst,$dst\t! replicate4L" %}
ins_encode %{ ins_encode %{
__ movdq($dst$$XMMRegister, $src$$Register); __ movdq($dst$$XMMRegister, $src$$Register);
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3447,13 +3447,13 @@ instruct Repl4L(vecY dst, eRegL src, regD tmp) %{
"movdl $tmp,$src.hi\n\t" "movdl $tmp,$src.hi\n\t"
"punpckldq $dst,$tmp\n\t" "punpckldq $dst,$tmp\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate4L" %} "vinserti128_high $dst,$dst\t! replicate4L" %}
ins_encode %{ ins_encode %{
__ movdl($dst$$XMMRegister, $src$$Register); __ movdl($dst$$XMMRegister, $src$$Register);
__ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
__ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3464,11 +3464,11 @@ instruct Repl4L_imm(vecY dst, immL con) %{
match(Set dst (ReplicateL con)); match(Set dst (ReplicateL con));
format %{ "movq $dst,[$constantaddress]\n\t" format %{ "movq $dst,[$constantaddress]\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate4L($con)" %} "vinserti128_high $dst,$dst\t! replicate4L($con)" %}
ins_encode %{ ins_encode %{
__ movq($dst$$XMMRegister, $constantaddress($con)); __ movq($dst$$XMMRegister, $constantaddress($con));
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3478,11 +3478,11 @@ instruct Repl4L_mem(vecY dst, memory mem) %{
match(Set dst (ReplicateL (LoadL mem))); match(Set dst (ReplicateL (LoadL mem)));
format %{ "movq $dst,$mem\n\t" format %{ "movq $dst,$mem\n\t"
"punpcklqdq $dst,$dst\n\t" "punpcklqdq $dst,$dst\n\t"
"vinserti128h $dst,$dst,$dst\t! replicate4L" %} "vinserti128_high $dst,$dst\t! replicate4L" %}
ins_encode %{ ins_encode %{
__ movq($dst$$XMMRegister, $mem$$Address); __ movq($dst$$XMMRegister, $mem$$Address);
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3511,10 +3511,10 @@ instruct Repl8F(vecY dst, regF src) %{
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
match(Set dst (ReplicateF src)); match(Set dst (ReplicateF src));
format %{ "pshufd $dst,$src,0x00\n\t" format %{ "pshufd $dst,$src,0x00\n\t"
"vinsertf128h $dst,$dst,$dst\t! replicate8F" %} "vinsertf128_high $dst,$dst\t! replicate8F" %}
ins_encode %{ ins_encode %{
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
__ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3523,10 +3523,10 @@ instruct Repl8F_mem(vecY dst, memory mem) %{
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
match(Set dst (ReplicateF (LoadF mem))); match(Set dst (ReplicateF (LoadF mem)));
format %{ "pshufd $dst,$mem,0x00\n\t" format %{ "pshufd $dst,$mem,0x00\n\t"
"vinsertf128h $dst,$dst,$dst\t! replicate8F" %} "vinsertf128_high $dst,$dst\t! replicate8F" %}
ins_encode %{ ins_encode %{
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
__ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3576,10 +3576,10 @@ instruct Repl4D(vecY dst, regD src) %{
predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
match(Set dst (ReplicateD src)); match(Set dst (ReplicateD src));
format %{ "pshufd $dst,$src,0x44\n\t" format %{ "pshufd $dst,$src,0x44\n\t"
"vinsertf128h $dst,$dst,$dst\t! replicate4D" %} "vinsertf128_high $dst,$dst\t! replicate4D" %}
ins_encode %{ ins_encode %{
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
__ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -3588,10 +3588,10 @@ instruct Repl4D_mem(vecY dst, memory mem) %{
predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
match(Set dst (ReplicateD (LoadD mem))); match(Set dst (ReplicateD (LoadD mem)));
format %{ "pshufd $dst,$mem,0x44\n\t" format %{ "pshufd $dst,$mem,0x44\n\t"
"vinsertf128h $dst,$dst,$dst\t! replicate4D" %} "vinsertf128_high $dst,$dst\t! replicate4D" %}
ins_encode %{ ins_encode %{
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x44); __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44);
__ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister); __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
@ -4791,7 +4791,7 @@ instruct rvadd8I_reduction_reg(rRegI dst, rRegI src1, vecY src2, regF tmp, regF
effect(TEMP tmp, TEMP tmp2); effect(TEMP tmp, TEMP tmp2);
format %{ "vphaddd $tmp,$src2,$src2\n\t" format %{ "vphaddd $tmp,$src2,$src2\n\t"
"vphaddd $tmp,$tmp,$tmp2\n\t" "vphaddd $tmp,$tmp,$tmp2\n\t"
"vextracti128 $tmp2,$tmp\n\t" "vextracti128_high $tmp2,$tmp\n\t"
"vpaddd $tmp,$tmp,$tmp2\n\t" "vpaddd $tmp,$tmp,$tmp2\n\t"
"movd $tmp2,$src1\n\t" "movd $tmp2,$src1\n\t"
"vpaddd $tmp2,$tmp2,$tmp\n\t" "vpaddd $tmp2,$tmp2,$tmp\n\t"
@ -4800,7 +4800,7 @@ instruct rvadd8I_reduction_reg(rRegI dst, rRegI src1, vecY src2, regF tmp, regF
int vector_len = 1; int vector_len = 1;
__ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, vector_len); __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, vector_len);
__ vphaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len); __ vphaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
__ vextracti128h($tmp2$$XMMRegister, $tmp$$XMMRegister); __ vextracti128_high($tmp2$$XMMRegister, $tmp$$XMMRegister);
__ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0); __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
__ movdl($tmp2$$XMMRegister, $src1$$Register); __ movdl($tmp2$$XMMRegister, $src1$$Register);
__ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0); __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
@ -4813,7 +4813,7 @@ instruct rvadd8I_reduction_reg_evex(rRegI dst, rRegI src1, vecY src2, regF tmp,
predicate(UseAVX > 2); predicate(UseAVX > 2);
match(Set dst (AddReductionVI src1 src2)); match(Set dst (AddReductionVI src1 src2));
effect(TEMP tmp, TEMP tmp2); effect(TEMP tmp, TEMP tmp2);
format %{ "vextracti128 $tmp,$src2\n\t" format %{ "vextracti128_high $tmp,$src2\n\t"
"vpaddd $tmp,$tmp,$src2\n\t" "vpaddd $tmp,$tmp,$src2\n\t"
"pshufd $tmp2,$tmp,0xE\n\t" "pshufd $tmp2,$tmp,0xE\n\t"
"vpaddd $tmp,$tmp,$tmp2\n\t" "vpaddd $tmp,$tmp,$tmp2\n\t"
@ -4824,7 +4824,7 @@ instruct rvadd8I_reduction_reg_evex(rRegI dst, rRegI src1, vecY src2, regF tmp,
"movd $dst,$tmp2\t! add reduction8I" %} "movd $dst,$tmp2\t! add reduction8I" %}
ins_encode %{ ins_encode %{
int vector_len = 0; int vector_len = 0;
__ vextracti128h($tmp$$XMMRegister, $src2$$XMMRegister); __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
__ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, vector_len); __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, vector_len);
__ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE); __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
__ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len); __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
@ -4841,9 +4841,9 @@ instruct rvadd16I_reduction_reg_evex(rRegI dst, rRegI src1, vecZ src2, regF tmp,
predicate(UseAVX > 2); predicate(UseAVX > 2);
match(Set dst (AddReductionVI src1 src2)); match(Set dst (AddReductionVI src1 src2));
effect(TEMP tmp, TEMP tmp2, TEMP tmp3); effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
format %{ "vextracti64x4 $tmp3,$src2,0x1\n\t" format %{ "vextracti64x4_high $tmp3,$src2\n\t"
"vpaddd $tmp3,$tmp3,$src2\n\t" "vpaddd $tmp3,$tmp3,$src2\n\t"
"vextracti128 $tmp,$tmp3\n\t" "vextracti128_high $tmp,$tmp3\n\t"
"vpaddd $tmp,$tmp,$tmp3\n\t" "vpaddd $tmp,$tmp,$tmp3\n\t"
"pshufd $tmp2,$tmp,0xE\n\t" "pshufd $tmp2,$tmp,0xE\n\t"
"vpaddd $tmp,$tmp,$tmp2\n\t" "vpaddd $tmp,$tmp,$tmp2\n\t"
@ -4853,9 +4853,9 @@ instruct rvadd16I_reduction_reg_evex(rRegI dst, rRegI src1, vecZ src2, regF tmp,
"vpaddd $tmp2,$tmp,$tmp2\n\t" "vpaddd $tmp2,$tmp,$tmp2\n\t"
"movd $dst,$tmp2\t! mul reduction16I" %} "movd $dst,$tmp2\t! mul reduction16I" %}
ins_encode %{ ins_encode %{
__ vextracti64x4h($tmp3$$XMMRegister, $src2$$XMMRegister, 1); __ vextracti64x4_high($tmp3$$XMMRegister, $src2$$XMMRegister);
__ vpaddd($tmp3$$XMMRegister, $tmp3$$XMMRegister, $src2$$XMMRegister, 1); __ vpaddd($tmp3$$XMMRegister, $tmp3$$XMMRegister, $src2$$XMMRegister, 1);
__ vextracti128h($tmp$$XMMRegister, $tmp3$$XMMRegister); __ vextracti128_high($tmp$$XMMRegister, $tmp3$$XMMRegister);
__ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp3$$XMMRegister, 0); __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp3$$XMMRegister, 0);
__ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE); __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
__ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0); __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
@ -4892,7 +4892,7 @@ instruct rvadd4L_reduction_reg(rRegL dst, rRegL src1, vecY src2, regF tmp, regF
predicate(UseAVX > 2); predicate(UseAVX > 2);
match(Set dst (AddReductionVL src1 src2)); match(Set dst (AddReductionVL src1 src2));
effect(TEMP tmp, TEMP tmp2); effect(TEMP tmp, TEMP tmp2);
format %{ "vextracti128 $tmp,$src2\n\t" format %{ "vextracti128_high $tmp,$src2\n\t"
"vpaddq $tmp2,$tmp,$src2\n\t" "vpaddq $tmp2,$tmp,$src2\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vpaddq $tmp2,$tmp2,$tmp\n\t" "vpaddq $tmp2,$tmp2,$tmp\n\t"
@ -4900,7 +4900,7 @@ instruct rvadd4L_reduction_reg(rRegL dst, rRegL src1, vecY src2, regF tmp, regF
"vpaddq $tmp2,$tmp2,$tmp\n\t" "vpaddq $tmp2,$tmp2,$tmp\n\t"
"movdq $dst,$tmp2\t! add reduction4L" %} "movdq $dst,$tmp2\t! add reduction4L" %}
ins_encode %{ ins_encode %{
__ vextracti128h($tmp$$XMMRegister, $src2$$XMMRegister); __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
__ vpaddq($tmp2$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, 0); __ vpaddq($tmp2$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, 0);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0); __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
@ -4915,9 +4915,9 @@ instruct rvadd8L_reduction_reg(rRegL dst, rRegL src1, vecZ src2, regF tmp, regF
predicate(UseAVX > 2); predicate(UseAVX > 2);
match(Set dst (AddReductionVL src1 src2)); match(Set dst (AddReductionVL src1 src2));
effect(TEMP tmp, TEMP tmp2); effect(TEMP tmp, TEMP tmp2);
format %{ "vextracti64x4 $tmp2,$src2,0x1\n\t" format %{ "vextracti64x4_high $tmp2,$src2\n\t"
"vpaddq $tmp2,$tmp2,$src2\n\t" "vpaddq $tmp2,$tmp2,$src2\n\t"
"vextracti128 $tmp,$tmp2\n\t" "vextracti128_high $tmp,$tmp2\n\t"
"vpaddq $tmp2,$tmp2,$tmp\n\t" "vpaddq $tmp2,$tmp2,$tmp\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vpaddq $tmp2,$tmp2,$tmp\n\t" "vpaddq $tmp2,$tmp2,$tmp\n\t"
@ -4925,9 +4925,9 @@ instruct rvadd8L_reduction_reg(rRegL dst, rRegL src1, vecZ src2, regF tmp, regF
"vpaddq $tmp2,$tmp2,$tmp\n\t" "vpaddq $tmp2,$tmp2,$tmp\n\t"
"movdq $dst,$tmp2\t! add reduction8L" %} "movdq $dst,$tmp2\t! add reduction8L" %}
ins_encode %{ ins_encode %{
__ vextracti64x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 1); __ vextracti64x4_high($tmp2$$XMMRegister, $src2$$XMMRegister);
__ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $src2$$XMMRegister, 1); __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $src2$$XMMRegister, 1);
__ vextracti128h($tmp$$XMMRegister, $tmp2$$XMMRegister); __ vextracti128_high($tmp$$XMMRegister, $tmp2$$XMMRegister);
__ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0); __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0); __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
@ -5026,7 +5026,7 @@ instruct radd8F_reduction_reg(regF dst, vecY src2, regF tmp, regF tmp2) %{
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
"pshufd $tmp,$src2,0x03\n\t" "pshufd $tmp,$src2,0x03\n\t"
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
"vextractf128 $tmp2,$src2\n\t" "vextractf128_high $tmp2,$src2\n\t"
"vaddss $dst,$dst,$tmp2\n\t" "vaddss $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0x01\n\t" "pshufd $tmp,$tmp2,0x01\n\t"
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
@ -5042,7 +5042,7 @@ instruct radd8F_reduction_reg(regF dst, vecY src2, regF tmp, regF tmp2) %{
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03); __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf128h($tmp2$$XMMRegister, $src2$$XMMRegister); __ vextractf128_high($tmp2$$XMMRegister, $src2$$XMMRegister);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5065,7 +5065,7 @@ instruct radd16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
"pshufd $tmp,$src2,0x03\n\t" "pshufd $tmp,$src2,0x03\n\t"
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x1\n\t" "vextractf32x4 $tmp2,$src2,0x1\n\t"
"vaddss $dst,$dst,$tmp2\n\t" "vaddss $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0x01\n\t" "pshufd $tmp,$tmp2,0x01\n\t"
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
@ -5073,7 +5073,7 @@ instruct radd16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
"pshufd $tmp,$tmp2,0x03\n\t" "pshufd $tmp,$tmp2,0x03\n\t"
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x2\n\t" "vextractf32x4 $tmp2,$src2,0x2\n\t"
"vaddss $dst,$dst,$tmp2\n\t" "vaddss $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0x01\n\t" "pshufd $tmp,$tmp2,0x01\n\t"
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
@ -5081,7 +5081,7 @@ instruct radd16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
"pshufd $tmp,$tmp2,0x03\n\t" "pshufd $tmp,$tmp2,0x03\n\t"
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x3\n\t" "vextractf32x4 $tmp2,$src2,0x3\n\t"
"vaddss $dst,$dst,$tmp2\n\t" "vaddss $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0x01\n\t" "pshufd $tmp,$tmp2,0x01\n\t"
"vaddss $dst,$dst,$tmp\n\t" "vaddss $dst,$dst,$tmp\n\t"
@ -5097,7 +5097,7 @@ instruct radd16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03); __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5105,7 +5105,7 @@ instruct radd16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5113,7 +5113,7 @@ instruct radd16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
__ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5162,7 +5162,7 @@ instruct rvadd4D_reduction_reg(regD dst, vecY src2, regD tmp, regD tmp2) %{
format %{ "vaddsd $dst,$dst,$src2\n\t" format %{ "vaddsd $dst,$dst,$src2\n\t"
"pshufd $tmp,$src2,0xE\n\t" "pshufd $tmp,$src2,0xE\n\t"
"vaddsd $dst,$dst,$tmp\n\t" "vaddsd $dst,$dst,$tmp\n\t"
"vextractf32x4h $tmp2,$src2, 0x1\n\t" "vextractf32x4 $tmp2,$src2,0x1\n\t"
"vaddsd $dst,$dst,$tmp2\n\t" "vaddsd $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vaddsd $dst,$dst,$tmp\t! add reduction4D" %} "vaddsd $dst,$dst,$tmp\t! add reduction4D" %}
@ -5170,7 +5170,7 @@ instruct rvadd4D_reduction_reg(regD dst, vecY src2, regD tmp, regD tmp2) %{
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5185,15 +5185,15 @@ instruct rvadd8D_reduction_reg(regD dst, vecZ src2, regD tmp, regD tmp2) %{
format %{ "vaddsd $dst,$dst,$src2\n\t" format %{ "vaddsd $dst,$dst,$src2\n\t"
"pshufd $tmp,$src2,0xE\n\t" "pshufd $tmp,$src2,0xE\n\t"
"vaddsd $dst,$dst,$tmp\n\t" "vaddsd $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x1\n\t" "vextractf32x4 $tmp2,$src2,0x1\n\t"
"vaddsd $dst,$dst,$tmp2\n\t" "vaddsd $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vaddsd $dst,$dst,$tmp\n\t" "vaddsd $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x2\n\t" "vextractf32x4 $tmp2,$src2,0x2\n\t"
"vaddsd $dst,$dst,$tmp2\n\t" "vaddsd $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vaddsd $dst,$dst,$tmp\n\t" "vaddsd $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x3\n\t" "vextractf32x4 $tmp2,$src2,0x3\n\t"
"vaddsd $dst,$dst,$tmp2\n\t" "vaddsd $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vaddsd $dst,$dst,$tmp\t! add reduction8D" %} "vaddsd $dst,$dst,$tmp\t! add reduction8D" %}
@ -5201,15 +5201,15 @@ instruct rvadd8D_reduction_reg(regD dst, vecZ src2, regD tmp, regD tmp2) %{
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5307,7 +5307,7 @@ instruct rvmul8I_reduction_reg(rRegI dst, rRegI src1, vecY src2, regF tmp, regF
predicate(UseAVX > 0); predicate(UseAVX > 0);
match(Set dst (MulReductionVI src1 src2)); match(Set dst (MulReductionVI src1 src2));
effect(TEMP tmp, TEMP tmp2); effect(TEMP tmp, TEMP tmp2);
format %{ "vextracti128 $tmp,$src2\n\t" format %{ "vextracti128_high $tmp,$src2\n\t"
"vpmulld $tmp,$tmp,$src2\n\t" "vpmulld $tmp,$tmp,$src2\n\t"
"pshufd $tmp2,$tmp,0xE\n\t" "pshufd $tmp2,$tmp,0xE\n\t"
"vpmulld $tmp,$tmp,$tmp2\n\t" "vpmulld $tmp,$tmp,$tmp2\n\t"
@ -5318,7 +5318,7 @@ instruct rvmul8I_reduction_reg(rRegI dst, rRegI src1, vecY src2, regF tmp, regF
"movd $dst,$tmp2\t! mul reduction8I" %} "movd $dst,$tmp2\t! mul reduction8I" %}
ins_encode %{ ins_encode %{
int vector_len = 0; int vector_len = 0;
__ vextracti128h($tmp$$XMMRegister, $src2$$XMMRegister); __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
__ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, vector_len); __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, vector_len);
__ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE); __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
__ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len); __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
@ -5335,9 +5335,9 @@ instruct rvmul16I_reduction_reg(rRegI dst, rRegI src1, vecZ src2, regF tmp, regF
predicate(UseAVX > 2); predicate(UseAVX > 2);
match(Set dst (MulReductionVI src1 src2)); match(Set dst (MulReductionVI src1 src2));
effect(TEMP tmp, TEMP tmp2, TEMP tmp3); effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
format %{ "vextracti64x4 $tmp3,$src2,0x1\n\t" format %{ "vextracti64x4_high $tmp3,$src2\n\t"
"vpmulld $tmp3,$tmp3,$src2\n\t" "vpmulld $tmp3,$tmp3,$src2\n\t"
"vextracti128 $tmp,$tmp3\n\t" "vextracti128_high $tmp,$tmp3\n\t"
"vpmulld $tmp,$tmp,$src2\n\t" "vpmulld $tmp,$tmp,$src2\n\t"
"pshufd $tmp2,$tmp,0xE\n\t" "pshufd $tmp2,$tmp,0xE\n\t"
"vpmulld $tmp,$tmp,$tmp2\n\t" "vpmulld $tmp,$tmp,$tmp2\n\t"
@ -5347,9 +5347,9 @@ instruct rvmul16I_reduction_reg(rRegI dst, rRegI src1, vecZ src2, regF tmp, regF
"vpmulld $tmp2,$tmp,$tmp2\n\t" "vpmulld $tmp2,$tmp,$tmp2\n\t"
"movd $dst,$tmp2\t! mul reduction16I" %} "movd $dst,$tmp2\t! mul reduction16I" %}
ins_encode %{ ins_encode %{
__ vextracti64x4h($tmp3$$XMMRegister, $src2$$XMMRegister, 1); __ vextracti64x4_high($tmp3$$XMMRegister, $src2$$XMMRegister);
__ vpmulld($tmp3$$XMMRegister, $tmp3$$XMMRegister, $src2$$XMMRegister, 1); __ vpmulld($tmp3$$XMMRegister, $tmp3$$XMMRegister, $src2$$XMMRegister, 1);
__ vextracti128h($tmp$$XMMRegister, $tmp3$$XMMRegister); __ vextracti128_high($tmp$$XMMRegister, $tmp3$$XMMRegister);
__ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp3$$XMMRegister, 0); __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp3$$XMMRegister, 0);
__ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE); __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
__ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0); __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
@ -5386,7 +5386,7 @@ instruct rvmul4L_reduction_reg(rRegL dst, rRegL src1, vecY src2, regF tmp, regF
predicate(UseAVX > 2 && VM_Version::supports_avx512dq()); predicate(UseAVX > 2 && VM_Version::supports_avx512dq());
match(Set dst (MulReductionVL src1 src2)); match(Set dst (MulReductionVL src1 src2));
effect(TEMP tmp, TEMP tmp2); effect(TEMP tmp, TEMP tmp2);
format %{ "vextracti128 $tmp,$src2\n\t" format %{ "vextracti128_high $tmp,$src2\n\t"
"vpmullq $tmp2,$tmp,$src2\n\t" "vpmullq $tmp2,$tmp,$src2\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vpmullq $tmp2,$tmp2,$tmp\n\t" "vpmullq $tmp2,$tmp2,$tmp\n\t"
@ -5394,7 +5394,7 @@ instruct rvmul4L_reduction_reg(rRegL dst, rRegL src1, vecY src2, regF tmp, regF
"vpmullq $tmp2,$tmp2,$tmp\n\t" "vpmullq $tmp2,$tmp2,$tmp\n\t"
"movdq $dst,$tmp2\t! mul reduction4L" %} "movdq $dst,$tmp2\t! mul reduction4L" %}
ins_encode %{ ins_encode %{
__ vextracti128h($tmp$$XMMRegister, $src2$$XMMRegister); __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
__ vpmullq($tmp2$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, 0); __ vpmullq($tmp2$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, 0);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0); __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
@ -5409,9 +5409,9 @@ instruct rvmul8L_reduction_reg(rRegL dst, rRegL src1, vecZ src2, regF tmp, regF
predicate(UseAVX > 2 && VM_Version::supports_avx512dq()); predicate(UseAVX > 2 && VM_Version::supports_avx512dq());
match(Set dst (MulReductionVL src1 src2)); match(Set dst (MulReductionVL src1 src2));
effect(TEMP tmp, TEMP tmp2); effect(TEMP tmp, TEMP tmp2);
format %{ "vextracti64x4 $tmp2,$src2,0x1\n\t" format %{ "vextracti64x4_high $tmp2,$src2\n\t"
"vpmullq $tmp2,$tmp2,$src2\n\t" "vpmullq $tmp2,$tmp2,$src2\n\t"
"vextracti128 $tmp,$tmp2\n\t" "vextracti128_high $tmp,$tmp2\n\t"
"vpmullq $tmp2,$tmp2,$tmp\n\t" "vpmullq $tmp2,$tmp2,$tmp\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vpmullq $tmp2,$tmp2,$tmp\n\t" "vpmullq $tmp2,$tmp2,$tmp\n\t"
@ -5419,9 +5419,9 @@ instruct rvmul8L_reduction_reg(rRegL dst, rRegL src1, vecZ src2, regF tmp, regF
"vpmullq $tmp2,$tmp2,$tmp\n\t" "vpmullq $tmp2,$tmp2,$tmp\n\t"
"movdq $dst,$tmp2\t! mul reduction8L" %} "movdq $dst,$tmp2\t! mul reduction8L" %}
ins_encode %{ ins_encode %{
__ vextracti64x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 1); __ vextracti64x4_high($tmp2$$XMMRegister, $src2$$XMMRegister);
__ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $src2$$XMMRegister, 1); __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $src2$$XMMRegister, 1);
__ vextracti128h($tmp$$XMMRegister, $tmp2$$XMMRegister); __ vextracti128_high($tmp$$XMMRegister, $tmp2$$XMMRegister);
__ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0); __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0); __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
@ -5520,7 +5520,7 @@ instruct rvmul8F_reduction_reg(regF dst, vecY src2, regF tmp, regF tmp2) %{
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
"pshufd $tmp,$src2,0x03\n\t" "pshufd $tmp,$src2,0x03\n\t"
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
"vextractf128 $tmp2,$src2\n\t" "vextractf128_high $tmp2,$src2\n\t"
"vmulss $dst,$dst,$tmp2\n\t" "vmulss $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0x01\n\t" "pshufd $tmp,$tmp2,0x01\n\t"
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
@ -5536,7 +5536,7 @@ instruct rvmul8F_reduction_reg(regF dst, vecY src2, regF tmp, regF tmp2) %{
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03); __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf128h($tmp2$$XMMRegister, $src2$$XMMRegister); __ vextractf128_high($tmp2$$XMMRegister, $src2$$XMMRegister);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5559,7 +5559,7 @@ instruct rvmul16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
"pshufd $tmp,$src2,0x03\n\t" "pshufd $tmp,$src2,0x03\n\t"
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x1\n\t" "vextractf32x4 $tmp2,$src2,0x1\n\t"
"vmulss $dst,$dst,$tmp2\n\t" "vmulss $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0x01\n\t" "pshufd $tmp,$tmp2,0x01\n\t"
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
@ -5567,7 +5567,7 @@ instruct rvmul16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
"pshufd $tmp,$tmp2,0x03\n\t" "pshufd $tmp,$tmp2,0x03\n\t"
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x2\n\t" "vextractf32x4 $tmp2,$src2,0x2\n\t"
"vmulss $dst,$dst,$tmp2\n\t" "vmulss $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0x01\n\t" "pshufd $tmp,$tmp2,0x01\n\t"
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
@ -5575,7 +5575,7 @@ instruct rvmul16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
"pshufd $tmp,$tmp2,0x03\n\t" "pshufd $tmp,$tmp2,0x03\n\t"
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x3\n\t" "vextractf32x4 $tmp2,$src2,0x3\n\t"
"vmulss $dst,$dst,$tmp2\n\t" "vmulss $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0x01\n\t" "pshufd $tmp,$tmp2,0x01\n\t"
"vmulss $dst,$dst,$tmp\n\t" "vmulss $dst,$dst,$tmp\n\t"
@ -5591,7 +5591,7 @@ instruct rvmul16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03); __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5599,7 +5599,7 @@ instruct rvmul16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5607,7 +5607,7 @@ instruct rvmul16F_reduction_reg(regF dst, vecZ src2, regF tmp, regF tmp2) %{
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
__ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5656,7 +5656,7 @@ instruct rvmul4D_reduction_reg(regD dst, vecY src2, regD tmp, regD tmp2) %{
format %{ "vmulsd $dst,$dst,$src2\n\t" format %{ "vmulsd $dst,$dst,$src2\n\t"
"pshufd $tmp,$src2,0xE\n\t" "pshufd $tmp,$src2,0xE\n\t"
"vmulsd $dst,$dst,$tmp\n\t" "vmulsd $dst,$dst,$tmp\n\t"
"vextractf128 $tmp2,$src2\n\t" "vextractf128_high $tmp2,$src2\n\t"
"vmulsd $dst,$dst,$tmp2\n\t" "vmulsd $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vmulsd $dst,$dst,$tmp\t! mul reduction4D" %} "vmulsd $dst,$dst,$tmp\t! mul reduction4D" %}
@ -5664,7 +5664,7 @@ instruct rvmul4D_reduction_reg(regD dst, vecY src2, regD tmp, regD tmp2) %{
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf128h($tmp2$$XMMRegister, $src2$$XMMRegister); __ vextractf128_high($tmp2$$XMMRegister, $src2$$XMMRegister);
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
@ -5679,15 +5679,15 @@ instruct rvmul8D_reduction_reg(regD dst, vecZ src2, regD tmp, regD tmp2) %{
format %{ "vmulsd $dst,$dst,$src2\n\t" format %{ "vmulsd $dst,$dst,$src2\n\t"
"pshufd $tmp,$src2,0xE\n\t" "pshufd $tmp,$src2,0xE\n\t"
"vmulsd $dst,$dst,$tmp\n\t" "vmulsd $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x1\n\t" "vextractf32x4 $tmp2,$src2,0x1\n\t"
"vmulsd $dst,$dst,$tmp2\n\t" "vmulsd $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$src2,0xE\n\t" "pshufd $tmp,$src2,0xE\n\t"
"vmulsd $dst,$dst,$tmp\n\t" "vmulsd $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x2\n\t" "vextractf32x4 $tmp2,$src2,0x2\n\t"
"vmulsd $dst,$dst,$tmp2\n\t" "vmulsd $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vmulsd $dst,$dst,$tmp\n\t" "vmulsd $dst,$dst,$tmp\n\t"
"vextractf32x4 $tmp2,$src2, 0x3\n\t" "vextractf32x4 $tmp2,$src2,0x3\n\t"
"vmulsd $dst,$dst,$tmp2\n\t" "vmulsd $dst,$dst,$tmp2\n\t"
"pshufd $tmp,$tmp2,0xE\n\t" "pshufd $tmp,$tmp2,0xE\n\t"
"vmulsd $dst,$dst,$tmp\t! mul reduction8D" %} "vmulsd $dst,$dst,$tmp\t! mul reduction8D" %}
@ -5695,15 +5695,15 @@ instruct rvmul8D_reduction_reg(regD dst, vecZ src2, regD tmp, regD tmp2) %{
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
__ vextractf32x4h($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3); __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
__ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE); __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
__ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister); __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);

View File

@ -1420,9 +1420,6 @@ const bool Matcher::isSimpleConstant64(jlong value) {
// The ecx parameter to rep stos for the ClearArray node is in dwords. // The ecx parameter to rep stos for the ClearArray node is in dwords.
const bool Matcher::init_array_count_is_in_bytes = false; const bool Matcher::init_array_count_is_in_bytes = false;
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// Needs 2 CMOV's for longs. // Needs 2 CMOV's for longs.
const int Matcher::long_cmove_cost() { return 1; } const int Matcher::long_cmove_cost() { return 1; }
@ -11369,27 +11366,54 @@ instruct MoveL2D_reg_reg_sse(regD dst, eRegL src, regD tmp) %{
// ======================================================================= // =======================================================================
// fast clearing of an array // fast clearing of an array
instruct rep_stos(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{ instruct rep_stos(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
predicate(!UseFastStosb); predicate(!((ClearArrayNode*)n)->is_large());
match(Set dummy (ClearArray cnt base)); match(Set dummy (ClearArray cnt base));
effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr); effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
format %{ "XOR EAX,EAX\t# ClearArray:\n\t"
"SHL ECX,1\t# Convert doublewords to words\n\t" format %{ $$template
"REP STOS\t# store EAX into [EDI++] while ECX--" %} $$emit$$"XOR EAX,EAX\t# ClearArray:\n\t"
$$emit$$"CMP InitArrayShortSize,rcx\n\t"
$$emit$$"JG LARGE\n\t"
$$emit$$"SHL ECX, 1\n\t"
$$emit$$"DEC ECX\n\t"
$$emit$$"JS DONE\t# Zero length\n\t"
$$emit$$"MOV EAX,(EDI,ECX,4)\t# LOOP\n\t"
$$emit$$"DEC ECX\n\t"
$$emit$$"JGE LOOP\n\t"
$$emit$$"JMP DONE\n\t"
$$emit$$"# LARGE:\n\t"
if (UseFastStosb) {
$$emit$$"SHL ECX,3\t# Convert doublewords to bytes\n\t"
$$emit$$"REP STOSB\t# store EAX into [EDI++] while ECX--\n\t"
} else {
$$emit$$"SHL ECX,1\t# Convert doublewords to words\n\t"
$$emit$$"REP STOS\t# store EAX into [EDI++] while ECX--\n\t"
}
$$emit$$"# DONE"
%}
ins_encode %{ ins_encode %{
__ clear_mem($base$$Register, $cnt$$Register, $zero$$Register); __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, false);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}
instruct rep_fast_stosb(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{ instruct rep_stos_large(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
predicate(UseFastStosb); predicate(((ClearArrayNode*)n)->is_large());
match(Set dummy (ClearArray cnt base)); match(Set dummy (ClearArray cnt base));
effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr); effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
format %{ "XOR EAX,EAX\t# ClearArray:\n\t" format %{ $$template
"SHL ECX,3\t# Convert doublewords to bytes\n\t" $$emit$$"XOR EAX,EAX\t# ClearArray:\n\t"
"REP STOSB\t# store EAX into [EDI++] while ECX--" %} if (UseFastStosb) {
$$emit$$"SHL ECX,3\t# Convert doublewords to bytes\n\t"
$$emit$$"REP STOSB\t# store EAX into [EDI++] while ECX--\n\t"
} else {
$$emit$$"SHL ECX,1\t# Convert doublewords to words\n\t"
$$emit$$"REP STOS\t# store EAX into [EDI++] while ECX--\n\t"
}
$$emit$$"# DONE"
%}
ins_encode %{ ins_encode %{
__ clear_mem($base$$Register, $cnt$$Register, $zero$$Register); __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, true);
%} %}
ins_pipe( pipe_slow ); ins_pipe( pipe_slow );
%} %}

View File

@ -1637,9 +1637,6 @@ const bool Matcher::isSimpleConstant64(jlong value) {
// The ecx parameter to rep stosq for the ClearArray node is in words. // The ecx parameter to rep stosq for the ClearArray node is in words.
const bool Matcher::init_array_count_is_in_bytes = false; const bool Matcher::init_array_count_is_in_bytes = false;
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// No additional cost for CMOVL. // No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; } const int Matcher::long_cmove_cost() { return 0; }
@ -10460,31 +10457,55 @@ instruct MoveL2D_reg_reg(regD dst, rRegL src) %{
instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy, instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
rFlagsReg cr) rFlagsReg cr)
%{ %{
predicate(!UseFastStosb); predicate(!((ClearArrayNode*)n)->is_large());
match(Set dummy (ClearArray cnt base)); match(Set dummy (ClearArray cnt base));
effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr); effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
format %{ "xorq rax, rax\t# ClearArray:\n\t" format %{ $$template
"rep stosq\t# Store rax to *rdi++ while rcx--" %} $$emit$$"xorq rax, rax\t# ClearArray:\n\t"
$$emit$$"cmp InitArrayShortSize,rcx\n\t"
$$emit$$"jg LARGE\n\t"
$$emit$$"dec rcx\n\t"
$$emit$$"js DONE\t# Zero length\n\t"
$$emit$$"mov rax,(rdi,rcx,8)\t# LOOP\n\t"
$$emit$$"dec rcx\n\t"
$$emit$$"jge LOOP\n\t"
$$emit$$"jmp DONE\n\t"
$$emit$$"# LARGE:\n\t"
if (UseFastStosb) {
$$emit$$"shlq rcx,3\t# Convert doublewords to bytes\n\t"
$$emit$$"rep stosb\t# Store rax to *rdi++ while rcx--\n\t"
} else {
$$emit$$"rep stosq\t# Store rax to *rdi++ while rcx--\n\t"
}
$$emit$$"# DONE"
%}
ins_encode %{ ins_encode %{
__ clear_mem($base$$Register, $cnt$$Register, $zero$$Register); __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, false);
%} %}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct rep_fast_stosb(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy, instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
rFlagsReg cr) rFlagsReg cr)
%{ %{
predicate(UseFastStosb); predicate(((ClearArrayNode*)n)->is_large());
match(Set dummy (ClearArray cnt base)); match(Set dummy (ClearArray cnt base));
effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr); effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
format %{ "xorq rax, rax\t# ClearArray:\n\t"
"shlq rcx,3\t# Convert doublewords to bytes\n\t" format %{ $$template
"rep stosb\t# Store rax to *rdi++ while rcx--" %} $$emit$$"xorq rax, rax\t# ClearArray:\n\t"
ins_encode %{ if (UseFastStosb) {
__ clear_mem($base$$Register, $cnt$$Register, $zero$$Register); $$emit$$"shlq rcx,3\t# Convert doublewords to bytes\n\t"
$$emit$$"rep stosb\t# Store rax to *rdi++ while rcx--"
} else {
$$emit$$"rep stosq\t# Store rax to *rdi++ while rcx--"
}
%} %}
ins_pipe( pipe_slow ); ins_encode %{
__ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, true);
%}
ins_pipe(pipe_slow);
%} %}
instruct string_compareL(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2, instruct string_compareL(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,

View File

@ -203,7 +203,8 @@ public class AMD64 extends Architecture {
AVX512ER, AVX512ER,
AVX512CD, AVX512CD,
AVX512BW, AVX512BW,
AVX512VL AVX512VL,
SHA
} }
private final EnumSet<CPUFeature> features; private final EnumSet<CPUFeature> features;

View File

@ -122,6 +122,9 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
if ((config.vmVersionFeatures & config.amd64AVX512VL) != 0) { if ((config.vmVersionFeatures & config.amd64AVX512VL) != 0) {
features.add(AMD64.CPUFeature.AVX512VL); features.add(AMD64.CPUFeature.AVX512VL);
} }
if ((config.vmVersionFeatures & config.amd64SHA) != 0) {
features.add(AMD64.CPUFeature.SHA);
}
return features; return features;
} }

View File

@ -339,7 +339,7 @@ public class HotSpotConstantReflectionProvider implements ConstantReflectionProv
public JavaConstant readStableFieldValue(ResolvedJavaField field, JavaConstant receiver, boolean isDefaultStable) { public JavaConstant readStableFieldValue(ResolvedJavaField field, JavaConstant receiver, boolean isDefaultStable) {
JavaConstant fieldValue = readNonStableFieldValue(field, receiver); JavaConstant fieldValue = readNonStableFieldValue(field, receiver);
if (fieldValue.isNonNull()) { if (fieldValue != null && fieldValue.isNonNull()) {
JavaType declaredType = field.getType(); JavaType declaredType = field.getType();
if (declaredType.getComponentType() != null) { if (declaredType.getComponentType() != null) {
int stableDimension = getArrayDimension(declaredType); int stableDimension = getArrayDimension(declaredType);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@ package jdk.vm.ci.hotspot;
import jdk.vm.ci.code.CompilationRequest; import jdk.vm.ci.code.CompilationRequest;
import jdk.vm.ci.code.CompilationRequestResult; import jdk.vm.ci.code.CompilationRequestResult;
import jdk.vm.ci.common.JVMCIError; import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspot.HotSpotJVMCIRuntime.Option;
import jdk.vm.ci.runtime.JVMCICompiler; import jdk.vm.ci.runtime.JVMCICompiler;
import jdk.vm.ci.runtime.JVMCICompilerFactory; import jdk.vm.ci.runtime.JVMCICompilerFactory;
import jdk.vm.ci.runtime.JVMCIRuntime; import jdk.vm.ci.runtime.JVMCIRuntime;
@ -47,29 +48,33 @@ final class HotSpotJVMCICompilerConfig {
} }
} }
/**
* Factory of the selected system compiler.
*/
private static JVMCICompilerFactory compilerFactory; private static JVMCICompilerFactory compilerFactory;
/** /**
* Selects the system compiler. * Gets the selected system compiler factory.
* *
* Called from VM. This method has an object return type to allow it to be called with a VM * @return the selected system compiler factory
* utility function used to call other static initialization methods.
*/ */
static Boolean selectCompiler(String compilerName) {
assert compilerFactory == null;
for (JVMCICompilerFactory factory : Services.load(JVMCICompilerFactory.class)) {
if (factory.getCompilerName().equals(compilerName)) {
compilerFactory = factory;
return Boolean.TRUE;
}
}
throw new JVMCIError("JVMCI compiler '%s' not found", compilerName);
}
static JVMCICompilerFactory getCompilerFactory() { static JVMCICompilerFactory getCompilerFactory() {
if (compilerFactory == null) { if (compilerFactory == null) {
compilerFactory = new DummyCompilerFactory(); JVMCICompilerFactory factory = null;
String compilerName = Option.Compiler.getString();
if (compilerName != null) {
for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
if (f.getCompilerName().equals(compilerName)) {
factory = f;
}
}
if (factory == null) {
throw new JVMCIError("JVMCI compiler '%s' not found", compilerName);
}
} else {
factory = new DummyCompilerFactory();
}
compilerFactory = factory;
} }
return compilerFactory; return compilerFactory;
} }

View File

@ -91,6 +91,7 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider, H
* A list of all supported JVMCI options. * A list of all supported JVMCI options.
*/ */
public enum Option { public enum Option {
Compiler(String.class, null, "Selects the system compiler."),
ImplicitStableValues(boolean.class, true, "Mark well-known stable fields as such."), ImplicitStableValues(boolean.class, true, "Mark well-known stable fields as such."),
// Note: The following one is not used (see InitTimer.ENABLED). // Note: The following one is not used (see InitTimer.ENABLED).
InitTimer(boolean.class, false, "Specifies if initialization timing is enabled."), InitTimer(boolean.class, false, "Specifies if initialization timing is enabled."),

View File

@ -41,7 +41,6 @@ import jdk.vm.ci.meta.DeoptimizationAction;
import jdk.vm.ci.meta.DeoptimizationReason; import jdk.vm.ci.meta.DeoptimizationReason;
import jdk.vm.ci.meta.JavaConstant; import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind; import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.MetaAccessProvider; import jdk.vm.ci.meta.MetaAccessProvider;
import jdk.vm.ci.meta.ResolvedJavaField; import jdk.vm.ci.meta.ResolvedJavaField;
import jdk.vm.ci.meta.ResolvedJavaMethod; import jdk.vm.ci.meta.ResolvedJavaMethod;
@ -111,23 +110,26 @@ public class HotSpotMetaAccessProvider implements MetaAccessProvider, HotSpotPro
} }
public ResolvedJavaField lookupJavaField(Field reflectionField) { public ResolvedJavaField lookupJavaField(Field reflectionField) {
String name = reflectionField.getName();
Class<?> fieldHolder = reflectionField.getDeclaringClass(); Class<?> fieldHolder = reflectionField.getDeclaringClass();
Class<?> fieldType = reflectionField.getType();
// java.lang.reflect.Field's modifiers should be enough here since VM internal modifier bits
// are not used (yet).
final int modifiers = reflectionField.getModifiers();
final long offset = Modifier.isStatic(modifiers) ? UNSAFE.staticFieldOffset(reflectionField) : UNSAFE.objectFieldOffset(reflectionField);
HotSpotResolvedObjectType holder = fromObjectClass(fieldHolder); HotSpotResolvedObjectType holder = fromObjectClass(fieldHolder);
JavaType type = runtime.fromClass(fieldType); if (Modifier.isStatic(reflectionField.getModifiers())) {
final long offset = UNSAFE.staticFieldOffset(reflectionField);
if (offset != -1) { for (ResolvedJavaField field : holder.getStaticFields()) {
HotSpotResolvedObjectType resolved = holder; if (offset == ((HotSpotResolvedJavaField) field).offset()) {
return resolved.createField(name, type, offset, modifiers); return field;
}
}
} else { } else {
throw new JVMCIError("unresolved field %s", reflectionField); final long offset = UNSAFE.objectFieldOffset(reflectionField);
for (ResolvedJavaField field : holder.getInstanceFields(false)) {
if (offset == ((HotSpotResolvedJavaField) field).offset()) {
return field;
}
}
} }
throw new JVMCIError("unresolved field %s", reflectionField);
} }
private static int intMaskRight(int n) { private static int intMaskRight(int n) {

View File

@ -945,6 +945,7 @@ public class HotSpotVMConfig {
@HotSpotVMConstant(name = "VM_Version::CPU_AVX512CD", archs = {"amd64"}) @Stable public long amd64AVX512CD; @HotSpotVMConstant(name = "VM_Version::CPU_AVX512CD", archs = {"amd64"}) @Stable public long amd64AVX512CD;
@HotSpotVMConstant(name = "VM_Version::CPU_AVX512BW", archs = {"amd64"}) @Stable public long amd64AVX512BW; @HotSpotVMConstant(name = "VM_Version::CPU_AVX512BW", archs = {"amd64"}) @Stable public long amd64AVX512BW;
@HotSpotVMConstant(name = "VM_Version::CPU_AVX512VL", archs = {"amd64"}) @Stable public long amd64AVX512VL; @HotSpotVMConstant(name = "VM_Version::CPU_AVX512VL", archs = {"amd64"}) @Stable public long amd64AVX512VL;
@HotSpotVMConstant(name = "VM_Version::CPU_SHA", archs = {"amd64"}) @Stable public long amd64SHA;
// SPARC specific values // SPARC specific values
@HotSpotVMConstant(name = "VM_Version::vis3_instructions_m", archs = {"sparc"}) @Stable public int sparcVis3Instructions; @HotSpotVMConstant(name = "VM_Version::vis3_instructions_m", archs = {"sparc"}) @Stable public int sparcVis3Instructions;

View File

@ -144,6 +144,7 @@ pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1; int os::Linux::_page_size = -1;
const int os::Linux::_vm_default_page_size = (8 * K); const int os::Linux::_vm_default_page_size = (8 * K);
bool os::Linux::_supports_fast_thread_cpu_time = false; bool os::Linux::_supports_fast_thread_cpu_time = false;
uint32_t os::Linux::_os_version = 0;
const char * os::Linux::_glibc_version = NULL; const char * os::Linux::_glibc_version = NULL;
const char * os::Linux::_libpthread_version = NULL; const char * os::Linux::_libpthread_version = NULL;
pthread_condattr_t os::Linux::_condattr[1]; pthread_condattr_t os::Linux::_condattr[1];
@ -4356,6 +4357,48 @@ jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {
return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec; return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
} }
void os::Linux::initialize_os_info() {
assert(_os_version == 0, "OS info already initialized");
struct utsname _uname;
uint32_t major;
uint32_t minor;
uint32_t fix;
int rc;
// Kernel version is unknown if
// verification below fails.
_os_version = 0x01000000;
rc = uname(&_uname);
if (rc != -1) {
rc = sscanf(_uname.release,"%d.%d.%d", &major, &minor, &fix);
if (rc == 3) {
if (major < 256 && minor < 256 && fix < 256) {
// Kernel version format is as expected,
// set it overriding unknown state.
_os_version = (major << 16) |
(minor << 8 ) |
(fix << 0 ) ;
}
}
}
}
uint32_t os::Linux::os_version() {
assert(_os_version != 0, "not initialized");
return _os_version & 0x00FFFFFF;
}
bool os::Linux::os_version_is_known() {
assert(_os_version != 0, "not initialized");
return _os_version & 0x01000000 ? false : true;
}
///// /////
// glibc on Linux platform uses non-documented flag // glibc on Linux platform uses non-documented flag
// to indicate, that some special sort of signal // to indicate, that some special sort of signal
@ -4578,6 +4621,8 @@ void os::init(void) {
Linux::initialize_system_info(); Linux::initialize_system_info();
Linux::initialize_os_info();
// main_thread points to the aboriginal thread // main_thread points to the aboriginal thread
Linux::_main_thread = pthread_self(); Linux::_main_thread = pthread_self();

View File

@ -56,6 +56,15 @@ class Linux {
static GrowableArray<int>* _cpu_to_node; static GrowableArray<int>* _cpu_to_node;
// 0x00000000 = uninitialized,
// 0x01000000 = kernel version unknown,
// otherwise a 32-bit number:
// Ox00AABBCC
// AA, Major Version
// BB, Minor Version
// CC, Fix Version
static uint32_t _os_version;
protected: protected:
static julong _physical_memory; static julong _physical_memory;
@ -198,6 +207,10 @@ class Linux {
static jlong fast_thread_cpu_time(clockid_t clockid); static jlong fast_thread_cpu_time(clockid_t clockid);
static void initialize_os_info();
static bool os_version_is_known();
static uint32_t os_version();
// pthread_cond clock suppport // pthread_cond clock suppport
private: private:
static pthread_condattr_t _condattr[1]; static pthread_condattr_t _condattr[1];

View File

@ -336,13 +336,13 @@ char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
const char *start; const char *start;
if (lib_name != NULL) { if (lib_name != NULL) {
len = name_len = strlen(lib_name); name_len = strlen(lib_name);
if (is_absolute_path) { if (is_absolute_path) {
// Need to strip path, prefix and suffix // Need to strip path, prefix and suffix
if ((start = strrchr(lib_name, *os::file_separator())) != NULL) { if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
lib_name = ++start; lib_name = ++start;
} }
if (len <= (prefix_len + suffix_len)) { if (strlen(lib_name) <= (prefix_len + suffix_len)) {
return NULL; return NULL;
} }
lib_name += prefix_len; lib_name += prefix_len;

View File

@ -951,11 +951,11 @@ bool os::getTimesSecs(double* process_real_time,
FILETIME wt; FILETIME wt;
GetSystemTimeAsFileTime(&wt); GetSystemTimeAsFileTime(&wt);
jlong rtc_millis = windows_to_java_time(wt); jlong rtc_millis = windows_to_java_time(wt);
jlong user_millis = windows_to_java_time(user_time);
jlong system_millis = windows_to_java_time(kernel_time);
*process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
*process_user_time = ((double) user_millis) / ((double) MILLIUNITS); *process_user_time =
*process_system_time = ((double) system_millis) / ((double) MILLIUNITS); (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
*process_system_time =
(double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
return true; return true;
} else { } else {
return false; return false;

View File

@ -26,44 +26,108 @@
#ifndef OS_CPU_LINUX_AARCH64_VM_COPY_LINUX_AARCH64_INLINE_HPP #ifndef OS_CPU_LINUX_AARCH64_VM_COPY_LINUX_AARCH64_INLINE_HPP
#define OS_CPU_LINUX_AARCH64_VM_COPY_LINUX_AARCH64_INLINE_HPP #define OS_CPU_LINUX_AARCH64_VM_COPY_LINUX_AARCH64_INLINE_HPP
#define COPY_SMALL(from, to, count) \
{ \
long tmp0, tmp1, tmp2, tmp3; \
long tmp4, tmp5, tmp6, tmp7; \
__asm volatile( \
" adr %[t0], 0f;" \
" add %[t0], %[t0], %[cnt], lsl #5;" \
" br %[t0];" \
" .align 5;" \
"0:" \
" b 1f;" \
" .align 5;" \
" ldr %[t0], [%[s], #0];" \
" str %[t0], [%[d], #0];" \
" b 1f;" \
" .align 5;" \
" ldp %[t0], %[t1], [%[s], #0];" \
" stp %[t0], %[t1], [%[d], #0];" \
" b 1f;" \
" .align 5;" \
" ldp %[t0], %[t1], [%[s], #0];" \
" ldr %[t2], [%[s], #16];" \
" stp %[t0], %[t1], [%[d], #0];" \
" str %[t2], [%[d], #16];" \
" b 1f;" \
" .align 5;" \
" ldp %[t0], %[t1], [%[s], #0];" \
" ldp %[t2], %[t3], [%[s], #16];" \
" stp %[t0], %[t1], [%[d], #0];" \
" stp %[t2], %[t3], [%[d], #16];" \
" b 1f;" \
" .align 5;" \
" ldp %[t0], %[t1], [%[s], #0];" \
" ldp %[t2], %[t3], [%[s], #16];" \
" ldr %[t4], [%[s], #32];" \
" stp %[t0], %[t1], [%[d], #0];" \
" stp %[t2], %[t3], [%[d], #16];" \
" str %[t4], [%[d], #32];" \
" b 1f;" \
" .align 5;" \
" ldp %[t0], %[t1], [%[s], #0];" \
" ldp %[t2], %[t3], [%[s], #16];" \
" ldp %[t4], %[t5], [%[s], #32];" \
"2:" \
" stp %[t0], %[t1], [%[d], #0];" \
" stp %[t2], %[t3], [%[d], #16];" \
" stp %[t4], %[t5], [%[d], #32];" \
" b 1f;" \
" .align 5;" \
" ldr %[t6], [%[s], #0];" \
" ldp %[t0], %[t1], [%[s], #8];" \
" ldp %[t2], %[t3], [%[s], #24];" \
" ldp %[t4], %[t5], [%[s], #40];" \
" str %[t6], [%[d]], #8;" \
" b 2b;" \
" .align 5;" \
" ldp %[t0], %[t1], [%[s], #0];" \
" ldp %[t2], %[t3], [%[s], #16];" \
" ldp %[t4], %[t5], [%[s], #32];" \
" ldp %[t6], %[t7], [%[s], #48];" \
" stp %[t0], %[t1], [%[d], #0];" \
" stp %[t2], %[t3], [%[d], #16];" \
" stp %[t4], %[t5], [%[d], #32];" \
" stp %[t6], %[t7], [%[d], #48];" \
"1:" \
\
: [s]"+r"(from), [d]"+r"(to), [cnt]"+r"(count), \
[t0]"=&r"(tmp0), [t1]"=&r"(tmp1), [t2]"=&r"(tmp2), [t3]"=&r"(tmp3), \
[t4]"=&r"(tmp4), [t5]"=&r"(tmp5), [t6]"=&r"(tmp6), [t7]"=&r"(tmp7) \
: \
: "memory", "cc"); \
}
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize); __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
if (__builtin_expect(count <= 8, 1)) {
COPY_SMALL(from, to, count);
return;
}
_Copy_conjoint_words(from, to, count);
} }
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
switch (count) { if (__builtin_constant_p(count)) {
case 8: to[7] = from[7]; memcpy(to, from, count * sizeof(HeapWord));
case 7: to[6] = from[6]; return;
case 6: to[5] = from[5];
case 5: to[4] = from[4];
case 4: to[3] = from[3];
case 3: to[2] = from[2];
case 2: to[1] = from[1];
case 1: to[0] = from[0];
case 0: break;
default:
(void)memcpy(to, from, count * HeapWordSize);
break;
} }
__asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
if (__builtin_expect(count <= 8, 1)) {
COPY_SMALL(from, to, count);
return;
}
_Copy_disjoint_words(from, to, count);
} }
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
switch (count) { __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
case 8: to[7] = from[7]; if (__builtin_expect(count <= 8, 1)) {
case 7: to[6] = from[6]; COPY_SMALL(from, to, count);
case 6: to[5] = from[5]; return;
case 5: to[4] = from[4];
case 4: to[3] = from[3];
case 3: to[2] = from[2];
case 2: to[1] = from[1];
case 1: to[0] = from[0];
case 0: break;
default:
while (count-- > 0) {
*to++ = *from++;
}
break;
} }
_Copy_disjoint_words(from, to, count);
} }
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {

View File

@ -0,0 +1,236 @@
/*
* Copyright (c) 2016, Linaro Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
.global _Copy_conjoint_words
.global _Copy_disjoint_words
s .req x0
d .req x1
count .req x2
t0 .req x3
t1 .req x4
t2 .req x5
t3 .req x6
t4 .req x7
t5 .req x8
t6 .req x9
t7 .req x10
.align 6
_Copy_disjoint_words:
// Ensure 2 word aligned
tbz s, #3, fwd_copy_aligned
ldr t0, [s], #8
str t0, [d], #8
sub count, count, #1
fwd_copy_aligned:
// Bias s & d so we only pre index on the last copy
sub s, s, #16
sub d, d, #16
ldp t0, t1, [s, #16]
ldp t2, t3, [s, #32]
ldp t4, t5, [s, #48]
ldp t6, t7, [s, #64]!
subs count, count, #16
blo fwd_copy_drain
fwd_copy_again:
prfm pldl1keep, [s, #256]
stp t0, t1, [d, #16]
ldp t0, t1, [s, #16]
stp t2, t3, [d, #32]
ldp t2, t3, [s, #32]
stp t4, t5, [d, #48]
ldp t4, t5, [s, #48]
stp t6, t7, [d, #64]!
ldp t6, t7, [s, #64]!
subs count, count, #8
bhs fwd_copy_again
fwd_copy_drain:
stp t0, t1, [d, #16]
stp t2, t3, [d, #32]
stp t4, t5, [d, #48]
stp t6, t7, [d, #64]!
// count is now -8..-1 for 0..7 words to copy
adr t0, 0f
add t0, t0, count, lsl #5
br t0
.align 5
ret // -8 == 0 words
.align 5
ldr t0, [s, #16] // -7 == 1 word
str t0, [d, #16]
ret
.align 5
ldp t0, t1, [s, #16] // -6 = 2 words
stp t0, t1, [d, #16]
ret
.align 5
ldp t0, t1, [s, #16] // -5 = 3 words
ldr t2, [s, #32]
stp t0, t1, [d, #16]
str t2, [d, #32]
ret
.align 5
ldp t0, t1, [s, #16] // -4 = 4 words
ldp t2, t3, [s, #32]
stp t0, t1, [d, #16]
stp t2, t3, [d, #32]
ret
.align 5
ldp t0, t1, [s, #16] // -3 = 5 words
ldp t2, t3, [s, #32]
ldr t4, [s, #48]
stp t0, t1, [d, #16]
stp t2, t3, [d, #32]
str t4, [d, #48]
ret
.align 5
ldp t0, t1, [s, #16] // -2 = 6 words
ldp t2, t3, [s, #32]
ldp t4, t5, [s, #48]
stp t0, t1, [d, #16]
stp t2, t3, [d, #32]
stp t4, t5, [d, #48]
ret
.align 5
ldp t0, t1, [s, #16] // -1 = 7 words
ldp t2, t3, [s, #32]
ldp t4, t5, [s, #48]
ldr t6, [s, #64]
stp t0, t1, [d, #16]
stp t2, t3, [d, #32]
stp t4, t5, [d, #48]
str t6, [d, #64]
// Is always aligned here, code for 7 words is one instruction
// too large so it just falls through.
.align 5
0:
ret
.align 6
_Copy_conjoint_words:
sub t0, d, s
cmp t0, count, lsl #3
bhs _Copy_disjoint_words
add s, s, count, lsl #3
add d, d, count, lsl #3
// Ensure 2 word aligned
tbz s, #3, bwd_copy_aligned
ldr t0, [s, #-8]!
str t0, [d, #-8]!
sub count, count, #1
bwd_copy_aligned:
ldp t0, t1, [s, #-16]
ldp t2, t3, [s, #-32]
ldp t4, t5, [s, #-48]
ldp t6, t7, [s, #-64]!
subs count, count, #16
blo bwd_copy_drain
bwd_copy_again:
prfm pldl1keep, [s, #-256]
stp t0, t1, [d, #-16]
ldp t0, t1, [s, #-16]
stp t2, t3, [d, #-32]
ldp t2, t3, [s, #-32]
stp t4, t5, [d, #-48]
ldp t4, t5, [s, #-48]
stp t6, t7, [d, #-64]!
ldp t6, t7, [s, #-64]!
subs count, count, #8
bhs bwd_copy_again
bwd_copy_drain:
stp t0, t1, [d, #-16]
stp t2, t3, [d, #-32]
stp t4, t5, [d, #-48]
stp t6, t7, [d, #-64]!
// count is now -8..-1 for 0..7 words to copy
adr t0, 0f
add t0, t0, count, lsl #5
br t0
.align 5
ret // -8 == 0 words
.align 5
ldr t0, [s, #-8] // -7 == 1 word
str t0, [d, #-8]
ret
.align 5
ldp t0, t1, [s, #-16] // -6 = 2 words
stp t0, t1, [d, #-16]
ret
.align 5
ldp t0, t1, [s, #-16] // -5 = 3 words
ldr t2, [s, #-24]
stp t0, t1, [d, #-16]
str t2, [d, #-24]
ret
.align 5
ldp t0, t1, [s, #-16] // -4 = 4 words
ldp t2, t3, [s, #-32]
stp t0, t1, [d, #-16]
stp t2, t3, [d, #-32]
ret
.align 5
ldp t0, t1, [s, #-16] // -3 = 5 words
ldp t2, t3, [s, #-32]
ldr t4, [s, #-40]
stp t0, t1, [d, #-16]
stp t2, t3, [d, #-32]
str t4, [d, #-40]
ret
.align 5
ldp t0, t1, [s, #-16] // -2 = 6 words
ldp t2, t3, [s, #-32]
ldp t4, t5, [s, #-48]
stp t0, t1, [d, #-16]
stp t2, t3, [d, #-32]
stp t4, t5, [d, #-48]
ret
.align 5
ldp t0, t1, [s, #-16] // -1 = 7 words
ldp t2, t3, [s, #-32]
ldp t4, t5, [s, #-48]
ldr t6, [s, #-56]
stp t0, t1, [d, #-16]
stp t2, t3, [d, #-32]
stp t4, t5, [d, #-48]
str t6, [d, #-56]
// Is always aligned here, code for 7 words is one instruction
// too large so it just falls through.
.align 5
0:
ret

View File

@ -257,7 +257,38 @@ void Canonicalizer::do_ArrayLength (ArrayLength* x) {
} }
} }
void Canonicalizer::do_LoadIndexed (LoadIndexed* x) {} void Canonicalizer::do_LoadIndexed (LoadIndexed* x) {
StableArrayConstant* array = x->array()->type()->as_StableArrayConstant();
IntConstant* index = x->index()->type()->as_IntConstant();
assert(array == NULL || FoldStableValues, "not enabled");
// Constant fold loads from stable arrays.
if (array != NULL && index != NULL) {
jint idx = index->value();
if (idx < 0 || idx >= array->value()->length()) {
// Leave the load as is. The range check will handle it.
return;
}
ciConstant field_val = array->value()->element_value(idx);
if (!field_val.is_null_or_zero()) {
jint dimension = array->dimension();
assert(dimension <= array->value()->array_type()->dimension(), "inconsistent info");
ValueType* value = NULL;
if (dimension > 1) {
// Preserve information about the dimension for the element.
assert(field_val.as_object()->is_array(), "not an array");
value = new StableArrayConstant(field_val.as_object()->as_array(), dimension - 1);
} else {
assert(dimension == 1, "sanity");
value = as_ValueType(field_val);
}
set_canonical(new Constant(value));
}
}
}
void Canonicalizer::do_StoreIndexed (StoreIndexed* x) { void Canonicalizer::do_StoreIndexed (StoreIndexed* x) {
// If a value is going to be stored into a field or array some of // If a value is going to be stored into a field or array some of
// the conversions emitted by javac are unneeded because the fields // the conversions emitted by javac are unneeded because the fields
@ -471,7 +502,7 @@ void Canonicalizer::do_Intrinsic (Intrinsic* x) {
InstanceConstant* c = x->argument_at(0)->type()->as_InstanceConstant(); InstanceConstant* c = x->argument_at(0)->type()->as_InstanceConstant();
if (c != NULL && !c->value()->is_null_object()) { if (c != NULL && !c->value()->is_null_object()) {
// ciInstance::java_mirror_type() returns non-NULL only for Java mirrors // ciInstance::java_mirror_type() returns non-NULL only for Java mirrors
ciType* t = c->value()->as_instance()->java_mirror_type(); ciType* t = c->value()->java_mirror_type();
if (t->is_klass()) { if (t->is_klass()) {
// substitute cls.isInstance(obj) of a constant Class into // substitute cls.isInstance(obj) of a constant Class into
// an InstantOf instruction // an InstantOf instruction
@ -487,6 +518,17 @@ void Canonicalizer::do_Intrinsic (Intrinsic* x) {
} }
break; break;
} }
case vmIntrinsics::_isPrimitive : {
assert(x->number_of_arguments() == 1, "wrong type");
// Class.isPrimitive is known on constant classes:
InstanceConstant* c = x->argument_at(0)->type()->as_InstanceConstant();
if (c != NULL && !c->value()->is_null_object()) {
ciType* t = c->value()->java_mirror_type();
set_constant(t->is_primitive_type());
}
break;
}
} }
} }

View File

@ -148,6 +148,7 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
case vmIntrinsics::_longBitsToDouble: case vmIntrinsics::_longBitsToDouble:
case vmIntrinsics::_getClass: case vmIntrinsics::_getClass:
case vmIntrinsics::_isInstance: case vmIntrinsics::_isInstance:
case vmIntrinsics::_isPrimitive:
case vmIntrinsics::_currentThread: case vmIntrinsics::_currentThread:
case vmIntrinsics::_dabs: case vmIntrinsics::_dabs:
case vmIntrinsics::_dsqrt: case vmIntrinsics::_dsqrt:

View File

@ -1519,6 +1519,29 @@ void GraphBuilder::method_return(Value x) {
append(new Return(x)); append(new Return(x));
} }
Value GraphBuilder::make_constant(ciConstant field_value, ciField* field) {
BasicType field_type = field_value.basic_type();
ValueType* value = as_ValueType(field_value);
// Attach dimension info to stable arrays.
if (FoldStableValues &&
field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) {
ciArray* array = field_value.as_object()->as_array();
jint dimension = field->type()->as_array_klass()->dimension();
value = new StableArrayConstant(array, dimension);
}
switch (field_type) {
case T_ARRAY:
case T_OBJECT:
if (field_value.as_object()->should_be_constant()) {
return new Constant(value);
}
return NULL; // Not a constant.
default:
return new Constant(value);
}
}
void GraphBuilder::access_field(Bytecodes::Code code) { void GraphBuilder::access_field(Bytecodes::Code code) {
bool will_link; bool will_link;
@ -1563,22 +1586,13 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
switch (code) { switch (code) {
case Bytecodes::_getstatic: { case Bytecodes::_getstatic: {
// check for compile-time constants, i.e., initialized static final fields // check for compile-time constants, i.e., initialized static final fields
Instruction* constant = NULL; Value constant = NULL;
if (field->is_constant() && !PatchALot) { if (field->is_constant() && !PatchALot) {
ciConstant field_val = field->constant_value(); ciConstant field_value = field->constant_value();
BasicType field_type = field_val.basic_type();
switch (field_type) {
case T_ARRAY:
case T_OBJECT:
if (field_val.as_object()->should_be_constant()) {
constant = new Constant(as_ValueType(field_val));
}
break;
default:
constant = new Constant(as_ValueType(field_val));
}
// Stable static fields are checked for non-default values in ciField::initialize_from(). // Stable static fields are checked for non-default values in ciField::initialize_from().
assert(!field->is_stable() || !field_value.is_null_or_zero(),
"stable static w/ default value shouldn't be a constant");
constant = make_constant(field_value, field);
} }
if (constant != NULL) { if (constant != NULL) {
push(type, append(constant)); push(type, append(constant));
@ -1591,38 +1605,29 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
} }
break; break;
} }
case Bytecodes::_putstatic: case Bytecodes::_putstatic: {
{ Value val = pop(type); Value val = pop(type);
if (state_before == NULL) { if (state_before == NULL) {
state_before = copy_state_for_exception(); state_before = copy_state_for_exception();
}
append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
} }
append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
break; break;
}
case Bytecodes::_getfield: { case Bytecodes::_getfield: {
// Check for compile-time constants, i.e., trusted final non-static fields. // Check for compile-time constants, i.e., trusted final non-static fields.
Instruction* constant = NULL; Value constant = NULL;
obj = apop(); obj = apop();
ObjectType* obj_type = obj->type()->as_ObjectType(); ObjectType* obj_type = obj->type()->as_ObjectType();
if (obj_type->is_constant() && !PatchALot) { if (obj_type->is_constant() && !PatchALot) {
ciObject* const_oop = obj_type->constant_value(); ciObject* const_oop = obj_type->constant_value();
if (!const_oop->is_null_object() && const_oop->is_loaded()) { if (!const_oop->is_null_object() && const_oop->is_loaded()) {
if (field->is_constant()) { if (field->is_constant()) {
ciConstant field_val = field->constant_value_of(const_oop); ciConstant field_value = field->constant_value_of(const_oop);
BasicType field_type = field_val.basic_type(); if (FoldStableValues && field->is_stable() && field_value.is_null_or_zero()) {
switch (field_type) {
case T_ARRAY:
case T_OBJECT:
if (field_val.as_object()->should_be_constant()) {
constant = new Constant(as_ValueType(field_val));
}
break;
default:
constant = new Constant(as_ValueType(field_val));
}
if (FoldStableValues && field->is_stable() && field_val.is_null_or_zero()) {
// Stable field with default value can't be constant. // Stable field with default value can't be constant.
constant = NULL; constant = NULL;
} else {
constant = make_constant(field_value, field);
} }
} else { } else {
// For CallSite objects treat the target field as a compile time constant. // For CallSite objects treat the target field as a compile time constant.
@ -3942,7 +3947,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
bool GraphBuilder::try_method_handle_inline(ciMethod* callee) { bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
ValueStack* state_before = state()->copy_for_parsing(); ValueStack* state_before = copy_state_before();
vmIntrinsics::ID iid = callee->intrinsic_id(); vmIntrinsics::ID iid = callee->intrinsic_id();
switch (iid) { switch (iid) {
case vmIntrinsics::_invokeBasic: case vmIntrinsics::_invokeBasic:
@ -4032,7 +4037,7 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)); fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
break; break;
} }
set_state(state_before); set_state(state_before->copy_for_parsing());
return false; return false;
} }

View File

@ -276,6 +276,7 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
void iterate_all_blocks(bool start_in_current_block_for_inlining = false); void iterate_all_blocks(bool start_in_current_block_for_inlining = false);
Dependencies* dependency_recorder() const; // = compilation()->dependencies() Dependencies* dependency_recorder() const; // = compilation()->dependencies()
bool direct_compare(ciKlass* k); bool direct_compare(ciKlass* k);
Value make_constant(ciConstant value, ciField* field);
void kill_all(); void kill_all();

View File

@ -1296,6 +1296,25 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
__ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result); __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
} }
// java.lang.Class::isPrimitive()
void LIRGenerator::do_isPrimitive(Intrinsic* x) {
assert(x->number_of_arguments() == 1, "wrong type");
LIRItem rcvr(x->argument_at(0), this);
rcvr.load_item();
LIR_Opr temp = new_register(T_METADATA);
LIR_Opr result = rlock_result(x);
CodeEmitInfo* info = NULL;
if (x->needs_null_check()) {
info = state_for(x);
}
__ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), temp, info);
__ cmp(lir_cond_notEqual, temp, LIR_OprFact::intConst(0));
__ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
}
// Example: Thread.currentThread() // Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) { void LIRGenerator::do_currentThread(Intrinsic* x) {
@ -3098,6 +3117,7 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break; case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
case vmIntrinsics::_isInstance: do_isInstance(x); break; case vmIntrinsics::_isInstance: do_isInstance(x); break;
case vmIntrinsics::_isPrimitive: do_isPrimitive(x); break;
case vmIntrinsics::_getClass: do_getClass(x); break; case vmIntrinsics::_getClass: do_getClass(x); break;
case vmIntrinsics::_currentThread: do_currentThread(x); break; case vmIntrinsics::_currentThread: do_currentThread(x); break;

View File

@ -246,6 +246,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_RegisterFinalizer(Intrinsic* x); void do_RegisterFinalizer(Intrinsic* x);
void do_isInstance(Intrinsic* x); void do_isInstance(Intrinsic* x);
void do_isPrimitive(Intrinsic* x);
void do_getClass(Intrinsic* x); void do_getClass(Intrinsic* x);
void do_currentThread(Intrinsic* x); void do_currentThread(Intrinsic* x);
void do_MathIntrinsic(Intrinsic* x); void do_MathIntrinsic(Intrinsic* x);

View File

@ -335,6 +335,7 @@ JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
NOT_PRODUCT(_new_instance_slowcase_cnt++;) NOT_PRODUCT(_new_instance_slowcase_cnt++;)
assert(klass->is_klass(), "not a class"); assert(klass->is_klass(), "not a class");
Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
instanceKlassHandle h(thread, klass); instanceKlassHandle h(thread, klass);
h->check_valid_for_instantiation(true, CHECK); h->check_valid_for_instantiation(true, CHECK);
// make sure klass is initialized // make sure klass is initialized
@ -370,6 +371,7 @@ JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, Klass* array_klas
// anymore after new_objArray() and no GC can happen before. // anymore after new_objArray() and no GC can happen before.
// (This may have to change if this code changes!) // (This may have to change if this code changes!)
assert(array_klass->is_klass(), "not a class"); assert(array_klass->is_klass(), "not a class");
Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass(); Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK); objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
thread->set_vm_result(obj); thread->set_vm_result(obj);
@ -386,6 +388,7 @@ JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, Klass* klass, int
assert(klass->is_klass(), "not a class"); assert(klass->is_klass(), "not a class");
assert(rank >= 1, "rank must be nonzero"); assert(rank >= 1, "rank must be nonzero");
Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK); oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
thread->set_vm_result(obj); thread->set_vm_result(obj);
JRT_END JRT_END

View File

@ -45,6 +45,7 @@ class ObjectType;
class ObjectConstant; class ObjectConstant;
class ArrayType; class ArrayType;
class ArrayConstant; class ArrayConstant;
class StableArrayConstant;
class InstanceType; class InstanceType;
class InstanceConstant; class InstanceConstant;
class MetadataType; class MetadataType;
@ -168,6 +169,7 @@ class ValueType: public CompilationResourceObj {
virtual MethodConstant* as_MethodConstant() { return NULL; } virtual MethodConstant* as_MethodConstant() { return NULL; }
virtual MethodDataConstant* as_MethodDataConstant() { return NULL; } virtual MethodDataConstant* as_MethodDataConstant() { return NULL; }
virtual ArrayConstant* as_ArrayConstant() { return NULL; } virtual ArrayConstant* as_ArrayConstant() { return NULL; }
virtual StableArrayConstant* as_StableArrayConstant() { return NULL; }
virtual AddressConstant* as_AddressConstant() { return NULL; } virtual AddressConstant* as_AddressConstant() { return NULL; }
// type operations // type operations
@ -355,6 +357,20 @@ class ArrayConstant: public ArrayType {
virtual ciType* exact_type() const; virtual ciType* exact_type() const;
}; };
class StableArrayConstant: public ArrayConstant {
private:
jint _dimension;
public:
StableArrayConstant(ciArray* value, jint dimension) : ArrayConstant(value) {
assert(dimension > 0, "not a stable array");
_dimension = dimension;
}
jint dimension() const { return _dimension; }
virtual StableArrayConstant* as_StableArrayConstant() { return this; }
};
class InstanceType: public ObjectType { class InstanceType: public ObjectType {
public: public:

View File

@ -81,7 +81,7 @@ ciMethodData::ciMethodData() : ciMetadata(NULL) {
void ciMethodData::load_extra_data() { void ciMethodData::load_extra_data() {
MethodData* mdo = get_MethodData(); MethodData* mdo = get_MethodData();
MutexLocker(mdo->extra_data_lock()); MutexLocker ml(mdo->extra_data_lock());
// speculative trap entries also hold a pointer to a Method so need to be translated // speculative trap entries also hold a pointer to a Method so need to be translated
DataLayout* dp_src = mdo->extra_data_base(); DataLayout* dp_src = mdo->extra_data_base();
@ -103,16 +103,13 @@ void ciMethodData::load_extra_data() {
switch(tag) { switch(tag) {
case DataLayout::speculative_trap_data_tag: { case DataLayout::speculative_trap_data_tag: {
ciSpeculativeTrapData* data_dst = new ciSpeculativeTrapData(dp_dst); ciSpeculativeTrapData data_dst(dp_dst);
SpeculativeTrapData* data_src = new SpeculativeTrapData(dp_src); SpeculativeTrapData data_src(dp_src);
data_dst->translate_from(data_src);
#ifdef ASSERT
SpeculativeTrapData* data_src2 = new SpeculativeTrapData(dp_src);
assert(data_src2->method() == data_src->method() && data_src2->bci() == data_src->bci(), "entries changed while translating");
#endif
{ // During translation a safepoint can happen or VM lock can be taken (e.g., Compile_lock).
MutexUnlocker ml(mdo->extra_data_lock());
data_dst.translate_from(&data_src);
}
break; break;
} }
case DataLayout::bit_data_tag: case DataLayout::bit_data_tag:
@ -120,9 +117,11 @@ void ciMethodData::load_extra_data() {
case DataLayout::no_tag: case DataLayout::no_tag:
case DataLayout::arg_info_data_tag: case DataLayout::arg_info_data_tag:
// An empty slot or ArgInfoData entry marks the end of the trap data // An empty slot or ArgInfoData entry marks the end of the trap data
return; {
return; // Need a block to avoid SS compiler bug
}
default: default:
fatal("bad tag = %d", dp_dst->tag()); fatal("bad tag = %d", tag);
} }
} }
} }

View File

@ -1060,14 +1060,15 @@
do_name( updateByteBuffer_A_name, "updateByteBuffer") \ do_name( updateByteBuffer_A_name, "updateByteBuffer") \
\ \
/* support for Unsafe */ \ /* support for Unsafe */ \
do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
do_class(jdk_internal_misc_Unsafe, "jdk/internal/misc/Unsafe") \ do_class(jdk_internal_misc_Unsafe, "jdk/internal/misc/Unsafe") \
\ \
do_intrinsic(_allocateInstance, jdk_internal_misc_Unsafe, allocateInstance_name, allocateInstance_signature, F_RN) \ do_intrinsic(_allocateInstance, jdk_internal_misc_Unsafe, allocateInstance_name, allocateInstance_signature, F_RN) \
do_name( allocateInstance_name, "allocateInstance") \ do_name( allocateInstance_name, "allocateInstance") \
do_signature(allocateInstance_signature, "(Ljava/lang/Class;)Ljava/lang/Object;") \ do_signature(allocateInstance_signature, "(Ljava/lang/Class;)Ljava/lang/Object;") \
do_intrinsic(_allocateUninitializedArray, jdk_internal_misc_Unsafe, allocateUninitializedArray_name, newArray_signature, F_R) \
do_name( allocateUninitializedArray_name, "allocateUninitializedArray0") \
do_intrinsic(_copyMemory, jdk_internal_misc_Unsafe, copyMemory_name, copyMemory_signature, F_RN) \ do_intrinsic(_copyMemory, jdk_internal_misc_Unsafe, copyMemory_name, copyMemory_signature, F_RN) \
do_name( copyMemory_name, "copyMemory") \ do_name( copyMemory_name, "copyMemory0") \
do_signature(copyMemory_signature, "(Ljava/lang/Object;JLjava/lang/Object;JJ)V") \ do_signature(copyMemory_signature, "(Ljava/lang/Object;JLjava/lang/Object;JJ)V") \
do_intrinsic(_loadFence, jdk_internal_misc_Unsafe, loadFence_name, loadFence_signature, F_RN) \ do_intrinsic(_loadFence, jdk_internal_misc_Unsafe, loadFence_name, loadFence_signature, F_RN) \
do_name( loadFence_name, "loadFence") \ do_name( loadFence_name, "loadFence") \

View File

@ -637,16 +637,19 @@ void CodeCache::blobs_do(CodeBlobClosure* f) {
} }
// Walk the list of methods which might contain non-perm oops. // Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) { void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
if (UseG1GC) { if (UseG1GC) {
return; return;
} }
const bool fix_relocations = f->fix_relocations();
debug_only(mark_scavenge_root_nmethods()); debug_only(mark_scavenge_root_nmethods());
for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) { nmethod* prev = NULL;
nmethod* cur = scavenge_root_nmethods();
while (cur != NULL) {
debug_only(cur->clear_scavenge_root_marked()); debug_only(cur->clear_scavenge_root_marked());
assert(cur->scavenge_root_not_marked(), ""); assert(cur->scavenge_root_not_marked(), "");
assert(cur->on_scavenge_root_list(), "else shouldn't be on this list"); assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
@ -659,6 +662,18 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
// Perform cur->oops_do(f), maybe just once per nmethod. // Perform cur->oops_do(f), maybe just once per nmethod.
f->do_code_blob(cur); f->do_code_blob(cur);
} }
nmethod* const next = cur->scavenge_root_link();
// The scavengable nmethod list must contain all methods with scavengable
// oops. It is safe to include more nmethod on the list, but we do not
// expect any live non-scavengable nmethods on the list.
if (fix_relocations) {
if (!is_live || !cur->detect_scavenge_root_oops()) {
unlink_scavenge_root_nmethod(cur, prev);
} else {
prev = cur;
}
}
cur = next;
} }
// Check for stray marks. // Check for stray marks.
@ -678,6 +693,24 @@ void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
print_trace("add_scavenge_root", nm); print_trace("add_scavenge_root", nm);
} }
void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
assert_locked_or_safepoint(CodeCache_lock);
assert((prev == NULL && scavenge_root_nmethods() == nm) ||
(prev != NULL && prev->scavenge_root_link() == nm), "precondition");
assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");
print_trace("unlink_scavenge_root", nm);
if (prev == NULL) {
set_scavenge_root_nmethods(nm->scavenge_root_link());
} else {
prev->set_scavenge_root_link(nm->scavenge_root_link());
}
nm->set_scavenge_root_link(NULL);
nm->clear_on_scavenge_root_list();
}
void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) { void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
@ -686,20 +719,13 @@ void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
} }
print_trace("drop_scavenge_root", nm); print_trace("drop_scavenge_root", nm);
nmethod* last = NULL; nmethod* prev = NULL;
nmethod* cur = scavenge_root_nmethods(); for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
while (cur != NULL) {
nmethod* next = cur->scavenge_root_link();
if (cur == nm) { if (cur == nm) {
if (last != NULL) unlink_scavenge_root_nmethod(cur, prev);
last->set_scavenge_root_link(next);
else set_scavenge_root_nmethods(next);
nm->set_scavenge_root_link(NULL);
nm->clear_on_scavenge_root_list();
return; return;
} }
last = cur; prev = cur;
cur = next;
} }
assert(false, "should have been on list"); assert(false, "should have been on list");
} }
@ -728,11 +754,7 @@ void CodeCache::prune_scavenge_root_nmethods() {
} else { } else {
// Prune it from the list, so we don't have to look at it any more. // Prune it from the list, so we don't have to look at it any more.
print_trace("prune_scavenge_root", cur); print_trace("prune_scavenge_root", cur);
cur->set_scavenge_root_link(NULL); unlink_scavenge_root_nmethod(cur, last);
cur->clear_on_scavenge_root_list();
if (last != NULL)
last->set_scavenge_root_link(next);
else set_scavenge_root_nmethods(next);
} }
cur = next; cur = next;
} }

View File

@ -116,6 +116,10 @@ class CodeCache : AllStatic {
static int allocated_segments(); static int allocated_segments();
static size_t freelists_length(); static size_t freelists_length();
static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
static void prune_scavenge_root_nmethods();
static void unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev);
public: public:
// Initialization // Initialization
static void initialize(); static void initialize();
@ -153,13 +157,17 @@ class CodeCache : AllStatic {
// to "true" iff some code got unloaded. // to "true" iff some code got unloaded.
static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred); static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN; static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
static void scavenge_root_nmethods_do(CodeBlobClosure* f);
// Apply f to every live code blob in scavengable nmethods. Prune nmethods
// from the list of scavengable nmethods if f->fix_relocations() and a nmethod
// no longer has scavengable oops. If f->fix_relocations(), then f must copy
// objects to their new location immediately to avoid fixing nmethods on the
// basis of the old object locations.
static void scavenge_root_nmethods_do(CodeBlobToOopClosure* f);
static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; } static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
static void add_scavenge_root_nmethod(nmethod* nm); static void add_scavenge_root_nmethod(nmethod* nm);
static void drop_scavenge_root_nmethod(nmethod* nm); static void drop_scavenge_root_nmethod(nmethod* nm);
static void prune_scavenge_root_nmethods();
// Printing/debugging // Printing/debugging
static void print(); // prints summary static void print(); // prints summary

View File

@ -369,7 +369,6 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
assert(method == NULL || assert(method == NULL ||
(method->is_native() && bci == 0) || (method->is_native() && bci == 0) ||
(!method->is_native() && 0 <= bci && bci < method->code_size()) || (!method->is_native() && 0 <= bci && bci < method->code_size()) ||
(method->is_compiled_lambda_form() && bci == -99) || // this might happen in C1
bci == -1, "illegal bci"); bci == -1, "illegal bci");
// serialize the locals/expressions/monitors // serialize the locals/expressions/monitors

View File

@ -1381,7 +1381,6 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
assert(_method == NULL, "Tautology"); assert(_method == NULL, "Tautology");
set_osr_link(NULL); set_osr_link(NULL);
//set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
NMethodSweeper::report_state_change(this); NMethodSweeper::report_state_change(this);
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -838,12 +838,8 @@ void CompileBroker::compile_method_base(const methodHandle& method,
const methodHandle& hot_method, const methodHandle& hot_method,
int hot_count, int hot_count,
const char* comment, const char* comment,
bool blocking,
Thread* thread) { Thread* thread) {
// do nothing if compiler thread(s) is not available
if (!_initialized) {
return;
}
guarantee(!method->is_abstract(), "cannot compile abstract methods"); guarantee(!method->is_abstract(), "cannot compile abstract methods");
assert(method->method_holder()->is_instance_klass(), assert(method->method_holder()->is_instance_klass(),
"sanity check"); "sanity check");
@ -916,7 +912,6 @@ void CompileBroker::compile_method_base(const methodHandle& method,
// Outputs from the following MutexLocker block: // Outputs from the following MutexLocker block:
CompileTask* task = NULL; CompileTask* task = NULL;
bool blocking = false;
CompileQueue* queue = compile_queue(comp_level); CompileQueue* queue = compile_queue(comp_level);
// Acquire our lock. // Acquire our lock.
@ -946,9 +941,6 @@ void CompileBroker::compile_method_base(const methodHandle& method,
return; return;
} }
// Should this thread wait for completion of the compile?
blocking = is_compile_blocking();
#if INCLUDE_JVMCI #if INCLUDE_JVMCI
if (UseJVMCICompiler) { if (UseJVMCICompiler) {
if (blocking) { if (blocking) {
@ -1034,11 +1026,28 @@ void CompileBroker::compile_method_base(const methodHandle& method,
} }
} }
nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
int comp_level, int comp_level,
const methodHandle& hot_method, int hot_count, const methodHandle& hot_method, int hot_count,
const char* comment, Thread* THREAD) { const char* comment, Thread* THREAD) {
// do nothing if compilebroker is not available
if (!_initialized) {
return NULL;
}
AbstractCompiler *comp = CompileBroker::compiler(comp_level);
assert(comp != NULL, "Ensure we don't compile before compilebroker init");
DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp);
nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_method, hot_count, comment, directive, THREAD);
DirectivesStack::release(directive);
return nm;
}
nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
int comp_level,
const methodHandle& hot_method, int hot_count,
const char* comment, DirectiveSet* directive,
Thread* THREAD) {
// make sure arguments make sense // make sure arguments make sense
assert(method->method_holder()->is_instance_klass(), "not an instance method"); assert(method->method_holder()->is_instance_klass(), "not an instance method");
assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range"); assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
@ -1051,8 +1060,8 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
// lock, make sure that the compilation // lock, make sure that the compilation
// isn't prohibited in a straightforward way. // isn't prohibited in a straightforward way.
AbstractCompiler *comp = CompileBroker::compiler(comp_level); AbstractCompiler *comp = CompileBroker::compiler(comp_level);
if (comp == NULL || !comp->can_compile_method(method) || if (!comp->can_compile_method(method) ||
compilation_is_prohibited(method, osr_bci, comp_level)) { compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) {
return NULL; return NULL;
} }
@ -1160,7 +1169,7 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
CompilationPolicy::policy()->delay_compilation(method()); CompilationPolicy::policy()->delay_compilation(method());
return NULL; return NULL;
} }
compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, THREAD); compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, !directive->BackgroundCompilationOption, THREAD);
} }
// return requested nmethod // return requested nmethod
@ -1217,7 +1226,7 @@ bool CompileBroker::compilation_is_in_queue(const methodHandle& method) {
// CompileBroker::compilation_is_prohibited // CompileBroker::compilation_is_prohibited
// //
// See if this compilation is not allowed. // See if this compilation is not allowed.
bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level) { bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) {
bool is_native = method->is_native(); bool is_native = method->is_native();
// Some compilers may not support the compilation of natives. // Some compilers may not support the compilation of natives.
AbstractCompiler *comp = compiler(comp_level); AbstractCompiler *comp = compiler(comp_level);
@ -1235,11 +1244,6 @@ bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int os
return true; return true;
} }
// Breaking the abstraction - directives are only used inside a compilation otherwise.
DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp);
bool excluded = directive->ExcludeOption;
DirectivesStack::release(directive);
// The method may be explicitly excluded by the user. // The method may be explicitly excluded by the user.
double scale; double scale;
if (excluded || (CompilerOracle::has_option_value(method, "CompileThresholdScaling", scale) && scale == 0)) { if (excluded || (CompilerOracle::has_option_value(method, "CompileThresholdScaling", scale) && scale == 0)) {
@ -1304,16 +1308,6 @@ uint CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandl
return assign_compile_id(method, osr_bci); return assign_compile_id(method, osr_bci);
} }
/**
* Should the current thread block until this compilation request
* has been fulfilled?
*/
bool CompileBroker::is_compile_blocking() {
assert(!InstanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
return !BackgroundCompilation;
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileBroker::preload_classes // CompileBroker::preload_classes
void CompileBroker::preload_classes(const methodHandle& method, TRAPS) { void CompileBroker::preload_classes(const methodHandle& method, TRAPS) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -222,8 +222,7 @@ class CompileBroker: AllStatic {
static JavaThread* make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, bool compiler_thread, TRAPS); static JavaThread* make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, bool compiler_thread, TRAPS);
static void init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count); static void init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count);
static bool compilation_is_complete (const methodHandle& method, int osr_bci, int comp_level); static bool compilation_is_complete (const methodHandle& method, int osr_bci, int comp_level);
static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level); static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded);
static bool is_compile_blocking();
static void preload_classes (const methodHandle& method, TRAPS); static void preload_classes (const methodHandle& method, TRAPS);
static CompileTask* create_compile_task(CompileQueue* queue, static CompileTask* create_compile_task(CompileQueue* queue,
@ -253,6 +252,7 @@ class CompileBroker: AllStatic {
const methodHandle& hot_method, const methodHandle& hot_method,
int hot_count, int hot_count,
const char* comment, const char* comment,
bool blocking,
Thread* thread); Thread* thread);
static CompileQueue* compile_queue(int comp_level); static CompileQueue* compile_queue(int comp_level);
@ -291,6 +291,15 @@ public:
int hot_count, int hot_count,
const char* comment, Thread* thread); const char* comment, Thread* thread);
static nmethod* compile_method(const methodHandle& method,
int osr_bci,
int comp_level,
const methodHandle& hot_method,
int hot_count,
const char* comment,
DirectiveSet* directive,
Thread* thread);
// Acquire any needed locks and assign a compile id // Acquire any needed locks and assign a compile id
static uint assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci); static uint assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -472,9 +472,12 @@ void DirectivesStack::push(CompilerDirectives* directive) {
_depth++; _depth++;
} }
void DirectivesStack::pop() { void DirectivesStack::pop(int count) {
MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
pop_inner(); assert(count > -1, "No negative values");
for (int i = 0; i < count; i++) {
pop_inner();
}
} }
void DirectivesStack::pop_inner() { void DirectivesStack::pop_inner() {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -42,6 +42,7 @@
cflags(PrintAssembly, bool, PrintAssembly, PrintAssembly) \ cflags(PrintAssembly, bool, PrintAssembly, PrintAssembly) \
cflags(PrintInlining, bool, PrintInlining, PrintInlining) \ cflags(PrintInlining, bool, PrintInlining, PrintInlining) \
cflags(PrintNMethods, bool, PrintNMethods, PrintNMethods) \ cflags(PrintNMethods, bool, PrintNMethods, PrintNMethods) \
cflags(BackgroundCompilation, bool, BackgroundCompilation, BackgroundCompilation) \
cflags(ReplayInline, bool, false, ReplayInline) \ cflags(ReplayInline, bool, false, ReplayInline) \
cflags(DumpReplay, bool, false, DumpReplay) \ cflags(DumpReplay, bool, false, DumpReplay) \
cflags(DumpInline, bool, false, DumpInline) \ cflags(DumpInline, bool, false, DumpInline) \
@ -87,7 +88,7 @@ public:
static DirectiveSet* getMatchingDirective(methodHandle mh, AbstractCompiler* comp); static DirectiveSet* getMatchingDirective(methodHandle mh, AbstractCompiler* comp);
static DirectiveSet* getDefaultDirective(AbstractCompiler* comp); static DirectiveSet* getDefaultDirective(AbstractCompiler* comp);
static void push(CompilerDirectives* directive); static void push(CompilerDirectives* directive);
static void pop(); static void pop(int count);
static bool check_capacity(int request_size, outputStream* st); static bool check_capacity(int request_size, outputStream* st);
static void clear(); static void clear();
static void print(outputStream* st); static void print(outputStream* st);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ void DirectivesParser::clean_tmp() {
assert(_tmp_depth == 0, "Consistency"); assert(_tmp_depth == 0, "Consistency");
} }
bool DirectivesParser::parse_string(const char* text, outputStream* st) { int DirectivesParser::parse_string(const char* text, outputStream* st) {
DirectivesParser cd(text, st); DirectivesParser cd(text, st);
if (cd.valid()) { if (cd.valid()) {
return cd.install_directives(); return cd.install_directives();
@ -63,7 +63,7 @@ bool DirectivesParser::parse_string(const char* text, outputStream* st) {
cd.clean_tmp(); cd.clean_tmp();
st->flush(); st->flush();
st->print_cr("Parsing of compiler directives failed"); st->print_cr("Parsing of compiler directives failed");
return false; return -1;
} }
} }
@ -97,17 +97,17 @@ bool DirectivesParser::parse_from_file_inner(const char* filename, outputStream*
buffer[num_read] = '\0'; buffer[num_read] = '\0';
// close file // close file
os::close(file_handle); os::close(file_handle);
return parse_string(buffer, stream); return parse_string(buffer, stream) > 0;
} }
} }
return false; return false;
} }
bool DirectivesParser::install_directives() { int DirectivesParser::install_directives() {
// Check limit // Check limit
if (!DirectivesStack::check_capacity(_tmp_depth, _st)) { if (!DirectivesStack::check_capacity(_tmp_depth, _st)) {
clean_tmp(); clean_tmp();
return false; return 0;
} }
// Pop from internal temporary stack and push to compileBroker. // Pop from internal temporary stack and push to compileBroker.
@ -120,14 +120,14 @@ bool DirectivesParser::install_directives() {
} }
if (i == 0) { if (i == 0) {
_st->print_cr("No directives in file"); _st->print_cr("No directives in file");
return false; return 0;
} else { } else {
_st->print_cr("%i compiler directives added", i); _st->print_cr("%i compiler directives added", i);
if (CompilerDirectivesPrint) { if (CompilerDirectivesPrint) {
// Print entire directives stack after new has been pushed. // Print entire directives stack after new has been pushed.
DirectivesStack::print(_st); DirectivesStack::print(_st);
} }
return true; return i;
} }
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -51,8 +51,8 @@ public:
static bool has_file(); static bool has_file();
static bool parse_from_flag(); static bool parse_from_flag();
static bool parse_from_file(const char* filename, outputStream* st); static bool parse_from_file(const char* filename, outputStream* st);
static bool parse_string(const char* string, outputStream* st); static int parse_string(const char* string, outputStream* st);
bool install_directives(); int install_directives();
private: private:
DirectivesParser(const char* text, outputStream* st); DirectivesParser(const char* text, outputStream* st);

View File

@ -2329,9 +2329,13 @@ void G1CollectedHeap::register_concurrent_cycle_end() {
GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id()); GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
if (_cm->has_aborted()) { if (_cm->has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure(); _gc_tracer_cm->report_concurrent_mode_failure();
// ConcurrentGCTimer will be ended as well.
_cm->register_concurrent_gc_end_and_stop_timer();
} else {
_gc_timer_cm->register_gc_end();
} }
_gc_timer_cm->register_gc_end();
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
// Clear state variables to prepare for the next concurrent cycle. // Clear state variables to prepare for the next concurrent cycle.

View File

@ -269,6 +269,8 @@ G1CollectorPolicy::G1CollectorPolicy() :
_reserve_regions = 0; _reserve_regions = 0;
_cset_chooser = new CollectionSetChooser(); _cset_chooser = new CollectionSetChooser();
_ihop_control = create_ihop_control();
} }
G1CollectorPolicy::~G1CollectorPolicy() { G1CollectorPolicy::~G1CollectorPolicy() {
@ -469,8 +471,6 @@ void G1CollectorPolicy::post_heap_initialize() {
if (max_young_size != MaxNewSize) { if (max_young_size != MaxNewSize) {
FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size); FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
} }
_ihop_control = create_ihop_control();
} }
void G1CollectorPolicy::initialize_flags() { void G1CollectorPolicy::initialize_flags() {
@ -565,6 +565,8 @@ void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
_reserve_regions = (uint) ceil(reserve_regions_d); _reserve_regions = (uint) ceil(reserve_regions_d);
_young_gen_sizer->heap_size_changed(new_number_of_regions); _young_gen_sizer->heap_size_changed(new_number_of_regions);
_ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
} }
uint G1CollectorPolicy::calculate_young_list_desired_min_length( uint G1CollectorPolicy::calculate_young_list_desired_min_length(
@ -1234,13 +1236,11 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
G1IHOPControl* G1CollectorPolicy::create_ihop_control() const { G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
if (G1UseAdaptiveIHOP) { if (G1UseAdaptiveIHOP) {
return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent, return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
G1CollectedHeap::heap()->max_capacity(),
&_predictor, &_predictor,
G1ReservePercent, G1ReservePercent,
G1HeapWastePercent); G1HeapWastePercent);
} else { } else {
return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent, return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
G1CollectedHeap::heap()->max_capacity());
} }
} }

View File

@ -441,7 +441,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
_has_aborted(false), _has_aborted(false),
_restart_for_overflow(false), _restart_for_overflow(false),
_concurrent_marking_in_progress(false), _concurrent_marking_in_progress(false),
_concurrent_phase_started(false), _concurrent_phase_status(ConcPhaseNotStarted),
// _verbose_level set below // _verbose_level set below
@ -1008,16 +1008,43 @@ void G1ConcurrentMark::scanRootRegions() {
} }
void G1ConcurrentMark::register_concurrent_phase_start(const char* title) { void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
assert(!_concurrent_phase_started, "Sanity"); uint old_val = 0;
_concurrent_phase_started = true; do {
old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted);
} while (old_val != ConcPhaseNotStarted);
_g1h->gc_timer_cm()->register_gc_concurrent_start(title); _g1h->gc_timer_cm()->register_gc_concurrent_start(title);
} }
void G1ConcurrentMark::register_concurrent_phase_end() { void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) {
if (_concurrent_phase_started) { if (_concurrent_phase_status == ConcPhaseNotStarted) {
_concurrent_phase_started = false; return;
_g1h->gc_timer_cm()->register_gc_concurrent_end();
} }
uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted);
if (old_val == ConcPhaseStarted) {
_g1h->gc_timer_cm()->register_gc_concurrent_end();
// If 'end_timer' is true, we came here to end timer which needs concurrent phase ended.
// We need to end it before changing the status to 'ConcPhaseNotStarted' to prevent
// starting a new concurrent phase by 'ConcurrentMarkThread'.
if (end_timer) {
_g1h->gc_timer_cm()->register_gc_end();
}
old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
} else {
do {
// Let other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
os::naked_short_sleep(1);
} while (_concurrent_phase_status != ConcPhaseNotStarted);
}
}
void G1ConcurrentMark::register_concurrent_phase_end() {
register_concurrent_phase_end_common(false);
}
void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
register_concurrent_phase_end_common(true);
} }
void G1ConcurrentMark::markFromRoots() { void G1ConcurrentMark::markFromRoots() {
@ -2605,9 +2632,6 @@ void G1ConcurrentMark::abort() {
_g1h->trace_heap_after_concurrent_cycle(); _g1h->trace_heap_after_concurrent_cycle();
// Close any open concurrent phase timing
register_concurrent_phase_end();
_g1h->register_concurrent_cycle_end(); _g1h->register_concurrent_cycle_end();
} }

View File

@ -352,8 +352,17 @@ protected:
// time of remark. // time of remark.
volatile bool _concurrent_marking_in_progress; volatile bool _concurrent_marking_in_progress;
// Keep track of whether we have started concurrent phase or not. // There would be a race between ConcurrentMarkThread and VMThread(ConcurrentMark::abort())
bool _concurrent_phase_started; // to call ConcurrentGCTimer::register_gc_concurrent_end().
// And this variable is used to keep track of concurrent phase.
volatile uint _concurrent_phase_status;
// Concurrent phase is not yet started.
static const uint ConcPhaseNotStarted = 0;
// Concurrent phase is started.
static const uint ConcPhaseStarted = 1;
// Caller thread of ConcurrentGCTimer::register_gc_concurrent_end() is ending concurrent phase.
// So other thread should wait until the status to be changed to ConcPhaseNotStarted.
static const uint ConcPhaseStopping = 2;
// All of these times are in ms // All of these times are in ms
NumberSeq _init_times; NumberSeq _init_times;
@ -485,6 +494,9 @@ protected:
// Set to true when initialization is complete // Set to true when initialization is complete
bool _completed_initialization; bool _completed_initialization;
// end_timer, true to end gc timer after ending concurrent phase.
void register_concurrent_phase_end_common(bool end_timer);
public: public:
// Manipulation of the global mark stack. // Manipulation of the global mark stack.
// The push and pop operations are used by tasks for transfers // The push and pop operations are used by tasks for transfers
@ -520,6 +532,8 @@ public:
void register_concurrent_phase_start(const char* title); void register_concurrent_phase_start(const char* title);
void register_concurrent_phase_end(); void register_concurrent_phase_end();
// Ends both concurrent phase and timer.
void register_concurrent_gc_end_and_stop_timer();
void update_accum_task_vtime(int i, double vtime) { void update_accum_task_vtime(int i, double vtime) {
_accum_task_vtime[i] += vtime; _accum_task_vtime[i] += vtime;

View File

@ -29,15 +29,21 @@
#include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTrace.hpp"
#include "logging/log.hpp" #include "logging/log.hpp"
G1IHOPControl::G1IHOPControl(double initial_ihop_percent, size_t target_occupancy) : G1IHOPControl::G1IHOPControl(double initial_ihop_percent) :
_initial_ihop_percent(initial_ihop_percent), _initial_ihop_percent(initial_ihop_percent),
_target_occupancy(target_occupancy), _target_occupancy(0),
_last_allocated_bytes(0), _last_allocated_bytes(0),
_last_allocation_time_s(0.0) _last_allocation_time_s(0.0)
{ {
assert(_initial_ihop_percent >= 0.0 && _initial_ihop_percent <= 100.0, "Initial IHOP value must be between 0 and 100 but is %.3f", initial_ihop_percent); assert(_initial_ihop_percent >= 0.0 && _initial_ihop_percent <= 100.0, "Initial IHOP value must be between 0 and 100 but is %.3f", initial_ihop_percent);
} }
void G1IHOPControl::update_target_occupancy(size_t new_target_occupancy) {
log_debug(gc, ihop)("Target occupancy update: old: " SIZE_FORMAT "B, new: " SIZE_FORMAT "B",
_target_occupancy, new_target_occupancy);
_target_occupancy = new_target_occupancy;
}
void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size) { void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size) {
assert(allocation_time_s >= 0.0, "Allocation time must be positive but is %.3f", allocation_time_s); assert(allocation_time_s >= 0.0, "Allocation time must be positive but is %.3f", allocation_time_s);
@ -46,6 +52,7 @@ void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t allo
} }
void G1IHOPControl::print() { void G1IHOPControl::print() {
assert(_target_occupancy > 0, "Target occupancy still not updated yet.");
size_t cur_conc_mark_start_threshold = get_conc_mark_start_threshold(); size_t cur_conc_mark_start_threshold = get_conc_mark_start_threshold();
log_debug(gc, ihop)("Basic information (value update), threshold: " SIZE_FORMAT "B (%1.2f), target occupancy: " SIZE_FORMAT "B, current occupancy: " SIZE_FORMAT "B, " log_debug(gc, ihop)("Basic information (value update), threshold: " SIZE_FORMAT "B (%1.2f), target occupancy: " SIZE_FORMAT "B, current occupancy: " SIZE_FORMAT "B, "
"recent allocation size: " SIZE_FORMAT "B, recent allocation duration: %1.2fms, recent old gen allocation rate: %1.2fB/s, recent marking phase length: %1.2fms", "recent allocation size: " SIZE_FORMAT "B, recent allocation duration: %1.2fms, recent old gen allocation rate: %1.2fB/s, recent marking phase length: %1.2fms",
@ -60,6 +67,7 @@ void G1IHOPControl::print() {
} }
void G1IHOPControl::send_trace_event(G1NewTracer* tracer) { void G1IHOPControl::send_trace_event(G1NewTracer* tracer) {
assert(_target_occupancy > 0, "Target occupancy still not updated yet.");
tracer->report_basic_ihop_statistics(get_conc_mark_start_threshold(), tracer->report_basic_ihop_statistics(get_conc_mark_start_threshold(),
_target_occupancy, _target_occupancy,
G1CollectedHeap::heap()->used(), G1CollectedHeap::heap()->used(),
@ -68,10 +76,9 @@ void G1IHOPControl::send_trace_event(G1NewTracer* tracer) {
last_marking_length_s()); last_marking_length_s());
} }
G1StaticIHOPControl::G1StaticIHOPControl(double ihop_percent, size_t target_occupancy) : G1StaticIHOPControl::G1StaticIHOPControl(double ihop_percent) :
G1IHOPControl(ihop_percent, target_occupancy), G1IHOPControl(ihop_percent),
_last_marking_length_s(0.0) { _last_marking_length_s(0.0) {
assert(_target_occupancy > 0, "Target occupancy must be larger than zero.");
} }
#ifndef PRODUCT #ifndef PRODUCT
@ -85,7 +92,8 @@ static void test_update(G1IHOPControl* ctrl, double alloc_time, size_t alloc_amo
void G1StaticIHOPControl::test() { void G1StaticIHOPControl::test() {
size_t const initial_ihop = 45; size_t const initial_ihop = 45;
G1StaticIHOPControl ctrl(initial_ihop, 100); G1StaticIHOPControl ctrl(initial_ihop);
ctrl.update_target_occupancy(100);
size_t threshold = ctrl.get_conc_mark_start_threshold(); size_t threshold = ctrl.get_conc_mark_start_threshold();
assert(threshold == initial_ihop, assert(threshold == initial_ihop,
@ -115,11 +123,10 @@ void G1StaticIHOPControl::test() {
#endif #endif
G1AdaptiveIHOPControl::G1AdaptiveIHOPControl(double ihop_percent, G1AdaptiveIHOPControl::G1AdaptiveIHOPControl(double ihop_percent,
size_t initial_target_occupancy,
G1Predictions const* predictor, G1Predictions const* predictor,
size_t heap_reserve_percent, size_t heap_reserve_percent,
size_t heap_waste_percent) : size_t heap_waste_percent) :
G1IHOPControl(ihop_percent, initial_target_occupancy), G1IHOPControl(ihop_percent),
_predictor(predictor), _predictor(predictor),
_marking_times_s(10, 0.95), _marking_times_s(10, 0.95),
_allocation_rate_s(10, 0.95), _allocation_rate_s(10, 0.95),
@ -130,6 +137,7 @@ G1AdaptiveIHOPControl::G1AdaptiveIHOPControl(double ihop_percent,
} }
size_t G1AdaptiveIHOPControl::actual_target_threshold() const { size_t G1AdaptiveIHOPControl::actual_target_threshold() const {
guarantee(_target_occupancy > 0, "Target occupancy still not updated yet.");
// The actual target threshold takes the heap reserve and the expected waste in // The actual target threshold takes the heap reserve and the expected waste in
// free space into account. // free space into account.
// _heap_reserve is that part of the total heap capacity that is reserved for // _heap_reserve is that part of the total heap capacity that is reserved for
@ -227,7 +235,8 @@ void G1AdaptiveIHOPControl::test() {
// target_size - (young_size + alloc_amount/alloc_time * marking_time) // target_size - (young_size + alloc_amount/alloc_time * marking_time)
G1Predictions pred(0.95); G1Predictions pred(0.95);
G1AdaptiveIHOPControl ctrl(initial_threshold, target_size, &pred, 0, 0); G1AdaptiveIHOPControl ctrl(initial_threshold, &pred, 0, 0);
ctrl.update_target_occupancy(target_size);
// First "load". // First "load".
size_t const alloc_time1 = 2; size_t const alloc_time1 = 2;
@ -288,5 +297,6 @@ void G1AdaptiveIHOPControl::test() {
void IHOP_test() { void IHOP_test() {
G1StaticIHOPControl::test(); G1StaticIHOPControl::test();
G1AdaptiveIHOPControl::test();
} }
#endif #endif

View File

@ -38,7 +38,8 @@ class G1IHOPControl : public CHeapObj<mtGC> {
protected: protected:
// The initial IHOP value relative to the target occupancy. // The initial IHOP value relative to the target occupancy.
double _initial_ihop_percent; double _initial_ihop_percent;
// The target maximum occupancy of the heap. // The target maximum occupancy of the heap. The target occupancy is the number
// of bytes when marking should be finished and reclaim started.
size_t _target_occupancy; size_t _target_occupancy;
// Most recent complete mutator allocation period in seconds. // Most recent complete mutator allocation period in seconds.
@ -46,10 +47,9 @@ class G1IHOPControl : public CHeapObj<mtGC> {
// Amount of bytes allocated during _last_allocation_time_s. // Amount of bytes allocated during _last_allocation_time_s.
size_t _last_allocated_bytes; size_t _last_allocated_bytes;
// Initialize an instance with the initial IHOP value in percent and the target // Initialize an instance with the initial IHOP value in percent. The target
// occupancy. The target occupancy is the number of bytes when marking should // occupancy will be updated at the first heap expansion.
// be finished and reclaim started. G1IHOPControl(double initial_ihop_percent);
G1IHOPControl(double initial_ihop_percent, size_t target_occupancy);
// Most recent time from the end of the initial mark to the start of the first // Most recent time from the end of the initial mark to the start of the first
// mixed gc. // mixed gc.
@ -60,6 +60,8 @@ class G1IHOPControl : public CHeapObj<mtGC> {
// Get the current non-young occupancy at which concurrent marking should start. // Get the current non-young occupancy at which concurrent marking should start.
virtual size_t get_conc_mark_start_threshold() = 0; virtual size_t get_conc_mark_start_threshold() = 0;
// Adjust target occupancy.
virtual void update_target_occupancy(size_t new_target_occupancy);
// Update information about time during which allocations in the Java heap occurred, // Update information about time during which allocations in the Java heap occurred,
// how large these allocations were in bytes, and an additional buffer. // how large these allocations were in bytes, and an additional buffer.
// The allocations should contain any amount of space made unusable for further // The allocations should contain any amount of space made unusable for further
@ -86,9 +88,12 @@ class G1StaticIHOPControl : public G1IHOPControl {
protected: protected:
double last_marking_length_s() const { return _last_marking_length_s; } double last_marking_length_s() const { return _last_marking_length_s; }
public: public:
G1StaticIHOPControl(double ihop_percent, size_t target_occupancy); G1StaticIHOPControl(double ihop_percent);
size_t get_conc_mark_start_threshold() { return (size_t) (_initial_ihop_percent * _target_occupancy / 100.0); } size_t get_conc_mark_start_threshold() {
guarantee(_target_occupancy > 0, "Target occupancy must have been initialized.");
return (size_t) (_initial_ihop_percent * _target_occupancy / 100.0);
}
virtual void update_marking_length(double marking_length_s) { virtual void update_marking_length(double marking_length_s) {
assert(marking_length_s > 0.0, "Marking length must be larger than zero but is %.3f", marking_length_s); assert(marking_length_s > 0.0, "Marking length must be larger than zero but is %.3f", marking_length_s);
@ -132,7 +137,6 @@ class G1AdaptiveIHOPControl : public G1IHOPControl {
virtual double last_marking_length_s() const { return _marking_times_s.last(); } virtual double last_marking_length_s() const { return _marking_times_s.last(); }
public: public:
G1AdaptiveIHOPControl(double ihop_percent, G1AdaptiveIHOPControl(double ihop_percent,
size_t initial_target_occupancy,
G1Predictions const* predictor, G1Predictions const* predictor,
size_t heap_reserve_percent, // The percentage of total heap capacity that should not be tapped into. size_t heap_reserve_percent, // The percentage of total heap capacity that should not be tapped into.
size_t heap_waste_percent); // The percentage of the free space in the heap that we think is not usable for allocation. size_t heap_waste_percent); // The percentage of the free space in the heap that we think is not usable for allocation.

View File

@ -171,14 +171,7 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
#ifdef ASSERT #ifdef ASSERT
// can't do because of races // can't do because of races
// assert(obj == NULL || obj->is_oop(), "expected an oop"); // assert(obj == NULL || obj->is_oop(), "expected an oop");
assert(check_obj_alignment(obj), "not oop aligned");
// Do the safe subset of is_oop
#ifdef CHECK_UNHANDLED_OOPS
oopDesc* o = obj.obj();
#else
oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
assert(_g1->is_in_reserved(obj), "must be in heap"); assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT #endif // ASSERT

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -44,14 +44,7 @@ inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
#ifdef ASSERT #ifdef ASSERT
// can't do because of races // can't do because of races
// assert(obj == NULL || obj->is_oop(), "expected an oop"); // assert(obj == NULL || obj->is_oop(), "expected an oop");
assert(check_obj_alignment(obj), "not oop aligned");
// Do the safe subset of is_oop
#ifdef CHECK_UNHANDLED_OOPS
oopDesc* o = obj.obj();
#else
oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
assert(_g1->is_in_reserved(obj), "must be in heap"); assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT #endif // ASSERT

View File

@ -125,14 +125,14 @@ inline void oop_pc_follow_contents_specialized(objArrayOop obj, int index, ParCo
T* const beg = base + beg_index; T* const beg = base + beg_index;
T* const end = base + end_index; T* const end = base + end_index;
if (end_index < len) {
cm->push_objarray(obj, end_index); // Push the continuation.
}
// Push the non-NULL elements of the next stride on the marking stack. // Push the non-NULL elements of the next stride on the marking stack.
for (T* e = beg; e < end; e++) { for (T* e = beg; e < end; e++) {
cm->mark_and_push<T>(e); cm->mark_and_push<T>(e);
} }
if (end_index < len) {
cm->push_objarray(obj, end_index); // Push the continuation.
}
} }
inline void ParCompactionManager::follow_contents(objArrayOop obj, int index) { inline void ParCompactionManager::follow_contents(objArrayOop obj, int index) {

View File

@ -1483,17 +1483,6 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
} }
} }
void
PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
{
RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
cur->set_source_region(0);
}
}
void void
PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction) PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
{ {

View File

@ -1065,9 +1065,6 @@ class PSParallelCompact : AllStatic {
// non-empty. // non-empty.
static void fill_dense_prefix_end(SpaceId id); static void fill_dense_prefix_end(SpaceId id);
// Clear the summary data source_region field for the specified addresses.
static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);
static void summarize_spaces_quick(); static void summarize_spaces_quick();
static void summarize_space(SpaceId id, bool maximum_compaction); static void summarize_space(SpaceId id, bool maximum_compaction);
static void summary_phase(ParCompactionManager* cm, bool maximum_compaction); static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

View File

@ -600,12 +600,6 @@ bool PSScavenge::invoke_no_policy() {
NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
{
GCTraceTime(Debug, gc, phases) tm("Prune Scavenge Root Methods", &_gc_timer);
CodeCache::prune_scavenge_root_nmethods();
}
// Re-verify object start arrays // Re-verify object start arrays
if (VerifyObjectStartArray && if (VerifyObjectStartArray &&
VerifyAfterGC) { VerifyAfterGC) {

View File

@ -561,7 +561,7 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
OopClosure* weak_roots, OopClosure* weak_roots,
CLDClosure* strong_cld_closure, CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure, CLDClosure* weak_cld_closure,
CodeBlobClosure* code_roots) { CodeBlobToOopClosure* code_roots) {
// General roots. // General roots.
assert(Threads::thread_claim_parity() != 0, "must have called prologue code"); assert(Threads::thread_claim_parity() != 0, "must have called prologue code");
assert(code_roots != NULL, "code root closure should always be set"); assert(code_roots != NULL, "code root closure should always be set");
@ -578,7 +578,7 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
// Don't process them if they will be processed during the ClassLoaderDataGraph phase. // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL; CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
// Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots; CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
bool is_par = scope->n_threads() > 1; bool is_par = scope->n_threads() > 1;
Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p); Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p);

View File

@ -399,7 +399,7 @@ public:
OopClosure* weak_roots, OopClosure* weak_roots,
CLDClosure* strong_cld_closure, CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure, CLDClosure* weak_cld_closure,
CodeBlobClosure* code_roots); CodeBlobToOopClosure* code_roots);
public: public:
static const bool StrongAndWeakRoots = false; static const bool StrongAndWeakRoots = false;

View File

@ -162,6 +162,9 @@ void JVMCICompiler::compile_method(const methodHandle& method, int entry_bci, JV
CLEAR_PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION;
java_lang_Throwable::java_printStackTrace(exception, THREAD); java_lang_Throwable::java_printStackTrace(exception, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
}
env->set_failure("exception throw", false); env->set_failure("exception throw", false);
} else { } else {

View File

@ -51,7 +51,6 @@
jobject JVMCIRuntime::_HotSpotJVMCIRuntime_instance = NULL; jobject JVMCIRuntime::_HotSpotJVMCIRuntime_instance = NULL;
bool JVMCIRuntime::_HotSpotJVMCIRuntime_initialized = false; bool JVMCIRuntime::_HotSpotJVMCIRuntime_initialized = false;
bool JVMCIRuntime::_well_known_classes_initialized = false; bool JVMCIRuntime::_well_known_classes_initialized = false;
const char* JVMCIRuntime::_compiler = NULL;
int JVMCIRuntime::_trivial_prefixes_count = 0; int JVMCIRuntime::_trivial_prefixes_count = 0;
char** JVMCIRuntime::_trivial_prefixes = NULL; char** JVMCIRuntime::_trivial_prefixes = NULL;
bool JVMCIRuntime::_shutdown_called = false; bool JVMCIRuntime::_shutdown_called = false;
@ -104,6 +103,7 @@ static void deopt_caller() {
JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_instance(JavaThread* thread, Klass* klass)) JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_instance(JavaThread* thread, Klass* klass))
JRT_BLOCK; JRT_BLOCK;
assert(klass->is_klass(), "not a class"); assert(klass->is_klass(), "not a class");
Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
instanceKlassHandle h(thread, klass); instanceKlassHandle h(thread, klass);
h->check_valid_for_instantiation(true, CHECK); h->check_valid_for_instantiation(true, CHECK);
// make sure klass is initialized // make sure klass is initialized
@ -129,6 +129,7 @@ JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array(JavaThread* thread, Klass* array_k
BasicType elt_type = TypeArrayKlass::cast(array_klass)->element_type(); BasicType elt_type = TypeArrayKlass::cast(array_klass)->element_type();
obj = oopFactory::new_typeArray(elt_type, length, CHECK); obj = oopFactory::new_typeArray(elt_type, length, CHECK);
} else { } else {
Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass(); Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
obj = oopFactory::new_objArray(elem_klass, length, CHECK); obj = oopFactory::new_objArray(elem_klass, length, CHECK);
} }
@ -172,6 +173,7 @@ void JVMCIRuntime::new_store_pre_barrier(JavaThread* thread) {
JRT_ENTRY(void, JVMCIRuntime::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims)) JRT_ENTRY(void, JVMCIRuntime::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
assert(klass->is_klass(), "not a class"); assert(klass->is_klass(), "not a class");
assert(rank >= 1, "rank must be nonzero"); assert(rank >= 1, "rank must be nonzero");
Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK); oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
thread->set_vm_result(obj); thread->set_vm_result(obj);
JRT_END JRT_END
@ -642,15 +644,6 @@ void JVMCIRuntime::initialize_HotSpotJVMCIRuntime(TRAPS) {
"HotSpotJVMCIRuntime initialization should only be triggered through JVMCI initialization"); "HotSpotJVMCIRuntime initialization should only be triggered through JVMCI initialization");
#endif #endif
if (_compiler != NULL) {
JavaCallArguments args;
oop compiler = java_lang_String::create_oop_from_str(_compiler, CHECK);
args.push_oop(compiler);
callStatic("jdk/vm/ci/hotspot/HotSpotJVMCICompilerConfig",
"selectCompiler",
"(Ljava/lang/String;)Ljava/lang/Boolean;", &args, CHECK);
}
Handle result = callStatic("jdk/vm/ci/hotspot/HotSpotJVMCIRuntime", Handle result = callStatic("jdk/vm/ci/hotspot/HotSpotJVMCIRuntime",
"runtime", "runtime",
"()Ljdk/vm/ci/hotspot/HotSpotJVMCIRuntime;", NULL, CHECK); "()Ljdk/vm/ci/hotspot/HotSpotJVMCIRuntime;", NULL, CHECK);
@ -783,66 +776,6 @@ JVM_ENTRY(void, JVM_RegisterJVMCINatives(JNIEnv *env, jclass c2vmClass))
} }
JVM_END JVM_END
/**
* Closure for parsing a line from a *.properties file in jre/lib/jvmci/properties.
* The line must match the regular expression "[^=]+=.*". That is one or more
* characters other than '=' followed by '=' followed by zero or more characters.
* Everything before the '=' is the property name and everything after '=' is the value.
* Lines that start with '#' are treated as comments and ignored.
* No special processing of whitespace or any escape characters is performed.
* The last definition of a property "wins" (i.e., it overrides all earlier
* definitions of the property).
*/
class JVMCIPropertiesFileClosure : public ParseClosure {
SystemProperty** _plist;
public:
JVMCIPropertiesFileClosure(SystemProperty** plist) : _plist(plist) {}
void do_line(char* line) {
if (line[0] == '#') {
// skip comment
return;
}
size_t len = strlen(line);
char* sep = strchr(line, '=');
if (sep == NULL) {
warn_and_abort("invalid format: could not find '=' character");
return;
}
if (sep == line) {
warn_and_abort("invalid format: name cannot be empty");
return;
}
*sep = '\0';
const char* name = line;
char* value = sep + 1;
Arguments::PropertyList_unique_add(_plist, name, value);
}
};
void JVMCIRuntime::init_system_properties(SystemProperty** plist) {
char jvmciDir[JVM_MAXPATHLEN];
const char* fileSep = os::file_separator();
jio_snprintf(jvmciDir, sizeof(jvmciDir), "%s%slib%sjvmci",
Arguments::get_java_home(), fileSep, fileSep, fileSep);
DIR* dir = os::opendir(jvmciDir);
if (dir != NULL) {
struct dirent *entry;
char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(jvmciDir), mtInternal);
JVMCIPropertiesFileClosure closure(plist);
const unsigned suffix_len = (unsigned)strlen(".properties");
while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL && !closure.is_aborted()) {
const char* name = entry->d_name;
if (strlen(name) > suffix_len && strcmp(name + strlen(name) - suffix_len, ".properties") == 0) {
char propertiesFilePath[JVM_MAXPATHLEN];
jio_snprintf(propertiesFilePath, sizeof(propertiesFilePath), "%s%s%s",jvmciDir, fileSep, name);
JVMCIRuntime::parse_lines(propertiesFilePath, &closure, false);
}
}
FREE_C_HEAP_ARRAY(char, dbuf);
os::closedir(dir);
}
}
#define CHECK_WARN_ABORT_(message) THREAD); \ #define CHECK_WARN_ABORT_(message) THREAD); \
if (HAS_PENDING_EXCEPTION) { \ if (HAS_PENDING_EXCEPTION) { \
warning(message); \ warning(message); \
@ -853,12 +786,6 @@ void JVMCIRuntime::init_system_properties(SystemProperty** plist) {
} \ } \
(void)(0 (void)(0
void JVMCIRuntime::save_compiler(const char* compiler) {
assert(compiler != NULL, "npe");
assert(_compiler == NULL, "cannot reassign JVMCI compiler");
_compiler = compiler;
}
void JVMCIRuntime::shutdown(TRAPS) { void JVMCIRuntime::shutdown(TRAPS) {
if (_HotSpotJVMCIRuntime_instance != NULL) { if (_HotSpotJVMCIRuntime_instance != NULL) {
_shutdown_called = true; _shutdown_called = true;
@ -884,69 +811,3 @@ bool JVMCIRuntime::treat_as_trivial(Method* method) {
} }
return false; return false;
} }
void JVMCIRuntime::parse_lines(char* path, ParseClosure* closure, bool warnStatFailure) {
struct stat st;
if (::stat(path, &st) == 0 && (st.st_mode & S_IFREG) == S_IFREG) { // exists & is regular file
int file_handle = ::open(path, os::default_file_open_flags(), 0);
if (file_handle != -1) {
char* buffer = NEW_C_HEAP_ARRAY(char, st.st_size + 1, mtInternal);
int num_read;
num_read = (int) ::read(file_handle, (char*) buffer, st.st_size);
if (num_read == -1) {
warning("Error reading file %s due to %s", path, strerror(errno));
} else if (num_read != st.st_size) {
warning("Only read %d of " SIZE_FORMAT " bytes from %s", num_read, (size_t) st.st_size, path);
}
::close(file_handle);
closure->set_filename(path);
if (num_read == st.st_size) {
buffer[num_read] = '\0';
char* line = buffer;
while (line - buffer < num_read && !closure->is_aborted()) {
// find line end (\r, \n or \r\n)
char* nextline = NULL;
char* cr = strchr(line, '\r');
char* lf = strchr(line, '\n');
if (cr != NULL && lf != NULL) {
char* min = MIN2(cr, lf);
*min = '\0';
if (lf == cr + 1) {
nextline = lf + 1;
} else {
nextline = min + 1;
}
} else if (cr != NULL) {
*cr = '\0';
nextline = cr + 1;
} else if (lf != NULL) {
*lf = '\0';
nextline = lf + 1;
}
// trim left
while (*line == ' ' || *line == '\t') line++;
char* end = line + strlen(line);
// trim right
while (end > line && (*(end -1) == ' ' || *(end -1) == '\t')) end--;
*end = '\0';
// skip comments and empty lines
if (*line != '#' && strlen(line) > 0) {
closure->parse_line(line);
}
if (nextline != NULL) {
line = nextline;
} else {
// File without newline at the end
break;
}
}
}
FREE_C_HEAP_ARRAY(char, buffer);
} else {
warning("Error opening file %s due to %s", path, strerror(errno));
}
} else if (warnStatFailure) {
warning("Could not stat file %s due to %s", path, strerror(errno));
}
}

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,6 @@ class JVMCIRuntime: public AllStatic {
static jobject _HotSpotJVMCIRuntime_instance; static jobject _HotSpotJVMCIRuntime_instance;
static bool _HotSpotJVMCIRuntime_initialized; static bool _HotSpotJVMCIRuntime_initialized;
static bool _well_known_classes_initialized; static bool _well_known_classes_initialized;
static const char* _compiler;
static int _trivial_prefixes_count; static int _trivial_prefixes_count;
static char** _trivial_prefixes; static char** _trivial_prefixes;
@ -85,19 +84,9 @@ class JVMCIRuntime: public AllStatic {
static Handle create_Service(const char* name, TRAPS); static Handle create_Service(const char* name, TRAPS);
public: public:
static bool is_HotSpotJVMCIRuntime_initialized() {
/** return _HotSpotJVMCIRuntime_initialized;
* Parses *.properties files in jre/lib/jvmci/ and adds the properties to plist. }
*/
static void init_system_properties(SystemProperty** plist);
/**
* Saves the value of the "jvmci.compiler" system property for processing
* when JVMCI is initialized.
*/
static void save_compiler(const char* compiler);
static bool is_HotSpotJVMCIRuntime_initialized() { return _HotSpotJVMCIRuntime_initialized; }
/** /**
* Gets the singleton HotSpotJVMCIRuntime instance, initializing it if necessary * Gets the singleton HotSpotJVMCIRuntime instance, initializing it if necessary
@ -136,7 +125,6 @@ class JVMCIRuntime: public AllStatic {
} }
static bool treat_as_trivial(Method* method); static bool treat_as_trivial(Method* method);
static void parse_lines(char* path, ParseClosure* closure, bool warnStatFailure);
static BasicType kindToBasicType(Handle kind, TRAPS); static BasicType kindToBasicType(Handle kind, TRAPS);

View File

@ -639,11 +639,12 @@
declare_constant(VM_Version::CPU_AVX512DQ) \ declare_constant(VM_Version::CPU_AVX512DQ) \
declare_constant(VM_Version::CPU_AVX512PF) \ declare_constant(VM_Version::CPU_AVX512PF) \
declare_constant(VM_Version::CPU_AVX512ER) \ declare_constant(VM_Version::CPU_AVX512ER) \
declare_constant(VM_Version::CPU_AVX512CD) \ declare_constant(VM_Version::CPU_AVX512CD)
declare_constant(VM_Version::CPU_AVX512BW)
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \ #define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
declare_preprocessor_constant("VM_Version::CPU_AVX512VL", CPU_AVX512VL) declare_preprocessor_constant("VM_Version::CPU_AVX512BW", CPU_AVX512BW) \
declare_preprocessor_constant("VM_Version::CPU_AVX512VL", CPU_AVX512VL) \
declare_preprocessor_constant("VM_Version::CPU_SHA", CPU_SHA)
#endif // TARGET_ARCH_x86 #endif // TARGET_ARCH_x86

View File

@ -107,18 +107,25 @@ class Log VALUE_OBJ_CLASS_SPEC {
return LogTagSetMapping<T0, T1, T2, T3, T4>::tagset().is_level(level); return LogTagSetMapping<T0, T1, T2, T3, T4>::tagset().is_level(level);
} }
ATTRIBUTE_PRINTF(2, 3)
static void write(LogLevelType level, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
vwrite(level, fmt, args);
va_end(args);
};
template <LogLevelType Level> template <LogLevelType Level>
ATTRIBUTE_PRINTF(1, 2) ATTRIBUTE_PRINTF(1, 2)
static void write(const char* fmt, ...) { static void write(const char* fmt, ...) {
va_list args; va_list args;
va_start(args, fmt); va_start(args, fmt);
vwrite<Level>(fmt, args); vwrite(Level, fmt, args);
va_end(args); va_end(args);
}; };
template <LogLevelType Level> ATTRIBUTE_PRINTF(2, 0)
ATTRIBUTE_PRINTF(1, 0) static void vwrite(LogLevelType level, const char* fmt, va_list args) {
static void vwrite(const char* fmt, va_list args) {
char buf[LogBufferSize]; char buf[LogBufferSize];
va_list saved_args; // For re-format on buf overflow. va_list saved_args; // For re-format on buf overflow.
va_copy(saved_args, args); va_copy(saved_args, args);
@ -132,27 +139,26 @@ class Log VALUE_OBJ_CLASS_SPEC {
prefix_len = LogPrefix<T0, T1, T2, T3, T4>::prefix(newbuf, newbuf_len); prefix_len = LogPrefix<T0, T1, T2, T3, T4>::prefix(newbuf, newbuf_len);
ret = os::log_vsnprintf(newbuf + prefix_len, newbuf_len - prefix_len, fmt, saved_args); ret = os::log_vsnprintf(newbuf + prefix_len, newbuf_len - prefix_len, fmt, saved_args);
assert(ret >= 0, "Log message buffer issue"); assert(ret >= 0, "Log message buffer issue");
puts<Level>(newbuf); puts(level, newbuf);
FREE_C_HEAP_ARRAY(char, newbuf); FREE_C_HEAP_ARRAY(char, newbuf);
} else { } else {
puts<Level>(buf); puts(level, buf);
} }
} }
template <LogLevelType Level> static void puts(LogLevelType level, const char* string) {
static void puts(const char* string) { LogTagSetMapping<T0, T1, T2, T3, T4>::tagset().log(level, string);
LogTagSetMapping<T0, T1, T2, T3, T4>::tagset().log(Level, string);
} }
#define LOG_LEVEL(level, name) ATTRIBUTE_PRINTF(2, 0) \ #define LOG_LEVEL(level, name) ATTRIBUTE_PRINTF(2, 0) \
Log& v##name(const char* fmt, va_list args) { \ Log& v##name(const char* fmt, va_list args) { \
vwrite<LogLevel::level>(fmt, args); \ vwrite(LogLevel::level, fmt, args); \
return *this; \ return *this; \
} \ } \
Log& name(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) { \ Log& name(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) { \
va_list args; \ va_list args; \
va_start(args, fmt); \ va_start(args, fmt); \
vwrite<LogLevel::level>(fmt, args); \ vwrite(LogLevel::level, fmt, args); \
va_end(args); \ va_end(args); \
return *this; \ return *this; \
} \ } \

View File

@ -285,9 +285,12 @@ class CodeBlobToOopClosure : public CodeBlobClosure {
protected: protected:
void do_nmethod(nmethod* nm); void do_nmethod(nmethod* nm);
public: public:
// If fix_relocations(), then cl must copy objects to their new location immediately to avoid
// patching nmethods with the old locations.
CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {} CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
virtual void do_code_blob(CodeBlob* cb); virtual void do_code_blob(CodeBlob* cb);
bool fix_relocations() const { return _fix_relocations; }
const static bool FixRelocations = true; const static bool FixRelocations = true;
}; };

View File

@ -1338,73 +1338,6 @@ vmSymbols::SID Method::klass_id_for_intrinsics(const Klass* holder) {
return vmSymbols::find_sid(klass_name); return vmSymbols::find_sid(klass_name);
} }
static bool is_unsafe_alias(vmSymbols::SID name_id) {
// All 70 intrinsic candidate methods from sun.misc.Unsafe in 1.8.
// Some have the same method name but different signature, e.g.
// getByte(long), getByte(Object,long)
switch (name_id) {
case vmSymbols::VM_SYMBOL_ENUM_NAME(allocateInstance_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(copyMemory_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(loadFence_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(storeFence_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(fullFence_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getObject_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getBoolean_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getByte_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getShort_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getChar_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getInt_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getLong_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getFloat_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getDouble_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putObject_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putBoolean_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putByte_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putShort_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putChar_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putInt_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putLong_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putFloat_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putDouble_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getObjectVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getBooleanVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getByteVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getShortVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getCharVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getIntVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getLongVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getFloatVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getDoubleVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putObjectVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putBooleanVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putByteVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putShortVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putCharVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putIntVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putLongVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putFloatVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putDoubleVolatile_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getAddress_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putAddress_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(compareAndSwapObject_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(compareAndSwapLong_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(compareAndSwapInt_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putOrderedObject_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putOrderedLong_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(putOrderedInt_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndAddInt_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndAddLong_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndSetInt_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndSetLong_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(getAndSetObject_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(park_name):
case vmSymbols::VM_SYMBOL_ENUM_NAME(unpark_name):
return true;
}
return false;
}
void Method::init_intrinsic_id() { void Method::init_intrinsic_id() {
assert(_intrinsic_id == vmIntrinsics::_none, "do this just once"); assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte)); const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
@ -1457,14 +1390,6 @@ void Method::init_intrinsic_id() {
if (is_static() != MethodHandles::is_signature_polymorphic_static(id)) if (is_static() != MethodHandles::is_signature_polymorphic_static(id))
id = vmIntrinsics::_none; id = vmIntrinsics::_none;
break; break;
case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_misc_Unsafe):
// Map sun.misc.Unsafe to jdk.internal.misc.Unsafe
if (!is_unsafe_alias(name_id)) break;
// pretend it is the corresponding method in the internal Unsafe class:
klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_misc_Unsafe);
id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
break;
} }
if (id != vmIntrinsics::_none) { if (id != vmIntrinsics::_none) {

View File

@ -295,7 +295,7 @@ address* oopDesc::address_field_addr(int offset) const { return (address*) f
// in inner GC loops so these are separated. // in inner GC loops so these are separated.
inline bool check_obj_alignment(oop obj) { inline bool check_obj_alignment(oop obj) {
return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0; return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
} }
oop oopDesc::decode_heap_oop_not_null(narrowOop v) { oop oopDesc::decode_heap_oop_not_null(narrowOop v) {

View File

@ -158,9 +158,21 @@ void Symbol::print_utf8_on(outputStream* st) const {
} }
void Symbol::print_symbol_on(outputStream* st) const { void Symbol::print_symbol_on(outputStream* st) const {
ResourceMark rm; char *s;
st = st ? st : tty; st = st ? st : tty;
st->print("%s", as_quoted_ascii()); {
// ResourceMark may not affect st->print(). If st is a string
// stream it could resize, using the same resource arena.
ResourceMark rm;
s = as_quoted_ascii();
s = os::strdup(s);
}
if (s == NULL) {
st->print("(null)");
} else {
st->print("%s", s);
os::free(s);
}
} }
char* Symbol::as_quoted_ascii() const { char* Symbol::as_quoted_ascii() const {

Some files were not shown because too many files have changed in this diff Show More