This commit is contained in:
Prasanta Sadhukhan 2019-11-20 10:52:28 +05:30
commit 9d6ace51a2
1080 changed files with 21210 additions and 42547 deletions

View File

@ -594,3 +594,5 @@ e84d8379815ba0d3e50fb096d28c25894cb50b8c jdk-14+18
9b67dd88a9313e982ec5f710a7747161bc8f0c23 jdk-14+19
54ffb15c48399dd59922ee22bb592d815307e77c jdk-14+20
c16ac7a2eba4e73cb4f7ee9294dd647860eebff0 jdk-14+21
83810b7d12e7ff761ad3dd91f323a22dad96f108 jdk-14+22
15936b142f86731afa4b1a2c0fe4a01e806c4944 jdk-14+23

View File

@ -43,6 +43,7 @@
<li><a href="#docker-tests">Docker Tests</a></li>
<li><a href="#non-us-locale">Non-US locale</a></li>
<li><a href="#pkcs11-tests">PKCS11 Tests</a></li>
<li><a href="#client-ui-tests">Client UI Tests</a></li>
</ul></li>
</ul>
</nav>
@ -207,5 +208,15 @@ $ make test JTREG=&quot;VM_OPTIONS=-Duser.language=en -Duser.country=US&quot; TE
<p>It is highly recommended to use the latest NSS version when running PKCS11 tests. An improper NSS version may lead to unexpected failures which are hard to diagnose. For example, sun/security/pkcs11/Secmod/AddTrustedCert.java may fail on Ubuntu 18.04 with the default NSS version in the system. To run these tests correctly, the system property <code>test.nss.lib.paths</code> is required on Ubuntu 18.04 to specify the alternative NSS lib directories. For example:</p>
<pre><code>$ make test TEST=&quot;jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java&quot; JTREG=&quot;JAVA_OPTIONS=-Dtest.nss.lib.paths=/path/to/your/latest/NSS-libs&quot;</code></pre>
<p>For more notes about the PKCS11 tests, please refer to test/jdk/sun/security/pkcs11/README.</p>
<h3 id="client-ui-tests">Client UI Tests</h3>
<p>Some Client UI tests use key sequences which may be reserved by the operating system. Usually that causes the test to fail, so it is highly recommended to disable system key shortcuts prior to testing. The steps to access and disable system key shortcuts for various platforms are provided below.</p>
<h4 id="macos">MacOS</h4>
<p>Choose Apple menu &gt; System Preferences, click Keyboard, then click Shortcuts; select or deselect the desired shortcut.</p>
<p>For example, test/jdk/javax/swing/TooltipManager/JMenuItemToolTipKeyBindingsTest/JMenuItemToolTipKeyBindingsTest.java fails on MacOS because it uses the <code>CTRL + F1</code> key sequence to show or hide the tooltip message, but that key combination is reserved by the operating system. To run the test correctly, disable the default global key shortcut using the steps described above: deselect the "Turn keyboard access on or off" option, which is responsible for the <code>CTRL + F1</code> combination.</p>
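<p>For reference, a single UI test can be run in isolation while experimenting with the shortcut settings, using the same <code>jtreg</code> invocation style as in the PKCS11 section above (the test path below is assumed from the test name):</p>
<pre><code>$ make test TEST=&quot;jtreg:javax/swing/TooltipManager/JMenuItemToolTipKeyBindingsTest/JMenuItemToolTipKeyBindingsTest.java&quot;</code></pre>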
<h4 id="linux">Linux</h4>
<p>Open the Activities overview and start typing Settings; choose Settings, click Devices, then click Keyboard; set or override the desired shortcut.</p>
<h4 id="windows">Windows</h4>
<p>Type <code>gpedit</code> in the Search box and then click Edit group policy; navigate to User Configuration -&gt; Administrative Templates -&gt; Windows Components -&gt; File Explorer; in the right-side pane look for "Turn off Windows key hotkeys" and double-click on it; enable or disable hotkeys.</p>
<p>Note: a restart is required for the settings to take effect.</p>
</body>
</html>

View File

@ -421,6 +421,35 @@ For example:
For more notes about the PKCS11 tests, please refer to test/jdk/sun/security/pkcs11/README.
### Client UI Tests
Some Client UI tests use key sequences which may be reserved by the operating
system. Usually that causes the test to fail, so it is highly recommended to
disable system key shortcuts prior to testing. The steps to access and disable
system key shortcuts for various platforms are provided below.
#### MacOS
Choose Apple menu > System Preferences, click Keyboard, then click Shortcuts;
select or deselect the desired shortcut.
For example, test/jdk/javax/swing/TooltipManager/JMenuItemToolTipKeyBindingsTest/JMenuItemToolTipKeyBindingsTest.java fails
on MacOS because it uses the `CTRL + F1` key sequence to show or hide the tooltip message,
but that key combination is reserved by the operating system. To run the test correctly,
disable the default global key shortcut using the steps described above: deselect the
"Turn keyboard access on or off" option, which is responsible for the `CTRL + F1` combination.
#### Linux
Open the Activities overview and start typing Settings; choose Settings, click Devices,
then click Keyboard; set or override the desired shortcut.
#### Windows
Type `gpedit` in the Search box and then click Edit group policy; navigate to
User Configuration -> Administrative Templates -> Windows Components -> File Explorer;
in the right-side pane look for "Turn off Windows key hotkeys" and double-click on it;
enable or disable hotkeys.
Note: a restart is required for the settings to take effect.
---
# Override some definitions in the global css file that are not optimal for
# this document.

View File

@ -247,11 +247,29 @@ ifeq ($(TEST_JOBS), 0)
CORES_DIVIDER := 4
endif
endif
# For some big multi-core machines with a low ulimit -u setting we hit the
# maximum threads-per-process limit. In such a setup the memory/cores-only-guided
# TEST_JOBS config is insufficient. From experience, a concurrency setting of
# 14 works reasonably well for low ulimit values (<= 4096). Thus, use the
# divider 4096/14. For high ulimit -u values this shouldn't make a difference.
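# Worked example (derived from the numbers above): with `ulimit -u` = 4096 the
# awk script below computes u = 4096 / (4096/14) = 14, so TEST_JOBS is capped
# at 14; a larger limit, e.g. 8192, raises the cap to 28 and quickly stops
# being the limiting factor compared to the cores/memory guided value.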
ULIMIT_DIVIDER := (4096/14)
PROC_ULIMIT := -1
ifneq ($(OPENJDK_TARGET_OS), windows)
PROC_ULIMIT := $(shell $(ULIMIT) -u)
ifeq ($(PROC_ULIMIT), unlimited)
PROC_ULIMIT := -1
endif
endif
MEMORY_DIVIDER := 2048
TEST_JOBS := $(shell $(AWK) \
'BEGIN { \
c = $(NUM_CORES) / $(CORES_DIVIDER); \
m = $(MEMORY_SIZE) / $(MEMORY_DIVIDER); \
u = $(PROC_ULIMIT); \
if (u > -1) { \
u = u / $(ULIMIT_DIVIDER); \
if (u < c) c = u; \
} \
if (c > m) c = m; \
c = c * $(TEST_JOBS_FACTOR); \
c = c * $(TEST_JOBS_FACTOR_JDL); \

View File

@ -175,6 +175,7 @@ UNZIP := unzip
EXPR := expr
FILE := file
HG := hg
ULIMIT := ulimit
# On Solaris gnu versions of some tools are required.
ifeq ($(OPENJDK_BUILD_OS), solaris)

View File

@ -573,6 +573,26 @@ AC_DEFUN([BASIC_REQUIRE_SPECIAL],
BASIC_CHECK_NONEMPTY($1)
])
###############################################################################
# Like BASIC_REQUIRE_PROGS but also allows for bash built-ins
# $1: variable to set
# $2: executable name (or list of names) to look for
# $3: [path]
AC_DEFUN([BASIC_REQUIRE_BUILTIN_PROGS],
[
BASIC_SETUP_TOOL($1, [AC_PATH_PROGS($1, $2, , $3)])
if test "x[$]$1" = x; then
AC_MSG_NOTICE([Required tool $2 not found in PATH, checking built-in])
if help $2 > /dev/null 2>&1; then
AC_MSG_NOTICE([Found $2 as shell built-in. Using it])
$1="$2"
else
AC_MSG_ERROR([Required tool $2 also not found as built-in.])
fi
fi
BASIC_CHECK_NONEMPTY($1)
])
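# A rough sketch of why the built-in probe above works (assuming configure
# runs under bash): `help NAME` succeeds only for shell built-ins, so it
# accepts `ulimit` even though no such binary exists on PATH on many systems:
#
#   $ help ulimit > /dev/null 2>&1 && echo "built-in"
#   built-in
#   $ help ulimitx > /dev/null 2>&1 || echo "not a built-in"
#   not a built-in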
###############################################################################
# Setup the most fundamental tools, which rely on not much else being set up,
# but are used by much of the early bootstrap code.
@ -1284,6 +1304,9 @@ AC_DEFUN_ONCE([BASIC_SETUP_COMPLEX_TOOLS],
elif test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
BASIC_REQUIRE_PROGS(ELFEDIT, elfedit)
fi
if ! test "x$OPENJDK_TARGET_OS" = "xwindows"; then
BASIC_REQUIRE_BUILTIN_PROGS(ULIMIT, ulimit)
fi
])
###############################################################################

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -25,11 +25,11 @@
# All valid JVM features, regardless of platform
VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
graal vm-structs jni-check services management epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
static-build link-time-opt aot jfr"
# Deprecated JVM features (these are ignored, but with a warning)
DEPRECATED_JVM_FEATURES="trace"
DEPRECATED_JVM_FEATURES="trace cmsgc"
# All valid JVM variants
VALID_JVM_VARIANTS="server client minimal core zero custom"
@ -326,10 +326,6 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1'])
fi
if HOTSPOT_CHECK_JVM_FEATURE(cmsgc) && ! HOTSPOT_CHECK_JVM_FEATURE(serialgc); then
AC_MSG_ERROR([Specified JVM feature 'cmsgc' requires feature 'serialgc'])
fi
# Enable JFR by default, except for Zero, linux-sparcv9 and on minimal.
if ! HOTSPOT_CHECK_JVM_VARIANT(zero); then
if test "x$OPENJDK_TARGET_OS" != xaix; then
@ -351,7 +347,8 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
# Only enable ZGC on supported platforms
AC_MSG_CHECKING([if zgc can be built])
if (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64") || \
(test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64"); then
(test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64") ||
(test "x$OPENJDK_TARGET_OS" = "xmacosx" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"); then
AC_MSG_RESULT([yes])
else
DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
@ -490,7 +487,7 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
fi
# All variants but minimal (and custom) get these features
NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
# Disable CDS on AIX.
if test "x$OPENJDK_TARGET_OS" = "xaix"; then

View File

@ -767,6 +767,7 @@ JIB_HOME:=@JIB_HOME@
XCODEBUILD=@XCODEBUILD@
DTRACE := @DTRACE@
FIXPATH:=@FIXPATH@
ULIMIT:=@ULIMIT@
TAR_TYPE:=@TAR_TYPE@
TAR_CREATE_EXTRA_PARAM:=@TAR_CREATE_EXTRA_PARAM@

View File

@ -481,7 +481,7 @@ AC_DEFUN([TOOLCHAIN_EXTRACT_COMPILER_VERSION],
COMPILER_VERSION_STRING=`$ECHO $COMPILER_VERSION_OUTPUT | \
$SED -e 's/ *Copyright .*//'`
COMPILER_VERSION_NUMBER=`$ECHO $COMPILER_VERSION_OUTPUT | \
$SED -e 's/^.* \(@<:@1-9@:>@\.@<:@0-9.@:>@*\)@<:@^0-9.@:>@.*$/\1/'`
$SED -e 's/^.* \(@<:@1-9@:>@<:@0-9@:>@*\.@<:@0-9.@:>@*\)@<:@^0-9.@:>@.*$/\1/'`
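# Assumed illustration of the new pattern: the @<:@...@:>@ quadrigraphs expand
# to brackets, giving  s/^.* \([1-9][0-9]*\.[0-9.]*\)[^0-9.].*$/\1/  which,
# unlike the old expression, also accepts multi-digit major versions, e.g.
#   "gcc (GCC) 10.2.1 20200825"  ->  "10.2.1"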
elif test "x$TOOLCHAIN_TYPE" = xclang; then
# clang --version output typically looks like
# Apple LLVM version 5.0 (clang-500.2.79) (based on LLVM 3.3svn)

View File

@ -0,0 +1,40 @@
Owner: CN=LuxTrust Global Root 2, O=LuxTrust S.A., C=LU
Issuer: CN=LuxTrust Global Root 2, O=LuxTrust S.A., C=LU
Serial number: a7ea6df4b449eda6a24859ee6b815d3167fbbb1
Valid from: Thu Mar 05 13:21:57 GMT 2015 until: Mon Mar 05 13:21:57 GMT 2035
Signature algorithm name: SHA256withRSA
Subject Public Key Algorithm: 4096-bit RSA key
Version: 3
-----BEGIN CERTIFICATE-----
MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL
BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV
BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw
MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B
LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN
AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F
ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem
hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1
EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn
Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4
zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ
96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m
j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g
DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+
8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j
X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH
hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB
KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0
Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT
+Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL
BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9
BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO
jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9
loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c
qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+
2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/
JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre
zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf
LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+
x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6
oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr
-----END CERTIFICATE-----

View File

@ -57,7 +57,7 @@ JVM_LIBS += \
JVM_EXCLUDE_FILES += args.cc
JVM_EXCLUDES += adlc
# Needed by vm_version.cpp
# Needed by abstract_vm_version.cpp
ifeq ($(call isTargetCpu, x86_64), true)
OPENJDK_TARGET_CPU_VM_VERSION := amd64
else ifeq ($(call isTargetCpu, sparcv9), true)
@ -183,7 +183,7 @@ $(eval $(call SetupNativeCompilation, BUILD_LIBJVM, \
EXCLUDE_PATTERNS := $(JVM_EXCLUDE_PATTERNS), \
EXTRA_OBJECT_FILES := $(DTRACE_EXTRA_OBJECT_FILES), \
CFLAGS := $(JVM_CFLAGS), \
vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
abstract_vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
DISABLED_WARNINGS_gcc := $(DISABLED_WARNINGS_gcc), \
DISABLED_WARNINGS_clang := $(DISABLED_WARNINGS_clang), \
@ -206,11 +206,11 @@ $(eval $(call SetupNativeCompilation, BUILD_LIBJVM, \
DEFINE_THIS_FILE := false, \
))
# Always recompile vm_version.cpp if libjvm needs to be relinked. This ensures
# Always recompile abstract_vm_version.cpp if libjvm needs to be relinked. This ensures
# that the internal vm version is updated as it relies on __DATE__ and __TIME__
# macros.
VM_VERSION_OBJ := $(JVM_OUTPUTDIR)/objs/vm_version$(OBJ_SUFFIX)
$(VM_VERSION_OBJ): $(filter-out $(VM_VERSION_OBJ) $(JVM_MAPFILE), \
ABSTRACT_VM_VERSION_OBJ := $(JVM_OUTPUTDIR)/objs/abstract_vm_version$(OBJ_SUFFIX)
$(ABSTRACT_VM_VERSION_OBJ): $(filter-out $(ABSTRACT_VM_VERSION_OBJ) $(JVM_MAPFILE), \
$(BUILD_LIBJVM_TARGET_DEPS))
ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)

View File

@ -79,12 +79,6 @@ ifeq ($(call check-jvm-feature, dtrace), true)
vmThread.o \
)
ifeq ($(call check-jvm-feature, cmsgc), true)
DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
cmsVMOperations.o \
)
endif
ifeq ($(call check-jvm-feature, parallelgc), true)
DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
psVMOperations.o \

View File

@ -138,11 +138,6 @@ ifneq ($(call check-jvm-feature, aot), true)
aotLoader.cpp compiledIC_aot.cpp
endif
ifneq ($(call check-jvm-feature, cmsgc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_CMSGC=0
JVM_EXCLUDE_PATTERNS += gc/cms
endif
ifneq ($(call check-jvm-feature, g1gc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_G1GC=0
JVM_EXCLUDE_PATTERNS += gc/g1

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,6 @@ typedef enum GEN_variant {
*/
#pragma weak tty
#pragma weak CMSExpAvgFactor
#if defined(i386) || defined(__i386) || defined(__amd64)
#pragma weak noreg

View File

@ -391,26 +391,23 @@ class Bundle {
}
private void handleMultipleInheritance(Map<String, Object> map, Map<String, Object> parents, String key) {
String formatKey = key + "/format";
Object format = map.get(formatKey);
String formatMapKey = key + "/format";
Object format = map.get(formatMapKey);
if (format != null) {
map.remove(formatKey);
map.remove(formatMapKey);
map.put(key, format);
if (fillInElements(parents, formatKey, format)) {
if (fillInElements(parents, formatMapKey, format)) {
map.remove(key);
}
}
String standaloneKey = key + "/stand-alone";
Object standalone = map.get(standaloneKey);
String standaloneMapKey = key + "/stand-alone";
Object standalone = map.get(standaloneMapKey);
if (standalone != null) {
map.remove(standaloneKey);
String realKey = key;
if (format != null) {
realKey = "standalone." + key;
}
map.put(realKey, standalone);
if (fillInElements(parents, standaloneKey, standalone)) {
map.remove(realKey);
map.remove(standaloneMapKey);
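// Always store the stand-alone values under the "standalone."-prefixed
// resource key (previously the bare key was reused when no format value
// was present).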
String standaloneResourceKey = "standalone." + key;
map.put(standaloneResourceKey, standalone);
if (fillInElements(parents, standaloneMapKey, standalone)) {
map.remove(standaloneResourceKey);
}
}
}

View File

@ -383,7 +383,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBLCMS, \
libawt/java2d, \
HEADERS_FROM_SRC := $(LIBLCMS_HEADERS_FROM_SRC), \
DISABLED_WARNINGS_gcc := format-nonliteral type-limits \
misleading-indentation undef unused-function, \
misleading-indentation undef unused-function stringop-truncation, \
DISABLED_WARNINGS_clang := tautological-compare format-nonliteral undef, \
DISABLED_WARNINGS_solstudio := E_STATEMENT_NOT_REACHED, \
DISABLED_WARNINGS_microsoft := 4819, \

View File

@ -55,7 +55,7 @@ endif
SA_TOOLCHAIN := $(TOOLCHAIN_DEFAULT)
ifeq ($(call isTargetOs, linux), true)
SA_TOOLCHAIN := TOOLCHAIN_BUILD_LINK_CXX
SA_TOOLCHAIN := TOOLCHAIN_LINK_CXX
endif
################################################################################

View File

@ -1192,9 +1192,6 @@ class HandlerImpl {
// predicate controlling translation of CompareAndSwapX
bool needs_acquiring_load_exclusive(const Node *load);
// predicate controlling translation of StoreCM
bool unnecessary_storestore(const Node *storecm);
// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
%}
@ -1583,29 +1580,6 @@ bool needs_acquiring_load_exclusive(const Node *n)
return true;
}
// predicate controlling translation of StoreCM
//
// returns true if a StoreStore must precede the card write otherwise
// false
bool unnecessary_storestore(const Node *storecm)
{
assert(storecm->Opcode() == Op_StoreCM, "expecting a StoreCM");
// we need to generate a dmb ishst between an object put and the
// associated card mark when we are using CMS without conditional
// card marking
if (UseConcMarkSweepGC && !UseCondCardMark) {
return false;
}
// a storestore is unnecessary in all other cases
return true;
}
#define __ _masm.
// advance declarations for helper functions to convert register
@ -7220,7 +7194,6 @@ instruct loadConD(vRegD dst, immD con) %{
instruct storeimmCM0(immI0 zero, memory mem)
%{
match(Set mem (StoreCM mem zero));
predicate(unnecessary_storestore(n));
ins_cost(INSN_COST);
format %{ "storestore (elided)\n\t"

View File

@ -1978,6 +1978,9 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
case T_ADDRESS:
imm = opr2->as_constant_ptr()->as_jint();
break;
case T_METADATA:
imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
break;
case T_OBJECT:
case T_ARRAY:
jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);

View File

@ -840,6 +840,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ sub(arr_size, arr_size, t1); // body length
__ add(t1, t1, obj); // body start
__ initialize_body(t1, arr_size, 0, t2);
__ membar(Assembler::StoreStore);
__ verify_oop(obj);
__ ret(lr);

View File

@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
@ -253,32 +254,16 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembl
dst = rscratch1;
}
RegSet to_save_r1 = RegSet::of(r1);
// If outgoing register is r1, we can clobber it
if (result_dst != r1) {
__ push(to_save_r1, sp);
}
// Save r0 and r1, unless it is an output register
RegSet to_save = RegSet::of(r0, r1) - result_dst;
__ push(to_save, sp);
__ lea(r1, load_addr);
RegSet to_save_r0 = RegSet::of(r0);
if (dst != r0) {
__ push(to_save_r0, sp);
__ mov(r0, dst);
}
__ mov(r0, dst);
__ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));
if (result_dst != r0) {
__ mov(result_dst, r0);
}
if (dst != r0) {
__ pop(to_save_r0, sp);
}
if (result_dst != r1) {
__ pop(to_save_r1, sp);
}
__ mov(result_dst, r0);
__ pop(to_save, sp);
__ bind(done);
__ leave();
@ -346,29 +331,42 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
}
}
//
// Arguments:
//
// Inputs:
// src: oop location to load from, might be clobbered
//
// Output:
// dst: oop loaded from src location
//
// Kill:
// rscratch1 (scratch reg)
//
// Alias:
// dst: rscratch1 (might use rscratch1 as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread) {
bool on_oop = is_reference_type(type);
bool not_in_heap = (decorators & IN_NATIVE) != 0;
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool on_reference = on_weak || on_phantom;
bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
// 1: non-reference load, no additional barrier is needed
if (!is_reference_type(type)) {
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
return;
}
Register result_dst = dst;
// 2: load a reference from src location and apply LRB if needed
if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
Register result_dst = dst;
if (on_oop) {
// We want to preserve src
// Preserve src location for LRB
if (dst == src.base() || dst == src.index()) {
dst = rscratch1;
}
assert_different_registers(dst, src.base(), src.index());
}
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
if (on_oop) {
if (not_in_heap && !is_traversal_mode) {
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
if (ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type)) {
load_reference_barrier_native(masm, dst, src);
} else {
load_reference_barrier(masm, dst, src);
@ -378,18 +376,21 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
__ mov(result_dst, dst);
dst = result_dst;
}
} else {
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
}
if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
__ enter();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
__ leave();
}
// 3: apply keep-alive barrier if needed
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ enter();
satb_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
rthread /* thread */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
__ leave();
}
}

View File

@ -64,9 +64,6 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, PreserveFramePointer, false);
// GC Ergo Flags
define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);

View File

@ -287,8 +287,6 @@ void NativeMovConstReg::print() {
//-------------------------------------------------------------------
address NativeMovRegMem::instruction_address() const { return addr_at(instruction_offset); }
int NativeMovRegMem::offset() const {
address pc = instruction_address();
unsigned insn = *(unsigned*)pc;

View File

@ -381,11 +381,11 @@ class NativeMovRegMem: public NativeInstruction {
public:
// helper
int instruction_start() const;
int instruction_start() const { return instruction_offset; }
address instruction_address() const;
address instruction_address() const { return addr_at(instruction_offset); }
address next_instruction_address() const;
int num_bytes_to_end_of_patch() const { return instruction_offset + instruction_size; }
int offset() const;

View File

@ -285,7 +285,6 @@ void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpr
}
break;
case Interpreter::java_lang_math_pow :
fpargs = 2;
if (StubRoutines::dpow() == NULL) {
fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
} else {

View File

@ -30,8 +30,8 @@
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
#include "vm_version_aarch64.hpp"
#include OS_HEADER_INLINE(os)

View File

@ -26,8 +26,8 @@
#ifndef CPU_AARCH64_VM_VERSION_AARCH64_HPP
#define CPU_AARCH64_VM_VERSION_AARCH64_HPP
#include "runtime/abstract_vm_version.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/sizes.hpp"
class VM_Version : public Abstract_VM_Version {

View File

@ -25,8 +25,8 @@
#ifndef CPU_AARCH64_VM_VERSION_EXT_AARCH64_HPP
#define CPU_AARCH64_VM_VERSION_EXT_AARCH64_HPP
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
#include "vm_version_aarch64.hpp"
class VM_Version_Ext : public VM_Version {
private:

View File

@ -1817,6 +1817,11 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
assert(opr2->as_constant_ptr()->as_jobject() == NULL, "cannot handle otherwise");
__ cmp(opr1->as_register(), 0);
break;
case T_METADATA:
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "Only equality tests");
assert(opr2->as_constant_ptr()->as_metadata() == NULL, "cannot handle otherwise");
__ cmp(opr1->as_register(), 0);
break;
default:
ShouldNotReachHere();
}

View File

@ -1310,9 +1310,16 @@ void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
CodeEmitInfo* info) {
if (value->is_double_cpu()) {
assert(address->index()->is_illegal(), "should have a constant displacement");
LIR_Opr tmp = new_pointer_register();
add_large_constant(address->base(), address->disp(), tmp);
__ volatile_store_mem_reg(value, new LIR_Address(tmp, (intx)0, address->type()), info);
LIR_Address* store_addr = NULL;
if (address->disp() != 0) {
LIR_Opr tmp = new_pointer_register();
add_large_constant(address->base(), address->disp(), tmp);
store_addr = new LIR_Address(tmp, (intx)0, address->type());
} else {
// address->disp() can be 0, if the address is referenced using the unsafe intrinsic
store_addr = address;
}
__ volatile_store_mem_reg(value, store_addr, info);
return;
}
__ store(value, address, info, lir_patch_none);
@ -1322,9 +1329,16 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) {
if (result->is_double_cpu()) {
assert(address->index()->is_illegal(), "should have a constant displacement");
LIR_Opr tmp = new_pointer_register();
add_large_constant(address->base(), address->disp(), tmp);
__ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, address->type()), result, info);
LIR_Address* load_addr = NULL;
if (address->disp() != 0) {
LIR_Opr tmp = new_pointer_register();
add_large_constant(address->base(), address->disp(), tmp);
load_addr = new LIR_Address(tmp, (intx)0, address->type());
} else {
// address->disp() can be 0, if the address is referenced using the unsafe intrinsic
load_addr = address;
}
__ volatile_load_mem_reg(load_addr, result, info);
return;
}
__ load(address, result, info, lir_patch_none);

View File

@ -63,9 +63,6 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, PreserveFramePointer, false);
// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
define_pd_global(uintx, TypeProfileLevel, 0);
// No performance work done here yet.

View File

@ -349,6 +349,11 @@ NativeCall* rawNativeCall_before(address return_address);
// (field access patching is handled differently in that case)
class NativeMovRegMem: public NativeInstruction {
public:
enum arm_specific_constants {
instruction_size = 8
};
int num_bytes_to_end_of_patch() const { return instruction_size; }
int offset() const;
void set_offset(int x);

View File

@ -26,7 +26,7 @@
#define CPU_ARM_REGISTER_ARM_HPP
#include "asm/register.hpp"
#include "vm_version_arm.hpp"
#include "runtime/vm_version.hpp"
class VMRegImpl;
typedef VMRegImpl* VMReg;

View File

@ -25,8 +25,8 @@
#ifndef CPU_ARM_VM_VERSION_ARM_HPP
#define CPU_ARM_VM_VERSION_ARM_HPP
#include "runtime/abstract_vm_version.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
class VM_Version: public Abstract_VM_Version {
friend class JVMCIVMStructs;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_arm.hpp"
#include "runtime/vm_version.hpp"
int VM_Version::_stored_pc_adjustment = 4;
int VM_Version::_arm_arch = 5;

View File

@ -25,8 +25,8 @@
#ifndef CPU_ARM_VM_VERSION_EXT_ARM_HPP
#define CPU_ARM_VM_VERSION_EXT_ARM_HPP
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
#include "vm_version_arm.hpp"
class VM_Version_Ext : public VM_Version {
private:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,7 @@
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_arm.inline.hpp"
#ifdef COMPILER2

View File

@ -322,7 +322,7 @@ inline void compare_with_patch_site(address template_start, address pc_start, in
void PatchingStub::emit_code(LIR_Assembler* ce) {
// copy original code here
assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
"not enough room for call");
"not enough room for call, need %d", _bytes_to_copy);
assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");
Label call_patch;
@ -340,7 +340,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
__ load_const(_obj, addrlit, R0);
DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
} else {
// Make a copy the code which is going to be patched.
// Make a copy of the code which is going to be patched.
for (int i = 0; i < _bytes_to_copy; i++) {
address ptr = (address)(_pc_start + i);
int a_byte = (*ptr) & 0xFF;

View File

@ -743,10 +743,11 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
if (UseCompressedOops && !wide) {
// Encoding done in caller
__ stw(from_reg->as_register(), offset, base);
__ verify_coop(from_reg->as_register(), FILE_AND_LINE);
} else {
__ std(from_reg->as_register(), offset, base);
__ verify_oop(from_reg->as_register(), FILE_AND_LINE);
}
__ verify_oop(from_reg->as_register());
break;
}
case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
@ -783,10 +784,11 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
if (UseCompressedOops && !wide) {
// Encoding done in caller.
__ stwx(from_reg->as_register(), base, disp);
__ verify_coop(from_reg->as_register(), FILE_AND_LINE); // kills R0
} else {
__ stdx(from_reg->as_register(), base, disp);
__ verify_oop(from_reg->as_register(), FILE_AND_LINE); // kills R0
}
__ verify_oop(from_reg->as_register()); // kills R0
break;
}
case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
@ -831,7 +833,7 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
} else {
__ ld(to_reg->as_register(), offset, base);
}
__ verify_oop(to_reg->as_register());
__ verify_oop(to_reg->as_register(), FILE_AND_LINE);
break;
}
case T_FLOAT: __ lfs(to_reg->as_float_reg(), offset, base); break;
@ -862,7 +864,7 @@ int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType
} else {
__ ldx(to_reg->as_register(), base, disp);
}
__ verify_oop(to_reg->as_register());
__ verify_oop(to_reg->as_register(), FILE_AND_LINE);
break;
}
case T_FLOAT: __ lfsx(to_reg->as_float_reg() , base, disp); break;
@ -1141,7 +1143,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
}
if (addr->base()->type() == T_OBJECT) {
__ verify_oop(src);
__ verify_oop(src, FILE_AND_LINE);
}
PatchingStub* patch = NULL;
@ -1238,7 +1240,7 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
ShouldNotReachHere();
}
if (is_reference_type(to_reg->type())) {
__ verify_oop(to_reg->as_register());
__ verify_oop(to_reg->as_register(), FILE_AND_LINE);
}
}
@ -1265,7 +1267,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
}
if (addr->base()->is_oop_register()) {
__ verify_oop(src);
__ verify_oop(src, FILE_AND_LINE);
}
PatchingStub* patch = NULL;
@ -1467,6 +1469,19 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
}
break;
case T_METADATA:
// We only need, for now, comparison with NULL for metadata.
{
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
Metadata* p = opr2->as_constant_ptr()->as_metadata();
if (p == NULL) {
__ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
} else {
ShouldNotReachHere();
}
}
break;
default:
ShouldNotReachHere();
break;
@ -2308,7 +2323,7 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
*op->stub()->entry());
__ bind(*op->stub()->continuation());
__ verify_oop(op->obj()->as_register());
__ verify_oop(op->obj()->as_register(), FILE_AND_LINE);
}
@ -2533,7 +2548,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
Register Rtmp1 = op->tmp3()->as_register();
bool should_profile = op->should_profile();
__ verify_oop(value);
__ verify_oop(value, FILE_AND_LINE);
CodeStub* stub = op->stub();
// Check if it needs to be profiled.
ciMethodData* md = NULL;
@ -3086,7 +3101,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
assert(do_null || do_update, "why are we here?");
assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
__ verify_oop(obj);
__ verify_oop(obj, FILE_AND_LINE);
if (do_null) {
if (!TypeEntries::was_null_seen(current_klass)) {

View File

@ -44,7 +44,7 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
const Register temp_reg = R12_scratch2;
Label Lmiss;
verify_oop(receiver);
verify_oop(receiver, FILE_AND_LINE);
MacroAssembler::null_check(receiver, oopDesc::klass_offset_in_bytes(), &Lmiss);
load_klass(temp_reg, receiver);
@ -100,7 +100,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
// Load object header.
ld(Rmark, oopDesc::mark_offset_in_bytes(), Roop);
verify_oop(Roop);
verify_oop(Roop, FILE_AND_LINE);
// Save object being locked into the BasicObjectLock...
std(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
@ -157,7 +157,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
if (UseBiasedLocking) {
// Load the object out of the BasicObjectLock.
ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
verify_oop(Roop);
verify_oop(Roop, FILE_AND_LINE);
biased_locking_exit(CCR0, Roop, R0, done);
}
// Test first it it is a fast recursive unlock.
@ -167,7 +167,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
if (!UseBiasedLocking) {
// Load object.
ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
verify_oop(Roop);
verify_oop(Roop, FILE_AND_LINE);
}
// Check if it is still a lightweight lock; this is true if we see
@ -316,7 +316,7 @@ void C1_MacroAssembler::initialize_object(
// relocInfo::runtime_call_type);
}
verify_oop(obj);
verify_oop(obj, FILE_AND_LINE);
}
@ -383,7 +383,7 @@ void C1_MacroAssembler::allocate_array(
// relocInfo::runtime_call_type);
}
verify_oop(obj);
verify_oop(obj, FILE_AND_LINE);
}
@ -399,8 +399,7 @@ void C1_MacroAssembler::verify_not_null_oop(Register r) {
bne(CCR0, not_null);
stop("non-null oop required");
bind(not_null);
if (!VerifyOops) return;
verify_oop(r);
verify_oop(r, FILE_AND_LINE);
}
#endif // PRODUCT

View File

@ -335,12 +335,12 @@ void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value
__ ld(value, 0, tmp1); // Resolve (untagged) jobject.
__ beq(CCR0, not_weak); // Test for jweak tag.
__ verify_oop(value);
__ verify_oop(value, FILE_AND_LINE);
g1_write_barrier_pre(masm, IN_NATIVE | ON_PHANTOM_OOP_REF,
noreg, noreg, value,
tmp1, tmp2, needs_frame);
__ bind(not_weak);
__ verify_oop(value);
__ verify_oop(value, FILE_AND_LINE);
__ bind(done);
}

View File

@ -113,7 +113,7 @@ void BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value,
__ clrrdi(tmp1, value, JNIHandles::weak_tag_size);
__ ld(value, 0, tmp1); // Resolve (untagged) jobject.
__ verify_oop(value);
__ verify_oop(value, FILE_AND_LINE);
__ bind(done);
}

View File

@ -30,6 +30,10 @@
#error "CC_INTERP is no longer supported. Removed in change 8145117."
#endif
#ifndef FILE_AND_LINE
#define FILE_AND_LINE __FILE__ ":" XSTR(__LINE__)
#endif
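// Expansion sketch (assuming the usual two-step XSTR/STR stringification
// helpers): on line 42 of foo.cpp the macro yields the adjacent literals
// "foo.cpp" ":" "42", i.e. the single compile-time string "foo.cpp:42",
// so callers like verify_oop() get the call site without runtime formatting.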
// Size of PPC Instructions
const int BytesPerInstWord = 4;

View File

@ -67,9 +67,6 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, PreserveFramePointer, false);
// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread.
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);

View File

@ -2313,7 +2313,7 @@ void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters,
}
void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
if (state == atos) { MacroAssembler::verify_oop(reg); }
if (state == atos) { MacroAssembler::verify_oop(reg, FILE_AND_LINE); }
}
// Local helper function for the verify_oop_or_return_address macro.

View File

@ -3120,7 +3120,7 @@ void MacroAssembler::get_vm_result(Register oop_result) {
li(R0, 0);
std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
verify_oop(oop_result);
verify_oop(oop_result, FILE_AND_LINE);
}
void MacroAssembler::get_vm_result_2(Register metadata_result) {
@ -4917,6 +4917,13 @@ void MacroAssembler::verify_thread() {
}
}
void MacroAssembler::verify_coop(Register coop, const char* msg) {
if (!VerifyOops) { return; }
if (UseCompressedOops) { decode_heap_oop(coop); }
verify_oop(coop, msg);
if (UseCompressedOops) { encode_heap_oop(coop, coop); }
}
// READ: oop. KILL: R0. Volatile floats perhaps.
void MacroAssembler::verify_oop(Register oop, const char* msg) {
if (!VerifyOops) {
@ -4926,6 +4933,9 @@ void MacroAssembler::verify_oop(Register oop, const char* msg) {
address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
const Register tmp = R11; // Will be preserved.
const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
BLOCK_COMMENT("verify_oop {");
save_volatile_gprs(R1_SP, -nbytes_save); // except R0
mr_if_needed(R4_ARG2, oop);
@ -4942,6 +4952,8 @@ void MacroAssembler::verify_oop(Register oop, const char* msg) {
pop_frame();
restore_LR_CR(tmp);
restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
BLOCK_COMMENT("} verify_oop");
}
void MacroAssembler::verify_oop_addr(RegisterOrConstant offs, Register base, const char* msg) {

View File

@ -914,6 +914,9 @@ class MacroAssembler: public Assembler {
// Verify R16_thread contents.
void verify_thread();
// Calls verify_oop. If UseCompressedOops is on, decodes the oop.
// Preserves reg.
void verify_coop(Register reg, const char*);
// Emit code to verify that reg contains a valid oop if +VerifyOops is set.
void verify_oop(Register reg, const char* s = "broken oop");
void verify_oop_addr(RegisterOrConstant offs, Register base, const char* s = "contains broken oop");

View File

@ -77,7 +77,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Klass* klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj_reg);
__ verify_oop(obj_reg, FILE_AND_LINE);
__ cmpdi(CCR0, obj_reg, 0);
__ beq(CCR0, L_bad);
__ load_klass(temp_reg, obj_reg);
@ -172,16 +172,16 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
assert(method_temp == R19_method, "required register for loading method");
// Load the invoker, as MH -> MH.form -> LF.vmentry
__ verify_oop(recv);
__ verify_oop(recv, FILE_AND_LINE);
__ load_heap_oop(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv,
temp2, noreg, false, IS_NOT_NULL);
__ verify_oop(method_temp);
__ verify_oop(method_temp, FILE_AND_LINE);
__ load_heap_oop(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp,
temp2, noreg, false, IS_NOT_NULL);
__ verify_oop(method_temp);
__ verify_oop(method_temp, FILE_AND_LINE);
__ load_heap_oop(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), method_temp,
temp2, noreg, false, IS_NOT_NULL);
__ verify_oop(method_temp);
__ verify_oop(method_temp, FILE_AND_LINE);
__ ld(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), method_temp);
if (VerifyMethodHandles && !for_compiler_entry) {
@ -318,7 +318,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
__ verify_oop(receiver_reg);
__ verify_oop(receiver_reg, FILE_AND_LINE);
if (iid == vmIntrinsics::_linkToSpecial) {
// Don't actually load the klass; just null-check the receiver.
__ null_check_throw(receiver_reg, -1, temp1,

View File

@ -462,6 +462,8 @@ class NativeMovRegMem: public NativeInstruction {
address instruction_address() const { return addr_at(0); }
int num_bytes_to_end_of_patch() const { return instruction_size; }
intptr_t offset() const {
#ifdef VM_LITTLE_ENDIAN
short *hi_ptr = (short*)(addr_at(0));

View File

@ -6928,25 +6928,6 @@ instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{
ins_pipe(pipe_class_memory);
%}
// Card-mark for CMS garbage collection.
// This cardmark does an optimization so that it must not always
// do a releasing store. For this, it needs the constant address of
// CMSCollectorCardTableBarrierSetBSExt::_requires_release.
// This constant address is split off here by expand so we can use
// adlc / matcher functionality to load it from the constant section.
instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{
match(Set mem (StoreCM mem zero));
predicate(UseConcMarkSweepGC);
expand %{
immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableBarrierSetBSExt::requires_release_address() */ %}
iRegLdst releaseFieldAddress;
flagsReg crx;
loadConL_Ex(releaseFieldAddress, baseImm);
storeCM_CMS(mem, releaseFieldAddress, crx);
%}
%}
instruct storeCM_G1(memory mem, immI_0 zero) %{
match(Set mem (StoreCM mem zero));
predicate(UseG1GC);

View File

@ -1742,9 +1742,9 @@ static void verify_oop_args(MacroAssembler* masm,
assert(r->is_valid(), "bad oop arg");
if (r->is_stack()) {
__ ld(temp_reg, reg2offset(r), R1_SP);
__ verify_oop(temp_reg);
__ verify_oop(temp_reg, FILE_AND_LINE);
} else {
__ verify_oop(r->as_Register());
__ verify_oop(r->as_Register(), FILE_AND_LINE);
}
}
}
@ -2107,7 +2107,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ cmpdi(CCR0, R3_ARG1, 0);
__ beq(CCR0, ic_miss);
__ verify_oop(R3_ARG1);
__ verify_oop(R3_ARG1, FILE_AND_LINE);
__ load_klass(receiver_klass, R3_ARG1);
__ cmpd(CCR0, receiver_klass, ic);

View File

@ -440,7 +440,6 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "forward_exception");
address start = __ pc();
#if !defined(PRODUCT)
if (VerifyOops) {
// Get pending exception oop.
__ ld(R3_ARG1,
@ -456,7 +455,6 @@ class StubGenerator: public StubCodeGenerator {
}
__ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
}
#endif
// Save LR/CR and copy exception pc (LR) into R4_ARG2.
__ save_LR_CR(R4_ARG2);
@ -702,9 +700,9 @@ class StubGenerator: public StubCodeGenerator {
#if !defined(PRODUCT)
// Wrapper which calls oopDesc::is_oop_or_null()
// Only called by MacroAssembler::verify_oop
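// (Assumption: the raw oopDesc* parameter matches what the hand-written stub
// passes in a register; the 'oop' typedef can be a wrapper class in debug
// builds with CHECK_UNHANDLED_OOPS, which would not be ABI-compatible here.)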
static void verify_oop_helper(const char* message, oop o) {
static void verify_oop_helper(const char* message, oopDesc* o) {
if (!oopDesc::is_oop_or_null(o)) {
fatal("%s", message);
fatal("%s. oop: " PTR_FORMAT, message, p2i(o));
}
++ StubRoutines::_verify_oop_count;
}
@ -725,7 +723,6 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// -XX:+OptimizeFill : convert fill/copy loops into intrinsic
//
// The code is implemented (ported from sparc) as we believe it benefits JVM98, however

View File

@ -25,8 +25,8 @@
#ifndef CPU_PPC_VM_VERSION_EXT_PPC_HPP
#define CPU_PPC_VM_VERSION_EXT_PPC_HPP
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
#include "vm_version_ppc.hpp"
#define CPU_INFO "cpu_info"
#define CPU_TYPE "fpu_type"

View File

@ -32,10 +32,10 @@
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"
#include <sys/sysinfo.h>
#if defined(_AIX)

View File

@ -26,8 +26,8 @@
#ifndef CPU_PPC_VM_VERSION_PPC_HPP
#define CPU_PPC_VM_VERSION_PPC_HPP
#include "runtime/abstract_vm_version.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
class VM_Version: public Abstract_VM_Version {
protected:

View File

@ -1322,6 +1322,15 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
} else {
__ z_cfi(reg1, c->as_jint());
}
} else if (c->type() == T_METADATA) {
// We only need, for now, comparison with NULL for metadata.
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
Metadata* m = c->as_metadata();
if (m == NULL) {
__ z_cghi(reg1, 0);
} else {
ShouldNotReachHere();
}
} else if (is_reference_type(c->type())) {
// In 64bit oops are single register.
jobject o = c->as_jobject();

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2019, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,6 +75,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
return stub;
#else
ShouldNotReachHere();
return NULL;
#endif
}

View File

@ -28,8 +28,6 @@
#include "code/codeCache.hpp"
#include "compiler/disassembler.hpp"
#include "depChecker_s390.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/genOopClosures.inline.hpp"

View File

@ -69,9 +69,6 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, PreserveFramePointer, false);
// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread.
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);

View File

@ -37,9 +37,11 @@
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#ifdef COMPILER2
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/matcher.hpp"
#endif
#include "prims/methodHandles.hpp"
#include "registerSaver_s390.hpp"
#include "runtime/biasedLocking.hpp"
@ -2925,7 +2927,7 @@ unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, in
}
void MacroAssembler::nmethod_UEP(Label& ic_miss) {
Register ic_reg = as_Register(Matcher::inline_cache_reg_encode());
Register ic_reg = Z_inline_cache;
int klass_offset = oopDesc::klass_offset_in_bytes();
if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
if (VM_Version::has_CompareBranch()) {
@ -4590,6 +4592,7 @@ unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Reg
return block_end - block_start;
}
#ifdef COMPILER2
//------------------------------------------------------
// Special String Intrinsics. Implementation
//------------------------------------------------------
@ -5837,7 +5840,7 @@ unsigned int MacroAssembler::string_indexof_char(Register result, Register hayst
return offset() - block_start;
}
#endif
//-------------------------------------------------
// Constants (scalar and oop) in constant pool
@ -6150,96 +6153,6 @@ void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}
void MacroAssembler::generate_type_profiling(const Register Rdata,
const Register Rreceiver_klass,
const Register Rwanted_receiver_klass,
const Register Rmatching_row,
bool is_virtual_call) {
const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) -
in_bytes(ReceiverTypeData::receiver_offset(0));
const int num_rows = ReceiverTypeData::row_limit();
NearLabel found_free_row;
NearLabel do_increment;
NearLabel found_no_slot;
BLOCK_COMMENT("type profiling {");
// search for:
// a) The type given in Rwanted_receiver_klass.
// b) The *first* empty row.
// First search for a) only, just running over b) with no regard.
// This is possible because
// wanted_receiver_class == receiver_class && wanted_receiver_class == 0
// is never true (receiver_class can't be zero).
for (int row_num = 0; row_num < num_rows; row_num++) {
// Row_offset should be a well-behaved positive number. The generated code relies
// on that wrt constant code size. Add2reg can handle all row_offset values, but
// will have to vary generated code size.
int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code");
// Is Rwanted_receiver_klass in this row?
if (VM_Version::has_CompareBranch()) {
z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
// Rmatching_row = Rdata + row_offset;
add2reg(Rmatching_row, row_offset, Rdata);
// if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot;
compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment);
} else {
add2reg(Rmatching_row, row_offset, Rdata);
z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata);
z_bre(do_increment);
}
}
// Now that we did not find a match, let's search for b).
// We could save the first calculation of Rmatching_row if we would search for a) in reverse order.
// We would then end up here with Rmatching_row containing the value for row_num == 0.
// We would not see much benefit, if any at all, because the CPU can schedule
// two instructions together with a branch anyway.
for (int row_num = 0; row_num < num_rows; row_num++) {
int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
// Has this row a zero receiver_klass, i.e. is it empty?
if (VM_Version::has_CompareBranch()) {
z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
// Rmatching_row = Rdata + row_offset
add2reg(Rmatching_row, row_offset, Rdata);
// if (*row_recv == (intptr_t) 0) goto found_free_row
compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row);
} else {
add2reg(Rmatching_row, row_offset, Rdata);
load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset));
z_bre(found_free_row); // zero -> Found a free row.
}
}
// No match, no empty row found.
// Increment total counter to indicate polymorphic case.
if (is_virtual_call) {
add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row);
}
z_bru(found_no_slot);
// Here we found an empty row, but we have not found Rwanted_receiver_klass.
// Rmatching_row holds the address to the first empty row.
bind(found_free_row);
// Store receiver_klass into empty slot.
z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row);
// Increment the counter of Rmatching_row.
bind(do_increment);
ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0);
add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata);
bind(found_no_slot);
BLOCK_COMMENT("} type profiling");
}
//---------------------------------------
// Helpers for Intrinsic Emitters
//---------------------------------------

View File

@ -850,6 +850,7 @@ class MacroAssembler: public Assembler {
// Kills: tmp, Z_R0, Z_R1.
// Early clobber: result.
// Boolean precise controls accuracy of result value.
#ifdef COMPILER2
unsigned int string_compress(Register result, Register src, Register dst, Register cnt,
Register tmp, bool precise);
@ -885,6 +886,7 @@ class MacroAssembler: public Assembler {
unsigned int string_indexof_char(Register result, Register haystack, Register haycnt,
Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte);
#endif
// Emit an oop const to the constant pool and set a relocation info
// with address current_pc. Return the TOC offset of the constant.
@ -918,13 +920,6 @@ class MacroAssembler: public Assembler {
// Offset is +/- 2**32 -> use long.
static long get_load_const_from_toc_offset(address a);
void generate_type_profiling(const Register Rdata,
const Register Rreceiver_klass,
const Register Rwanted_receiver_klass,
const Register Rmatching_row,
bool is_virtual_call);
// Bit operations for single register operands.
inline void lshift(Register r, int places, bool doubl = true); // <<
inline void rshift(Register r, int places, bool doubl = true); // >>

View File

@ -535,6 +535,12 @@ class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at (address address);
class NativeMovRegMem: public NativeInstruction {
public:
enum z_specific_constants {
instruction_size = 12 // load_const used with access_field_id
};
int num_bytes_to_end_of_patch() const { return instruction_size; }
intptr_t offset() const {
return nativeMovConstReg_at(addr_at(0))->data();
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#define CPU_S390_REGISTER_S390_HPP
#include "asm/register.hpp"
#include "vm_version_s390.hpp"
#include "runtime/vm_version.hpp"
class Address;
class VMRegImpl;

View File

@ -32,6 +32,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "registerSaver_s390.hpp"
@ -1521,7 +1522,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
VMRegPair *in_regs,
BasicType ret_type,
address critical_entry) {
#ifdef COMPILER2
int total_in_args = method->size_of_parameters();
if (method->is_method_handle_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
@ -2401,10 +2401,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
}
return nm;
#else
ShouldNotReachHere();
return NULL;
#endif // COMPILER2
}
static address gen_c2i_adapter(MacroAssembler *masm,
@ -2880,7 +2876,7 @@ void SharedRuntime::generate_deopt_blob() {
// to Deoptimization::fetch_unroll_info below.
// The (int) cast is necessary, because -((unsigned int)14)
// is an unsigned int.
__ add2reg(Z_R14, -(int)HandlerImpl::size_deopt_handler());
__ add2reg(Z_R14, -(int)NativeCall::max_instruction_size());
const Register exec_mode_reg = Z_tmp_1;

View File

@ -25,8 +25,8 @@
#ifndef CPU_S390_VM_VERSION_EXT_S390_HPP
#define CPU_S390_VM_VERSION_EXT_S390_HPP
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
#include "vm_version_s390.hpp"
#define CPU_INFO "cpu_info"
#define CPU_TYPE "fpu_type"

View File

@ -31,7 +31,7 @@
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_s390.hpp"
#include "runtime/vm_version.hpp"
# include <sys/sysinfo.h>
@ -44,8 +44,8 @@ unsigned long VM_Version::_msgdigest_features[_features_buffer_len] = {0, 0, 0,
unsigned int VM_Version::_nfeatures = 0;
unsigned int VM_Version::_ncipher_features = 0;
unsigned int VM_Version::_nmsgdigest_features = 0;
unsigned int VM_Version::_Dcache_lineSize = 256;
unsigned int VM_Version::_Icache_lineSize = 256;
unsigned int VM_Version::_Dcache_lineSize = DEFAULT_CACHE_LINE_SIZE;
unsigned int VM_Version::_Icache_lineSize = DEFAULT_CACHE_LINE_SIZE;
static const char* z_gen[] = {" ", "G1", "G2", "G3", "G4", "G5", "G6", "G7" };
static const char* z_machine[] = {" ", "2064", "2084", "2094", "2097", "2817", " ", "2964" };
@ -61,7 +61,9 @@ void VM_Version::initialize() {
intx cache_line_size = Dcache_lineSize(0);
#ifdef COMPILER2
MaxVectorSize = 8;
#endif
if (has_PrefetchRaw()) {
if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) { // not preset
@ -217,6 +219,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA, false);
}
#ifdef COMPILER2
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
}
@ -226,6 +229,7 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
}
#endif
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
FLAG_SET_DEFAULT(UsePopCountInstruction, true);
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,8 +27,8 @@
#define CPU_S390_VM_VERSION_S390_HPP
#include "runtime/abstract_vm_version.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
class VM_Version: public Abstract_VM_Version {

View File

@ -1511,6 +1511,18 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
}
break;
case T_METADATA:
// We only need, for now, comparison with NULL for metadata.
{ assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
Metadata* m = opr2->as_constant_ptr()->as_metadata();
if (m == NULL) {
__ cmp(opr1->as_register(), 0);
} else {
ShouldNotReachHere();
}
}
break;
default:
ShouldNotReachHere();
break;

View File

@ -66,9 +66,9 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_by
void C1_MacroAssembler::verified_entry() {
if (C1Breakpoint) breakpoint_trap();
// build frame
verify_FPU(0, "method_entry");
if (C1Breakpoint) {
breakpoint_trap();
}
}

View File

@ -74,9 +74,6 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, PreserveFramePointer, false);
// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);

View File

@ -91,7 +91,6 @@ void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
assert_not_delayed();
verify_FPU(1, state);
interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
jmp( IdispatchAddress, 0 );
if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
@ -264,7 +263,6 @@ void InterpreterMacroAssembler::dispatch_only(TosState state) {
// dispatch value in Lbyte_code and increment Lbcp
void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify, bool generate_poll) {
verify_FPU(1, state);
// %%%%% maybe implement +VerifyActivationFrameSize here
//verify_thread(); //too slow; we will just verify on method entry & exit
if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
@ -2545,11 +2543,6 @@ void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Regis
}
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, Address mask_addr,

View File

@ -321,7 +321,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Debugging
void interp_verify_oop(Register reg, TosState state, const char * file, int line); // only if +VerifyOops && state == atos
void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
void verify_FPU(int stack_depth, TosState state = ftos); // only if +VerifyFPU && (state == ftos || state == dtos)
void verify_FPU(int stack_depth, TosState state = ftos) {} // No-op.
// support for JVMTI/Dtrace
typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;

View File

@ -1130,21 +1130,6 @@ void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
}
}
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
// %%%%%% need to implement this
}
// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
// %%%%%% need to implement this
}
void MacroAssembler::empty_FPU_stack() {
// %%%%%% need to implement this
}
void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
// plausibility check for oops
if (!VerifyOops) return;
@ -2826,47 +2811,6 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
bind(done);
}
void MacroAssembler::print_CPU_state() {
// %%%%% need to implement this
}
void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
// %%%%% need to implement this
}
void MacroAssembler::push_IU_state() {
// %%%%% need to implement this
}
void MacroAssembler::pop_IU_state() {
// %%%%% need to implement this
}
void MacroAssembler::push_FPU_state() {
// %%%%% need to implement this
}
void MacroAssembler::pop_FPU_state() {
// %%%%% need to implement this
}
void MacroAssembler::push_CPU_state() {
// %%%%% need to implement this
}
void MacroAssembler::pop_CPU_state() {
// %%%%% need to implement this
}
void MacroAssembler::verify_tlab() {
#ifdef ASSERT
if (UseTLAB && VerifyOops) {

View File

@ -649,10 +649,6 @@ class MacroAssembler : public Assembler {
inline void callr( Register s1, Register s2 );
inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
// Emits nothing on V8
inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
inline void iprefetch( Label& L);
inline void tst( Register s );
inline void ret( bool trace = false );
@ -1056,23 +1052,6 @@ public:
// check_and_forward_exception to handle exceptions when it is safe
void check_and_forward_exception(Register scratch_reg);
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void push_fTOS();
// pops double TOS element from CPU stack and pushes on FPU stack
void pop_fTOS();
void empty_FPU_stack();
void push_IU_state();
void pop_IU_state();
void push_FPU_state();
void pop_FPU_state();
void push_CPU_state();
void pop_CPU_state();
// Returns the byte size of the instructions generated by decode_klass_not_null().
static int instr_size_for_decode_klass_not_null();
@ -1092,15 +1071,11 @@ public:
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
// only if +VerifyOops
void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
// only if +VerifyFPU
void stop(const char* msg); // prints msg, dumps registers and stops execution
void warn(const char* msg); // prints msg, but don't stop
void untested(const char* what = "");
void unimplemented(const char* what = "");
void should_not_reach_here() { stop("should not reach here"); }
void print_CPU_state();
// oops in code
AddressLiteral allocate_oop_address(jobject obj); // allocate_index

View File

@ -278,13 +278,6 @@ inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
Assembler::bp( never, true, xcc, pt, d, rt );
Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
inline void MacroAssembler::tst( Register s ) { orcc( G0, s, G0 ); }
inline void MacroAssembler::ret( bool trace ) {

View File

@ -574,15 +574,6 @@ void NativeMovConstRegPatching::test() {
//-------------------------------------------------------------------
void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
Untested("copy_instruction_to");
int instruction_size = next_instruction_address() - instruction_address();
for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
*(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
}
}
void NativeMovRegMem::verify() {
NativeInstruction::verify();
// make sure code pattern is actually a "ld" or "st" of some sort.

View File

@ -576,7 +576,8 @@ class NativeMovConstReg: public NativeInstruction {
// sethi and the add. The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovConstRegPatching;
inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);class NativeMovConstRegPatching: public NativeInstruction {
inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
class NativeMovConstRegPatching: public NativeInstruction {
public:
enum Sparc_specific_constants {
sethi_offset = 0,
@ -664,10 +665,13 @@ class NativeMovRegMem: public NativeInstruction {
return (is_op(i0, Assembler::ldst_op));
}
address instruction_address() const { return addr_at(0); }
address next_instruction_address() const {
return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
address instruction_address() const { return addr_at(0); }
int num_bytes_to_end_of_patch() const {
return is_immediate()? BytesPerInstWord :
NativeMovConstReg::instruction_size;
}
intptr_t offset() const {
return is_immediate()? inv_simm(long_at(0), offset_width) :
nativeMovConstReg_at(addr_at(0))->data();
@ -684,8 +688,6 @@ class NativeMovRegMem: public NativeInstruction {
set_offset (offset() + radd_offset);
}
void copy_instruction_to(address new_instruction_address);
void verify();
void print ();

View File

@ -25,8 +25,8 @@
#ifndef CPU_SPARC_VM_VERSION_EXT_SPARC_HPP
#define CPU_SPARC_VM_VERSION_EXT_SPARC_HPP
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
#include "vm_version_sparc.hpp"
#if defined(SOLARIS)
#include <kstat.h>

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,7 @@
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_sparc.hpp"
#include "runtime/vm_version.hpp"
#include <sys/mman.h>

View File

@ -25,8 +25,8 @@
#ifndef CPU_SPARC_VM_VERSION_SPARC_HPP
#define CPU_SPARC_VM_VERSION_SPARC_HPP
#include "runtime/abstract_vm_version.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
class VM_Version: public Abstract_VM_Version {
friend class VMStructs;

View File

@ -4227,7 +4227,7 @@ void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
vector_len == AVX_256bit? VM_Version::supports_avx2() :
0, "");
vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x00);
@ -7197,7 +7197,6 @@ void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
emit_int8(0x7C);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
assert(VM_Version::supports_evex(), "");
assert(dst != xnoreg, "sanity");
@ -7212,7 +7211,6 @@ void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int ve
emit_int8((unsigned char)0x90);
emit_operand(dst, src);
}
// Carry-Less Multiplication Quadword
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
assert(VM_Version::supports_clmul(), "");
@ -9165,6 +9163,26 @@ void Assembler::notq(Register dst) {
emit_int8((unsigned char)(0xD0 | encode));
}
void Assembler::btsq(Address dst, int imm8) {
assert(isByte(imm8), "not a byte");
InstructionMark im(this);
prefixq(dst);
emit_int8((unsigned char)0x0F);
emit_int8((unsigned char)0xBA);
emit_operand(rbp /* 5 */, dst, 1);
emit_int8(imm8);
}
void Assembler::btrq(Address dst, int imm8) {
assert(isByte(imm8), "not a byte");
InstructionMark im(this);
prefixq(dst);
emit_int8((unsigned char)0x0F);
emit_int8((unsigned char)0xBA);
emit_operand(rsi /* 6 */, dst, 1);
emit_int8(imm8);
}
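
The two encoders emit 0x0F 0xBA with ModRM reg fields 5 and 6, i.e. the BTS and BTR memory forms with an immediate bit index. Their effect, modeled in standalone C++ purely for reference:

#include <cstdint>
#include <cstdio>

// Software model of the new instructions' semantics (illustrative only).
uint64_t bts(uint64_t v, int bit) { return v |  (UINT64_C(1) << bit); } // btsq: set bit
uint64_t btr(uint64_t v, int bit) { return v & ~(UINT64_C(1) << bit); } // btrq: clear bit

int main() {
  printf("%#llx\n", (unsigned long long)bts(0, 40));    // 0x10000000000
  printf("%#llx\n", (unsigned long long)btr(~0ULL, 0)); // 0xfffffffffffffffe
  return 0;
}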
void Assembler::orq(Address dst, int32_t imm32) {
InstructionMark im(this);
prefixq(dst);

View File

@ -26,7 +26,7 @@
#define CPU_X86_ASSEMBLER_X86_HPP
#include "asm/register.hpp"
#include "vm_version_x86.hpp"
#include "runtime/vm_version.hpp"
class BiasedLockingCounters;
@ -1592,6 +1592,9 @@ private:
#ifdef _LP64
void notq(Register dst);
void btsq(Address dst, int imm8);
void btrq(Address dst, int imm8);
#endif
void orl(Address dst, int32_t imm32);

View File

@ -2641,6 +2641,15 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
LIR_Const* c = opr2->as_constant_ptr();
if (c->type() == T_INT) {
__ cmpl(reg1, c->as_jint());
} else if (c->type() == T_METADATA) {
// All we need for now is a comparison with NULL for equality.
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
Metadata* m = c->as_metadata();
if (m == NULL) {
__ cmpptr(reg1, (int32_t)0);
} else {
ShouldNotReachHere();
}
} else if (is_reference_type(c->type())) {
// In 64bit oops are single register
jobject o = c->as_jobject();

View File

@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
@ -445,21 +446,35 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
}
}
//
// Arguments:
//
// Inputs:
// src: oop location, might be clobbered
// tmp1: scratch register, might not be valid.
//
// Output:
// dst: oop loaded from src location
//
// Kill:
// tmp1 (if it is valid)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread) {
bool on_oop = is_reference_type(type);
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool not_in_heap = (decorators & IN_NATIVE) != 0;
bool on_reference = on_weak || on_phantom;
bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
bool keep_alive = ((decorators & AS_NO_KEEPALIVE) == 0) || is_traversal_mode;
// 1: non-reference load, no additional barrier is needed
if (!is_reference_type(type)) {
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
return;
}
Register result_dst = dst;
bool use_tmp1_for_dst = false;
assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");
if (on_oop) {
// We want to preserve src
// 2: load a reference from src location and apply LRB if needed
if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
Register result_dst = dst;
bool use_tmp1_for_dst = false;
// Preserve src location for LRB
if (dst == src.base() || dst == src.index()) {
// Use tmp1 for dst if possible, as it is not used in BarrierAssembler::load_at()
if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
@ -469,19 +484,18 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
dst = rdi;
__ push(dst);
}
assert_different_registers(dst, src.base(), src.index());
}
assert_different_registers(dst, src.base(), src.index());
}
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
if (on_oop) {
if (not_in_heap && !is_traversal_mode) {
if (ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type)) {
load_reference_barrier_native(masm, dst, src);
} else {
load_reference_barrier(masm, dst, src);
}
// Move loaded oop to final destination
if (dst != result_dst) {
__ movptr(result_dst, dst);
@ -491,21 +505,24 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
dst = result_dst;
}
} else {
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
}
if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
assert_different_registers(dst, tmp1, tmp_thread);
NOT_LP64(__ get_thread(thread));
// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.
shenandoah_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
thread /* thread */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
}
// 3: apply keep-alive barrier if needed
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
assert_different_registers(dst, tmp1, tmp_thread);
NOT_LP64(__ get_thread(thread));
// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.
shenandoah_write_barrier_pre(masm /* masm */,
noreg /* obj */,
dst /* pre_val */,
thread /* thread */,
tmp1 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
}
}
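
The rewrite collapses the old decorator bookkeeping into the three numbered steps above. A compilable model of the resulting control flow; the predicate bodies here are placeholders for ShenandoahBarrierSet::need_load_reference_barrier and ::need_keep_alive_barrier, which this commit introduces:

#include <cstdio>

// Placeholder decorator bits and predicates; illustrative structure only.
enum { ON_WEAK_OOP_REF = 1, ON_PHANTOM_OOP_REF = 2 };

static bool need_lrb(bool is_reference) { return is_reference; }
static bool need_keep_alive(int dec, bool is_ref) {
  return is_ref && (dec & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF));
}

static void load_at_model(int decorators, bool is_reference) {
  printf("plain load\n");                  // step 1: every path loads first
  if (!is_reference) return;               // non-reference: no barriers
  if (need_lrb(is_reference))
    printf("load-reference barrier\n");    // step 2: normal or native LRB
  if (need_keep_alive(decorators, is_reference))
    printf("SATB keep-alive enqueue\n");   // step 3: weak/phantom referents
}

int main() { load_at_model(ON_WEAK_OOP_REF, true); return 0; }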

View File

@ -81,9 +81,6 @@ define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);

View File

@ -3770,6 +3770,16 @@ void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src
}
}
void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
assert(UseAVX > 0, "requires some form of AVX");
if (reachable(src)) {
Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
} else {
lea(rscratch, src);
Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
}
}
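
The overload follows the usual pattern for AddressLiteral operands: address the literal directly when it is rip-reachable, otherwise materialize it through the caller-supplied scratch register (the AES-CTR code below passes r15). A rough standalone model of the reachability test; the real check is MacroAssembler::reachable and is more involved:

#include <cstdint>
#include <cstdio>

// Illustrative: a rip-relative operand must fit a signed 32-bit displacement.
static bool rip_reachable(int64_t target, int64_t code_pos) {
  int64_t disp = target - code_pos;
  return disp == (int32_t)disp;
}

int main() {
  printf("%d\n", rip_reachable(0x7fff0000, 0x10000));        // near: direct operand
  printf("%d\n", rip_reachable(INT64_C(0x7f0000000000), 0)); // far: lea scratch first
  return 0;
}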
void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len) {
assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15");
vandps(dst, nds, negate_field, vector_len);

View File

@ -993,6 +993,8 @@ private:
public:
void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);
#endif
@ -1238,6 +1240,10 @@ public:
void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Intel Corporation.
* Copyright (c) 2019, Intel Corporation.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -778,4 +778,493 @@ void MacroAssembler::avx_ghash(Register input_state, Register htbl,
vpxor(xmm3, xmm3, xmm3, Assembler::AVX_128bit);
vpxor(xmm15, xmm15, xmm15, Assembler::AVX_128bit);
}
// AES Counter Mode using VAES instructions
void MacroAssembler::aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
Register len_reg, Register used, Register used_addr, Register saved_encCounter_start) {
const Register rounds = 0;
const Register pos = r12;
Label PRELOOP_START, EXIT_PRELOOP, REMAINDER, REMAINDER_16, LOOP, END, EXIT, END_LOOP,
AES192, AES256, AES192_REMAINDER16, REMAINDER16_END_LOOP, AES256_REMAINDER16,
REMAINDER_8, REMAINDER_4, AES192_REMAINDER8, REMAINDER_LOOP,
AES192_REMAINDER, END_REMAINDER_LOOP, AES256_REMAINDER8, REMAINDER8_END_LOOP,
AES192_REMAINDER4, AES256_REMAINDER4, AES256_REMAINDER, END_REMAINDER4, EXTRACT_TAILBYTES,
EXTRACT_TAIL_4BYTES, EXTRACT_TAIL_2BYTES, EXTRACT_TAIL_1BYTE, STORE_CTR;
cmpl(len_reg, 0);
jcc(Assembler::belowEqual, EXIT);
movl(pos, 0);
// if the number of used encrypted counter bytes < 16,
// XOR PT with saved encrypted counter to obtain CT
bind(PRELOOP_START);
cmpl(used, 16);
jcc(Assembler::aboveEqual, EXIT_PRELOOP);
movb(rbx, Address(saved_encCounter_start, used));
xorb(rbx, Address(src_addr, pos));
movb(Address(dest_addr, pos), rbx);
addptr(pos, 1);
addptr(used, 1);
decrement(len_reg);
jmp(PRELOOP_START);
bind(EXIT_PRELOOP);
movl(Address(used_addr, 0), used);
// Calculate the number of rounds (10, 12, or 14) based on the key length (128, 192, or 256 bits).
movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
vpxor(xmm0, xmm0, xmm0, Assembler::AVX_128bit);
// Move initial counter value into xmm0
movdqu(xmm0, Address(counter, 0));
// broadcast counter value to zmm8
evshufi64x2(xmm8, xmm0, xmm0, 0, Assembler::AVX_512bit);
// load lbswap mask
evmovdquq(xmm16, ExternalAddress(StubRoutines::x86::counter_mask_addr()), Assembler::AVX_512bit, r15);
//shuffle counter using lbswap_mask
vpshufb(xmm8, xmm8, xmm16, Assembler::AVX_512bit);
// Pre-increment and propagate counter values to registers zmm9-zmm15.
// Linc0 increments zmm8 by 1 (initial value being 0); Linc4 increments the counters zmm9-zmm15 by 4.
// The counter is incremented after each block, i.e. every 16 bytes processed;
// each zmm register holds 4 counter values in its 128-bit lanes,
// and the counters are incremented in parallel.
vpaddd(xmm8, xmm8, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 64), Assembler::AVX_512bit, r15);//linc0
vpaddd(xmm9, xmm8, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//linc4(rip)
vpaddd(xmm10, xmm9, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
vpaddd(xmm11, xmm10, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
vpaddd(xmm12, xmm11, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
vpaddd(xmm13, xmm12, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
vpaddd(xmm14, xmm13, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
vpaddd(xmm15, xmm14, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
// load linc32 mask in zmm register.linc32 increments counter by 32
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 256), Assembler::AVX_512bit, r15);//Linc32
// xmm31 contains the key shuffle mask.
movdqu(xmm31, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
// The load-key helper loads a 128-bit key and shuffles it; the shuffled key is then broadcast to form a 512-bit value.
// For broadcasting to ZMM, vshufi64 is used instead of evbroadcasti64x2, as the source in this case is a ZMM register
// that already holds the shuffled key value.
ev_load_key(xmm20, key, 0, xmm31);
ev_load_key(xmm21, key, 1 * 16, xmm31);
ev_load_key(xmm22, key, 2 * 16, xmm31);
ev_load_key(xmm23, key, 3 * 16, xmm31);
ev_load_key(xmm24, key, 4 * 16, xmm31);
ev_load_key(xmm25, key, 5 * 16, xmm31);
ev_load_key(xmm26, key, 6 * 16, xmm31);
ev_load_key(xmm27, key, 7 * 16, xmm31);
ev_load_key(xmm28, key, 8 * 16, xmm31);
ev_load_key(xmm29, key, 9 * 16, xmm31);
ev_load_key(xmm30, key, 10 * 16, xmm31);
// Process 32 blocks or 512 bytes of data
bind(LOOP);
cmpl(len_reg, 512);
jcc(Assembler::less, REMAINDER);
subq(len_reg, 512);
// Shuffle the counters and XOR them with roundkey1. Results are stored in zmm0-7
vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_512bit);
evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_512bit);
vpshufb(xmm1, xmm9, xmm16, Assembler::AVX_512bit);
evpxorq(xmm1, xmm1, xmm20, Assembler::AVX_512bit);
vpshufb(xmm2, xmm10, xmm16, Assembler::AVX_512bit);
evpxorq(xmm2, xmm2, xmm20, Assembler::AVX_512bit);
vpshufb(xmm3, xmm11, xmm16, Assembler::AVX_512bit);
evpxorq(xmm3, xmm3, xmm20, Assembler::AVX_512bit);
vpshufb(xmm4, xmm12, xmm16, Assembler::AVX_512bit);
evpxorq(xmm4, xmm4, xmm20, Assembler::AVX_512bit);
vpshufb(xmm5, xmm13, xmm16, Assembler::AVX_512bit);
evpxorq(xmm5, xmm5, xmm20, Assembler::AVX_512bit);
vpshufb(xmm6, xmm14, xmm16, Assembler::AVX_512bit);
evpxorq(xmm6, xmm6, xmm20, Assembler::AVX_512bit);
vpshufb(xmm7, xmm15, xmm16, Assembler::AVX_512bit);
evpxorq(xmm7, xmm7, xmm20, Assembler::AVX_512bit);
// Perform AES encode operations and put results in zmm0-zmm7.
// This is followed by incrementing counter values in zmm8-zmm15.
// Since we will be processing 32 blocks at a time, the counter is incremented by 32.
roundEnc(xmm21, 7);
vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
roundEnc(xmm22, 7);
vpaddq(xmm9, xmm9, xmm19, Assembler::AVX_512bit);
roundEnc(xmm23, 7);
vpaddq(xmm10, xmm10, xmm19, Assembler::AVX_512bit);
roundEnc(xmm24, 7);
vpaddq(xmm11, xmm11, xmm19, Assembler::AVX_512bit);
roundEnc(xmm25, 7);
vpaddq(xmm12, xmm12, xmm19, Assembler::AVX_512bit);
roundEnc(xmm26, 7);
vpaddq(xmm13, xmm13, xmm19, Assembler::AVX_512bit);
roundEnc(xmm27, 7);
vpaddq(xmm14, xmm14, xmm19, Assembler::AVX_512bit);
roundEnc(xmm28, 7);
vpaddq(xmm15, xmm15, xmm19, Assembler::AVX_512bit);
roundEnc(xmm29, 7);
cmpl(rounds, 52);
jcc(Assembler::aboveEqual, AES192);
lastroundEnc(xmm30, 7);
jmp(END_LOOP);
bind(AES192);
roundEnc(xmm30, 7);
ev_load_key(xmm18, key, 11 * 16, xmm31);
roundEnc(xmm18, 7);
cmpl(rounds, 60);
jcc(Assembler::aboveEqual, AES256);
ev_load_key(xmm18, key, 12 * 16, xmm31);
lastroundEnc(xmm18, 7);
jmp(END_LOOP);
bind(AES256);
ev_load_key(xmm18, key, 12 * 16, xmm31);
roundEnc(xmm18, 7);
ev_load_key(xmm18, key, 13 * 16, xmm31);
roundEnc(xmm18, 7);
ev_load_key(xmm18, key, 14 * 16, xmm31);
lastroundEnc(xmm18, 7);
// After AES encode rounds, the encrypted block cipher lies in zmm0-zmm7
// xor encrypted block cipher and input plaintext and store resultant ciphertext
bind(END_LOOP);
evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 0), xmm0, Assembler::AVX_512bit);
evpxorq(xmm1, xmm1, Address(src_addr, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 64), xmm1, Assembler::AVX_512bit);
evpxorq(xmm2, xmm2, Address(src_addr, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 2 * 64), xmm2, Assembler::AVX_512bit);
evpxorq(xmm3, xmm3, Address(src_addr, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 3 * 64), xmm3, Assembler::AVX_512bit);
evpxorq(xmm4, xmm4, Address(src_addr, pos, Address::times_1, 4 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 4 * 64), xmm4, Assembler::AVX_512bit);
evpxorq(xmm5, xmm5, Address(src_addr, pos, Address::times_1, 5 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 5 * 64), xmm5, Assembler::AVX_512bit);
evpxorq(xmm6, xmm6, Address(src_addr, pos, Address::times_1, 6 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 6 * 64), xmm6, Assembler::AVX_512bit);
evpxorq(xmm7, xmm7, Address(src_addr, pos, Address::times_1, 7 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 7 * 64), xmm7, Assembler::AVX_512bit);
addq(pos, 512);
jmp(LOOP);
// Encode 256, 128, 64 or 16 bytes at a time if length is less than 512 bytes
bind(REMAINDER);
cmpl(len_reg, 0);
jcc(Assembler::equal, END);
cmpl(len_reg, 256);
jcc(Assembler::aboveEqual, REMAINDER_16);
cmpl(len_reg, 128);
jcc(Assembler::aboveEqual, REMAINDER_8);
cmpl(len_reg, 64);
jcc(Assembler::aboveEqual, REMAINDER_4);
// At this point, we will process 16 bytes of data at a time,
// so load xmm19 with a counter increment value of 1
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 80), Assembler::AVX_128bit, r15);
jmp(REMAINDER_LOOP);
// Each ZMM register can be used to encode 64 bytes of data, so we have 4 ZMM registers to encode 256 bytes of data
bind(REMAINDER_16);
subq(len_reg, 256);
// As we process 16 blocks at a time, load mask for incrementing the counter value by 16
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 320), Assembler::AVX_512bit, r15);//Linc16(rip)
// shuffle counter and XOR counter with roundkey1
vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_512bit);
evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_512bit);
vpshufb(xmm1, xmm9, xmm16, Assembler::AVX_512bit);
evpxorq(xmm1, xmm1, xmm20, Assembler::AVX_512bit);
vpshufb(xmm2, xmm10, xmm16, Assembler::AVX_512bit);
evpxorq(xmm2, xmm2, xmm20, Assembler::AVX_512bit);
vpshufb(xmm3, xmm11, xmm16, Assembler::AVX_512bit);
evpxorq(xmm3, xmm3, xmm20, Assembler::AVX_512bit);
// Increment counter values by 16
vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
vpaddq(xmm9, xmm9, xmm19, Assembler::AVX_512bit);
// AES encode rounds
roundEnc(xmm21, 3);
roundEnc(xmm22, 3);
roundEnc(xmm23, 3);
roundEnc(xmm24, 3);
roundEnc(xmm25, 3);
roundEnc(xmm26, 3);
roundEnc(xmm27, 3);
roundEnc(xmm28, 3);
roundEnc(xmm29, 3);
cmpl(rounds, 52);
jcc(Assembler::aboveEqual, AES192_REMAINDER16);
lastroundEnc(xmm30, 3);
jmp(REMAINDER16_END_LOOP);
bind(AES192_REMAINDER16);
roundEnc(xmm30, 3);
ev_load_key(xmm18, key, 11 * 16, xmm31);
roundEnc(xmm18, 3);
ev_load_key(xmm5, key, 12 * 16, xmm31);
cmpl(rounds, 60);
jcc(Assembler::aboveEqual, AES256_REMAINDER16);
lastroundEnc(xmm5, 3);
jmp(REMAINDER16_END_LOOP);
bind(AES256_REMAINDER16);
roundEnc(xmm5, 3);
ev_load_key(xmm6, key, 13 * 16, xmm31);
roundEnc(xmm6, 3);
ev_load_key(xmm7, key, 14 * 16, xmm31);
lastroundEnc(xmm7, 3);
// After AES encode rounds, the encrypted block cipher lies in zmm0-zmm3
// xor 256 bytes of PT with the encrypted counters to produce CT.
bind(REMAINDER16_END_LOOP);
evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 0), xmm0, Assembler::AVX_512bit);
evpxorq(xmm1, xmm1, Address(src_addr, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 1 * 64), xmm1, Assembler::AVX_512bit);
evpxorq(xmm2, xmm2, Address(src_addr, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 2 * 64), xmm2, Assembler::AVX_512bit);
evpxorq(xmm3, xmm3, Address(src_addr, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 3 * 64), xmm3, Assembler::AVX_512bit);
addq(pos, 256);
cmpl(len_reg, 128);
jcc(Assembler::aboveEqual, REMAINDER_8);
cmpl(len_reg, 64);
jcc(Assembler::aboveEqual, REMAINDER_4);
//load mask for incrementing the counter value by 1
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 80), Assembler::AVX_128bit, r15);//Linc0 + 16(rip)
jmp(REMAINDER_LOOP);
// Each ZMM register can be used to encode 64 bytes of data, so we have 2 ZMM registers to encode 128 bytes of data
bind(REMAINDER_8);
subq(len_reg, 128);
// As we process 8 blocks at a time, load mask for incrementing the counter value by 8
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 192), Assembler::AVX_512bit, r15);//Linc8(rip)
// shuffle counters and xor with roundkey1
vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_512bit);
evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_512bit);
vpshufb(xmm1, xmm9, xmm16, Assembler::AVX_512bit);
evpxorq(xmm1, xmm1, xmm20, Assembler::AVX_512bit);
// increment counter by 8
vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
// AES encode
roundEnc(xmm21, 1);
roundEnc(xmm22, 1);
roundEnc(xmm23, 1);
roundEnc(xmm24, 1);
roundEnc(xmm25, 1);
roundEnc(xmm26, 1);
roundEnc(xmm27, 1);
roundEnc(xmm28, 1);
roundEnc(xmm29, 1);
cmpl(rounds, 52);
jcc(Assembler::aboveEqual, AES192_REMAINDER8);
lastroundEnc(xmm30, 1);
jmp(REMAINDER8_END_LOOP);
bind(AES192_REMAINDER8);
roundEnc(xmm30, 1);
ev_load_key(xmm18, key, 11 * 16, xmm31);
roundEnc(xmm18, 1);
ev_load_key(xmm5, key, 12 * 16, xmm31);
cmpl(rounds, 60);
jcc(Assembler::aboveEqual, AES256_REMAINDER8);
lastroundEnc(xmm5, 1);
jmp(REMAINDER8_END_LOOP);
bind(AES256_REMAINDER8);
roundEnc(xmm5, 1);
ev_load_key(xmm6, key, 13 * 16, xmm31);
roundEnc(xmm6, 1);
ev_load_key(xmm7, key, 14 * 16, xmm31);
lastroundEnc(xmm7, 1);
bind(REMAINDER8_END_LOOP);
// After AES encode rounds, the encrypted block cipher lies in zmm0-zmm1
// XOR PT with the encrypted counter and store as CT
evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 0 * 64), xmm0, Assembler::AVX_512bit);
evpxorq(xmm1, xmm1, Address(src_addr, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 1 * 64), xmm1, Assembler::AVX_512bit);
addq(pos, 128);
cmpl(len_reg, 64);
jcc(Assembler::aboveEqual, REMAINDER_4);
// load mask for incrementing the counter value by 1
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 80), Assembler::AVX_128bit, r15);//Linc0 + 16(rip)
jmp(REMAINDER_LOOP);
// Each ZMM register can be used to encode 64 bytes of data, so we have 1 ZMM register used in this block of code
bind(REMAINDER_4);
subq(len_reg, 64);
// As we process 4 blocks at a time, load mask for incrementing the counter value by 4
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
// XOR counter with first roundkey
vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_512bit);
evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_512bit);
// Increment counter
vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm21, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm22, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm23, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm24, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm25, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm26, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm27, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm28, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm29, Assembler::AVX_512bit);
cmpl(rounds, 52);
jcc(Assembler::aboveEqual, AES192_REMAINDER4);
vaesenclast(xmm0, xmm0, xmm30, Assembler::AVX_512bit);
jmp(END_REMAINDER4);
bind(AES192_REMAINDER4);
vaesenc(xmm0, xmm0, xmm30, Assembler::AVX_512bit);
ev_load_key(xmm18, key, 11 * 16, xmm31);
vaesenc(xmm0, xmm0, xmm18, Assembler::AVX_512bit);
ev_load_key(xmm5, key, 12 * 16, xmm31);
cmpl(rounds, 60);
jcc(Assembler::aboveEqual, AES256_REMAINDER4);
vaesenclast(xmm0, xmm0, xmm5, Assembler::AVX_512bit);
jmp(END_REMAINDER4);
bind(AES256_REMAINDER4);
vaesenc(xmm0, xmm0, xmm5, Assembler::AVX_512bit);
ev_load_key(xmm6, key, 13 * 16, xmm31);
vaesenc(xmm0, xmm0, xmm6, Assembler::AVX_512bit);
ev_load_key(xmm7, key, 14 * 16, xmm31);
vaesenclast(xmm0, xmm0, xmm7, Assembler::AVX_512bit);
// After AES encode rounds, the encrypted block cipher lies in zmm0.
// XOR encrypted block cipher with PT and store 64 bytes of ciphertext
bind(END_REMAINDER4);
evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 0), xmm0, Assembler::AVX_512bit);
addq(pos, 64);
// load mask for incrementing the counter value by 1
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 80), Assembler::AVX_128bit, r15);//Linc0 + 16(rip)
// For a single block, the AES rounds start here.
bind(REMAINDER_LOOP);
cmpl(len_reg, 0);
jcc(Assembler::belowEqual, END);
// XOR counter with first roundkey
vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_128bit);
evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm21, Assembler::AVX_128bit);
// Increment counter by 1
vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm22, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm23, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm24, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm25, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm26, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm27, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm28, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm29, Assembler::AVX_128bit);
cmpl(rounds, 52);
jcc(Assembler::aboveEqual, AES192_REMAINDER);
vaesenclast(xmm0, xmm0, xmm30, Assembler::AVX_128bit);
jmp(END_REMAINDER_LOOP);
bind(AES192_REMAINDER);
vaesenc(xmm0, xmm0, xmm30, Assembler::AVX_128bit);
ev_load_key(xmm18, key, 11 * 16, xmm31);
vaesenc(xmm0, xmm0, xmm18, Assembler::AVX_128bit);
ev_load_key(xmm5, key, 12 * 16, xmm31);
cmpl(rounds, 60);
jcc(Assembler::aboveEqual, AES256_REMAINDER);
vaesenclast(xmm0, xmm0, xmm5, Assembler::AVX_128bit);
jmp(END_REMAINDER_LOOP);
bind(AES256_REMAINDER);
vaesenc(xmm0, xmm0, xmm5, Assembler::AVX_128bit);
ev_load_key(xmm6, key, 13 * 16, xmm31);
vaesenc(xmm0, xmm0, xmm6, Assembler::AVX_128bit);
ev_load_key(xmm7, key, 14 * 16, xmm31);
vaesenclast(xmm0, xmm0, xmm7, Assembler::AVX_128bit);
bind(END_REMAINDER_LOOP);
// If the length register is less than the block size, i.e. 16,
// we store only that many bytes of the CT to the destination,
// as given by the length register value;
// extracting the exact number of bytes is handled by EXTRACT_TAILBYTES.
cmpl(len_reg, 16);
jcc(Assembler::less, EXTRACT_TAILBYTES);
subl(len_reg, 16);
// After AES encode rounds, the encrypted block cipher lies in xmm0.
// If the length register is equal to 16 bytes, store CT in dest after XOR operation.
evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0), Assembler::AVX_128bit);
evmovdquq(Address(dest_addr, pos, Address::times_1, 0), xmm0, Assembler::AVX_128bit);
addl(pos, 16);
jmp(REMAINDER_LOOP);
bind(EXTRACT_TAILBYTES);
// Save encrypted counter value in xmm0 for next invocation, before XOR operation
movdqu(Address(saved_encCounter_start, 0), xmm0);
// XOR encrypted block cipher in xmm0 with PT to produce CT
evpxorq(xmm0, xmm0, Address(src_addr, pos, Address::times_1, 0), Assembler::AVX_128bit);
// extract up to 15 bytes of CT from xmm0 as specified by the length register
testptr(len_reg, 8);
jcc(Assembler::zero, EXTRACT_TAIL_4BYTES);
pextrq(Address(dest_addr, pos), xmm0, 0);
psrldq(xmm0, 8);
addl(pos, 8);
bind(EXTRACT_TAIL_4BYTES);
testptr(len_reg, 4);
jcc(Assembler::zero, EXTRACT_TAIL_2BYTES);
pextrd(Address(dest_addr, pos), xmm0, 0);
psrldq(xmm0, 4);
addq(pos, 4);
bind(EXTRACT_TAIL_2BYTES);
testptr(len_reg, 2);
jcc(Assembler::zero, EXTRACT_TAIL_1BYTE);
pextrw(Address(dest_addr, pos), xmm0, 0);
psrldq(xmm0, 2);
addl(pos, 2);
bind(EXTRACT_TAIL_1BYTE);
testptr(len_reg, 1);
jcc(Assembler::zero, END);
pextrb(Address(dest_addr, pos), xmm0, 0);
addl(pos, 1);
bind(END);
// If there are no tail bytes, store counter value and exit
cmpl(len_reg, 0);
jcc(Assembler::equal, STORE_CTR);
movl(Address(used_addr, 0), len_reg);
bind(STORE_CTR);
//shuffle updated counter and store it
vpshufb(xmm8, xmm8, xmm16, Assembler::AVX_128bit);
movdqu(Address(counter, 0), xmm8);
// Zero out counter and key registers
evpxorq(xmm8, xmm8, xmm8, Assembler::AVX_512bit);
evpxorq(xmm20, xmm20, xmm20, Assembler::AVX_512bit);
evpxorq(xmm21, xmm21, xmm21, Assembler::AVX_512bit);
evpxorq(xmm22, xmm22, xmm22, Assembler::AVX_512bit);
evpxorq(xmm23, xmm23, xmm23, Assembler::AVX_512bit);
evpxorq(xmm24, xmm24, xmm24, Assembler::AVX_512bit);
evpxorq(xmm25, xmm25, xmm25, Assembler::AVX_512bit);
evpxorq(xmm26, xmm26, xmm26, Assembler::AVX_512bit);
evpxorq(xmm27, xmm27, xmm27, Assembler::AVX_512bit);
evpxorq(xmm28, xmm28, xmm28, Assembler::AVX_512bit);
evpxorq(xmm29, xmm29, xmm29, Assembler::AVX_512bit);
evpxorq(xmm30, xmm30, xmm30, Assembler::AVX_512bit);
cmpl(rounds, 44);
jcc(Assembler::belowEqual, EXIT);
evpxorq(xmm18, xmm18, xmm18, Assembler::AVX_512bit);
evpxorq(xmm5, xmm5, xmm5, Assembler::AVX_512bit);
cmpl(rounds, 52);
jcc(Assembler::belowEqual, EXIT);
evpxorq(xmm6, xmm6, xmm6, Assembler::AVX_512bit);
evpxorq(xmm7, xmm7, xmm7, Assembler::AVX_512bit);
bind(EXIT);
}
#endif // _LP64
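
At byte level the stub implements the standard CTR contract, including the carry-over of unused keystream bytes through "used" that the preloop consumes. A compact C++ model under stated assumptions: block_encrypt stands in for the real AES rounds, and increment_be128 for the big-endian counter arithmetic that the byte-swap masks implement.

#include <cstdint>
#include <cstddef>

static void block_encrypt(const uint8_t counter[16], uint8_t out[16]) {
  for (int i = 0; i < 16; i++) out[i] = (uint8_t)(counter[i] ^ 0xAA); // placeholder, not AES
}
static void increment_be128(uint8_t counter[16]) {
  for (int i = 15; i >= 0 && ++counter[i] == 0; i--) {} // big-endian +1
}

// "used" carries leftover keystream bytes across calls, mirroring the stub.
void ctr_crypt(const uint8_t* in, uint8_t* out, size_t len,
               uint8_t counter[16], uint8_t enc_counter[16], int* used) {
  for (size_t i = 0; i < len; i++) {
    if (*used == 16) {                       // keystream exhausted:
      block_encrypt(counter, enc_counter);   //   encrypt the counter,
      increment_be128(counter);              //   then advance it one block
      *used = 0;
    }
    out[i] = in[i] ^ enc_counter[(*used)++]; // CT = PT xor keystream
  }
}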

View File

@ -355,60 +355,7 @@ int NativeMovRegMem::instruction_start() const {
return off;
}
address NativeMovRegMem::instruction_address() const {
return addr_at(instruction_start());
}
address NativeMovRegMem::next_instruction_address() const {
address ret = instruction_address() + instruction_size;
u_char instr_0 = *(u_char*) instruction_address();
switch (instr_0) {
case instruction_operandsize_prefix:
fatal("should have skipped instruction_operandsize_prefix");
break;
case instruction_extended_prefix:
fatal("should have skipped instruction_extended_prefix");
break;
case instruction_code_mem2reg_movslq: // 0x63
case instruction_code_mem2reg_movzxb: // 0xB6
case instruction_code_mem2reg_movsxb: // 0xBE
case instruction_code_mem2reg_movzxw: // 0xB7
case instruction_code_mem2reg_movsxw: // 0xBF
case instruction_code_reg2mem: // 0x89 (q/l)
case instruction_code_mem2reg: // 0x8B (q/l)
case instruction_code_reg2memb: // 0x88
case instruction_code_mem2regb: // 0x8a
case instruction_code_lea: // 0x8d
case instruction_code_float_s: // 0xd9 fld_s a
case instruction_code_float_d: // 0xdd fld_d a
case instruction_code_xmm_load: // 0x10
case instruction_code_xmm_store: // 0x11
case instruction_code_xmm_lpd: // 0x12
{
// If there is an SIB then instruction is longer than expected
u_char mod_rm = *(u_char*)(instruction_address() + 1);
if ((mod_rm & 7) == 0x4) {
ret++;
}
}
case instruction_code_xor:
fatal("should have skipped xor lead in");
break;
default:
fatal("not a NativeMovRegMem");
}
return ret;
}
int NativeMovRegMem::offset() const{
int NativeMovRegMem::patch_offset() const {
int off = data_offset + instruction_start();
u_char mod_rm = *(u_char*)(instruction_address() + 1);
// nnnn(r12|rsp) isn't coded as simple mod/rm since that is
@ -417,19 +364,7 @@ int NativeMovRegMem::offset() const{
if ((mod_rm & 7) == 0x4) {
off++;
}
return int_at(off);
}
void NativeMovRegMem::set_offset(int x) {
int off = data_offset + instruction_start();
u_char mod_rm = *(u_char*)(instruction_address() + 1);
// nnnn(r12|rsp) isn't coded as simple mod/rm since that is
// the encoding to use an SIB byte. Which will have the nnnn
// field off by one byte
if ((mod_rm & 7) == 0x4) {
off++;
}
set_int_at(off, x);
return off;
}
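
patch_offset() now centralizes the SIB-byte adjustment that offset() and set_offset() previously duplicated. The check itself, modeled standalone:

#include <cstdint>
#include <cstdio>

// When the ModRM r/m field is 0b100 an SIB byte follows, pushing the
// displacement out by one byte; mirrors the (mod_rm & 7) == 0x4 test above.
static int displacement_offset(const uint8_t* insn, int base_off) {
  uint8_t mod_rm = insn[1];                        // byte after the opcode
  return base_off + (((mod_rm & 7) == 4) ? 1 : 0);
}

int main() {
  uint8_t mov_rsp_rel[] = { 0x8B, 0x84, 0x24, 0x10, 0x00, 0x00, 0x00 }; // mov eax, [rsp+0x10]
  printf("disp at byte %d\n", displacement_offset(mov_rsp_rel, 2));     // prints 3
  return 0;
}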
void NativeMovRegMem::verify() {

View File

@ -361,7 +361,6 @@ class NativeMovRegMem: public NativeInstruction {
instruction_VEX_prefix_3bytes = Assembler::VEX_3bytes,
instruction_EVEX_prefix_4bytes = Assembler::EVEX_4bytes,
instruction_size = 4,
instruction_offset = 0,
data_offset = 2,
next_instruction_offset = 4
@ -370,15 +369,26 @@ class NativeMovRegMem: public NativeInstruction {
// helper
int instruction_start() const;
address instruction_address() const;
address instruction_address() const {
return addr_at(instruction_start());
}
address next_instruction_address() const;
int num_bytes_to_end_of_patch() const {
return patch_offset() + sizeof(jint);
}
int offset() const;
int offset() const {
return int_at(patch_offset());
}
void set_offset(int x);
void set_offset(int x) {
set_int_at(patch_offset(), x);
}
void add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); }
void add_offset_in_bytes(int add_offset) {
int patch_off = patch_offset();
set_int_at(patch_off, int_at(patch_off) + add_offset);
}
void verify();
void print ();
@ -387,6 +397,7 @@ class NativeMovRegMem: public NativeInstruction {
static void test() {}
private:
int patch_offset() const;
inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};

View File

@ -38,6 +38,7 @@
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
@ -46,7 +47,6 @@
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#include "vm_version_x86.hpp"
#define __ masm->

View File

@ -45,9 +45,9 @@
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vm_version_x86.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"

View File

@ -3982,6 +3982,123 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// This mask is used for incrementing counter value(linc0, linc4, etc.)
address counter_mask_addr() {
__ align(64);
StubCodeMark mark(this, "StubRoutines", "counter_mask_addr");
address start = __ pc();
__ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);//lbswapmask
__ emit_data64(0x0001020304050607, relocInfo::none);
__ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
__ emit_data64(0x0001020304050607, relocInfo::none);
__ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
__ emit_data64(0x0001020304050607, relocInfo::none);
__ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
__ emit_data64(0x0001020304050607, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);//linc0 = counter_mask_addr+64
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000001, relocInfo::none);//counter_mask_addr() + 80
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000002, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000003, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000004, relocInfo::none);//linc4 = counter_mask_addr() + 128
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000004, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000004, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000004, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000008, relocInfo::none);//linc8 = counter_mask_addr() + 192
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000008, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000008, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000008, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000020, relocInfo::none);//linc32 = counter_mask_addr() + 256
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000020, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000020, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000020, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000010, relocInfo::none);//linc16 = counter_mask_addr() + 320
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000010, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000010, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000010, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
return start;
}
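
aesctr_encrypt above addresses this table with raw byte offsets. Collected in one place for reference; the names are invented here, derived by counting the emit_data64 entries:

// Invented names for the raw offsets used by aesctr_encrypt.
enum CounterMaskLayout {
  kLbswapMask = 0,    // byte-swap mask: counters are kept big-endian
  kLinc0      = 64,   // {0,1,2,3}: staggers four 128-bit counters in one ZMM
  kLinc1      = 80,   // {1}: single-block increment, loaded as 128-bit
  kLinc4      = 128,  // {4,4,4,4}
  kLinc8      = 192,  // {8,8,8,8}
  kLinc32     = 256,  // {32,32,32,32}: the 32-blocks-per-iteration bulk loop
  kLinc16     = 320   // {16,16,16,16}
};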
// Vector AES Counter implementation
address generate_counterMode_VectorAESCrypt() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
address start = __ pc();
const Register from = c_rarg0; // source array address
const Register to = c_rarg1; // destination array address
const Register key = c_rarg2; // key array address r8
const Register counter = c_rarg3; // counter byte array initialized from counter array address
// and updated with the incremented counter in the end
#ifndef _WIN64
const Register len_reg = c_rarg4;
const Register saved_encCounter_start = c_rarg5;
const Register used_addr = r10;
const Address used_mem(rbp, 2 * wordSize);
const Register used = r11;
#else
const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64
const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64
const Register len_reg = r10; // pick the first volatile windows register
const Register saved_encCounter_start = r11;
const Register used_addr = r13;
const Register used = r14;
#endif
__ enter();
// Save state before entering routine
__ push(r12);
__ push(r13);
__ push(r14);
__ push(r15);
#ifdef _WIN64
// on win64, fill len_reg from stack position
__ movl(len_reg, len_mem);
__ movptr(saved_encCounter_start, saved_encCounter_mem);
__ movptr(used_addr, used_mem);
__ movl(used, Address(used_addr, 0));
#else
__ push(len_reg); // Save
__ movptr(used_addr, used_mem);
__ movl(used, Address(used_addr, 0));
#endif
__ push(rbx);
__ aesctr_encrypt(from, to, key, counter, len_reg, used, used_addr, saved_encCounter_start);
// Restore state before leaving routine
__ pop(rbx);
#ifdef _WIN64
__ movl(rax, len_mem); // return length
#else
__ pop(rax); // return length
#endif
__ pop(r15);
__ pop(r14);
__ pop(r13);
__ pop(r12);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
return start;
}
// This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
// to hide instruction latency
//
@ -6111,9 +6228,14 @@ address generate_avx_ghash_processBlocks() {
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
}
}
if (UseAESCTRIntrinsics){
StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
if (UseAESCTRIntrinsics) {
if (VM_Version::supports_vaes() && VM_Version::supports_avx512bw() && VM_Version::supports_avx512vl()) {
StubRoutines::x86::_counter_mask_addr = counter_mask_addr();
StubRoutines::_counterMode_AESCrypt = generate_counterMode_VectorAESCrypt();
} else {
StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
}
}
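
Stub selection now keys on three CPU features together. A standalone model of the choice, with plain booleans standing in for the VM_Version::supports_*() queries:

#include <cstdio>

struct CpuFeatures { bool vaes, avx512bw, avx512vl; }; // stand-ins, illustrative

static const char* pick_ctr_stub(const CpuFeatures& f) {
  if (f.vaes && f.avx512bw && f.avx512vl)
    return "counterMode_VectorAESCrypt";  // 512-bit VAES path added here
  return "counterMode_AESCrypt_Parallel"; // existing 6-blocks-at-a-time path
}

int main() {
  CpuFeatures with_vaes{true, true, true}, without{false, true, true};
  printf("%s\n%s\n", pick_ctr_stub(with_vaes), pick_ctr_stub(without));
  return 0;
}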
if (UseSHA1Intrinsics) {

View File

@ -62,7 +62,7 @@ address StubRoutines::x86::_right_shift_mask = NULL;
address StubRoutines::x86::_left_shift_mask = NULL;
address StubRoutines::x86::_and_mask = NULL;
address StubRoutines::x86::_url_charset = NULL;
address StubRoutines::x86::_counter_mask_addr = NULL;
#endif
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL;

View File

@ -154,6 +154,7 @@ class x86 {
static address _k512_W_addr;
// byte flip mask for sha512
static address _pshuffle_byte_flip_mask_addr_sha512;
static address _counter_mask_addr;
// Masks for base64
static address _base64_charset;
static address _bswap_mask;
@ -258,6 +259,7 @@ class x86 {
static address base64_right_shift_mask_addr() { return _right_shift_mask; }
static address base64_left_shift_mask_addr() { return _left_shift_mask; }
static address base64_and_mask_addr() { return _and_mask; }
static address counter_mask_addr() { return _counter_mask_addr; }
#endif
static address pshuffle_byte_flip_mask_addr() { return _pshuffle_byte_flip_mask_addr; }
static void generate_CRC32C_table(bool is_pclmulqdq_supported);

View File

@ -25,8 +25,8 @@
#ifndef CPU_X86_VM_VERSION_EXT_X86_HPP
#define CPU_X86_VM_VERSION_EXT_X86_HPP
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"
#include "vm_version_x86.hpp"
class VM_Version_Ext : public VM_Version {

View File

@ -32,8 +32,8 @@
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/virtualizationSupport.hpp"
#include "vm_version_x86.hpp"
#include OS_HEADER_INLINE(os)

View File

@ -26,8 +26,8 @@
#define CPU_X86_VM_VERSION_X86_HPP
#include "memory/universe.hpp"
#include "runtime/abstract_vm_version.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
class VM_Version : public Abstract_VM_Version {
friend class VMStructs;

View File

@ -3116,6 +3116,26 @@ operand immL32()
interface(CONST_INTER);
%}
operand immL_Pow2()
%{
predicate(is_power_of_2_long(n->get_long()));
match(ConL);
op_cost(15);
format %{ %}
interface(CONST_INTER);
%}
operand immL_NotPow2()
%{
predicate(is_power_of_2_long(~n->get_long()));
match(ConL);
op_cost(15);
format %{ %}
interface(CONST_INTER);
%}
// Long Immediate zero
operand immL0()
%{
@ -9841,6 +9861,23 @@ instruct andL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
ins_pipe(ialu_mem_imm);
%}
instruct btrL_mem_imm(memory dst, immL_NotPow2 con, rFlagsReg cr)
%{
// con should be a pure 64-bit immediate given that not(con) is a power of 2
// because AND/OR works well enough for 8/32-bit values.
predicate(log2_long(~n->in(3)->in(2)->get_long()) > 30);
match(Set dst (StoreL dst (AndL (LoadL dst) con)));
effect(KILL cr);
ins_cost(125);
format %{ "btrq $dst, log2(not($con))\t# long" %}
ins_encode %{
__ btrq($dst$$Address, log2_long(~$con$$constant));
%}
ins_pipe(ialu_mem_imm);
%}
// BMI1 instructions
instruct andnL_rReg_rReg_mem(rRegL dst, rRegL src1, memory src2, immL_M1 minus_1, rFlagsReg cr) %{
match(Set dst (AndL (XorL src1 minus_1) (LoadL src2)));
@ -10034,6 +10071,23 @@ instruct orL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
ins_pipe(ialu_mem_imm);
%}
instruct btsL_mem_imm(memory dst, immL_Pow2 con, rFlagsReg cr)
%{
// con should be a pure 64-bit power of 2 immediate
// because AND/OR works well enough for 8/32-bit values.
predicate(log2_long(n->in(3)->in(2)->get_long()) > 31);
match(Set dst (StoreL dst (OrL (LoadL dst) con)));
effect(KILL cr);
ins_cost(125);
format %{ "btsq $dst, log2($con)\t# long" %}
ins_encode %{
__ btsq($dst$$Address, log2_long($con$$constant));
%}
ins_pipe(ialu_mem_imm);
%}
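
The guards in both rules read as arithmetic on the constant: a single set bit above the imm32 range qualifies for btsq, and a single clear bit (power-of-two complement) above bit 30 qualifies for btrq; below those thresholds the existing 8/32-bit OR/AND immediate forms already suffice. A standalone model of the predicate math:

#include <cstdint>
#include <cstdio>

static bool is_pow2(uint64_t v)  { return v != 0 && (v & (v - 1)) == 0; }
static int  log2_u64(uint64_t v) { int n = -1; while (v) { v >>= 1; n++; } return n; }

int main() {
  uint64_t or_con  = UINT64_C(1) << 40;     // mem |= con  -> btsq [mem], 40
  uint64_t and_con = ~(UINT64_C(1) << 40);  // mem &= con  -> btrq [mem], 40
  printf("btsL applies: %d\n", is_pow2(or_con)   && log2_u64(or_con)   > 31);
  printf("btrL applies: %d\n", is_pow2(~and_con) && log2_u64(~and_con) > 30);
  return 0;
}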
// Xor Instructions
// Xor Register with Register
instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr)

View File

@ -66,9 +66,6 @@ define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
define_pd_global(uintx, TypeProfileLevel, 0);
define_pd_global(bool, PreserveFramePointer, false);

Some files were not shown because too many files have changed in this diff