Merge
commit 62a67c5d90
Changed files:

.hgtags
make
  autoconf
  common
  data/cacerts
src/hotspot
  cpu
    aarch64
      aarch64-asmtest.py
      assembler_aarch64.cpp
      assembler_aarch64.hpp
      c1_LIR_aarch64.cpp
      macroAssembler_aarch64.cpp
      nativeInst_aarch64.hpp
      stubGenerator_aarch64.cpp
      vm_version_aarch64.cpp
    arm
    ppc
    s390
    x86
  os
  share
    adlc
    c1
    ci
    classfile
    code
    compiler
    gc
      epsilon
      g1
        g1CollectedHeap.cpp
        g1CollectedHeap.hpp
        g1ConcurrentMarkThread.cpp
        g1GCPhaseTimes.cpp
        g1GCPhaseTimes.hpp
        g1Policy.cpp
        g1Policy.hpp
        g1RootProcessor.cpp
        g1YoungRemSetSamplingThread.cpp
      parallel
        parallelScavengeHeap.cpp
        parallelScavengeHeap.hpp
        psParallelCompact.cpp
        psParallelCompact.hpp
        psRootType.hpp
        psScavenge.cpp
      serial
      shared
        c2
        collectedHeap.cpp
        collectedHeap.hpp
        genCollectedHeap.cpp
        genCollectedHeap.hpp
        generation.hpp
        oopStorageSet.hpp
        weakProcessorPhases.cpp
        weakProcessorPhases.hpp
      shenandoah
        shenandoahControlThread.cpp
        shenandoahHeap.cpp
        shenandoahHeap.hpp
        shenandoahPhaseTimings.hpp
        shenandoahRootProcessor.cpp
        shenandoahRootProcessor.hpp
        shenandoahRootVerifier.cpp
        shenandoah_globals.hpp
      z
    interpreter
    jfr
.hgtags

@@ -652,4 +652,8 @@ a32f58c6b8be81877411767de7ba9c4cf087c1b5 jdk-15+31
4a8fd81d64bafa523cddb45f82805536edace106 jdk-16+6
6b65f4e7a975628df51ef755b02642075390041d jdk-15+33
c3a4a7ea7c304cabdacdc31741eb94c51351668d jdk-16+7
b0817631d2f4395508cb10e81c3858a94d9ae4de jdk-15+34
0a73d6f3aab48ff6d7e61e47f0bc2d87a054f217 jdk-16+8
fd60c3146a024037cdd9be34c645bb793995a7cc jdk-15+35
c075a286cc7df767cce28e8057d6ec5051786490 jdk-16+9
b01985b4f88f554f97901e53e1ba314681dd9c19 jdk-16+10
@@ -72,7 +72,6 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
  UTIL_REQUIRE_PROGS(UNAME, uname)
  UTIL_REQUIRE_PROGS(UNIQ, uniq)
  UTIL_REQUIRE_PROGS(WC, wc)
  UTIL_REQUIRE_PROGS(WHICH, which)
  UTIL_REQUIRE_PROGS(XARGS, xargs)

  # Then required tools that require some special treatment.
make/autoconf/configure (vendored, 12 lines changed)
@@ -78,11 +78,11 @@ generated_script="$build_support_dir/generated-configure.sh"
###

autoconf_missing_help() {
  APT_GET="`which apt-get 2> /dev/null | grep -v '^no apt-get in'`"
  YUM="`which yum 2> /dev/null | grep -v '^no yum in'`"
  BREW="`which brew 2> /dev/null | grep -v '^no brew in'`"
  ZYPPER="`which zypper 2> /dev/null | grep -v '^no zypper in'`"
  CYGWIN="`which cygpath 2> /dev/null | grep -v '^no cygpath in'`"
  APT_GET="`type -p apt-get 2> /dev/null`"
  YUM="`type -p yum 2> /dev/null`"
  BREW="`type -p brew 2> /dev/null`"
  ZYPPER="`type -p zypper 2> /dev/null`"
  CYGWIN="`type -p cygpath 2> /dev/null`"

  if test "x$ZYPPER" != x; then
    PKGHANDLER_COMMAND="sudo zypper install autoconf"

@@ -111,7 +111,7 @@ generate_configure_script() {
      exit 1
    fi
  else
    AUTOCONF="`which autoconf 2> /dev/null | grep -v '^no autoconf in'`"
    AUTOCONF="`type -p autoconf 2> /dev/null`"
    if test "x$AUTOCONF" = x; then
      echo
      echo "Autoconf is not found on the PATH, and AUTOCONF is not set."
@@ -218,10 +218,12 @@ AC_DEFUN([FLAGS_SETUP_SYSROOT_FLAGS],
      # We also need -iframework<path>/System/Library/Frameworks
      $1SYSROOT_CFLAGS="[$]$1SYSROOT_CFLAGS -iframework [$]$1SYSROOT/System/Library/Frameworks"
      $1SYSROOT_LDFLAGS="[$]$1SYSROOT_LDFLAGS -iframework [$]$1SYSROOT/System/Library/Frameworks"
      # These always need to be set, or we can't find the frameworks embedded in JavaVM.framework
      # set this here so it doesn't have to be peppered throughout the forest
      $1SYSROOT_CFLAGS="[$]$1SYSROOT_CFLAGS -F [$]$1SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks"
      $1SYSROOT_LDFLAGS="[$]$1SYSROOT_LDFLAGS -F [$]$1SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks"
      if test -d "[$]$1SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks" ; then
        # These always need to be set on macOS 10.X, or we can't find the frameworks embedded in JavaVM.framework
        # set this here so it doesn't have to be peppered throughout the forest
        $1SYSROOT_CFLAGS="[$]$1SYSROOT_CFLAGS -F [$]$1SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks"
        $1SYSROOT_LDFLAGS="[$]$1SYSROOT_LDFLAGS -F [$]$1SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks"
      fi
    fi

  AC_SUBST($1SYSROOT_CFLAGS)
@@ -902,9 +902,14 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_BUILD_COMPILERS],
    # FIXME: we should list the discovered compilers as an exclude pattern!
    # If we do that, we can do this detection before POST_DETECTION, and still
    # find the build compilers in the tools dir, if needed.
    UTIL_REQUIRE_PROGS(BUILD_CC, [cl cc gcc])
    if test "x$OPENJDK_BUILD_OS" = xmacosx; then
      UTIL_REQUIRE_PROGS(BUILD_CC, [clang cl cc gcc])
      UTIL_REQUIRE_PROGS(BUILD_CXX, [clang++ cl CC g++])
    else
      UTIL_REQUIRE_PROGS(BUILD_CC, [cl cc gcc])
      UTIL_REQUIRE_PROGS(BUILD_CXX, [cl CC g++])
    fi
    UTIL_FIXUP_EXECUTABLE(BUILD_CC)
    UTIL_REQUIRE_PROGS(BUILD_CXX, [cl CC g++])
    UTIL_FIXUP_EXECUTABLE(BUILD_CXX)
    UTIL_PATH_PROGS(BUILD_NM, nm gcc-nm)
    UTIL_FIXUP_EXECUTABLE(BUILD_NM)
@@ -601,7 +601,7 @@ AC_DEFUN([UTIL_REQUIRE_BUILTIN_PROGS],
  UTIL_SETUP_TOOL($1, [AC_PATH_PROGS($1, $2, , $3)])
  if test "x[$]$1" = x; then
    AC_MSG_NOTICE([Required tool $2 not found in PATH, checking built-in])
    if command -v $2 > /dev/null 2>&1; then
    if type -p $2 > /dev/null 2>&1; then
      AC_MSG_NOTICE([Found $2 as shell built-in. Using it])
      $1="$2"
    else
@ -242,7 +242,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_CYGWIN],
|
||||
new_path=`$CYGPATH -u "$path"`
|
||||
|
||||
# Now try to locate executable using which
|
||||
new_path=`$WHICH "$new_path" 2> /dev/null`
|
||||
new_path=`type -p "$new_path" 2> /dev/null`
|
||||
# bat and cmd files are not always considered executable in cygwin causing which
|
||||
# to not find them
|
||||
if test "x$new_path" = x \
|
||||
@ -258,7 +258,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_CYGWIN],
|
||||
path="$complete"
|
||||
arguments="EOL"
|
||||
new_path=`$CYGPATH -u "$path"`
|
||||
new_path=`$WHICH "$new_path" 2> /dev/null`
|
||||
new_path=`type -p "$new_path" 2> /dev/null`
|
||||
# bat and cmd files are not always considered executable in cygwin causing which
|
||||
# to not find them
|
||||
if test "x$new_path" = x \
|
||||
@ -324,7 +324,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_MSYS],
|
||||
UTIL_REWRITE_AS_UNIX_PATH(new_path)
|
||||
|
||||
# Now try to locate executable using which
|
||||
new_path=`$WHICH "$new_path" 2> /dev/null`
|
||||
new_path=`type -p "$new_path" 2> /dev/null`
|
||||
|
||||
if test "x$new_path" = x; then
|
||||
# Oops. Which didn't find the executable.
|
||||
@ -336,7 +336,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_MSYS],
|
||||
new_path="$path"
|
||||
UTIL_REWRITE_AS_UNIX_PATH(new_path)
|
||||
|
||||
new_path=`$WHICH "$new_path" 2> /dev/null`
|
||||
new_path=`type -p "$new_path" 2> /dev/null`
|
||||
# bat and cmd files are not always considered executable in MSYS causing which
|
||||
# to not find them
|
||||
if test "x$new_path" = x \
|
||||
@ -392,7 +392,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_WSL],
|
||||
|
||||
# Now try to locate executable using which
|
||||
new_path_bak="$new_path"
|
||||
new_path=`$WHICH "$new_path" 2> /dev/null`
|
||||
new_path=`type -p "$new_path" 2> /dev/null`
|
||||
# bat and cmd files are not considered executable in WSL
|
||||
if test "x$new_path" = x \
|
||||
&& test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
|
||||
@ -409,7 +409,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_WSL],
|
||||
new_path="$path"
|
||||
UTIL_REWRITE_AS_UNIX_PATH([new_path])
|
||||
new_path_bak="$new_path"
|
||||
new_path=`$WHICH "$new_path" 2> /dev/null`
|
||||
new_path=`type -p "$new_path" 2> /dev/null`
|
||||
# bat and cmd files are not considered executable in WSL
|
||||
if test "x$new_path" = x \
|
||||
&& test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
|
||||
|
@@ -346,7 +346,7 @@ $(MODULE_DEPS_MAKEFILE): $(MODULE_INFOS) \
	    sub(/\/\*.*\*\//, ""); \
	    gsub(/^ +\*.*/, ""); \
	    gsub(/ /, ""); \
	    gsub(/\r/, ""); \
	    gsub(/\r/, ""); \
	    printf(" %s", $$0) } \
	    END { printf("\n") }' $m && \
	$(PRINTF) "TRANSITIVE_MODULES_$(call GetModuleNameFromModuleInfo, $m) :=" && \

@@ -360,7 +360,7 @@ $(MODULE_DEPS_MAKEFILE): $(MODULE_INFOS) \
	    sub(/\/\*.*\*\//, ""); \
	    gsub(/^ +\*.*/, ""); \
	    gsub(/ /, ""); \
	    gsub(/\r/, ""); \
	    gsub(/\r/, ""); \
	    printf(" %s", $$0) } \
	    END { printf("\n") }' $m \
	  ) >> $@ $(NEWLINE))
make/data/cacerts/sslrooteccca (new file, 23 lines)
@ -0,0 +1,23 @@
|
||||
Owner: CN=SSL.com Root Certification Authority ECC, O=SSL Corporation, L=Houston, ST=Texas, C=US
|
||||
Issuer: CN=SSL.com Root Certification Authority ECC, O=SSL Corporation, L=Houston, ST=Texas, C=US
|
||||
Serial number: 75e6dfcbc1685ba8
|
||||
Valid from: Fri Feb 12 18:14:03 GMT 2016 until: Tue Feb 12 18:14:03 GMT 2041
|
||||
Signature algorithm name: SHA256withECDSA
|
||||
Subject Public Key Algorithm: 384-bit EC key
|
||||
Version: 3
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC
|
||||
VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
|
||||
U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0
|
||||
aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz
|
||||
WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0
|
||||
b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS
|
||||
b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB
|
||||
BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI
|
||||
7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg
|
||||
CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud
|
||||
EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD
|
||||
VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T
|
||||
kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+
|
||||
gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl
|
||||
-----END CERTIFICATE-----
|
make/data/cacerts/sslrootevrsaca (new file, 41 lines)
@ -0,0 +1,41 @@
|
||||
Owner: CN=SSL.com EV Root Certification Authority RSA R2, O=SSL Corporation, L=Houston, ST=Texas, C=US
|
||||
Issuer: CN=SSL.com EV Root Certification Authority RSA R2, O=SSL Corporation, L=Houston, ST=Texas, C=US
|
||||
Serial number: 56b629cd34bc78f6
|
||||
Valid from: Wed May 31 18:14:37 GMT 2017 until: Fri May 30 18:14:37 GMT 2042
|
||||
Signature algorithm name: SHA256withRSA
|
||||
Subject Public Key Algorithm: 4096-bit RSA key
|
||||
Version: 3
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV
|
||||
BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE
|
||||
CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy
|
||||
dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy
|
||||
MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G
|
||||
A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD
|
||||
DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy
|
||||
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq
|
||||
M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf
|
||||
OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa
|
||||
4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9
|
||||
HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR
|
||||
aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA
|
||||
b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ
|
||||
Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV
|
||||
PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO
|
||||
pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu
|
||||
UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY
|
||||
MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV
|
||||
HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4
|
||||
9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW
|
||||
s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5
|
||||
Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg
|
||||
cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM
|
||||
79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz
|
||||
/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt
|
||||
ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm
|
||||
Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK
|
||||
QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ
|
||||
w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi
|
||||
S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07
|
||||
mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w==
|
||||
-----END CERTIFICATE-----
|
make/data/cacerts/sslrootrsaca (new file, 41 lines)
@ -0,0 +1,41 @@
|
||||
Owner: CN=SSL.com Root Certification Authority RSA, O=SSL Corporation, L=Houston, ST=Texas, C=US
|
||||
Issuer: CN=SSL.com Root Certification Authority RSA, O=SSL Corporation, L=Houston, ST=Texas, C=US
|
||||
Serial number: 7b2c9bd316803299
|
||||
Valid from: Fri Feb 12 17:39:39 GMT 2016 until: Tue Feb 12 17:39:39 GMT 2041
|
||||
Signature algorithm name: SHA256withRSA
|
||||
Subject Public Key Algorithm: 4096-bit RSA key
|
||||
Version: 3
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE
|
||||
BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK
|
||||
DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp
|
||||
Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz
|
||||
OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
|
||||
dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv
|
||||
bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN
|
||||
AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R
|
||||
xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX
|
||||
qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC
|
||||
C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3
|
||||
6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh
|
||||
/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF
|
||||
YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E
|
||||
JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc
|
||||
US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8
|
||||
ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm
|
||||
+Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi
|
||||
M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV
|
||||
HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G
|
||||
A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV
|
||||
cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc
|
||||
Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs
|
||||
PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/
|
||||
q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0
|
||||
cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr
|
||||
a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I
|
||||
H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y
|
||||
K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu
|
||||
nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf
|
||||
oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY
|
||||
Ic2wBlX7Jz9TkHCpBB5XJ7k=
|
||||
-----END CERTIFICATE-----
|
src/hotspot/cpu/aarch64/aarch64-asmtest.py

@@ -865,6 +865,37 @@ class LdStSIMDOp(Instruction):
    def aname(self):
        return self._name

class SHA512SIMDOp(Instruction):

    def generate(self):
        if (self._name == 'sha512su0'):
            self.reg = [FloatRegister().generate(), FloatRegister().generate()]
        else:
            self.reg = [FloatRegister().generate(), FloatRegister().generate(),
                        FloatRegister().generate()]
        return self

    def cstr(self):
        if (self._name == 'sha512su0'):
            return (super(SHA512SIMDOp, self).cstr()
                    + ('%s, __ T2D, %s);' % (self.reg[0], self.reg[1])))
        else:
            return (super(SHA512SIMDOp, self).cstr()
                    + ('%s, __ T2D, %s, %s);' % (self.reg[0], self.reg[1], self.reg[2])))

    def astr(self):
        if (self._name == 'sha512su0'):
            return (super(SHA512SIMDOp, self).astr()
                    + ('\t%s.2D, %s.2D' % (self.reg[0].astr("v"), self.reg[1].astr("v"))))
        elif (self._name == 'sha512su1'):
            return (super(SHA512SIMDOp, self).astr()
                    + ('\t%s.2D, %s.2D, %s.2D' % (self.reg[0].astr("v"),
                       self.reg[1].astr("v"), self.reg[2].astr("v"))))
        else:
            return (super(SHA512SIMDOp, self).astr()
                    + ('\t%s, %s, %s.2D' % (self.reg[0].astr("q"),
                       self.reg[1].astr("q"), self.reg[2].astr("v"))))

class LSEOp(Instruction):
    def __init__(self, args):
        self._name, self.asmname, self.size, self.suffix = args

@@ -1100,6 +1131,8 @@ generate(LdStSIMDOp, [["ld1", 1, "8B", Address.base_only],
                      ["ld4r", 4, "2S", Address.post_reg],
                      ])

generate(SHA512SIMDOp, ["sha512h", "sha512h2", "sha512su0", "sha512su1"])

generate(SpecialCases, [["ccmn", "__ ccmn(zr, zr, 3u, Assembler::LE);", "ccmn\txzr, xzr, #3, LE"],
                        ["ccmnw", "__ ccmnw(zr, zr, 5u, Assembler::EQ);", "ccmn\twzr, wzr, #5, EQ"],
                        ["ccmp", "__ ccmp(zr, 1, 4u, Assembler::NE);", "ccmp\txzr, 1, #4, NE"],

@@ -1147,8 +1180,8 @@ outfile.close()
import subprocess
import sys

# compile for 8.1 because of lse atomics
subprocess.check_call([AARCH64_AS, "-march=armv8.1-a", "aarch64ops.s", "-o", "aarch64ops.o"])
# compile for 8.1 and sha2 because of lse atomics and sha512 crypto extension.
subprocess.check_call([AARCH64_AS, "-march=armv8.1-a+sha2", "aarch64ops.s", "-o", "aarch64ops.o"])

print
print "/*",
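For each instruction the generator emits a C++ form (cstr) that goes into the HotSpot assembler test source and an assembly form (astr) that goes into aarch64ops.s for cross-checking against the output of AARCH64_AS. A minimal sketch of what one generated pair for sha512h could look like, with arbitrary register numbers standing in for the randomly chosen FloatRegisters:

    // cstr() output (appended to the C++ test source):
    __ sha512h(v17, __ T2D, v10, v22);
    // astr() output (appended to aarch64ops.s):
    //     sha512h q17, q10, v22.2D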
src/hotspot/cpu/aarch64/assembler_aarch64.cpp: file diff suppressed because it is too large.
src/hotspot/cpu/aarch64/assembler_aarch64.hpp

@@ -2372,6 +2372,30 @@ public:

#undef INSN

#define INSN(NAME, opc) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti; \
    assert(T == T2D, "arrangement must be T2D"); \
    f(0b11001110011, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(sha512h, 0b100000);
  INSN(sha512h2, 0b100001);
  INSN(sha512su1, 0b100010);

#undef INSN

#define INSN(NAME, opc) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
    starti; \
    assert(T == T2D, "arrangement must be T2D"); \
    f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(sha512su0, 0b1100111011000000100000);

#undef INSN

#define INSN(NAME, opc) \
  void NAME(FloatRegister Vd, FloatRegister Vn) { \
    starti; \
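For the three-operand forms above, the field calls compose a single 32-bit instruction word: bits 31..21 hold the fixed pattern 0b11001110011, bits 20..16 the Vm register, bits 15..10 the per-instruction opc, bits 9..5 Vn and bits 4..0 Vd. A hedged worked example (register numbers chosen arbitrarily) for sha512h with Vd = v3, Vn = v5, Vm = v7:

    //   11001110011 00111 100000 00101 00011
    // = 0xCE608000 | (7 << 16) | (5 << 5) | 3
    // = 0xCE6780A3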
src/hotspot/cpu/aarch64/c1_LIR_aarch64.cpp

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -48,7 +48,7 @@ LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
void LIR_Address::verify() const {
  assert(base()->is_cpu_register(), "wrong base operand");
  assert(index()->is_illegal() || index()->is_double_cpu() || index()->is_single_cpu(), "wrong index operand");
  assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
  assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
         "wrong type for addresses");
}
#endif // PRODUCT
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

@@ -1507,7 +1507,7 @@ void MacroAssembler::movptr(Register r, uintptr_t imm64) {
    block_comment(buffer);
  }
#endif
  assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
  assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
  movz(r, imm64 & 0xffff);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 16);
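Together with the third movk implied by the movz/movk/movk sequence documented for NativeMovConstReg later in this commit (instruction_size = 3 * 4), movptr() materializes a 48-bit constant 16 bits at a time. A sketch for an arbitrary example address 0x00007f123456789a:

    movz(r, 0x789a);        // bits 15..0
    movk(r, 0x3456, 16);    // bits 31..16
    movk(r, 0x7f12, 32);    // bits 47..32 (third instruction, outside this hunk)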
@ -53,7 +53,7 @@ class NativeCall;
|
||||
class NativeInstruction {
|
||||
friend class Relocation;
|
||||
friend bool is_NativeCallTrampolineStub_at(address);
|
||||
public:
|
||||
public:
|
||||
enum {
|
||||
instruction_size = 4
|
||||
};
|
||||
@ -62,12 +62,16 @@ class NativeInstruction {
|
||||
return uint_at(0);
|
||||
}
|
||||
|
||||
bool is_blr() const { return (encoding() & 0xff9ffc1f) == 0xd61f0000; } // blr(register) or br(register)
|
||||
bool is_adr_aligned() const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).
|
||||
bool is_blr() const {
|
||||
// blr(register) or br(register)
|
||||
return (encoding() & 0xff9ffc1f) == 0xd61f0000;
|
||||
}
|
||||
bool is_adr_aligned() const {
|
||||
// adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).
|
||||
return (encoding() & 0xff000000) == 0x10000000;
|
||||
}
|
||||
|
||||
inline bool is_nop();
|
||||
inline bool is_illegal();
|
||||
inline bool is_return();
|
||||
bool is_jump();
|
||||
bool is_general_jump();
|
||||
inline bool is_jump_or_nop();
|
||||
@ -78,29 +82,25 @@ class NativeInstruction {
|
||||
bool is_sigill_zombie_not_entrant();
|
||||
bool is_stop();
|
||||
|
||||
protected:
|
||||
address addr_at(int offset) const { return address(this) + offset; }
|
||||
protected:
|
||||
address addr_at(int offset) const { return address(this) + offset; }
|
||||
|
||||
s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); }
|
||||
u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); }
|
||||
s_char sbyte_at(int offset) const { return *(s_char*)addr_at(offset); }
|
||||
u_char ubyte_at(int offset) const { return *(u_char*)addr_at(offset); }
|
||||
jint int_at(int offset) const { return *(jint*)addr_at(offset); }
|
||||
juint uint_at(int offset) const { return *(juint*)addr_at(offset); }
|
||||
address ptr_at(int offset) const { return *(address*)addr_at(offset); }
|
||||
oop oop_at(int offset) const { return *(oop*)addr_at(offset); }
|
||||
|
||||
jint int_at(int offset) const { return *(jint*) addr_at(offset); }
|
||||
juint uint_at(int offset) const { return *(juint*) addr_at(offset); }
|
||||
|
||||
address ptr_at(int offset) const { return *(address*) addr_at(offset); }
|
||||
|
||||
oop oop_at (int offset) const { return *(oop*) addr_at(offset); }
|
||||
|
||||
|
||||
void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; }
|
||||
void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; }
|
||||
void set_uint_at(int offset, jint i) { *(juint*)addr_at(offset) = i; }
|
||||
void set_ptr_at (int offset, address ptr) { *(address*) addr_at(offset) = ptr; }
|
||||
void set_oop_at (int offset, oop o) { *(oop*) addr_at(offset) = o; }
|
||||
void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; }
|
||||
void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; }
|
||||
void set_uint_at(int offset, jint i) { *(juint*)addr_at(offset) = i; }
|
||||
void set_ptr_at(int offset, address ptr) { *(address*)addr_at(offset) = ptr; }
|
||||
void set_oop_at(int offset, oop o) { *(oop*)addr_at(offset) = o; }
|
||||
|
||||
void wrote(int offset);
|
||||
|
||||
public:
|
||||
public:
|
||||
|
||||
inline friend NativeInstruction* nativeInstruction_at(address address);
|
||||
|
||||
@ -146,7 +146,7 @@ inline NativeInstruction* nativeInstruction_at(address address) {
|
||||
}
|
||||
|
||||
// The natural type of an AArch64 instruction is uint32_t
|
||||
inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
|
||||
inline NativeInstruction* nativeInstruction_at(uint32_t* address) {
|
||||
return (NativeInstruction*)address;
|
||||
}
|
||||
|
||||
@ -171,17 +171,15 @@ public:
|
||||
address plt_c2i_stub() const;
|
||||
void set_stub_to_clean();
|
||||
|
||||
void reset_to_plt_resolve_call();
|
||||
void set_destination_mt_safe(address dest);
|
||||
void reset_to_plt_resolve_call();
|
||||
void set_destination_mt_safe(address dest);
|
||||
|
||||
void verify() const;
|
||||
};
|
||||
|
||||
inline NativePltCall* nativePltCall_at(address address) {
|
||||
NativePltCall* call = (NativePltCall*) address;
|
||||
#ifdef ASSERT
|
||||
call->verify();
|
||||
#endif
|
||||
NativePltCall* call = (NativePltCall*)address;
|
||||
DEBUG_ONLY(call->verify());
|
||||
return call;
|
||||
}
|
||||
|
||||
@ -196,7 +194,7 @@ inline NativeCall* nativeCall_at(address address);
|
||||
// DSO calls, etc.).
|
||||
|
||||
class NativeCall: public NativeInstruction {
|
||||
public:
|
||||
public:
|
||||
enum Aarch64_specific_constants {
|
||||
instruction_size = 4,
|
||||
instruction_offset = 0,
|
||||
@ -204,14 +202,14 @@ class NativeCall: public NativeInstruction {
|
||||
return_address_offset = 4
|
||||
};
|
||||
|
||||
address instruction_address() const { return addr_at(instruction_offset); }
|
||||
address next_instruction_address() const { return addr_at(return_address_offset); }
|
||||
int displacement() const { return (int_at(displacement_offset) << 6) >> 4; }
|
||||
address displacement_address() const { return addr_at(displacement_offset); }
|
||||
address return_address() const { return addr_at(return_address_offset); }
|
||||
address instruction_address() const { return addr_at(instruction_offset); }
|
||||
address next_instruction_address() const { return addr_at(return_address_offset); }
|
||||
int displacement() const { return (int_at(displacement_offset) << 6) >> 4; }
|
||||
address displacement_address() const { return addr_at(displacement_offset); }
|
||||
address return_address() const { return addr_at(return_address_offset); }
|
||||
address destination() const;
|
||||
|
||||
void set_destination(address dest) {
|
||||
void set_destination(address dest) {
|
||||
int offset = dest - instruction_address();
|
||||
unsigned int insn = 0b100101 << 26;
|
||||
assert((offset & 3) == 0, "should be");
|
||||
@ -221,9 +219,8 @@ class NativeCall: public NativeInstruction {
|
||||
set_int_at(displacement_offset, insn);
|
||||
}
|
||||
|
||||
void verify_alignment() { ; }
|
||||
void verify();
|
||||
void print();
|
||||
void verify_alignment() { ; }
|
||||
void verify();
|
||||
|
||||
// Creation
|
||||
inline friend NativeCall* nativeCall_at(address address);
|
||||
@ -269,32 +266,29 @@ class NativeCall: public NativeInstruction {
|
||||
|
||||
inline NativeCall* nativeCall_at(address address) {
|
||||
NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
|
||||
#ifdef ASSERT
|
||||
call->verify();
|
||||
#endif
|
||||
DEBUG_ONLY(call->verify());
|
||||
return call;
|
||||
}
|
||||
|
||||
inline NativeCall* nativeCall_before(address return_address) {
|
||||
NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
|
||||
#ifdef ASSERT
|
||||
call->verify();
|
||||
#endif
|
||||
DEBUG_ONLY(call->verify());
|
||||
return call;
|
||||
}
|
||||
|
||||
// An interface for accessing/manipulating native mov reg, imm instructions.
|
||||
// (used to manipulate inlined 64-bit data calls, etc.)
|
||||
class NativeMovConstReg: public NativeInstruction {
|
||||
public:
|
||||
public:
|
||||
enum Aarch64_specific_constants {
|
||||
instruction_size = 3 * 4, // movz, movk, movk. See movptr().
|
||||
instruction_offset = 0,
|
||||
displacement_offset = 0,
|
||||
};
|
||||
|
||||
address instruction_address() const { return addr_at(instruction_offset); }
|
||||
address next_instruction_address() const {
|
||||
address instruction_address() const { return addr_at(instruction_offset); }
|
||||
|
||||
address next_instruction_address() const {
|
||||
if (nativeInstruction_at(instruction_address())->is_movz())
|
||||
// Assume movz, movk, movk
|
||||
return addr_at(instruction_size);
|
||||
@ -307,7 +301,7 @@ class NativeMovConstReg: public NativeInstruction {
|
||||
}
|
||||
|
||||
intptr_t data() const;
|
||||
void set_data(intptr_t x);
|
||||
void set_data(intptr_t x);
|
||||
|
||||
void flush() {
|
||||
if (! maybe_cpool_ref(instruction_address())) {
|
||||
@ -315,8 +309,8 @@ class NativeMovConstReg: public NativeInstruction {
|
||||
}
|
||||
}
|
||||
|
||||
void verify();
|
||||
void print();
|
||||
void verify();
|
||||
void print();
|
||||
|
||||
// Creation
|
||||
inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
|
||||
@ -325,29 +319,23 @@ class NativeMovConstReg: public NativeInstruction {
|
||||
|
||||
inline NativeMovConstReg* nativeMovConstReg_at(address address) {
|
||||
NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
|
||||
#ifdef ASSERT
|
||||
test->verify();
|
||||
#endif
|
||||
DEBUG_ONLY(test->verify());
|
||||
return test;
|
||||
}
|
||||
|
||||
inline NativeMovConstReg* nativeMovConstReg_before(address address) {
|
||||
NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
|
||||
#ifdef ASSERT
|
||||
test->verify();
|
||||
#endif
|
||||
DEBUG_ONLY(test->verify());
|
||||
return test;
|
||||
}
|
||||
|
||||
class NativeMovConstRegPatching: public NativeMovConstReg {
|
||||
private:
|
||||
friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
|
||||
private:
|
||||
friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
|
||||
NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
|
||||
#ifdef ASSERT
|
||||
test->verify();
|
||||
#endif
|
||||
DEBUG_ONLY(test->verify());
|
||||
return test;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// An interface for accessing/manipulating native moves of the form:
|
||||
@ -374,7 +362,7 @@ class NativeMovRegMem: public NativeInstruction {
|
||||
next_instruction_offset = 4
|
||||
};
|
||||
|
||||
public:
|
||||
public:
|
||||
// helper
|
||||
int instruction_start() const { return instruction_offset; }
|
||||
|
||||
@ -382,30 +370,32 @@ class NativeMovRegMem: public NativeInstruction {
|
||||
|
||||
int num_bytes_to_end_of_patch() const { return instruction_offset + instruction_size; }
|
||||
|
||||
int offset() const;
|
||||
int offset() const;
|
||||
|
||||
void set_offset(int x);
|
||||
void set_offset(int x);
|
||||
|
||||
void add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); }
|
||||
void add_offset_in_bytes(int add_offset) {
|
||||
set_offset(offset() + add_offset);
|
||||
}
|
||||
|
||||
void verify();
|
||||
void print ();
|
||||
|
||||
private:
|
||||
inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
|
||||
private:
|
||||
inline friend NativeMovRegMem* nativeMovRegMem_at(address address);
|
||||
};
|
||||
|
||||
inline NativeMovRegMem* nativeMovRegMem_at (address address) {
|
||||
inline NativeMovRegMem* nativeMovRegMem_at(address address) {
|
||||
NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
|
||||
#ifdef ASSERT
|
||||
test->verify();
|
||||
#endif
|
||||
DEBUG_ONLY(test->verify());
|
||||
return test;
|
||||
}
|
||||
|
||||
class NativeMovRegMemPatching: public NativeMovRegMem {
|
||||
private:
|
||||
friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {Unimplemented(); return 0; }
|
||||
private:
|
||||
friend NativeMovRegMemPatching* nativeMovRegMemPatching_at(address address) {
|
||||
Unimplemented();
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
||||
// An interface for accessing/manipulating native leal instruction of form:
|
||||
@ -419,10 +409,8 @@ class NativeLoadAddress: public NativeInstruction {
|
||||
next_instruction_offset = 4
|
||||
};
|
||||
|
||||
public:
|
||||
public:
|
||||
void verify();
|
||||
void print ();
|
||||
|
||||
};
|
||||
|
||||
// adrp x16, #page
|
||||
@ -441,7 +429,7 @@ public:
|
||||
address next_instruction_address() const { return return_address(); }
|
||||
intptr_t data() const;
|
||||
void set_data(intptr_t data) {
|
||||
intptr_t *addr = (intptr_t *) got_address();
|
||||
intptr_t* addr = (intptr_t*)got_address();
|
||||
*addr = data;
|
||||
}
|
||||
|
||||
@ -451,15 +439,13 @@ private:
|
||||
};
|
||||
|
||||
inline NativeLoadGot* nativeLoadGot_at(address addr) {
|
||||
NativeLoadGot* load = (NativeLoadGot*) addr;
|
||||
#ifdef ASSERT
|
||||
load->verify();
|
||||
#endif
|
||||
NativeLoadGot* load = (NativeLoadGot*)addr;
|
||||
DEBUG_ONLY(load->verify());
|
||||
return load;
|
||||
}
|
||||
|
||||
class NativeJump: public NativeInstruction {
|
||||
public:
|
||||
public:
|
||||
enum AArch64_specific_constants {
|
||||
instruction_size = 4,
|
||||
instruction_offset = 0,
|
||||
@ -467,8 +453,8 @@ class NativeJump: public NativeInstruction {
|
||||
next_instruction_offset = 4
|
||||
};
|
||||
|
||||
address instruction_address() const { return addr_at(instruction_offset); }
|
||||
address next_instruction_address() const { return addr_at(instruction_size); }
|
||||
address instruction_address() const { return addr_at(instruction_offset); }
|
||||
address next_instruction_address() const { return addr_at(instruction_size); }
|
||||
address jump_destination() const;
|
||||
void set_jump_destination(address dest);
|
||||
|
||||
@ -486,9 +472,7 @@ class NativeJump: public NativeInstruction {
|
||||
|
||||
inline NativeJump* nativeJump_at(address address) {
|
||||
NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
|
||||
#ifdef ASSERT
|
||||
jump->verify();
|
||||
#endif
|
||||
DEBUG_ONLY(jump->verify());
|
||||
return jump;
|
||||
}
|
||||
|
||||
@ -511,7 +495,7 @@ public:
|
||||
|
||||
inline NativeGeneralJump* nativeGeneralJump_at(address address) {
|
||||
NativeGeneralJump* jump = (NativeGeneralJump*)(address);
|
||||
debug_only(jump->verify();)
|
||||
DEBUG_ONLY(jump->verify());
|
||||
return jump;
|
||||
}
|
||||
|
||||
@ -529,46 +513,47 @@ public:
|
||||
address next_instruction_address() const { return addr_at(instruction_size); }
|
||||
bool is_GotJump() const;
|
||||
|
||||
void set_jump_destination(address dest) {
|
||||
address* got = (address *)got_address();
|
||||
void set_jump_destination(address dest) {
|
||||
address* got = (address*)got_address();
|
||||
*got = dest;
|
||||
}
|
||||
};
|
||||
|
||||
inline NativeGotJump* nativeGotJump_at(address addr) {
|
||||
NativeGotJump* jump = (NativeGotJump*)(addr);
|
||||
DEBUG_ONLY(jump->verify());
|
||||
return jump;
|
||||
}
|
||||
|
||||
class NativePopReg : public NativeInstruction {
|
||||
public:
|
||||
public:
|
||||
// Insert a pop instruction
|
||||
static void insert(address code_pos, Register reg);
|
||||
};
|
||||
|
||||
|
||||
class NativeIllegalInstruction: public NativeInstruction {
|
||||
public:
|
||||
public:
|
||||
// Insert illegal opcode as specific address
|
||||
static void insert(address code_pos);
|
||||
};
|
||||
|
||||
// return instruction that does not pop values of the stack
|
||||
class NativeReturn: public NativeInstruction {
|
||||
public:
|
||||
public:
|
||||
};
|
||||
|
||||
// return instruction that does pop values of the stack
|
||||
class NativeReturnX: public NativeInstruction {
|
||||
public:
|
||||
public:
|
||||
};
|
||||
|
||||
// Simple test vs memory
|
||||
class NativeTstRegMem: public NativeInstruction {
|
||||
public:
|
||||
public:
|
||||
};
|
||||
|
||||
inline bool NativeInstruction::is_nop() {
|
||||
inline bool NativeInstruction::is_nop() {
|
||||
uint32_t insn = *(uint32_t*)addr_at(0);
|
||||
return insn == 0xd503201f;
|
||||
}
|
||||
@ -598,7 +583,7 @@ inline bool NativeInstruction::is_jump_or_nop() {
|
||||
|
||||
// Call trampoline stubs.
|
||||
class NativeCallTrampolineStub : public NativeInstruction {
|
||||
public:
|
||||
public:
|
||||
|
||||
enum AArch64_specific_constants {
|
||||
instruction_size = 4 * 4,
|
||||
@ -607,7 +592,7 @@ class NativeCallTrampolineStub : public NativeInstruction {
|
||||
next_instruction_offset = 4 * 4
|
||||
};
|
||||
|
||||
address destination(nmethod *nm = NULL) const;
|
||||
address destination(nmethod* nm = NULL) const;
|
||||
void set_destination(address new_destination);
|
||||
ptrdiff_t destination_offset() const;
|
||||
};
|
||||
@ -617,7 +602,7 @@ inline bool is_NativeCallTrampolineStub_at(address addr) {
|
||||
// ldr xscratch1, L
|
||||
// br xscratch1
|
||||
// L:
|
||||
uint32_t *i = (uint32_t *)addr;
|
||||
uint32_t* i = (uint32_t*)addr;
|
||||
return i[0] == 0x58000048 && i[1] == 0xd61f0100;
|
||||
}
|
||||
|
||||
@ -632,7 +617,7 @@ public:
|
||||
void set_kind(int order_kind) { Instruction_aarch64::patch(addr_at(0), 11, 8, order_kind); }
|
||||
};
|
||||
|
||||
inline NativeMembar *NativeMembar_at(address addr) {
|
||||
inline NativeMembar* NativeMembar_at(address addr) {
|
||||
assert(nativeInstruction_at(addr)->is_Membar(), "no membar found");
|
||||
return (NativeMembar*)addr;
|
||||
}
|
||||
@ -686,8 +671,9 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
inline NativeLdSt *NativeLdSt_at(address addr) {
|
||||
inline NativeLdSt* NativeLdSt_at(address addr) {
|
||||
assert(nativeInstruction_at(addr)->is_Imm_LdSt(), "no immediate load/store found");
|
||||
return (NativeLdSt*)addr;
|
||||
}
|
||||
|
||||
#endif // CPU_AARCH64_NATIVEINST_AARCH64_HPP
|
||||
|
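Most of the nativeXxx_at() helpers in this file replace the open-coded #ifdef ASSERT guard around verify() with the DEBUG_ONLY macro. A before/after sketch of the equivalence, assuming HotSpot's definition of DEBUG_ONLY in utilities/macros.hpp (it expands to its argument in debug builds and to nothing in product builds):

    // Before: explicitly guarded verification
    NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;

    // After: same behavior, less noise
    NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
    DEBUG_ONLY(call->verify());
    return call;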
@ -3125,6 +3125,172 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start;
|
||||
}
|
||||
|
||||
// Arguments:
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - byte[] source+offset
|
||||
// c_rarg1 - int[] SHA.state
|
||||
// c_rarg2 - int offset
|
||||
// c_rarg3 - int limit
|
||||
//
|
||||
address generate_sha512_implCompress(bool multi_block, const char *name) {
|
||||
static const uint64_t round_consts[80] = {
|
||||
0x428A2F98D728AE22L, 0x7137449123EF65CDL, 0xB5C0FBCFEC4D3B2FL,
|
||||
0xE9B5DBA58189DBBCL, 0x3956C25BF348B538L, 0x59F111F1B605D019L,
|
||||
0x923F82A4AF194F9BL, 0xAB1C5ED5DA6D8118L, 0xD807AA98A3030242L,
|
||||
0x12835B0145706FBEL, 0x243185BE4EE4B28CL, 0x550C7DC3D5FFB4E2L,
|
||||
0x72BE5D74F27B896FL, 0x80DEB1FE3B1696B1L, 0x9BDC06A725C71235L,
|
||||
0xC19BF174CF692694L, 0xE49B69C19EF14AD2L, 0xEFBE4786384F25E3L,
|
||||
0x0FC19DC68B8CD5B5L, 0x240CA1CC77AC9C65L, 0x2DE92C6F592B0275L,
|
||||
0x4A7484AA6EA6E483L, 0x5CB0A9DCBD41FBD4L, 0x76F988DA831153B5L,
|
||||
0x983E5152EE66DFABL, 0xA831C66D2DB43210L, 0xB00327C898FB213FL,
|
||||
0xBF597FC7BEEF0EE4L, 0xC6E00BF33DA88FC2L, 0xD5A79147930AA725L,
|
||||
0x06CA6351E003826FL, 0x142929670A0E6E70L, 0x27B70A8546D22FFCL,
|
||||
0x2E1B21385C26C926L, 0x4D2C6DFC5AC42AEDL, 0x53380D139D95B3DFL,
|
||||
0x650A73548BAF63DEL, 0x766A0ABB3C77B2A8L, 0x81C2C92E47EDAEE6L,
|
||||
0x92722C851482353BL, 0xA2BFE8A14CF10364L, 0xA81A664BBC423001L,
|
||||
0xC24B8B70D0F89791L, 0xC76C51A30654BE30L, 0xD192E819D6EF5218L,
|
||||
0xD69906245565A910L, 0xF40E35855771202AL, 0x106AA07032BBD1B8L,
|
||||
0x19A4C116B8D2D0C8L, 0x1E376C085141AB53L, 0x2748774CDF8EEB99L,
|
||||
0x34B0BCB5E19B48A8L, 0x391C0CB3C5C95A63L, 0x4ED8AA4AE3418ACBL,
|
||||
0x5B9CCA4F7763E373L, 0x682E6FF3D6B2B8A3L, 0x748F82EE5DEFB2FCL,
|
||||
0x78A5636F43172F60L, 0x84C87814A1F0AB72L, 0x8CC702081A6439ECL,
|
||||
0x90BEFFFA23631E28L, 0xA4506CEBDE82BDE9L, 0xBEF9A3F7B2C67915L,
|
||||
0xC67178F2E372532BL, 0xCA273ECEEA26619CL, 0xD186B8C721C0C207L,
|
||||
0xEADA7DD6CDE0EB1EL, 0xF57D4F7FEE6ED178L, 0x06F067AA72176FBAL,
|
||||
0x0A637DC5A2C898A6L, 0x113F9804BEF90DAEL, 0x1B710B35131C471BL,
|
||||
0x28DB77F523047D84L, 0x32CAAB7B40C72493L, 0x3C9EBE0A15C9BEBCL,
|
||||
0x431D67C49C100D4CL, 0x4CC5D4BECB3E42B6L, 0x597F299CFC657E2AL,
|
||||
0x5FCB6FAB3AD6FAECL, 0x6C44198C4A475817L
|
||||
};
|
||||
|
||||
// Double rounds for sha512.
|
||||
#define sha512_dround(dr, i0, i1, i2, i3, i4, rc0, rc1, in0, in1, in2, in3, in4) \
|
||||
if (dr < 36) \
|
||||
__ ld1(v##rc1, __ T2D, __ post(rscratch2, 16)); \
|
||||
__ addv(v5, __ T2D, v##rc0, v##in0); \
|
||||
__ ext(v6, __ T16B, v##i2, v##i3, 8); \
|
||||
__ ext(v5, __ T16B, v5, v5, 8); \
|
||||
__ ext(v7, __ T16B, v##i1, v##i2, 8); \
|
||||
__ addv(v##i3, __ T2D, v##i3, v5); \
|
||||
if (dr < 32) { \
|
||||
__ ext(v5, __ T16B, v##in3, v##in4, 8); \
|
||||
__ sha512su0(v##in0, __ T2D, v##in1); \
|
||||
} \
|
||||
__ sha512h(v##i3, __ T2D, v6, v7); \
|
||||
if (dr < 32) \
|
||||
__ sha512su1(v##in0, __ T2D, v##in2, v5); \
|
||||
__ addv(v##i4, __ T2D, v##i1, v##i3); \
|
||||
__ sha512h2(v##i3, __ T2D, v##i1, v##i0); \
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
Register buf = c_rarg0;
|
||||
Register state = c_rarg1;
|
||||
Register ofs = c_rarg2;
|
||||
Register limit = c_rarg3;
|
||||
|
||||
__ stpd(v8, v9, __ pre(sp, -64));
|
||||
__ stpd(v10, v11, Address(sp, 16));
|
||||
__ stpd(v12, v13, Address(sp, 32));
|
||||
__ stpd(v14, v15, Address(sp, 48));
|
||||
|
||||
Label sha512_loop;
|
||||
|
||||
// load state
|
||||
__ ld1(v8, v9, v10, v11, __ T2D, state);
|
||||
|
||||
// load first 4 round constants
|
||||
__ lea(rscratch1, ExternalAddress((address)round_consts));
|
||||
__ ld1(v20, v21, v22, v23, __ T2D, __ post(rscratch1, 64));
|
||||
|
||||
__ BIND(sha512_loop);
|
||||
// load 128B of data into v12..v19
|
||||
__ ld1(v12, v13, v14, v15, __ T2D, __ post(buf, 64));
|
||||
__ ld1(v16, v17, v18, v19, __ T2D, __ post(buf, 64));
|
||||
__ rev64(v12, __ T16B, v12);
|
||||
__ rev64(v13, __ T16B, v13);
|
||||
__ rev64(v14, __ T16B, v14);
|
||||
__ rev64(v15, __ T16B, v15);
|
||||
__ rev64(v16, __ T16B, v16);
|
||||
__ rev64(v17, __ T16B, v17);
|
||||
__ rev64(v18, __ T16B, v18);
|
||||
__ rev64(v19, __ T16B, v19);
|
||||
|
||||
__ mov(rscratch2, rscratch1);
|
||||
|
||||
__ mov(v0, __ T16B, v8);
|
||||
__ mov(v1, __ T16B, v9);
|
||||
__ mov(v2, __ T16B, v10);
|
||||
__ mov(v3, __ T16B, v11);
|
||||
|
||||
sha512_dround( 0, 0, 1, 2, 3, 4, 20, 24, 12, 13, 19, 16, 17);
|
||||
sha512_dround( 1, 3, 0, 4, 2, 1, 21, 25, 13, 14, 12, 17, 18);
|
||||
sha512_dround( 2, 2, 3, 1, 4, 0, 22, 26, 14, 15, 13, 18, 19);
|
||||
sha512_dround( 3, 4, 2, 0, 1, 3, 23, 27, 15, 16, 14, 19, 12);
|
||||
sha512_dround( 4, 1, 4, 3, 0, 2, 24, 28, 16, 17, 15, 12, 13);
|
||||
sha512_dround( 5, 0, 1, 2, 3, 4, 25, 29, 17, 18, 16, 13, 14);
|
||||
sha512_dround( 6, 3, 0, 4, 2, 1, 26, 30, 18, 19, 17, 14, 15);
|
||||
sha512_dround( 7, 2, 3, 1, 4, 0, 27, 31, 19, 12, 18, 15, 16);
|
||||
sha512_dround( 8, 4, 2, 0, 1, 3, 28, 24, 12, 13, 19, 16, 17);
|
||||
sha512_dround( 9, 1, 4, 3, 0, 2, 29, 25, 13, 14, 12, 17, 18);
|
||||
sha512_dround(10, 0, 1, 2, 3, 4, 30, 26, 14, 15, 13, 18, 19);
|
||||
sha512_dround(11, 3, 0, 4, 2, 1, 31, 27, 15, 16, 14, 19, 12);
|
||||
sha512_dround(12, 2, 3, 1, 4, 0, 24, 28, 16, 17, 15, 12, 13);
|
||||
sha512_dround(13, 4, 2, 0, 1, 3, 25, 29, 17, 18, 16, 13, 14);
|
||||
sha512_dround(14, 1, 4, 3, 0, 2, 26, 30, 18, 19, 17, 14, 15);
|
||||
sha512_dround(15, 0, 1, 2, 3, 4, 27, 31, 19, 12, 18, 15, 16);
|
||||
sha512_dround(16, 3, 0, 4, 2, 1, 28, 24, 12, 13, 19, 16, 17);
|
||||
sha512_dround(17, 2, 3, 1, 4, 0, 29, 25, 13, 14, 12, 17, 18);
|
||||
sha512_dround(18, 4, 2, 0, 1, 3, 30, 26, 14, 15, 13, 18, 19);
|
||||
sha512_dround(19, 1, 4, 3, 0, 2, 31, 27, 15, 16, 14, 19, 12);
|
||||
sha512_dround(20, 0, 1, 2, 3, 4, 24, 28, 16, 17, 15, 12, 13);
|
||||
sha512_dround(21, 3, 0, 4, 2, 1, 25, 29, 17, 18, 16, 13, 14);
|
||||
sha512_dround(22, 2, 3, 1, 4, 0, 26, 30, 18, 19, 17, 14, 15);
|
||||
sha512_dround(23, 4, 2, 0, 1, 3, 27, 31, 19, 12, 18, 15, 16);
|
||||
sha512_dround(24, 1, 4, 3, 0, 2, 28, 24, 12, 13, 19, 16, 17);
|
||||
sha512_dround(25, 0, 1, 2, 3, 4, 29, 25, 13, 14, 12, 17, 18);
|
||||
sha512_dround(26, 3, 0, 4, 2, 1, 30, 26, 14, 15, 13, 18, 19);
|
||||
sha512_dround(27, 2, 3, 1, 4, 0, 31, 27, 15, 16, 14, 19, 12);
|
||||
sha512_dround(28, 4, 2, 0, 1, 3, 24, 28, 16, 17, 15, 12, 13);
|
||||
sha512_dround(29, 1, 4, 3, 0, 2, 25, 29, 17, 18, 16, 13, 14);
|
||||
sha512_dround(30, 0, 1, 2, 3, 4, 26, 30, 18, 19, 17, 14, 15);
|
||||
sha512_dround(31, 3, 0, 4, 2, 1, 27, 31, 19, 12, 18, 15, 16);
|
||||
sha512_dround(32, 2, 3, 1, 4, 0, 28, 24, 12, 0, 0, 0, 0);
|
||||
sha512_dround(33, 4, 2, 0, 1, 3, 29, 25, 13, 0, 0, 0, 0);
|
||||
sha512_dround(34, 1, 4, 3, 0, 2, 30, 26, 14, 0, 0, 0, 0);
|
||||
sha512_dround(35, 0, 1, 2, 3, 4, 31, 27, 15, 0, 0, 0, 0);
|
||||
sha512_dround(36, 3, 0, 4, 2, 1, 24, 0, 16, 0, 0, 0, 0);
|
||||
sha512_dround(37, 2, 3, 1, 4, 0, 25, 0, 17, 0, 0, 0, 0);
|
||||
sha512_dround(38, 4, 2, 0, 1, 3, 26, 0, 18, 0, 0, 0, 0);
|
||||
sha512_dround(39, 1, 4, 3, 0, 2, 27, 0, 19, 0, 0, 0, 0);
|
||||
|
||||
__ addv(v8, __ T2D, v8, v0);
|
||||
__ addv(v9, __ T2D, v9, v1);
|
||||
__ addv(v10, __ T2D, v10, v2);
|
||||
__ addv(v11, __ T2D, v11, v3);
|
||||
|
||||
if (multi_block) {
|
||||
__ add(ofs, ofs, 128);
|
||||
__ cmp(ofs, limit);
|
||||
__ br(Assembler::LE, sha512_loop);
|
||||
__ mov(c_rarg0, ofs); // return ofs
|
||||
}
|
||||
|
||||
__ st1(v8, v9, v10, v11, __ T2D, state);
|
||||
|
||||
__ ldpd(v14, v15, Address(sp, 48));
|
||||
__ ldpd(v12, v13, Address(sp, 32));
|
||||
__ ldpd(v10, v11, Address(sp, 16));
|
||||
__ ldpd(v8, v9, __ post(sp, 64));
|
||||
|
||||
__ ret(lr);
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
// Safefetch stubs.
|
||||
void generate_safefetch(const char* name, int size, address* entry,
|
||||
address* fault_pc, address* continuation_pc) {
|
||||
@ -5852,6 +6018,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
|
||||
StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
|
||||
}
|
||||
if (UseSHA512Intrinsics) {
|
||||
StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
|
||||
StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
|
||||
}
|
||||
|
||||
// generate Adler32 intrinsics code
|
||||
if (UseAdler32Intrinsics) {
|
||||
|
src/hotspot/cpu/aarch64/vm_version_aarch64.cpp

@@ -62,6 +62,10 @@
#define HWCAP_ATOMICS (1<<8)
#endif

#ifndef HWCAP_SHA512
#define HWCAP_SHA512 (1 << 21)
#endif

int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_model2;
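VM_Version detects the SHA-512 crypto extension from the Linux auxiliary vector hardware capabilities. A standalone sketch of the same check, assuming a Linux/AArch64 target and glibc's getauxval; the fallback HWCAP_SHA512 value matches the definition added above:

    #include <sys/auxv.h>
    #include <stdio.h>

    #ifndef HWCAP_SHA512
    #define HWCAP_SHA512 (1 << 21)   // same fallback as in vm_version_aarch64.cpp
    #endif

    int main() {
      unsigned long hwcaps = getauxval(AT_HWCAP);
      printf("sha512 extension: %s\n", (hwcaps & HWCAP_SHA512) ? "present" : "absent");
      return 0;
    }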
@@ -285,6 +289,7 @@ void VM_Version::get_processor_features() {
  if (auxv & HWCAP_AES) strcat(buf, ", aes");
  if (auxv & HWCAP_SHA1) strcat(buf, ", sha1");
  if (auxv & HWCAP_SHA2) strcat(buf, ", sha256");
  if (auxv & HWCAP_SHA512) strcat(buf, ", sha512");
  if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse");

  _features_string = os::strdup(buf);
@@ -358,6 +363,11 @@ void VM_Version::get_processor_features() {
    FLAG_SET_DEFAULT(UseFMA, true);
  }

  if (UseMD5Intrinsics) {
    warning("MD5 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseMD5Intrinsics, false);
  }

  if (auxv & (HWCAP_SHA1 | HWCAP_SHA2)) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      FLAG_SET_DEFAULT(UseSHA, true);

@@ -385,7 +395,12 @@ void VM_Version::get_processor_features() {
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
  if (UseSHA && (auxv & HWCAP_SHA512)) {
    // Do not auto-enable UseSHA512Intrinsics until it has been fully tested on hardware
    // if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
    //   FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    // }
  } else if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }
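Because the auto-enable path above is deliberately commented out, the new SHA-512 stubs are only used when the flag is requested explicitly. A hedged usage sketch, assuming a JDK built from these sources running on hardware whose hwcaps include sha512:

    java -XX:+UseSHA -XX:+UseSHA512Intrinsics -version

On CPUs without the extension (or with UseSHA off), the same flag is reset with the warning shown above.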
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2010, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -52,12 +52,12 @@ void LIR_Address::verify() const {
|
||||
// be handled by the back-end or will be rejected if not.
|
||||
#ifdef _LP64
|
||||
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
|
||||
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
|
||||
assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
|
||||
"wrong type for addresses");
|
||||
#else
|
||||
assert(base()->is_single_cpu(), "wrong base operand");
|
||||
assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
|
||||
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
|
||||
assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
|
||||
"wrong type for addresses");
|
||||
#endif
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -209,6 +209,11 @@ void VM_Version::initialize() {
|
||||
FLAG_SET_DEFAULT(UseFMA, false);
|
||||
}
|
||||
|
||||
if (UseMD5Intrinsics) {
|
||||
warning("MD5 intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseMD5Intrinsics, false);
|
||||
}
|
||||
|
||||
if (UseSHA) {
|
||||
warning("SHA instructions are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseSHA, false);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -52,12 +52,12 @@ void LIR_Address::verify() const {
|
||||
#ifdef _LP64
|
||||
assert(base()->is_cpu_register(), "wrong base operand");
|
||||
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
|
||||
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
|
||||
assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
|
||||
"wrong type for addresses");
|
||||
#else
|
||||
assert(base()->is_single_cpu(), "wrong base operand");
|
||||
assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
|
||||
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
|
||||
assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
|
||||
"wrong type for addresses");
|
||||
#endif
|
||||
}
|
||||
|
@ -284,6 +284,11 @@ void VM_Version::initialize() {
|
||||
FLAG_SET_DEFAULT(UseFMA, true);
|
||||
}
|
||||
|
||||
if (UseMD5Intrinsics) {
|
||||
warning("MD5 intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseMD5Intrinsics, false);
|
||||
}
|
||||
|
||||
if (has_vshasig()) {
|
||||
if (FLAG_IS_DEFAULT(UseSHA)) {
|
||||
UseSHA = true;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -51,7 +51,7 @@ LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
|
||||
void LIR_Address::verify() const {
|
||||
assert(base()->is_cpu_register(), "wrong base operand");
|
||||
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
|
||||
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
|
||||
assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
|
||||
"wrong type for addresses");
|
||||
}
|
||||
#endif // PRODUCT
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2020 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -179,6 +179,11 @@ void VM_Version::initialize() {
|
||||
FLAG_SET_DEFAULT(UseFMA, true);
|
||||
}
|
||||
|
||||
if (UseMD5Intrinsics) {
|
||||
warning("MD5 intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseMD5Intrinsics, false);
|
||||
}
|
||||
|
||||
// On z/Architecture, we take UseSHA as the general switch to enable/disable the SHA intrinsics.
|
||||
// The specific switches UseSHAxxxIntrinsics will then be set depending on the actual
|
||||
// machine capabilities.
|
||||
|
@ -4301,6 +4301,68 @@ void Assembler::ret(int imm16) {
|
||||
}
|
||||
}
|
||||
|
||||
void Assembler::roll(Register dst, int imm8) {
|
||||
assert(isShiftCount(imm8 >> 1), "illegal shift count");
|
||||
int encode = prefix_and_encode(dst->encoding());
|
||||
if (imm8 == 1) {
|
||||
emit_int16((unsigned char)0xD1, (0xC0 | encode));
|
||||
} else {
|
||||
emit_int24((unsigned char)0xC1, (0xc0 | encode), imm8);
|
||||
}
|
||||
}
|
||||
|
||||
void Assembler::roll(Register dst) {
|
||||
int encode = prefix_and_encode(dst->encoding());
|
||||
emit_int16((unsigned char)0xD3, (0xC0 | encode));
|
||||
}
|
||||
|
||||
void Assembler::rorl(Register dst, int imm8) {
|
||||
assert(isShiftCount(imm8 >> 1), "illegal shift count");
|
||||
int encode = prefix_and_encode(dst->encoding());
|
||||
if (imm8 == 1) {
|
||||
emit_int16((unsigned char)0xD1, (0xC8 | encode));
|
||||
} else {
|
||||
emit_int24((unsigned char)0xC1, (0xc8 | encode), imm8);
|
||||
}
|
||||
}
|
||||
|
||||
void Assembler::rorl(Register dst) {
|
||||
int encode = prefix_and_encode(dst->encoding());
|
||||
emit_int16((unsigned char)0xD3, (0xC8 | encode));
|
||||
}
|
||||
|
||||
#ifdef _LP64
|
||||
void Assembler::rorq(Register dst) {
|
||||
int encode = prefixq_and_encode(dst->encoding());
|
||||
emit_int16((unsigned char)0xD3, (0xC8 | encode));
|
||||
}
|
||||
|
||||
void Assembler::rorq(Register dst, int imm8) {
|
||||
assert(isShiftCount(imm8 >> 1), "illegal shift count");
|
||||
int encode = prefixq_and_encode(dst->encoding());
|
||||
if (imm8 == 1) {
|
||||
emit_int16((unsigned char)0xD1, (0xC8 | encode));
|
||||
} else {
|
||||
emit_int24((unsigned char)0xC1, (0xc8 | encode), imm8);
|
||||
}
|
||||
}
|
||||
|
||||
void Assembler::rolq(Register dst) {
|
||||
int encode = prefixq_and_encode(dst->encoding());
|
||||
emit_int16((unsigned char)0xD3, (0xC0 | encode));
|
||||
}
|
||||
|
||||
void Assembler::rolq(Register dst, int imm8) {
|
||||
assert(isShiftCount(imm8 >> 1), "illegal shift count");
|
||||
int encode = prefixq_and_encode(dst->encoding());
|
||||
if (imm8 == 1) {
|
||||
emit_int16((unsigned char)0xD1, (0xC0 | encode));
|
||||
} else {
|
||||
emit_int24((unsigned char)0xC1, (0xc0 | encode), imm8);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void Assembler::sahf() {
|
||||
#ifdef _LP64
|
||||
// Not supported in 64bit mode
|
||||
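The byte sequences these rotate helpers emit follow directly from the code above. A worked sketch, assuming dst is rax (register encoding 0, which needs no REX prefix):

    // rorl(rax, 8)  ->  emit_int24(0xC1, 0xC8 | 0, 8)  =>  C1 C8 08   (ror eax, 8)
    // rorl(rax, 1)  ->  emit_int16(0xD1, 0xC8 | 0)      =>  D1 C8      (ror eax, 1)
    // roll(rax)     ->  emit_int16(0xD3, 0xC0 | 0)      =>  D3 C0      (rol eax, cl)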
@ -6216,6 +6278,78 @@ void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vecto
|
||||
emit_operand(dst, src);
|
||||
}
|
||||
|
||||
void Assembler::evprold(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
|
||||
assert(VM_Version::supports_evex(), "requires EVEX support");
|
||||
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
|
||||
attributes.set_is_evex_instruction();
|
||||
int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
|
||||
emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
|
||||
}
|
||||
|
||||
void Assembler::evprolq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
|
||||
assert(VM_Version::supports_evex(), "requires EVEX support");
|
||||
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
|
||||
attributes.set_is_evex_instruction();
|
||||
int encode = vex_prefix_and_encode(xmm1->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
|
||||
emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
|
||||
}
|
||||
|
||||
void Assembler::evprord(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
|
||||
assert(VM_Version::supports_evex(), "requires EVEX support");
|
||||
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
|
||||
attributes.set_is_evex_instruction();
|
||||
int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
|
||||
emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
|
||||
}
|
||||
|
||||
void Assembler::evprorq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
|
||||
assert(VM_Version::supports_evex(), "requires EVEX support");
|
||||
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
|
||||
attributes.set_is_evex_instruction();
|
||||
int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
|
||||
emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
|
||||
}
|
||||
|
||||
void Assembler::evprolvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
|
||||
assert(VM_Version::supports_evex(), "requires EVEX support");
|
||||
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
|
||||
attributes.set_is_evex_instruction();
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int16(0x15, (unsigned char)(0xC0 | encode));
|
||||
}
|
||||
|
||||
void Assembler::evprolvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
|
||||
assert(VM_Version::supports_evex(), "requires EVEX support");
|
||||
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
|
||||
attributes.set_is_evex_instruction();
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int16(0x15, (unsigned char)(0xC0 | encode));
|
||||
}
|
||||
|
||||
void Assembler::evprorvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
|
||||
assert(VM_Version::supports_evex(), "requires EVEX support");
|
||||
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
|
||||
attributes.set_is_evex_instruction();
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int16(0x14, (unsigned char)(0xC0 | encode));
|
||||
}
|
||||
|
||||
void Assembler::evprorvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
|
||||
assert(VM_Version::supports_evex(), "requires EVEX support");
|
||||
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
|
||||
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
|
||||
attributes.set_is_evex_instruction();
|
||||
int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
|
||||
emit_int16(0x14, (unsigned char)(0xC0 | encode));
|
||||
}
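
The eight evprol*/evpror* helpers above emit the AVX-512 per-lane rotate instructions (VPROLD/VPROLQ/VPRORD/VPRORQ and their variable-count VPROLVD/VPROLVQ/VPRORVD/VPRORVQ forms). Each 32-bit or 64-bit lane is rotated independently, with the count taken modulo the lane width. A scalar model of what one 32-bit lane computes, as an illustrative sketch only:

```cpp
#include <cstdint>

// Per-lane semantics of evprold/evprord (and, element-wise, of evprolvd/evprorvd).
static inline uint32_t rotl32(uint32_t x, unsigned s) {
  s &= 31;                                     // count is taken mod 32
  return (x << s) | (x >> ((32 - s) & 31));
}

static inline uint32_t rotr32(uint32_t x, unsigned s) {
  s &= 31;
  return (x >> s) | (x << ((32 - s) & 31));
}
```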
|
||||
|
||||
void Assembler::vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len) {
|
||||
assert(VM_Version::supports_evex(), "requires EVEX support");
|
||||
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
|
||||
@ -8895,15 +9029,6 @@ void Assembler::rcrq(Register dst, int imm8) {
|
||||
}
|
||||
}
|
||||
|
||||
void Assembler::rorq(Register dst, int imm8) {
|
||||
assert(isShiftCount(imm8 >> 1), "illegal shift count");
|
||||
int encode = prefixq_and_encode(dst->encoding());
|
||||
if (imm8 == 1) {
|
||||
emit_int16((unsigned char)0xD1, (0xC8 | encode));
|
||||
} else {
|
||||
emit_int24((unsigned char)0xC1, (0xc8 | encode), imm8);
|
||||
}
|
||||
}
|
||||
|
||||
void Assembler::rorxq(Register dst, Register src, int imm8) {
|
||||
assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
|
||||
|
@ -1827,7 +1827,18 @@ private:
|
||||
|
||||
void ret(int imm16);
|
||||
|
||||
void roll(Register dst);
|
||||
|
||||
void roll(Register dst, int imm8);
|
||||
|
||||
void rorl(Register dst);
|
||||
|
||||
void rorl(Register dst, int imm8);
|
||||
|
||||
#ifdef _LP64
|
||||
void rolq(Register dst);
|
||||
void rolq(Register dst, int imm8);
|
||||
void rorq(Register dst);
|
||||
void rorq(Register dst, int imm8);
|
||||
void rorxq(Register dst, Register src, int imm8);
|
||||
void rorxd(Register dst, Register src, int imm8);
|
||||
@ -2203,6 +2214,16 @@ private:
|
||||
void vpternlogd(XMMRegister dst, int imm8, XMMRegister src2, Address src3, int vector_len);
|
||||
void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, XMMRegister src3, int vector_len);
|
||||
|
||||
// Vector Rotate Left/Right instruction.
|
||||
void evprolvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
|
||||
void evprolvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
|
||||
void evprorvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
|
||||
void evprorvq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
|
||||
void evprold(XMMRegister dst, XMMRegister src, int shift, int vector_len);
|
||||
void evprolq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
|
||||
void evprord(XMMRegister dst, XMMRegister src, int shift, int vector_len);
|
||||
void evprorq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
|
||||
|
||||
// vinserti forms
|
||||
void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
|
||||
void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -62,12 +62,12 @@ void LIR_Address::verify() const {
|
||||
#ifdef _LP64
|
||||
assert(base()->is_cpu_register(), "wrong base operand");
|
||||
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
|
||||
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
|
||||
assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
|
||||
"wrong type for addresses");
|
||||
#else
|
||||
assert(base()->is_single_cpu(), "wrong base operand");
|
||||
assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
|
||||
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
|
||||
assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
|
||||
"wrong type for addresses");
|
||||
#endif
|
||||
}
|
||||
|
@ -870,6 +870,57 @@ void C2_MacroAssembler::vextendbw(bool sign, XMMRegister dst, XMMRegister src, i
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vprotate_imm(int opcode, BasicType etype, XMMRegister dst, XMMRegister src,
|
||||
int shift, int vector_len) {
|
||||
if (opcode == Op_RotateLeftV) {
|
||||
if (etype == T_INT) {
|
||||
evprold(dst, src, shift, vector_len);
|
||||
} else {
|
||||
assert(etype == T_LONG, "expected type T_LONG");
|
||||
evprolq(dst, src, shift, vector_len);
|
||||
}
|
||||
} else {
|
||||
assert(opcode == Op_RotateRightV, "opcode should be Op_RotateRightV");
|
||||
if (etype == T_INT) {
|
||||
evprord(dst, src, shift, vector_len);
|
||||
} else {
|
||||
assert(etype == T_LONG, "expected type T_LONG");
|
||||
evprorq(dst, src, shift, vector_len);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vprotate_var(int opcode, BasicType etype, XMMRegister dst, XMMRegister src,
|
||||
XMMRegister shift, int vector_len) {
|
||||
if (opcode == Op_RotateLeftV) {
|
||||
if (etype == T_INT) {
|
||||
evprolvd(dst, src, shift, vector_len);
|
||||
} else {
|
||||
assert(etype == T_LONG, "expected type T_LONG");
|
||||
evprolvq(dst, src, shift, vector_len);
|
||||
}
|
||||
} else {
|
||||
assert(opcode == Op_RotateRightV, "opcode should be Op_RotateRightV");
|
||||
if (etype == T_INT) {
|
||||
evprorvd(dst, src, shift, vector_len);
|
||||
} else {
|
||||
assert(etype == T_LONG, "expected type T_LONG");
|
||||
evprorvq(dst, src, shift, vector_len);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vshiftd_imm(int opcode, XMMRegister dst, int shift) {
|
||||
if (opcode == Op_RShiftVI) {
|
||||
psrad(dst, shift);
|
||||
} else if (opcode == Op_LShiftVI) {
|
||||
pslld(dst, shift);
|
||||
} else {
|
||||
assert((opcode == Op_URShiftVI),"opcode should be Op_URShiftVI");
|
||||
psrld(dst, shift);
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vshiftd(int opcode, XMMRegister dst, XMMRegister src) {
|
||||
if (opcode == Op_RShiftVI) {
|
||||
psrad(dst, src);
|
||||
@ -881,6 +932,17 @@ void C2_MacroAssembler::vshiftd(int opcode, XMMRegister dst, XMMRegister src) {
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vshiftd_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
|
||||
if (opcode == Op_RShiftVI) {
|
||||
vpsrad(dst, nds, shift, vector_len);
|
||||
} else if (opcode == Op_LShiftVI) {
|
||||
vpslld(dst, nds, shift, vector_len);
|
||||
} else {
|
||||
assert((opcode == Op_URShiftVI),"opcode should be Op_URShiftVI");
|
||||
vpsrld(dst, nds, shift, vector_len);
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vshiftd(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
|
||||
if (opcode == Op_RShiftVI) {
|
||||
vpsrad(dst, nds, src, vector_len);
|
||||
@ -925,6 +987,17 @@ void C2_MacroAssembler::vshiftq(int opcode, XMMRegister dst, XMMRegister src) {
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vshiftq_imm(int opcode, XMMRegister dst, int shift) {
|
||||
if (opcode == Op_RShiftVL) {
|
||||
psrlq(dst, shift); // using srl to implement sra on pre-avx512 systems
|
||||
} else if (opcode == Op_LShiftVL) {
|
||||
psllq(dst, shift);
|
||||
} else {
|
||||
assert((opcode == Op_URShiftVL),"opcode should be Op_URShiftVL");
|
||||
psrlq(dst, shift);
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vshiftq(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
|
||||
if (opcode == Op_RShiftVL) {
|
||||
evpsraq(dst, nds, src, vector_len);
|
||||
@ -936,6 +1009,17 @@ void C2_MacroAssembler::vshiftq(int opcode, XMMRegister dst, XMMRegister nds, XM
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vshiftq_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
|
||||
if (opcode == Op_RShiftVL) {
|
||||
evpsraq(dst, nds, shift, vector_len);
|
||||
} else if (opcode == Op_LShiftVL) {
|
||||
vpsllq(dst, nds, shift, vector_len);
|
||||
} else {
|
||||
assert((opcode == Op_URShiftVL),"opcode should be Op_URShiftVL");
|
||||
vpsrlq(dst, nds, shift, vector_len);
|
||||
}
|
||||
}
|
||||
|
||||
// Reductions for vectors of ints, longs, floats, and doubles.
|
||||
|
||||
void C2_MacroAssembler::reduce_operation_128(int opcode, XMMRegister dst, XMMRegister src) {
|
||||
|
@ -74,11 +74,18 @@ public:
|
||||
void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
|
||||
void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
|
||||
void vshiftd(int opcode, XMMRegister dst, XMMRegister src);
|
||||
void vshiftd_imm(int opcode, XMMRegister dst, int shift);
|
||||
void vshiftd(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
|
||||
void vshiftd_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len);
|
||||
void vshiftw(int opcode, XMMRegister dst, XMMRegister src);
|
||||
void vshiftw(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
|
||||
void vshiftq(int opcode, XMMRegister dst, XMMRegister src);
|
||||
void vshiftq_imm(int opcode, XMMRegister dst, int shift);
|
||||
void vshiftq(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
|
||||
void vshiftq_imm(int opcode, XMMRegister dst, XMMRegister nds, int shift, int vector_len);
|
||||
|
||||
void vprotate_imm(int opcode, BasicType etype, XMMRegister dst, XMMRegister src, int shift, int vector_len);
|
||||
void vprotate_var(int opcode, BasicType etype, XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
|
||||
|
||||
// Reductions for vectors of ints, longs, floats, and doubles.
|
||||
|
||||
|
@ -956,6 +956,9 @@ public:
|
||||
|
||||
#endif
|
||||
|
||||
void fast_md5(Register buf, Address state, Address ofs, Address limit,
|
||||
bool multi_block);
|
||||
|
||||
void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
|
||||
XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
|
||||
Register buf, Register state, Register ofs, Register limit, Register rsp,
|
||||
|
204
src/hotspot/cpu/x86/macroAssembler_x86_md5.cpp
Normal file
@ -0,0 +1,204 @@
|
||||
/*
|
||||
* Copyright (c) 2020 Microsoft Corporation. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (c) 2017 Project Nayuki. (MIT License)
|
||||
* https://www.nayuki.io/page/fast-md5-hash-implementation-in-x86-assembly
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
* this software and associated documentation files (the "Software"), to deal in
|
||||
* the Software without restriction, including without limitation the rights to
|
||||
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
* the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
* subject to the following conditions:
|
||||
* - The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
* - The Software is provided "as is", without warranty of any kind, express or
|
||||
* implied, including but not limited to the warranties of merchantability,
|
||||
* fitness for a particular purpose and noninfringement. In no event shall the
|
||||
* authors or copyright holders be liable for any claim, damages or other
|
||||
* liability, whether in an action of contract, tort or otherwise, arising from,
|
||||
* out of or in connection with the Software or the use or other dealings in the
|
||||
* Software.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/assembler.hpp"
|
||||
#include "asm/assembler.inline.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "macroAssembler_x86.hpp"
|
||||
|
||||
// int com.sun.security.provider.MD5.implCompress0(byte[] b, int ofs)
|
||||
void MacroAssembler::fast_md5(Register buf, Address state, Address ofs, Address limit, bool multi_block) {
|
||||
|
||||
Label start, done_hash, loop0;
|
||||
|
||||
bind(start);
|
||||
|
||||
bind(loop0);
|
||||
|
||||
// Save hash values for addition after rounds
|
||||
movptr(rdi, state);
|
||||
movl(rax, Address(rdi, 0));
|
||||
movl(rbx, Address(rdi, 4));
|
||||
movl(rcx, Address(rdi, 8));
|
||||
movl(rdx, Address(rdi, 12));
|
||||
|
||||
#define FF(r1, r2, r3, r4, k, s, t) \
|
||||
movl(rsi, r3); \
|
||||
addl(r1, Address(buf, k*4)); \
|
||||
xorl(rsi, r4); \
|
||||
andl(rsi, r2); \
|
||||
xorl(rsi, r4); \
|
||||
leal(r1, Address(r1, rsi, Address::times_1, t)); \
|
||||
roll(r1, s); \
|
||||
addl(r1, r2);
|
||||
|
||||
#define GG(r1, r2, r3, r4, k, s, t) \
|
||||
movl(rsi, r4); \
|
||||
movl(rdi, r4); \
|
||||
addl(r1, Address(buf, k*4)); \
|
||||
notl(rsi); \
|
||||
andl(rdi, r2); \
|
||||
andl(rsi, r3); \
|
||||
orl(rsi, rdi); \
|
||||
leal(r1, Address(r1, rsi, Address::times_1, t)); \
|
||||
roll(r1, s); \
|
||||
addl(r1, r2);
|
||||
|
||||
#define HH(r1, r2, r3, r4, k, s, t) \
|
||||
movl(rsi, r3); \
|
||||
addl(r1, Address(buf, k*4)); \
|
||||
xorl(rsi, r4); \
|
||||
xorl(rsi, r2); \
|
||||
leal(r1, Address(r1, rsi, Address::times_1, t)); \
|
||||
roll(r1, s); \
|
||||
addl(r1, r2);
|
||||
|
||||
#define II(r1, r2, r3, r4, k, s, t) \
|
||||
movl(rsi, r4); \
|
||||
notl(rsi); \
|
||||
addl(r1, Address(buf, k*4)); \
|
||||
orl(rsi, r2); \
|
||||
xorl(rsi, r3); \
|
||||
leal(r1, Address(r1, rsi, Address::times_1, t)); \
|
||||
roll(r1, s); \
|
||||
addl(r1, r2);
|
||||
|
||||
// Round 1
|
||||
FF(rax, rbx, rcx, rdx, 0, 7, 0xd76aa478)
|
||||
FF(rdx, rax, rbx, rcx, 1, 12, 0xe8c7b756)
|
||||
FF(rcx, rdx, rax, rbx, 2, 17, 0x242070db)
|
||||
FF(rbx, rcx, rdx, rax, 3, 22, 0xc1bdceee)
|
||||
FF(rax, rbx, rcx, rdx, 4, 7, 0xf57c0faf)
|
||||
FF(rdx, rax, rbx, rcx, 5, 12, 0x4787c62a)
|
||||
FF(rcx, rdx, rax, rbx, 6, 17, 0xa8304613)
|
||||
FF(rbx, rcx, rdx, rax, 7, 22, 0xfd469501)
|
||||
FF(rax, rbx, rcx, rdx, 8, 7, 0x698098d8)
|
||||
FF(rdx, rax, rbx, rcx, 9, 12, 0x8b44f7af)
|
||||
FF(rcx, rdx, rax, rbx, 10, 17, 0xffff5bb1)
|
||||
FF(rbx, rcx, rdx, rax, 11, 22, 0x895cd7be)
|
||||
FF(rax, rbx, rcx, rdx, 12, 7, 0x6b901122)
|
||||
FF(rdx, rax, rbx, rcx, 13, 12, 0xfd987193)
|
||||
FF(rcx, rdx, rax, rbx, 14, 17, 0xa679438e)
|
||||
FF(rbx, rcx, rdx, rax, 15, 22, 0x49b40821)
|
||||
|
||||
// Round 2
|
||||
GG(rax, rbx, rcx, rdx, 1, 5, 0xf61e2562)
|
||||
GG(rdx, rax, rbx, rcx, 6, 9, 0xc040b340)
|
||||
GG(rcx, rdx, rax, rbx, 11, 14, 0x265e5a51)
|
||||
GG(rbx, rcx, rdx, rax, 0, 20, 0xe9b6c7aa)
|
||||
GG(rax, rbx, rcx, rdx, 5, 5, 0xd62f105d)
|
||||
GG(rdx, rax, rbx, rcx, 10, 9, 0x02441453)
|
||||
GG(rcx, rdx, rax, rbx, 15, 14, 0xd8a1e681)
|
||||
GG(rbx, rcx, rdx, rax, 4, 20, 0xe7d3fbc8)
|
||||
GG(rax, rbx, rcx, rdx, 9, 5, 0x21e1cde6)
|
||||
GG(rdx, rax, rbx, rcx, 14, 9, 0xc33707d6)
|
||||
GG(rcx, rdx, rax, rbx, 3, 14, 0xf4d50d87)
|
||||
GG(rbx, rcx, rdx, rax, 8, 20, 0x455a14ed)
|
||||
GG(rax, rbx, rcx, rdx, 13, 5, 0xa9e3e905)
|
||||
GG(rdx, rax, rbx, rcx, 2, 9, 0xfcefa3f8)
|
||||
GG(rcx, rdx, rax, rbx, 7, 14, 0x676f02d9)
|
||||
GG(rbx, rcx, rdx, rax, 12, 20, 0x8d2a4c8a)
|
||||
|
||||
// Round 3
|
||||
HH(rax, rbx, rcx, rdx, 5, 4, 0xfffa3942)
|
||||
HH(rdx, rax, rbx, rcx, 8, 11, 0x8771f681)
|
||||
HH(rcx, rdx, rax, rbx, 11, 16, 0x6d9d6122)
|
||||
HH(rbx, rcx, rdx, rax, 14, 23, 0xfde5380c)
|
||||
HH(rax, rbx, rcx, rdx, 1, 4, 0xa4beea44)
|
||||
HH(rdx, rax, rbx, rcx, 4, 11, 0x4bdecfa9)
|
||||
HH(rcx, rdx, rax, rbx, 7, 16, 0xf6bb4b60)
|
||||
HH(rbx, rcx, rdx, rax, 10, 23, 0xbebfbc70)
|
||||
HH(rax, rbx, rcx, rdx, 13, 4, 0x289b7ec6)
|
||||
HH(rdx, rax, rbx, rcx, 0, 11, 0xeaa127fa)
|
||||
HH(rcx, rdx, rax, rbx, 3, 16, 0xd4ef3085)
|
||||
HH(rbx, rcx, rdx, rax, 6, 23, 0x04881d05)
|
||||
HH(rax, rbx, rcx, rdx, 9, 4, 0xd9d4d039)
|
||||
HH(rdx, rax, rbx, rcx, 12, 11, 0xe6db99e5)
|
||||
HH(rcx, rdx, rax, rbx, 15, 16, 0x1fa27cf8)
|
||||
HH(rbx, rcx, rdx, rax, 2, 23, 0xc4ac5665)
|
||||
|
||||
// Round 4
|
||||
II(rax, rbx, rcx, rdx, 0, 6, 0xf4292244)
|
||||
II(rdx, rax, rbx, rcx, 7, 10, 0x432aff97)
|
||||
II(rcx, rdx, rax, rbx, 14, 15, 0xab9423a7)
|
||||
II(rbx, rcx, rdx, rax, 5, 21, 0xfc93a039)
|
||||
II(rax, rbx, rcx, rdx, 12, 6, 0x655b59c3)
|
||||
II(rdx, rax, rbx, rcx, 3, 10, 0x8f0ccc92)
|
||||
II(rcx, rdx, rax, rbx, 10, 15, 0xffeff47d)
|
||||
II(rbx, rcx, rdx, rax, 1, 21, 0x85845dd1)
|
||||
II(rax, rbx, rcx, rdx, 8, 6, 0x6fa87e4f)
|
||||
II(rdx, rax, rbx, rcx, 15, 10, 0xfe2ce6e0)
|
||||
II(rcx, rdx, rax, rbx, 6, 15, 0xa3014314)
|
||||
II(rbx, rcx, rdx, rax, 13, 21, 0x4e0811a1)
|
||||
II(rax, rbx, rcx, rdx, 4, 6, 0xf7537e82)
|
||||
II(rdx, rax, rbx, rcx, 11, 10, 0xbd3af235)
|
||||
II(rcx, rdx, rax, rbx, 2, 15, 0x2ad7d2bb)
|
||||
II(rbx, rcx, rdx, rax, 9, 21, 0xeb86d391)
|
||||
|
||||
#undef FF
|
||||
#undef GG
|
||||
#undef HH
|
||||
#undef II
|
||||
|
||||
// write hash values back in the correct order
|
||||
movptr(rdi, state);
|
||||
addl(Address(rdi, 0), rax);
|
||||
addl(Address(rdi, 4), rbx);
|
||||
addl(Address(rdi, 8), rcx);
|
||||
addl(Address(rdi, 12), rdx);
|
||||
|
||||
if (multi_block) {
|
||||
// increment data pointer and loop if more to process
|
||||
addptr(buf, 64);
|
||||
addl(ofs, 64);
|
||||
movl(rsi, ofs);
|
||||
cmpl(rsi, limit);
|
||||
jcc(Assembler::belowEqual, loop0);
|
||||
movptr(rax, rsi); //return ofs
|
||||
}
|
||||
|
||||
bind(done_hash);
|
||||
}
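
The FF/GG/HH/II macros above are a register-level rendering of the four MD5 round steps from RFC 1321: each step folds one message word and one additive constant into the rotating state, rotates by a per-step amount, and adds the previous word. A plain C++ sketch of the same step, for reference only (the helper names here are illustrative and not part of the patch):

```cpp
#include <cstdint>

static inline uint32_t rotl32(uint32_t x, unsigned s) {
  return (x << s) | (x >> (32 - s));           // MD5 uses s in 4..23, so no UB
}

// Round functions F/G/H/I correspond to the FF/GG/HH/II macros above.
static inline uint32_t F(uint32_t b, uint32_t c, uint32_t d) { return (b & c) | (~b & d); }
static inline uint32_t G(uint32_t b, uint32_t c, uint32_t d) { return (b & d) | (c & ~d); }
static inline uint32_t H(uint32_t b, uint32_t c, uint32_t d) { return b ^ c ^ d; }
static inline uint32_t I(uint32_t b, uint32_t c, uint32_t d) { return c ^ (b | ~d); }

// One step: a = b + rotl(a + f(b,c,d) + m[k] + t, s), which is what each macro emits.
static inline uint32_t md5_step(uint32_t a, uint32_t b, uint32_t f,
                                uint32_t m_k, uint32_t t, unsigned s) {
  return b + rotl32(a + f + m_k + t, s);
}
```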
|
@ -2884,6 +2884,46 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start;
|
||||
}
|
||||
|
||||
// ofs and limit are used for the multi-block byte array.
|
||||
// int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs)
|
||||
address generate_md5_implCompress(bool multi_block, const char *name) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
const Register buf_param = rbp;
|
||||
const Address state_param(rsp, 0 * wordSize);
|
||||
const Address ofs_param (rsp, 1 * wordSize);
|
||||
const Address limit_param(rsp, 2 * wordSize);
|
||||
|
||||
__ enter();
|
||||
__ push(rbx);
|
||||
__ push(rdi);
|
||||
__ push(rsi);
|
||||
__ push(rbp);
|
||||
__ subptr(rsp, 3 * wordSize);
|
||||
|
||||
__ movptr(rsi, Address(rbp, 8 + 4));
|
||||
__ movptr(state_param, rsi);
|
||||
if (multi_block) {
|
||||
__ movptr(rsi, Address(rbp, 8 + 8));
|
||||
__ movptr(ofs_param, rsi);
|
||||
__ movptr(rsi, Address(rbp, 8 + 12));
|
||||
__ movptr(limit_param, rsi);
|
||||
}
|
||||
__ movptr(buf_param, Address(rbp, 8 + 0)); // do this last because it overwrites rbp
|
||||
__ fast_md5(buf_param, state_param, ofs_param, limit_param, multi_block);
|
||||
|
||||
__ addptr(rsp, 3 * wordSize);
|
||||
__ pop(rbp);
|
||||
__ pop(rsi);
|
||||
__ pop(rdi);
|
||||
__ pop(rbx);
|
||||
__ leave();
|
||||
__ ret(0);
|
||||
return start;
|
||||
}
|
||||
|
||||
address generate_upper_word_mask() {
|
||||
__ align(64);
|
||||
StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
|
||||
@ -3886,6 +3926,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
|
||||
}
|
||||
|
||||
if (UseMD5Intrinsics) {
|
||||
StubRoutines::_md5_implCompress = generate_md5_implCompress(false, "md5_implCompress");
|
||||
StubRoutines::_md5_implCompressMB = generate_md5_implCompress(true, "md5_implCompressMB");
|
||||
}
|
||||
if (UseSHA1Intrinsics) {
|
||||
StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
|
||||
StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
|
||||
|
@ -3646,6 +3646,43 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start;
|
||||
}
|
||||
|
||||
// ofs and limit are used for the multi-block byte array.
|
||||
// int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs)
|
||||
address generate_md5_implCompress(bool multi_block, const char *name) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
const Register buf_param = r15;
|
||||
const Address state_param(rsp, 0 * wordSize);
|
||||
const Address ofs_param (rsp, 1 * wordSize );
|
||||
const Address limit_param(rsp, 1 * wordSize + 4);
|
||||
|
||||
__ enter();
|
||||
__ push(rbx);
|
||||
__ push(rdi);
|
||||
__ push(rsi);
|
||||
__ push(r15);
|
||||
__ subptr(rsp, 2 * wordSize);
|
||||
|
||||
__ movptr(buf_param, c_rarg0);
|
||||
__ movptr(state_param, c_rarg1);
|
||||
if (multi_block) {
|
||||
__ movl(ofs_param, c_rarg2);
|
||||
__ movl(limit_param, c_rarg3);
|
||||
}
|
||||
__ fast_md5(buf_param, state_param, ofs_param, limit_param, multi_block);
|
||||
|
||||
__ addptr(rsp, 2 * wordSize);
|
||||
__ pop(r15);
|
||||
__ pop(rsi);
|
||||
__ pop(rdi);
|
||||
__ pop(rbx);
|
||||
__ leave();
|
||||
__ ret(0);
|
||||
return start;
|
||||
}
|
||||
|
||||
address generate_upper_word_mask() {
|
||||
__ align(64);
|
||||
StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
|
||||
@ -6327,6 +6364,10 @@ address generate_avx_ghash_processBlocks() {
|
||||
}
|
||||
}
|
||||
|
||||
if (UseMD5Intrinsics) {
|
||||
StubRoutines::_md5_implCompress = generate_md5_implCompress(false, "md5_implCompress");
|
||||
StubRoutines::_md5_implCompressMB = generate_md5_implCompress(true, "md5_implCompressMB");
|
||||
}
|
||||
if (UseSHA1Intrinsics) {
|
||||
StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
|
||||
StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
|
||||
|
@ -945,6 +945,10 @@ void VM_Version::get_processor_features() {
|
||||
FLAG_SET_DEFAULT(UseFMA, false);
|
||||
}
|
||||
|
||||
if (FLAG_IS_DEFAULT(UseMD5Intrinsics)) {
|
||||
UseMD5Intrinsics = true;
|
||||
}
|
||||
|
||||
if (supports_sha() LP64_ONLY(|| supports_avx2() && supports_bmi2())) {
|
||||
if (FLAG_IS_DEFAULT(UseSHA)) {
|
||||
UseSHA = true;
|
||||
|
@ -1469,6 +1469,8 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType
|
||||
return false; // implementation limitation (only vcmov8F_reg is present)
|
||||
}
|
||||
break;
|
||||
case Op_RotateRightV:
|
||||
case Op_RotateLeftV:
|
||||
case Op_MacroLogicV:
|
||||
if (!VM_Version::supports_evex() ||
|
||||
((size_in_bits != 512) && !VM_Version::supports_avx512vl())) {
|
||||
@ -4955,6 +4957,33 @@ instruct vshiftI(vec dst, vec src, vec shift) %{
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
// Integers vector left constant shift
|
||||
instruct vshiftI_imm(vec dst, vec src, immI8 shift) %{
|
||||
match(Set dst (LShiftVI src (LShiftCntV shift)));
|
||||
match(Set dst (RShiftVI src (RShiftCntV shift)));
|
||||
match(Set dst (URShiftVI src (RShiftCntV shift)));
|
||||
effect(TEMP dst, USE src);
|
||||
format %{ "vshiftd_imm $dst,$src,$shift\t! shift packedI" %}
|
||||
ins_encode %{
|
||||
int opcode = this->ideal_Opcode();
|
||||
if (UseAVX > 0) {
|
||||
int vector_len = vector_length_encoding(this);
|
||||
__ vshiftd_imm(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$constant, vector_len);
|
||||
} else {
|
||||
int vlen = vector_length(this);
|
||||
if (vlen == 2) {
|
||||
__ movdbl($dst$$XMMRegister, $src$$XMMRegister);
|
||||
__ vshiftd_imm(opcode, $dst$$XMMRegister, $shift$$constant);
|
||||
} else {
|
||||
assert(vlen == 4, "sanity");
|
||||
__ movdqu($dst$$XMMRegister, $src$$XMMRegister);
|
||||
__ vshiftd_imm(opcode, $dst$$XMMRegister, $shift$$constant);
|
||||
}
|
||||
}
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
// Longs vector shift
|
||||
instruct vshiftL(vec dst, vec src, vec shift) %{
|
||||
match(Set dst (LShiftVL src shift));
|
||||
@ -4975,6 +5004,26 @@ instruct vshiftL(vec dst, vec src, vec shift) %{
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
// Longs vector constant shift
|
||||
instruct vshiftL_imm(vec dst, vec src, immI8 shift) %{
|
||||
match(Set dst (LShiftVL src (LShiftCntV shift)));
|
||||
match(Set dst (URShiftVL src (RShiftCntV shift)));
|
||||
effect(TEMP dst, USE src, USE shift);
|
||||
format %{ "vshiftq_imm $dst,$src,$shift\t! shift packedL" %}
|
||||
ins_encode %{
|
||||
int opcode = this->ideal_Opcode();
|
||||
if (UseAVX > 0) {
|
||||
int vector_len = vector_length_encoding(this);
|
||||
__ vshiftq_imm(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$constant, vector_len);
|
||||
} else {
|
||||
assert(vector_length(this) == 2, "");
|
||||
__ movdqu($dst$$XMMRegister, $src$$XMMRegister);
|
||||
__ vshiftq_imm(opcode, $dst$$XMMRegister, $shift$$constant);
|
||||
}
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
// -------------------ArithmeticRightShift -----------------------------------
|
||||
// Long vector arithmetic right shift
|
||||
instruct vshiftL_arith_reg(vec dst, vec src, vec shift, vec tmp, rRegI scratch) %{
|
||||
@ -5359,3 +5408,31 @@ instruct vpternlog_mem(vec dst, vec src2, memory src3, immU8 func) %{
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
// --------------------------------- Rotation Operations ----------------------------------
|
||||
instruct vprotate_immI8(vec dst, vec src, immI8 shift) %{
|
||||
match(Set dst (RotateLeftV src shift));
|
||||
match(Set dst (RotateRightV src shift));
|
||||
format %{ "vprotate_imm8 $dst,$src,$shift\t! vector rotate" %}
|
||||
ins_encode %{
|
||||
int opcode = this->ideal_Opcode();
|
||||
int vector_len = vector_length_encoding(this);
|
||||
BasicType etype = this->bottom_type()->is_vect()->element_basic_type();
|
||||
__ vprotate_imm(opcode, etype, $dst$$XMMRegister, $src$$XMMRegister, $shift$$constant, vector_len);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct vprorate(vec dst, vec src, vec shift) %{
|
||||
match(Set dst (RotateLeftV src shift));
|
||||
match(Set dst (RotateRightV src shift));
|
||||
format %{ "vprotate $dst,$src,$shift\t! vector rotate" %}
|
||||
ins_encode %{
|
||||
int opcode = this->ideal_Opcode();
|
||||
int vector_len = vector_length_encoding(this);
|
||||
BasicType etype = this->bottom_type()->is_vect()->element_basic_type();
|
||||
__ vprotate_var(opcode, etype, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
|
@ -3339,19 +3339,6 @@ operand rdi_RegI()
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand no_rcx_RegI()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(int_no_rcx_reg));
|
||||
match(RegI);
|
||||
match(rax_RegI);
|
||||
match(rbx_RegI);
|
||||
match(rdx_RegI);
|
||||
match(rdi_RegI);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand no_rax_rdx_RegI()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(int_no_rax_rdx_reg));
|
||||
@ -3561,16 +3548,6 @@ operand no_rax_RegL()
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand no_rcx_RegL()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(long_no_rcx_reg));
|
||||
match(RegL);
|
||||
match(rRegL);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand rax_RegL()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(long_rax_reg));
|
||||
@ -8969,294 +8946,137 @@ instruct i2s(rRegI dst, rRegI src, immI_16 sixteen)
|
||||
|
||||
// ROL/ROR instructions
|
||||
|
||||
// ROL expand
|
||||
instruct rolI_rReg_imm1(rRegI dst, rFlagsReg cr) %{
|
||||
effect(KILL cr, USE_DEF dst);
|
||||
|
||||
format %{ "roll $dst" %}
|
||||
opcode(0xD1, 0x0); /* Opcode D1 /0 */
|
||||
ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
instruct rolI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr) %{
|
||||
effect(USE_DEF dst, USE shift, KILL cr);
|
||||
|
||||
// Rotate left by constant.
|
||||
instruct rolI_imm(rRegI dst, immI8 shift, rFlagsReg cr)
|
||||
%{
|
||||
predicate(n->bottom_type()->basic_type() == T_INT);
|
||||
match(Set dst (RotateLeft dst shift));
|
||||
effect(KILL cr);
|
||||
format %{ "roll $dst, $shift" %}
|
||||
opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
|
||||
ins_encode( reg_opc_imm(dst, shift) );
|
||||
ins_encode %{
|
||||
__ roll($dst$$Register, $shift$$constant);
|
||||
%}
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
instruct rolI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
|
||||
// Rotate Left by variable
|
||||
instruct rolI_rReg_Var(rRegI dst, rcx_RegI shift, rFlagsReg cr)
|
||||
%{
|
||||
effect(USE_DEF dst, USE shift, KILL cr);
|
||||
|
||||
predicate(n->bottom_type()->basic_type() == T_INT);
|
||||
match(Set dst (RotateLeft dst shift));
|
||||
effect(KILL cr);
|
||||
format %{ "roll $dst, $shift" %}
|
||||
opcode(0xD3, 0x0); /* Opcode D3 /0 */
|
||||
ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
|
||||
ins_encode %{
|
||||
__ roll($dst$$Register);
|
||||
%}
|
||||
ins_pipe(ialu_reg_reg);
|
||||
%}
|
||||
// end of ROL expand
|
||||
|
||||
// Rotate Left by one
|
||||
instruct rolI_rReg_i1(rRegI dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
|
||||
// Rotate Right by constant.
|
||||
instruct rorI_immI8_legacy(rRegI dst, immI8 shift, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
|
||||
|
||||
expand %{
|
||||
rolI_rReg_imm1(dst, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Rotate Left by 8-bit immediate
|
||||
instruct rolI_rReg_i8(rRegI dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
|
||||
%{
|
||||
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
|
||||
match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
|
||||
|
||||
expand %{
|
||||
rolI_rReg_imm8(dst, lshift, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Rotate Left by variable
|
||||
instruct rolI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI zero shift))));
|
||||
|
||||
expand %{
|
||||
rolI_rReg_CL(dst, shift, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Rotate Left by variable
|
||||
instruct rolI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI c32 shift))));
|
||||
|
||||
expand %{
|
||||
rolI_rReg_CL(dst, shift, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// ROR expand
|
||||
instruct rorI_rReg_imm1(rRegI dst, rFlagsReg cr)
|
||||
%{
|
||||
effect(USE_DEF dst, KILL cr);
|
||||
|
||||
format %{ "rorl $dst" %}
|
||||
opcode(0xD1, 0x1); /* D1 /1 */
|
||||
ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
instruct rorI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr)
|
||||
%{
|
||||
effect(USE_DEF dst, USE shift, KILL cr);
|
||||
|
||||
predicate(!VM_Version::supports_bmi2() && n->bottom_type()->basic_type() == T_INT);
|
||||
match(Set dst (RotateRight dst shift));
|
||||
effect(KILL cr);
|
||||
format %{ "rorl $dst, $shift" %}
|
||||
opcode(0xC1, 0x1); /* C1 /1 ib */
|
||||
ins_encode(reg_opc_imm(dst, shift));
|
||||
ins_encode %{
|
||||
__ rorl($dst$$Register, $shift$$constant);
|
||||
%}
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
instruct rorI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
|
||||
// Rotate Right by constant.
|
||||
instruct rorI_immI8(rRegI dst, immI8 shift)
|
||||
%{
|
||||
effect(USE_DEF dst, USE shift, KILL cr);
|
||||
predicate(VM_Version::supports_bmi2() && n->bottom_type()->basic_type() == T_INT);
|
||||
match(Set dst (RotateRight dst shift));
|
||||
format %{ "rorxd $dst, $shift" %}
|
||||
ins_encode %{
|
||||
__ rorxd($dst$$Register, $dst$$Register, $shift$$constant);
|
||||
%}
|
||||
ins_pipe(ialu_reg_reg);
|
||||
%}
|
||||
|
||||
// Rotate Right by variable
|
||||
instruct rorI_rReg_Var(rRegI dst, rcx_RegI shift, rFlagsReg cr)
|
||||
%{
|
||||
predicate(n->bottom_type()->basic_type() == T_INT);
|
||||
match(Set dst (RotateRight dst shift));
|
||||
effect(KILL cr);
|
||||
format %{ "rorl $dst, $shift" %}
|
||||
opcode(0xD3, 0x1); /* D3 /1 */
|
||||
ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
|
||||
ins_encode %{
|
||||
__ rorl($dst$$Register);
|
||||
%}
|
||||
ins_pipe(ialu_reg_reg);
|
||||
%}
|
||||
// end of ROR expand
|
||||
|
||||
// Rotate Right by one
|
||||
instruct rorI_rReg_i1(rRegI dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
|
||||
|
||||
// Rotate Left by constant.
|
||||
instruct rolL_immI8(rRegL dst, immI8 shift, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
|
||||
|
||||
expand %{
|
||||
rorI_rReg_imm1(dst, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Rotate Right by 8-bit immediate
|
||||
instruct rorI_rReg_i8(rRegI dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
|
||||
%{
|
||||
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
|
||||
match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
|
||||
|
||||
expand %{
|
||||
rorI_rReg_imm8(dst, rshift, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Rotate Right by variable
|
||||
instruct rorI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI zero shift))));
|
||||
|
||||
expand %{
|
||||
rorI_rReg_CL(dst, shift, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Rotate Right by variable
|
||||
instruct rorI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI c32 shift))));
|
||||
|
||||
expand %{
|
||||
rorI_rReg_CL(dst, shift, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// for long rotate
|
||||
// ROL expand
|
||||
instruct rolL_rReg_imm1(rRegL dst, rFlagsReg cr) %{
|
||||
effect(USE_DEF dst, KILL cr);
|
||||
|
||||
format %{ "rolq $dst" %}
|
||||
opcode(0xD1, 0x0); /* Opcode D1 /0 */
|
||||
ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
instruct rolL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr) %{
|
||||
effect(USE_DEF dst, USE shift, KILL cr);
|
||||
|
||||
predicate(n->bottom_type()->basic_type() == T_LONG);
|
||||
match(Set dst (RotateLeft dst shift));
|
||||
effect(KILL cr);
|
||||
format %{ "rolq $dst, $shift" %}
|
||||
opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
|
||||
ins_encode( reg_opc_imm_wide(dst, shift) );
|
||||
ins_encode %{
|
||||
__ rolq($dst$$Register, $shift$$constant);
|
||||
%}
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
instruct rolL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
|
||||
%{
|
||||
effect(USE_DEF dst, USE shift, KILL cr);
|
||||
|
||||
format %{ "rolq $dst, $shift" %}
|
||||
opcode(0xD3, 0x0); /* Opcode D3 /0 */
|
||||
ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
|
||||
ins_pipe(ialu_reg_reg);
|
||||
%}
|
||||
// end of ROL expand
|
||||
|
||||
// Rotate Left by one
|
||||
instruct rolL_rReg_i1(rRegL dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
|
||||
|
||||
expand %{
|
||||
rolL_rReg_imm1(dst, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Rotate Left by 8-bit immediate
|
||||
instruct rolL_rReg_i8(rRegL dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
|
||||
%{
|
||||
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
|
||||
match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
|
||||
|
||||
expand %{
|
||||
rolL_rReg_imm8(dst, lshift, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Rotate Left by variable
|
||||
instruct rolL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
|
||||
instruct rolL_rReg_Var(rRegL dst, rcx_RegI shift, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI zero shift))));
|
||||
|
||||
expand %{
|
||||
rolL_rReg_CL(dst, shift, cr);
|
||||
predicate(n->bottom_type()->basic_type() == T_LONG);
|
||||
match(Set dst (RotateLeft dst shift));
|
||||
effect(KILL cr);
|
||||
format %{ "rolq $dst, $shift" %}
|
||||
ins_encode %{
|
||||
__ rolq($dst$$Register);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Rotate Left by variable
|
||||
instruct rolL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI c64 shift))));
|
||||
|
||||
expand %{
|
||||
rolL_rReg_CL(dst, shift, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// ROR expand
|
||||
instruct rorL_rReg_imm1(rRegL dst, rFlagsReg cr)
|
||||
%{
|
||||
effect(USE_DEF dst, KILL cr);
|
||||
|
||||
format %{ "rorq $dst" %}
|
||||
opcode(0xD1, 0x1); /* D1 /1 */
|
||||
ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
instruct rorL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr)
|
||||
%{
|
||||
effect(USE_DEF dst, USE shift, KILL cr);
|
||||
|
||||
format %{ "rorq $dst, $shift" %}
|
||||
opcode(0xC1, 0x1); /* C1 /1 ib */
|
||||
ins_encode(reg_opc_imm_wide(dst, shift));
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
instruct rorL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
|
||||
%{
|
||||
effect(USE_DEF dst, USE shift, KILL cr);
|
||||
|
||||
format %{ "rorq $dst, $shift" %}
|
||||
opcode(0xD3, 0x1); /* D3 /1 */
|
||||
ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
|
||||
ins_pipe(ialu_reg_reg);
|
||||
%}
|
||||
// end of ROR expand
|
||||
|
||||
// Rotate Right by one
|
||||
instruct rorL_rReg_i1(rRegL dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
|
||||
|
||||
// Rotate Right by constant.
|
||||
instruct rorL_immI8_legacy(rRegL dst, immI8 shift, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
|
||||
|
||||
expand %{
|
||||
rorL_rReg_imm1(dst, cr);
|
||||
predicate(!VM_Version::supports_bmi2() && n->bottom_type()->basic_type() == T_LONG);
|
||||
match(Set dst (RotateRight dst shift));
|
||||
effect(KILL cr);
|
||||
format %{ "rorq $dst, $shift" %}
|
||||
ins_encode %{
|
||||
__ rorq($dst$$Register, $shift$$constant);
|
||||
%}
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
// Rotate Right by 8-bit immediate
|
||||
instruct rorL_rReg_i8(rRegL dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
|
||||
%{
|
||||
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
|
||||
match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
|
||||
|
||||
expand %{
|
||||
rorL_rReg_imm8(dst, rshift, cr);
|
||||
// Rotate Right by constant
|
||||
instruct rorL_immI8(rRegL dst, immI8 shift)
|
||||
%{
|
||||
predicate(VM_Version::supports_bmi2() && n->bottom_type()->basic_type() == T_LONG);
|
||||
match(Set dst (RotateRight dst shift));
|
||||
format %{ "rorxq $dst, $shift" %}
|
||||
ins_encode %{
|
||||
__ rorxq($dst$$Register, $dst$$Register, $shift$$constant);
|
||||
%}
|
||||
ins_pipe(ialu_reg_reg);
|
||||
%}
|
||||
|
||||
// Rotate Right by variable
|
||||
instruct rorL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
|
||||
instruct rorL_rReg_Var(rRegL dst, rcx_RegI shift, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI zero shift))));
|
||||
|
||||
expand %{
|
||||
rorL_rReg_CL(dst, shift, cr);
|
||||
predicate(n->bottom_type()->basic_type() == T_LONG);
|
||||
match(Set dst (RotateRight dst shift));
|
||||
effect(KILL cr);
|
||||
format %{ "rorq $dst, $shift" %}
|
||||
ins_encode %{
|
||||
__ rorq($dst$$Register);
|
||||
%}
|
||||
ins_pipe(ialu_reg_reg);
|
||||
%}
|
||||
|
||||
// Rotate Right by variable
|
||||
instruct rorL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI c64 shift))));
|
||||
|
||||
expand %{
|
||||
rorL_rReg_CL(dst, shift, cr);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Logical Instructions
|
||||
|
||||
|
@ -110,6 +110,9 @@
|
||||
# include <inttypes.h>
|
||||
# include <sys/ioctl.h>
|
||||
# include <linux/elf-em.h>
|
||||
#ifdef __GLIBC__
|
||||
# include <malloc.h>
|
||||
#endif
|
||||
|
||||
#ifndef _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
@ -2156,7 +2159,10 @@ void os::print_os_info(outputStream* st) {
|
||||
os::Posix::print_load_average(st);
|
||||
st->cr();
|
||||
|
||||
os::Linux::print_full_memory_info(st);
|
||||
os::Linux::print_system_memory_info(st);
|
||||
st->cr();
|
||||
|
||||
os::Linux::print_process_memory_info(st);
|
||||
st->cr();
|
||||
|
||||
os::Linux::print_proc_sys_info(st);
|
||||
@ -2314,7 +2320,7 @@ void os::Linux::print_proc_sys_info(outputStream* st) {
|
||||
"/proc/sys/kernel/pid_max", st);
|
||||
}
|
||||
|
||||
void os::Linux::print_full_memory_info(outputStream* st) {
|
||||
void os::Linux::print_system_memory_info(outputStream* st) {
|
||||
_print_ascii_file_h("/proc/meminfo", "/proc/meminfo", st, false);
|
||||
st->cr();
|
||||
|
||||
@ -2326,6 +2332,63 @@ void os::Linux::print_full_memory_info(outputStream* st) {
|
||||
"/sys/kernel/mm/transparent_hugepage/defrag", st);
|
||||
}
|
||||
|
||||
void os::Linux::print_process_memory_info(outputStream* st) {
|
||||
|
||||
st->print_cr("Process Memory:");
|
||||
|
||||
// Print virtual and resident set size; peak values; swap; and for
|
||||
// rss its components if the kernel is recent enough.
|
||||
ssize_t vmsize = -1, vmpeak = -1, vmswap = -1,
|
||||
vmrss = -1, vmhwm = -1, rssanon = -1, rssfile = -1, rssshmem = -1;
|
||||
const int num_values = 8;
|
||||
int num_found = 0;
|
||||
FILE* f = ::fopen("/proc/self/status", "r");
|
||||
char buf[256];
|
||||
while (::fgets(buf, sizeof(buf), f) != NULL && num_found < num_values) {
|
||||
if ( (vmsize == -1 && sscanf(buf, "VmSize: " SSIZE_FORMAT " kB", &vmsize) == 1) ||
|
||||
(vmpeak == -1 && sscanf(buf, "VmPeak: " SSIZE_FORMAT " kB", &vmpeak) == 1) ||
|
||||
(vmswap == -1 && sscanf(buf, "VmSwap: " SSIZE_FORMAT " kB", &vmswap) == 1) ||
|
||||
(vmhwm == -1 && sscanf(buf, "VmHWM: " SSIZE_FORMAT " kB", &vmhwm) == 1) ||
|
||||
(vmrss == -1 && sscanf(buf, "VmRSS: " SSIZE_FORMAT " kB", &vmrss) == 1) ||
|
||||
(rssanon == -1 && sscanf(buf, "RssAnon: " SSIZE_FORMAT " kB", &rssanon) == 1) ||
|
||||
(rssfile == -1 && sscanf(buf, "RssFile: " SSIZE_FORMAT " kB", &rssfile) == 1) ||
|
||||
(rssshmem == -1 && sscanf(buf, "RssShmem: " SSIZE_FORMAT " kB", &rssshmem) == 1)
|
||||
)
|
||||
{
|
||||
num_found ++;
|
||||
}
|
||||
}
|
||||
st->print_cr("Virtual Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", vmsize, vmpeak);
|
||||
st->print("Resident Set Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", vmrss, vmhwm);
|
||||
if (rssanon != -1) { // requires kernel >= 4.5
|
||||
st->print(" (anon: " SSIZE_FORMAT "K, file: " SSIZE_FORMAT "K, shmem: " SSIZE_FORMAT "K)",
|
||||
rssanon, rssfile, rssshmem);
|
||||
}
|
||||
st->cr();
|
||||
if (vmswap != -1) { // requires kernel >= 2.6.34
|
||||
st->print_cr("Swapped out: " SSIZE_FORMAT "K", vmswap);
|
||||
}
|
||||
|
||||
// Print glibc outstanding allocations.
|
||||
// (note: there is no implementation of mallinfo for muslc)
|
||||
#ifdef __GLIBC__
|
||||
struct mallinfo mi = ::mallinfo();
|
||||
|
||||
// mallinfo is an old API. Member names mean next to nothing and, beyond that, are int.
|
||||
// So values may have wrapped around. Still useful enough to see how much glibc thinks
|
||||
// we allocated.
|
||||
const size_t total_allocated = (size_t)(unsigned)mi.uordblks;
|
||||
st->print("C-Heap outstanding allocations: " SIZE_FORMAT "K", total_allocated / K);
|
||||
// Since mallinfo members are int, glibc values may have wrapped. Warn about this.
|
||||
if ((vmrss * K) > UINT_MAX && (vmrss * K) > (total_allocated + UINT_MAX)) {
|
||||
st->print(" (may have wrapped)");
|
||||
}
|
||||
st->cr();
|
||||
|
||||
#endif // __GLIBC__
|
||||
|
||||
}
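
print_process_memory_info() above scrapes VmSize/VmPeak/VmRSS/VmHWM/VmSwap and the RssAnon/RssFile/RssShmem split out of /proc/self/status with sscanf, keeping -1 for fields that older kernels do not report. A minimal standalone sketch of the same parsing idea, with a hypothetical helper name; unlike the hunk above it also checks the fopen result and closes the stream:

```cpp
#include <cstdio>

// Return the value (in kB) of one "<key>: <n> kB" line from /proc/self/status,
// or -1 if the kernel does not report it. Sketch only; the name is made up.
static long read_status_kb(const char* key) {           // e.g. "VmRSS"
  long value = -1;
  char fmt[64], line[256];
  snprintf(fmt, sizeof(fmt), "%s: %%ld kB", key);
  if (FILE* f = fopen("/proc/self/status", "r")) {
    while (fgets(line, sizeof(line), f) != nullptr) {
      if (sscanf(line, fmt, &value) == 1) break;         // first match wins
    }
    fclose(f);
  }
  return value;
}
```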
|
||||
|
||||
bool os::Linux::print_ld_preload_file(outputStream* st) {
|
||||
return _print_ascii_file("/etc/ld.so.preload", st, "/etc/ld.so.preload:");
|
||||
}
|
||||
|
@ -101,7 +101,8 @@ class Linux {
|
||||
static bool release_memory_special_shm(char* base, size_t bytes);
|
||||
static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
|
||||
|
||||
static void print_full_memory_info(outputStream* st);
|
||||
static void print_process_memory_info(outputStream* st);
|
||||
static void print_system_memory_info(outputStream* st);
|
||||
static bool print_container_info(outputStream* st);
|
||||
static void print_steal_info(outputStream* st);
|
||||
static void print_distro_info(outputStream* st);
|
||||
|
@ -4987,7 +4987,7 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
|
||||
|
||||
hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
|
||||
OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
|
||||
if (hFile == NULL) {
|
||||
if (hFile == INVALID_HANDLE_VALUE) {
|
||||
log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
|
||||
return NULL;
|
||||
}
|
||||
|
@ -773,6 +773,8 @@ bool InstructForm::captures_bottom_type(FormDict &globals) const {
|
||||
!strcmp(_matrule->_rChild->_opType,"CheckCastPP") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"GetAndSetP") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"RotateLeft") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"RotateRight") ||
|
||||
#if INCLUDE_SHENANDOAHGC
|
||||
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
|
||||
@ -4164,7 +4166,7 @@ bool MatchRule::is_vector() const {
|
||||
"RShiftVB","RShiftVS","RShiftVI","RShiftVL",
|
||||
"URShiftVB","URShiftVS","URShiftVI","URShiftVL",
|
||||
"ReplicateB","ReplicateS","ReplicateI","ReplicateL","ReplicateF","ReplicateD",
|
||||
"RoundDoubleModeV","LoadVector","StoreVector",
|
||||
"RoundDoubleModeV","RotateLeftV" , "RotateRightV", "LoadVector","StoreVector",
|
||||
"FmaVD", "FmaVF","PopCountVI",
|
||||
// Next are not supported currently.
|
||||
"PackB","PackS","PackI","PackL","PackF","PackD","Pack2L","Pack2D",
|
||||
|
@ -1315,8 +1315,12 @@ void LIRGenerator::do_isPrimitive(Intrinsic* x) {
|
||||
// Example: Thread.currentThread()
|
||||
void LIRGenerator::do_currentThread(Intrinsic* x) {
|
||||
assert(x->number_of_arguments() == 0, "wrong type");
|
||||
LIR_Opr temp = new_register(T_ADDRESS);
|
||||
LIR_Opr reg = rlock_result(x);
|
||||
__ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
|
||||
__ move(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_ADDRESS), temp);
|
||||
// threadObj = ((OopHandle)_threadObj)->resolve();
|
||||
access_load(IN_NATIVE, T_OBJECT,
|
||||
LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), reg);
|
||||
}
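
Now that the thread's threadObj is held behind an OopHandle, do_currentThread() above needs two loads: the first move fetches the handle slot as a T_ADDRESS value, and the IN_NATIVE access_load dereferences that slot with the appropriate GC barriers to obtain the java.lang.Thread oop. A simplified model of the indirection, as a hedged sketch (the real OopHandle::resolve goes through the Access API rather than a raw load):

```cpp
// Simplified model of the two-load sequence generated above. Illustrative only.
struct oopDesc;                 // opaque heap object
typedef oopDesc* oop;

struct OopHandleSketch {
  oop* _obj;                    // slot in off-heap (native) oop storage
  oop resolve() const {
    return *_obj;               // real code: NativeAccess<>::oop_load(_obj)
  }
};
```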
|
||||
|
||||
|
||||
|
@ -761,7 +761,7 @@ Method* ciEnv::lookup_method(ciInstanceKlass* accessor,
|
||||
InstanceKlass* accessor_klass = accessor->get_instanceKlass();
|
||||
Klass* holder_klass = holder->get_Klass();
|
||||
Method* dest_method;
|
||||
LinkInfo link_info(holder_klass, name, sig, accessor_klass, LinkInfo::needs_access_check, tag);
|
||||
LinkInfo link_info(holder_klass, name, sig, accessor_klass, LinkInfo::AccessCheck::required, tag);
|
||||
switch (bc) {
|
||||
case Bytecodes::_invokestatic:
|
||||
dest_method =
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -809,7 +809,7 @@ ciMethod* ciMethod::resolve_invoke(ciKlass* caller, ciKlass* exact_receiver, boo
|
||||
Symbol* h_signature = signature()->get_symbol();
|
||||
|
||||
LinkInfo link_info(resolved, h_name, h_signature, caller_klass,
|
||||
check_access ? LinkInfo::needs_access_check : LinkInfo::skip_access_check);
|
||||
check_access ? LinkInfo::AccessCheck::required : LinkInfo::AccessCheck::skip);
|
||||
Method* m = NULL;
|
||||
// Only do exact lookup if receiver klass has been linked. Otherwise,
|
||||
// the vtable has not been setup, and the LinkResolver will fail.
|
||||
|
@ -3237,7 +3237,7 @@ oop java_lang_reflect_RecordComponent::create(InstanceKlass* holder, RecordCompo
|
||||
char* sig = NEW_RESOURCE_ARRAY(char, sig_len);
|
||||
jio_snprintf(sig, sig_len, "%c%c%s", JVM_SIGNATURE_FUNC, JVM_SIGNATURE_ENDFUNC, type->as_C_string());
|
||||
TempNewSymbol full_sig = SymbolTable::new_symbol(sig);
|
||||
accessor_method = holder->find_instance_method(name, full_sig, Klass::find_private);
|
||||
accessor_method = holder->find_instance_method(name, full_sig, Klass::PrivateLookupMode::find);
|
||||
}
|
||||
|
||||
if (accessor_method != NULL) {
|
||||
|
@ -2116,7 +2116,7 @@ bool ClassVerifier::is_protected_access(InstanceKlass* this_class,
|
||||
InstanceKlass* target_instance = InstanceKlass::cast(target_class);
|
||||
fieldDescriptor fd;
|
||||
if (is_method) {
|
||||
Method* m = target_instance->uncached_lookup_method(field_name, field_sig, Klass::find_overpass);
|
||||
Method* m = target_instance->uncached_lookup_method(field_name, field_sig, Klass::OverpassLookupMode::find);
|
||||
if (m != NULL && m->is_protected()) {
|
||||
if (!this_class->is_same_class_package(m->method_holder())) {
|
||||
return true;
|
||||
@ -2709,7 +2709,7 @@ void ClassVerifier::verify_invoke_init(
|
||||
Method* m = InstanceKlass::cast(ref_klass)->uncached_lookup_method(
|
||||
vmSymbols::object_initializer_name(),
|
||||
cp->signature_ref_at(bcs->get_index_u2()),
|
||||
Klass::find_overpass);
|
||||
Klass::OverpassLookupMode::find);
|
||||
// Do nothing if method is not found. Let resolution detect the error.
|
||||
if (m != NULL) {
|
||||
InstanceKlass* mh = m->method_holder();
|
||||
|
@ -455,7 +455,7 @@ int vmIntrinsics::predicates_needed(vmIntrinsics::ID id) {
|
||||
case vmIntrinsics::_counterMode_AESCrypt:
|
||||
return 1;
|
||||
case vmIntrinsics::_digestBase_implCompressMB:
|
||||
return 3;
|
||||
return 4;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
@ -699,6 +699,9 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
|
||||
case vmIntrinsics::_counterMode_AESCrypt:
|
||||
if (!UseAESCTRIntrinsics) return true;
|
||||
break;
|
||||
case vmIntrinsics::_md5_implCompress:
|
||||
if (!UseMD5Intrinsics) return true;
|
||||
break;
|
||||
case vmIntrinsics::_sha_implCompress:
|
||||
if (!UseSHA1Intrinsics) return true;
|
||||
break;
|
||||
@ -709,7 +712,7 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
if (!UseSHA512Intrinsics) return true;
break;
case vmIntrinsics::_digestBase_implCompressMB:
if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) return true;
if (!(UseMD5Intrinsics || UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) return true;
break;
case vmIntrinsics::_ghash_processBlocks:
if (!UseGHASHIntrinsics) return true;
@ -1044,11 +1044,15 @@
do_intrinsic(_counterMode_AESCrypt, com_sun_crypto_provider_counterMode, crypt_name, byteArray_int_int_byteArray_int_signature, F_R) \
do_name( crypt_name, "implCrypt") \
\
/* support for sun.security.provider.MD5 */ \
do_class(sun_security_provider_md5, "sun/security/provider/MD5") \
do_intrinsic(_md5_implCompress, sun_security_provider_md5, implCompress_name, implCompress_signature, F_R) \
do_name( implCompress_name, "implCompress0") \
do_signature(implCompress_signature, "([BI)V") \
\
/* support for sun.security.provider.SHA */ \
do_class(sun_security_provider_sha, "sun/security/provider/SHA") \
do_intrinsic(_sha_implCompress, sun_security_provider_sha, implCompress_name, implCompress_signature, F_R) \
do_name( implCompress_name, "implCompress0") \
do_signature(implCompress_signature, "([BI)V") \
\
/* support for sun.security.provider.SHA2 */ \
do_class(sun_security_provider_sha2, "sun/security/provider/SHA2") \
@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1197,7 +1197,7 @@ class ClassHierarchyWalker {
} else {
// Search class hierarchy first, skipping private implementations
// as they never override any inherited methods
Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature, Klass::skip_private);
Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
if (!Dependencies::is_concrete_method(m, k)) {
// Check for re-abstraction of method
if (!k->is_interface() && m != NULL && m->is_abstract()) {
@ -1207,7 +1207,7 @@ class ClassHierarchyWalker {
ClassHierarchyWalker wf(_participants, _num_participants);
Klass* w = wf.find_witness_subtype(k);
if (w != NULL) {
Method* wm = InstanceKlass::cast(w)->find_instance_method(_name, _signature, Klass::skip_private);
Method* wm = InstanceKlass::cast(w)->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
if (!Dependencies::is_concrete_method(wm, w)) {
// Found a concrete subtype 'w' which does not override abstract method 'm'.
// Bail out because 'm' could be called with 'w' as receiver (leading to an
@ -31,7 +31,6 @@

typedef void (*initializer)(void);

#if INCLUDE_JVMCI
// Per-compiler statistics
class CompilerStatistics {
friend class VMStructs;
@ -58,17 +57,20 @@ class CompilerStatistics {
Data _osr; // stats for OSR compilations
int _nmethods_size; //
int _nmethods_code_size;
int bytes_per_second() {

double total_time() { return _standard._time.seconds() + _osr._time.seconds(); }

double bytes_per_second() {
int bytes = _standard._bytes + _osr._bytes;
if (bytes == 0) {
return 0;
return 0.0;
}
double seconds = _standard._time.seconds() + _osr._time.seconds();
return seconds == 0.0 ? 0 : (int) (bytes / seconds);
double seconds = total_time();
return seconds == 0.0 ? 0.0 : (bytes / seconds);
}

CompilerStatistics() : _nmethods_size(0), _nmethods_code_size(0) {}
};
#endif // INCLUDE_JVMCI

class AbstractCompiler : public CHeapObj<mtCompiler> {
private:
@ -86,9 +88,7 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
|
||||
private:
|
||||
const CompilerType _type;
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
CompilerStatistics _stats;
|
||||
#endif
|
||||
|
||||
public:
|
||||
AbstractCompiler(CompilerType type) : _num_compiler_threads(0), _compiler_state(uninitialized), _type(type) {}
|
||||
@ -176,9 +176,7 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
CompilerStatistics* stats() { return &_stats; }
|
||||
#endif
|
||||
};
|
||||
|
||||
#endif // SHARE_COMPILER_ABSTRACTCOMPILER_HPP
|
||||
|
@ -181,6 +181,8 @@ int CompileBroker::_sum_nmethod_code_size = 0;
|
||||
|
||||
long CompileBroker::_peak_compilation_time = 0;
|
||||
|
||||
CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization];
|
||||
|
||||
CompileQueue* CompileBroker::_c2_compile_queue = NULL;
|
||||
CompileQueue* CompileBroker::_c1_compile_queue = NULL;
|
||||
|
||||
@ -1599,6 +1601,7 @@ static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10;
|
||||
* @return true if this thread needs to free/recycle the task
|
||||
*/
|
||||
bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) {
|
||||
assert(UseJVMCICompiler, "sanity");
|
||||
MonitorLocker ml(thread, task->lock());
|
||||
int progress_wait_attempts = 0;
|
||||
int methods_compiled = jvmci->methods_compiled();
|
||||
@ -2458,6 +2461,7 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time
|
||||
methodHandle method (thread, task->method());
|
||||
uint compile_id = task->compile_id();
|
||||
bool is_osr = (task->osr_bci() != standard_entry_bci);
|
||||
const int comp_level = task->comp_level();
|
||||
nmethod* code = task->code();
|
||||
CompilerCounters* counters = thread->counters();
|
||||
|
||||
@ -2506,25 +2510,34 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time
|
||||
_sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes();
|
||||
}
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
AbstractCompiler* comp = compiler(task->comp_level());
|
||||
// Collect statistic per compilation level
|
||||
if (comp_level > CompLevel_none && comp_level <= CompLevel_full_optimization) {
|
||||
CompilerStatistics* stats = &_stats_per_level[comp_level-1];
|
||||
if (is_osr) {
|
||||
stats->_osr.update(time, bytes_compiled);
|
||||
} else {
|
||||
stats->_standard.update(time, bytes_compiled);
|
||||
}
|
||||
stats->_nmethods_size += code->total_size();
|
||||
stats->_nmethods_code_size += code->insts_size();
|
||||
} else {
|
||||
assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level);
|
||||
}
|
||||
|
||||
// Collect statistic per compiler
|
||||
AbstractCompiler* comp = compiler(comp_level);
|
||||
if (comp) {
|
||||
CompilerStatistics* stats = comp->stats();
|
||||
if (stats) {
|
||||
if (is_osr) {
|
||||
stats->_osr.update(time, bytes_compiled);
|
||||
} else {
|
||||
stats->_standard.update(time, bytes_compiled);
|
||||
}
|
||||
stats->_nmethods_size += code->total_size();
|
||||
stats->_nmethods_code_size += code->insts_size();
|
||||
} else { // if (!stats)
|
||||
assert(false, "Compiler statistics object must exist");
|
||||
if (is_osr) {
|
||||
stats->_osr.update(time, bytes_compiled);
|
||||
} else {
|
||||
stats->_standard.update(time, bytes_compiled);
|
||||
}
|
||||
stats->_nmethods_size += code->total_size();
|
||||
stats->_nmethods_code_size += code->insts_size();
|
||||
} else { // if (!comp)
|
||||
assert(false, "Compiler object must exist");
|
||||
}
|
||||
#endif // INCLUDE_JVMCI
|
||||
}
|
||||
|
||||
if (UsePerfData) {
|
||||
@ -2543,9 +2556,10 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time
|
||||
}
|
||||
|
||||
if (CITimeEach) {
|
||||
float bytes_per_sec = 1.0 * (method->code_size() + task->num_inlined_bytecodes()) / time.seconds();
|
||||
tty->print_cr("%3d seconds: %f bytes/sec : %f (bytes %d + %d inlined)",
|
||||
compile_id, time.seconds(), bytes_per_sec, method->code_size(), task->num_inlined_bytecodes());
|
||||
double compile_time = time.seconds();
|
||||
double bytes_per_sec = compile_time == 0.0 ? 0.0 : (double)(method->code_size() + task->num_inlined_bytecodes()) / compile_time;
|
||||
tty->print_cr("%3d seconds: %6.3f bytes/sec : %f (bytes %d + %d inlined)",
|
||||
compile_id, compile_time, bytes_per_sec, method->code_size(), task->num_inlined_bytecodes());
|
||||
}
|
||||
|
||||
// Collect counts of successful compilations
|
||||
@ -2580,81 +2594,53 @@ const char* CompileBroker::compiler_name(int comp_level) {
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
void CompileBroker::print_times(AbstractCompiler* comp) {
|
||||
CompilerStatistics* stats = comp->stats();
|
||||
if (stats) {
|
||||
tty->print_cr(" %s {speed: %d bytes/s; standard: %6.3f s, %d bytes, %d methods; osr: %6.3f s, %d bytes, %d methods; nmethods_size: %d bytes; nmethods_code_size: %d bytes}",
|
||||
comp->name(), stats->bytes_per_second(),
|
||||
void CompileBroker::print_times(const char* name, CompilerStatistics* stats) {
|
||||
tty->print_cr(" %s {speed: %6.3f bytes/s; standard: %6.3f s, %d bytes, %d methods; osr: %6.3f s, %d bytes, %d methods; nmethods_size: %d bytes; nmethods_code_size: %d bytes}",
|
||||
name, stats->bytes_per_second(),
|
||||
stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count,
|
||||
stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count,
|
||||
stats->_nmethods_size, stats->_nmethods_code_size);
|
||||
} else { // if (!stats)
|
||||
assert(false, "Compiler statistics object must exist");
|
||||
}
|
||||
comp->print_timers();
|
||||
}
|
||||
#endif // INCLUDE_JVMCI
|
||||
|
||||
void CompileBroker::print_times(bool per_compiler, bool aggregate) {
|
||||
#if INCLUDE_JVMCI
|
||||
elapsedTimer standard_compilation;
|
||||
elapsedTimer total_compilation;
|
||||
elapsedTimer osr_compilation;
|
||||
|
||||
int standard_bytes_compiled = 0;
|
||||
int osr_bytes_compiled = 0;
|
||||
|
||||
int standard_compile_count = 0;
|
||||
int osr_compile_count = 0;
|
||||
int total_compile_count = 0;
|
||||
|
||||
int nmethods_size = 0;
|
||||
int nmethods_code_size = 0;
|
||||
bool printedHeader = false;
|
||||
|
||||
for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) {
|
||||
AbstractCompiler* comp = _compilers[i];
|
||||
if (comp != NULL) {
|
||||
if (per_compiler && aggregate && !printedHeader) {
|
||||
printedHeader = true;
|
||||
tty->cr();
|
||||
tty->print_cr("Individual compiler times (for compiled methods only)");
|
||||
tty->print_cr("------------------------------------------------");
|
||||
tty->cr();
|
||||
}
|
||||
CompilerStatistics* stats = comp->stats();
|
||||
|
||||
if (stats) {
|
||||
standard_compilation.add(stats->_standard._time);
|
||||
osr_compilation.add(stats->_osr._time);
|
||||
|
||||
standard_bytes_compiled += stats->_standard._bytes;
|
||||
osr_bytes_compiled += stats->_osr._bytes;
|
||||
|
||||
standard_compile_count += stats->_standard._count;
|
||||
osr_compile_count += stats->_osr._count;
|
||||
|
||||
nmethods_size += stats->_nmethods_size;
|
||||
nmethods_code_size += stats->_nmethods_code_size;
|
||||
} else { // if (!stats)
|
||||
assert(false, "Compiler statistics object must exist");
|
||||
}
|
||||
|
||||
if (per_compiler) {
|
||||
print_times(comp);
|
||||
if (per_compiler) {
|
||||
if (aggregate) {
|
||||
tty->cr();
|
||||
tty->print_cr("Individual compiler times (for compiled methods only)");
|
||||
tty->print_cr("------------------------------------------------");
|
||||
tty->cr();
|
||||
}
|
||||
for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) {
|
||||
AbstractCompiler* comp = _compilers[i];
|
||||
if (comp != NULL) {
|
||||
print_times(comp->name(), comp->stats());
|
||||
}
|
||||
}
|
||||
if (aggregate) {
|
||||
tty->cr();
|
||||
tty->print_cr("Individual compilation Tier times (for compiled methods only)");
|
||||
tty->print_cr("------------------------------------------------");
|
||||
tty->cr();
|
||||
}
|
||||
char tier_name[256];
|
||||
for (int tier = CompLevel_simple; tier <= CompLevel_highest_tier; tier++) {
|
||||
CompilerStatistics* stats = &_stats_per_level[tier-1];
|
||||
sprintf(tier_name, "Tier%d", tier);
|
||||
print_times(tier_name, stats);
|
||||
}
|
||||
}
|
||||
total_compile_count = osr_compile_count + standard_compile_count;
|
||||
total_compilation.add(osr_compilation);
|
||||
total_compilation.add(standard_compilation);
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
// In hosted mode, print the JVMCI compiler specific counters manually.
|
||||
if (!UseJVMCICompiler) {
|
||||
if (EnableJVMCI && !UseJVMCICompiler) {
|
||||
JVMCICompiler::print_compilation_timers();
|
||||
}
|
||||
#else // INCLUDE_JVMCI
|
||||
#endif
|
||||
|
||||
if (!aggregate) {
|
||||
return;
|
||||
}
|
||||
|
||||
elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation;
|
||||
elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation;
|
||||
elapsedTimer total_compilation = CompileBroker::_t_total_compilation;
|
||||
@ -2665,14 +2651,12 @@ void CompileBroker::print_times(bool per_compiler, bool aggregate) {
|
||||
int standard_compile_count = CompileBroker::_total_standard_compile_count;
|
||||
int osr_compile_count = CompileBroker::_total_osr_compile_count;
|
||||
int total_compile_count = CompileBroker::_total_compile_count;
|
||||
int total_bailout_count = CompileBroker::_total_bailout_count;
|
||||
int total_invalidated_count = CompileBroker::_total_invalidated_count;
|
||||
|
||||
int nmethods_size = CompileBroker::_sum_nmethod_code_size;
|
||||
int nmethods_code_size = CompileBroker::_sum_nmethod_size;
|
||||
#endif // INCLUDE_JVMCI
|
||||
|
||||
if (!aggregate) {
|
||||
return;
|
||||
}
|
||||
tty->cr();
|
||||
tty->print_cr("Accumulated compiler times");
|
||||
tty->print_cr("----------------------------------------------------------");
|
||||
@ -2681,16 +2665,16 @@ void CompileBroker::print_times(bool per_compiler, bool aggregate) {
|
||||
tty->print_cr(" Total compilation time : %7.3f s", total_compilation.seconds());
|
||||
tty->print_cr(" Standard compilation : %7.3f s, Average : %2.3f s",
|
||||
standard_compilation.seconds(),
|
||||
standard_compilation.seconds() / standard_compile_count);
|
||||
standard_compile_count == 0 ? 0.0 : standard_compilation.seconds() / standard_compile_count);
|
||||
tty->print_cr(" Bailed out compilation : %7.3f s, Average : %2.3f s",
|
||||
CompileBroker::_t_bailedout_compilation.seconds(),
|
||||
CompileBroker::_t_bailedout_compilation.seconds() / CompileBroker::_total_bailout_count);
|
||||
total_bailout_count == 0 ? 0.0 : CompileBroker::_t_bailedout_compilation.seconds() / total_bailout_count);
|
||||
tty->print_cr(" On stack replacement : %7.3f s, Average : %2.3f s",
|
||||
osr_compilation.seconds(),
|
||||
osr_compilation.seconds() / osr_compile_count);
|
||||
osr_compile_count == 0 ? 0.0 : osr_compilation.seconds() / osr_compile_count);
|
||||
tty->print_cr(" Invalidated : %7.3f s, Average : %2.3f s",
|
||||
CompileBroker::_t_invalidated_compilation.seconds(),
|
||||
CompileBroker::_t_invalidated_compilation.seconds() / CompileBroker::_total_invalidated_count);
|
||||
total_invalidated_count == 0 ? 0.0 : CompileBroker::_t_invalidated_compilation.seconds() / total_invalidated_count);
|
||||
|
||||
AbstractCompiler *comp = compiler(CompLevel_simple);
|
||||
if (comp != NULL) {
|
||||
|
@ -224,6 +224,8 @@ class CompileBroker: AllStatic {
|
||||
static int _sum_nmethod_code_size;
|
||||
static long _peak_compilation_time;
|
||||
|
||||
static CompilerStatistics _stats_per_level[];
|
||||
|
||||
static volatile int _print_compilation_warning;
|
||||
|
||||
static Handle create_thread_oop(const char* name, TRAPS);
|
||||
@ -371,10 +373,8 @@ public:
|
||||
// Redefine Classes support
|
||||
static void mark_on_stack();
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
// Print curent compilation time stats for a given compiler
|
||||
static void print_times(AbstractCompiler* comp);
|
||||
#endif
|
||||
static void print_times(const char* name, CompilerStatistics* stats);
|
||||
|
||||
// Print a detailed accounting of compilation time
|
||||
static void print_times(bool per_compiler = true, bool aggregate = true);
|
||||
|
@ -129,11 +129,6 @@ public:
|
||||
virtual void prepare_for_verify() {}
|
||||
virtual void verify(VerifyOption option) {}
|
||||
|
||||
virtual jlong millis_since_last_gc() {
|
||||
// Report time since the VM start
|
||||
return os::elapsed_counter() / NANOSECS_PER_MILLISEC;
|
||||
}
|
||||
|
||||
MemRegion reserved_region() const { return _reserved; }
|
||||
bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
|
||||
|
||||
|
@ -1417,6 +1417,7 @@ G1CollectedHeap::G1CollectedHeap() :
|
||||
_young_gen_sampling_thread(NULL),
|
||||
_workers(NULL),
|
||||
_card_table(NULL),
|
||||
_collection_pause_end(Ticks::now()),
|
||||
_soft_ref_policy(),
|
||||
_old_set("Old Region Set", new OldRegionSetChecker()),
|
||||
_archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
|
||||
@ -1966,7 +1967,8 @@ void G1CollectedHeap::increment_old_marking_cycles_started() {
_old_marking_cycles_started++;
}

void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent,
bool whole_heap_examined) {
MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag);

// We assume that if concurrent == true, then the caller is a
@ -1998,6 +2000,10 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
_old_marking_cycles_started, _old_marking_cycles_completed);

_old_marking_cycles_completed += 1;
if (whole_heap_examined) {
// Signal that we have completed a visit to all live objects.
record_whole_heap_examined_timestamp();
}

// We need to clear the "in_progress" flag in the CM thread before
// we wake up any waiters (especially when ExplicitInvokesConcurrent
@ -2366,19 +2372,6 @@ size_t G1CollectedHeap::max_reserved_capacity() const {
|
||||
return _hrm->max_length() * HeapRegion::GrainBytes;
|
||||
}
|
||||
|
||||
jlong G1CollectedHeap::millis_since_last_gc() {
|
||||
// See the notes in GenCollectedHeap::millis_since_last_gc()
|
||||
// for more information about the implementation.
|
||||
jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
|
||||
_policy->collection_pause_end_millis();
|
||||
if (ret_val < 0) {
|
||||
log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
|
||||
". returning zero instead.", ret_val);
|
||||
return 0;
|
||||
}
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
void G1CollectedHeap::deduplicate_string(oop str) {
|
||||
assert(java_lang_String::is_instance(str), "invariant");
|
||||
|
||||
@ -2641,7 +2634,7 @@ void G1CollectedHeap::gc_epilogue(bool full) {
|
||||
// Update common counters.
|
||||
if (full) {
|
||||
// Update the number of full collections that have been completed.
|
||||
increment_old_marking_cycles_completed(false /* concurrent */);
|
||||
increment_old_marking_cycles_completed(false /* concurrent */, true /* liveness_completed */);
|
||||
}
|
||||
|
||||
// We are at the end of the GC. Total collections has already been increased.
|
||||
@ -2665,6 +2658,8 @@ void G1CollectedHeap::gc_epilogue(bool full) {
|
||||
|
||||
// Print NUMA statistics.
|
||||
_numa->print_statistics();
|
||||
|
||||
_collection_pause_end = Ticks::now();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::verify_numa_regions(const char* desc) {
|
||||
|
@ -159,6 +159,8 @@ private:
|
||||
WorkGang* _workers;
|
||||
G1CardTable* _card_table;
|
||||
|
||||
Ticks _collection_pause_end;
|
||||
|
||||
SoftRefPolicy _soft_ref_policy;
|
||||
|
||||
static size_t _humongous_object_threshold_in_words;
|
||||
@ -644,7 +646,10 @@ public:
|
||||
// the G1OldGCCount_lock in case a Java thread is waiting for a full
|
||||
// GC to happen (e.g., it called System.gc() with
|
||||
// +ExplicitGCInvokesConcurrent).
|
||||
void increment_old_marking_cycles_completed(bool concurrent);
|
||||
// whole_heap_examined should indicate that during that old marking
|
||||
// cycle the whole heap has been examined for live objects (as opposed
|
||||
// to only parts, or aborted before completion).
|
||||
void increment_old_marking_cycles_completed(bool concurrent, bool whole_heap_examined);
|
||||
|
||||
uint old_marking_cycles_completed() {
|
||||
return _old_marking_cycles_completed;
|
||||
@ -1288,8 +1293,7 @@ public:
|
||||
// Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
|
||||
virtual size_t max_reserved_capacity() const;
|
||||
|
||||
virtual jlong millis_since_last_gc();
|
||||
|
||||
Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; }
|
||||
|
||||
// Convenience function to be used in situations where the heap type can be
|
||||
// asserted to be this type.
|
||||
|
@ -268,7 +268,8 @@ void G1ConcurrentMarkThread::run_service() {
|
||||
// called System.gc() with +ExplicitGCInvokesConcurrent).
|
||||
{
|
||||
SuspendibleThreadSetJoiner sts_join;
|
||||
g1h->increment_old_marking_cycles_completed(true /* concurrent */);
|
||||
g1h->increment_old_marking_cycles_completed(true /* concurrent */,
|
||||
!_cm->has_aborted() /* liveness_completed */);
|
||||
|
||||
_cm->concurrent_cycle_end();
|
||||
ConcurrentGCBreakpoints::notify_active_to_idle();
|
||||
|
@ -58,7 +58,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
|
||||
|
||||
// Root scanning phases
|
||||
_gc_par_phases[ThreadRoots] = new WorkerDataArray<double>("ThreadRoots", "Thread Roots (ms):", max_gc_threads);
|
||||
_gc_par_phases[UniverseRoots] = new WorkerDataArray<double>("UniverseRoots", "Universe Roots (ms):", max_gc_threads);
|
||||
_gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>("ObjectSynchronizerRoots", "ObjectSynchronizer Roots (ms):", max_gc_threads);
|
||||
_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>("CLDGRoots", "CLDG Roots (ms):", max_gc_threads);
|
||||
AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>("AOTCodeRoots", "AOT Root Scan (ms):", max_gc_threads);)
|
||||
|
@ -48,7 +48,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
GCWorkerStart,
|
||||
ExtRootScan,
|
||||
ThreadRoots,
|
||||
UniverseRoots,
|
||||
ObjectSynchronizerRoots,
|
||||
CLDGRoots,
|
||||
AOT_ONLY(AOTCodeRoots COMMA)
|
||||
|
@ -60,7 +60,6 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
|
||||
_ihop_control(create_ihop_control(&_predictor)),
|
||||
_policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
|
||||
_full_collection_start_sec(0.0),
|
||||
_collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
|
||||
_young_list_target_length(0),
|
||||
_young_list_fixed_length(0),
|
||||
_young_list_max_length(0),
|
||||
@ -648,8 +647,6 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {
|
||||
|
||||
record_pause(this_pause, end_time_sec - pause_time_ms / 1000.0, end_time_sec);
|
||||
|
||||
_collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
|
||||
|
||||
if (is_concurrent_start_pause(this_pause)) {
|
||||
record_concurrent_mark_init_end(0.0);
|
||||
} else {
|
||||
|
@ -74,8 +74,6 @@ class G1Policy: public CHeapObj<mtGC> {
|
||||
|
||||
double _full_collection_start_sec;
|
||||
|
||||
jlong _collection_pause_end_millis;
|
||||
|
||||
uint _young_list_target_length;
|
||||
uint _young_list_fixed_length;
|
||||
|
||||
@ -260,8 +258,6 @@ public:
|
||||
// percentage of the current heap capacity.
|
||||
double reclaimable_bytes_percent(size_t reclaimable_bytes) const;
|
||||
|
||||
jlong collection_pause_end_millis() { return _collection_pause_end_millis; }
|
||||
|
||||
private:
|
||||
void clear_collection_set_candidates();
|
||||
// Sets up marking if proper conditions are met.
|
||||
|
@ -43,7 +43,6 @@
|
||||
#include "gc/shared/oopStorageSetParState.inline.hpp"
|
||||
#include "gc/shared/referenceProcessor.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "runtime/mutex.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
@ -181,13 +180,6 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
|
||||
uint worker_id) {
|
||||
OopClosure* strong_roots = closures->strong_oops();
|
||||
|
||||
{
|
||||
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_id);
|
||||
if (_process_strong_tasks.try_claim_task(G1RP_PS_Universe_oops_do)) {
|
||||
Universe::oops_do(strong_roots);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_id);
|
||||
if (_process_strong_tasks.try_claim_task(G1RP_PS_ObjectSynchronizer_oops_do)) {
|
||||
|
@ -56,14 +56,15 @@ void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {
}

bool G1YoungRemSetSamplingThread::should_start_periodic_gc() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// If we are currently in a concurrent mark we are going to uncommit memory soon.
if (G1CollectedHeap::heap()->concurrent_mark()->cm_thread()->during_cycle()) {
if (g1h->concurrent_mark()->cm_thread()->during_cycle()) {
log_debug(gc, periodic)("Concurrent cycle in progress. Skipping.");
return false;
}

// Check if enough time has passed since the last GC.
uintx time_since_last_gc = (uintx)Universe::heap()->millis_since_last_gc();
uintx time_since_last_gc = (uintx)g1h->time_since_last_collection().milliseconds();
if ((time_since_last_gc < G1PeriodicGCInterval)) {
log_debug(gc, periodic)("Last GC occurred " UINTX_FORMAT "ms before which is below threshold " UINTX_FORMAT "ms. Skipping.",
time_since_last_gc, G1PeriodicGCInterval);
@ -559,10 +559,6 @@ bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
|
||||
return block_start(addr) == addr;
|
||||
}
|
||||
|
||||
jlong ParallelScavengeHeap::millis_since_last_gc() {
|
||||
return PSParallelCompact::millis_since_last_gc();
|
||||
}
|
||||
|
||||
void ParallelScavengeHeap::prepare_for_verify() {
|
||||
ensure_parsability(false); // no need to retire TLABs for verification
|
||||
}
|
||||
|
@ -213,8 +213,6 @@ class ParallelScavengeHeap : public CollectedHeap {
|
||||
HeapWord* block_start(const void* addr) const;
|
||||
bool block_is_obj(const HeapWord* addr) const;
|
||||
|
||||
jlong millis_since_last_gc();
|
||||
|
||||
void prepare_for_verify();
|
||||
PSHeapSummary create_ps_heap_summary();
|
||||
virtual void print_on(outputStream* st) const;
|
||||
|
@ -843,7 +843,6 @@ ParallelOldTracer PSParallelCompact::_gc_tracer;
|
||||
elapsedTimer PSParallelCompact::_accumulated_time;
|
||||
unsigned int PSParallelCompact::_total_invocations = 0;
|
||||
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
|
||||
jlong PSParallelCompact::_time_of_last_gc = 0;
|
||||
CollectorCounters* PSParallelCompact::_counters = NULL;
|
||||
ParMarkBitMap PSParallelCompact::_mark_bitmap;
|
||||
ParallelCompactData PSParallelCompact::_summary_data;
|
||||
@ -1070,8 +1069,8 @@ void PSParallelCompact::post_compact()
|
||||
heap->gen_mangle_unused_area();
|
||||
}
|
||||
|
||||
// Update time of last GC
|
||||
reset_millis_since_last_gc();
|
||||
// Signal that we have completed a visit to all live objects.
|
||||
Universe::heap()->record_whole_heap_examined_timestamp();
|
||||
}
|
||||
|
||||
HeapWord*
|
||||
@ -2008,10 +2007,6 @@ static void mark_from_roots_work(ParallelRootType::Value root_type, uint worker_
|
||||
PCMarkAndPushClosure mark_and_push_closure(cm);
|
||||
|
||||
switch (root_type) {
|
||||
case ParallelRootType::universe:
|
||||
Universe::oops_do(&mark_and_push_closure);
|
||||
break;
|
||||
|
||||
case ParallelRootType::object_synchronizer:
|
||||
ObjectSynchronizer::oops_do(&mark_and_push_closure);
|
||||
break;
|
||||
@ -2225,7 +2220,6 @@ void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
|
||||
PCAdjustPointerClosure oop_closure(cm);
|
||||
|
||||
// General strong roots.
|
||||
Universe::oops_do(&oop_closure);
|
||||
Threads::oops_do(&oop_closure, NULL);
|
||||
ObjectSynchronizer::oops_do(&oop_closure);
|
||||
OopStorageSet::strong_oops_do(&oop_closure);
|
||||
@ -3197,25 +3191,6 @@ void PSParallelCompact::fill_blocks(size_t region_idx)
|
||||
}
|
||||
}
|
||||
|
||||
jlong PSParallelCompact::millis_since_last_gc() {
|
||||
// We need a monotonically non-decreasing time in ms but
|
||||
// os::javaTimeMillis() does not guarantee monotonicity.
|
||||
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
|
||||
jlong ret_val = now - _time_of_last_gc;
|
||||
// XXX See note in genCollectedHeap::millis_since_last_gc().
|
||||
if (ret_val < 0) {
|
||||
NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
|
||||
return 0;
|
||||
}
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
void PSParallelCompact::reset_millis_since_last_gc() {
|
||||
// We need a monotonically non-decreasing time in ms but
|
||||
// os::javaTimeMillis() does not guarantee monotonicity.
|
||||
_time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
|
||||
}
|
||||
|
||||
ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
|
||||
{
|
||||
if (source() != copy_destination()) {
|
||||
|
@ -1009,7 +1009,6 @@ class PSParallelCompact : AllStatic {
|
||||
static elapsedTimer _accumulated_time;
|
||||
static unsigned int _total_invocations;
|
||||
static unsigned int _maximum_compaction_gc_num;
|
||||
static jlong _time_of_last_gc; // ms
|
||||
static CollectorCounters* _counters;
|
||||
static ParMarkBitMap _mark_bitmap;
|
||||
static ParallelCompactData _summary_data;
|
||||
@ -1123,9 +1122,6 @@ class PSParallelCompact : AllStatic {
|
||||
static void enqueue_dense_prefix_tasks(TaskQueue& task_queue,
|
||||
uint parallel_gc_threads);
|
||||
|
||||
// Reset time since last full gc
|
||||
static void reset_millis_since_last_gc();
|
||||
|
||||
#ifndef PRODUCT
|
||||
// Print generic summary data
|
||||
static void print_generic_summary_data(ParallelCompactData& summary_data,
|
||||
@ -1249,9 +1245,6 @@ class PSParallelCompact : AllStatic {
|
||||
// Return the SpaceId for the given address.
|
||||
static SpaceId space_id(HeapWord* addr);
|
||||
|
||||
// Time since last full gc (in milliseconds).
|
||||
static jlong millis_since_last_gc();
|
||||
|
||||
static void print_on_error(outputStream* st);
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -34,7 +34,6 @@ public:
|
||||
// The order reflects the order these roots are to be processed,
|
||||
// We do not want any holes in the enum as we enumerate these values by incrementing them.
|
||||
enum Value {
|
||||
universe,
|
||||
object_synchronizer,
|
||||
class_loader_data,
|
||||
code_cache,
|
||||
|
@ -92,10 +92,6 @@ static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_i
|
||||
PSPromoteRootsClosure roots_to_old_closure(pm);
|
||||
|
||||
switch (root_type) {
|
||||
case ParallelRootType::universe:
|
||||
Universe::oops_do(&roots_closure);
|
||||
break;
|
||||
|
||||
case ParallelRootType::object_synchronizer:
|
||||
ObjectSynchronizer::oops_do(&roots_closure);
|
||||
break;
|
||||
|
@ -680,12 +680,6 @@ void DefNewGeneration::collect(bool full,
|
||||
from()->set_concurrent_iteration_safe_limit(from()->top());
|
||||
to()->set_concurrent_iteration_safe_limit(to()->top());
|
||||
|
||||
// We need to use a monotonically non-decreasing time in ms
|
||||
// or we will see time-warp warnings and os::javaTimeMillis()
|
||||
// does not guarantee monotonicity.
|
||||
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
|
||||
update_time_of_last_gc(now);
|
||||
|
||||
heap->trace_heap_after_gc(&gc_tracer);
|
||||
|
||||
_gc_timer->register_gc_end();
|
||||
|
@ -137,13 +137,8 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_so
|
||||
// input to soft ref clearing policy at the next gc.
|
||||
Universe::update_heap_info_at_gc();
|
||||
|
||||
// Update time of last gc for all generations we collected
|
||||
// (which currently is all the generations in the heap).
|
||||
// We need to use a monotonically non-decreasing time in ms
|
||||
// or we will see time-warp warnings and os::javaTimeMillis()
|
||||
// does not guarantee monotonicity.
|
||||
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
|
||||
gch->update_time_of_last_gc(now);
|
||||
// Signal that we have completed a visit to all live objects.
|
||||
Universe::heap()->record_whole_heap_examined_timestamp();
|
||||
|
||||
gch->trace_heap_after_gc(_gc_tracer);
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -141,6 +141,7 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
|
||||
bool control_dependent = (decorators & C2_CONTROL_DEPENDENT_LOAD) != 0;
|
||||
bool unknown_control = (decorators & C2_UNKNOWN_CONTROL_LOAD) != 0;
|
||||
bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
|
||||
bool immutable = (decorators & C2_IMMUTABLE_MEMORY) != 0;
|
||||
|
||||
bool in_native = (decorators & IN_NATIVE) != 0;
|
||||
|
||||
@ -153,10 +154,14 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
|
||||
GraphKit* kit = parse_access.kit();
|
||||
Node* control = control_dependent ? kit->control() : NULL;
|
||||
|
||||
if (in_native) {
|
||||
load = kit->make_load(control, adr, val_type, access.type(), mo, dep,
|
||||
requires_atomic_access, unaligned,
|
||||
if (immutable) {
|
||||
assert(!requires_atomic_access, "can't ensure atomicity");
|
||||
Compile* C = Compile::current();
|
||||
Node* mem = kit->immutable_memory();
|
||||
load = LoadNode::make(kit->gvn(), control, mem, adr,
|
||||
adr_type, val_type, access.type(), mo, dep, unaligned,
|
||||
mismatched, unsafe, access.barrier_data());
|
||||
load = kit->gvn().transform(load);
|
||||
} else {
|
||||
load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
|
||||
dep, requires_atomic_access, unaligned, mismatched, unsafe,
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -54,6 +54,8 @@ const DecoratorSet C2_READ_ACCESS = DECORATOR_LAST << 8;
|
||||
const DecoratorSet C2_TIGHTLY_COUPLED_ALLOC = DECORATOR_LAST << 9;
|
||||
// Loads and stores from an arraycopy being optimized
|
||||
const DecoratorSet C2_ARRAY_COPY = DECORATOR_LAST << 10;
|
||||
// Loads from immutable memory
|
||||
const DecoratorSet C2_IMMUTABLE_MEMORY = DECORATOR_LAST << 11;
|
||||
|
||||
class Compile;
|
||||
class ConnectionGraph;
|
||||
|
@ -191,6 +191,7 @@ bool CollectedHeap::is_oop(oop object) const {

CollectedHeap::CollectedHeap() :
_is_gc_active(false),
_last_whole_heap_examined_time_ns(os::javaTimeNanos()),
_total_collections(0),
_total_full_collections(0),
_gc_cause(GCCause::_no_gc),
@ -488,6 +489,14 @@ void CollectedHeap::resize_all_tlabs() {
}
}

jlong CollectedHeap::millis_since_last_whole_heap_examined() {
return (os::javaTimeNanos() - _last_whole_heap_examined_time_ns) / NANOSECS_PER_MILLISEC;
}

void CollectedHeap::record_whole_heap_examined_timestamp() {
_last_whole_heap_examined_time_ns = os::javaTimeNanos();
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
assert(timer != NULL, "timer is null");
if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
@ -112,6 +112,12 @@ class CollectedHeap : public CHeapObj<mtInternal> {
|
||||
// Used for filler objects (static, but initialized in ctor).
|
||||
static size_t _filler_array_max_size;
|
||||
|
||||
// Last time the whole heap has been examined in support of RMI
|
||||
// MaxObjectInspectionAge.
|
||||
// This timestamp must be monotonically non-decreasing to avoid
|
||||
// time-warp warnings.
|
||||
jlong _last_whole_heap_examined_time_ns;
|
||||
|
||||
unsigned int _total_collections; // ... started
|
||||
unsigned int _total_full_collections; // ... started
|
||||
NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
|
||||
@ -404,15 +410,18 @@ class CollectedHeap : public CHeapObj<mtInternal> {
|
||||
// Keep alive an object that was loaded with AS_NO_KEEPALIVE.
|
||||
virtual void keep_alive(oop obj) {}
|
||||
|
||||
// Returns the longest time (in ms) that has elapsed since the last
|
||||
// time that any part of the heap was examined by a garbage collection.
|
||||
virtual jlong millis_since_last_gc() = 0;
|
||||
|
||||
// Perform any cleanup actions necessary before allowing a verification.
|
||||
virtual void prepare_for_verify() = 0;
|
||||
|
||||
// Generate any dumps preceding or following a full gc
|
||||
// Returns the longest time (in ms) that has elapsed since the last
|
||||
// time that the whole heap has been examined by a garbage collection.
|
||||
jlong millis_since_last_whole_heap_examined();
|
||||
// GC should call this when the next whole heap analysis has completed to
|
||||
// satisfy above requirement.
|
||||
void record_whole_heap_examined_timestamp();
|
||||
|
||||
private:
|
||||
// Generate any dumps preceding or following a full gc
|
||||
void full_gc_dump(GCTimer* timer, bool before);
|
||||
|
||||
virtual void initialize_serviceability() = 0;
|
||||
|
@ -817,10 +817,6 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
|
||||
bool is_par = scope->n_threads() > 1;
|
||||
Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);
|
||||
|
||||
if (_process_strong_tasks->try_claim_task(GCH_PS_Universe_oops_do)) {
|
||||
Universe::oops_do(strong_roots);
|
||||
}
|
||||
|
||||
if (_process_strong_tasks->try_claim_task(GCH_PS_ObjectSynchronizer_oops_do)) {
|
||||
ObjectSynchronizer::oops_do(strong_roots);
|
||||
}
|
||||
@ -1358,37 +1354,3 @@ oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
|
||||
}
|
||||
return oop(result);
|
||||
}
|
||||
|
||||
class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
|
||||
jlong _time; // in ms
|
||||
jlong _now; // in ms
|
||||
|
||||
public:
|
||||
GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
|
||||
|
||||
jlong time() { return _time; }
|
||||
|
||||
void do_generation(Generation* gen) {
|
||||
_time = MIN2(_time, gen->time_of_last_gc(_now));
|
||||
}
|
||||
};
|
||||
|
||||
jlong GenCollectedHeap::millis_since_last_gc() {
|
||||
// javaTimeNanos() is guaranteed to be monotonically non-decreasing
|
||||
// provided the underlying platform provides such a time source
|
||||
// (and it is bug free). So we still have to guard against getting
|
||||
// back a time later than 'now'.
|
||||
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
|
||||
GenTimeOfLastGCClosure tolgc_cl(now);
|
||||
// iterate over generations getting the oldest
|
||||
// time that a generation was collected
|
||||
generation_iterate(&tolgc_cl, false);
|
||||
|
||||
jlong retVal = now - tolgc_cl.time();
|
||||
if (retVal < 0) {
|
||||
log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
|
||||
". returning zero instead.", retVal);
|
||||
return 0;
|
||||
}
|
||||
return retVal;
|
||||
}
|
||||
|
@ -105,7 +105,6 @@ protected:
|
||||
|
||||
// The set of potentially parallel tasks in root scanning.
|
||||
enum GCH_strong_roots_tasks {
|
||||
GCH_PS_Universe_oops_do,
|
||||
GCH_PS_ObjectSynchronizer_oops_do,
|
||||
GCH_PS_OopStorageSet_oops_do,
|
||||
GCH_PS_ClassLoaderDataGraph_oops_do,
|
||||
@ -290,10 +289,6 @@ public:
|
||||
// Ensure parsability: override
|
||||
virtual void ensure_parsability(bool retire_tlabs);
|
||||
|
||||
// Time in ms since the longest time a collector ran in
|
||||
// in any generation.
|
||||
virtual jlong millis_since_last_gc();
|
||||
|
||||
// Total number of full collections completed.
|
||||
unsigned int total_full_collections_completed() {
|
||||
assert(_full_collections_completed <= _total_full_collections,
|
||||
@ -306,12 +301,6 @@ public:
|
||||
// Update above counter, as appropriate, at the end of a concurrent GC cycle
|
||||
unsigned int update_full_collections_completed(unsigned int count);
|
||||
|
||||
// Update "time of last gc" for all generations to "now".
|
||||
void update_time_of_last_gc(jlong now) {
|
||||
_young_gen->update_time_of_last_gc(now);
|
||||
_old_gen->update_time_of_last_gc(now);
|
||||
}
|
||||
|
||||
// Update the gc statistics for each generation.
|
||||
void update_gc_stats(Generation* current_generation, bool full) {
|
||||
_old_gen->update_gc_stats(current_generation, full);
|
||||
|
@ -75,7 +75,6 @@ struct ScratchBlock {
|
||||
class Generation: public CHeapObj<mtGC> {
|
||||
friend class VMStructs;
|
||||
private:
|
||||
jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
|
||||
MemRegion _prev_used_region; // for collectors that want to "remember" a value for
|
||||
// used region at some specific point during collection.
|
||||
|
||||
@ -363,25 +362,6 @@ class Generation: public CHeapObj<mtGC> {
|
||||
// activity to make them parsable again. The default is to do nothing.
|
||||
virtual void ensure_parsability() {}
|
||||
|
||||
// Time (in ms) when we were last collected or now if a collection is
|
||||
// in progress.
|
||||
virtual jlong time_of_last_gc(jlong now) {
|
||||
// Both _time_of_last_gc and now are set using a time source
|
||||
// that guarantees monotonically non-decreasing values provided
|
||||
// the underlying platform provides such a source. So we still
|
||||
// have to guard against non-monotonicity.
|
||||
NOT_PRODUCT(
|
||||
if (now < _time_of_last_gc) {
|
||||
log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now);
|
||||
}
|
||||
)
|
||||
return _time_of_last_gc;
|
||||
}
|
||||
|
||||
virtual void update_time_of_last_gc(jlong now) {
|
||||
_time_of_last_gc = now;
|
||||
}
|
||||
|
||||
// Generations may keep statistics about collection. This method
|
||||
// updates those statistics. current_generation is the generation
|
||||
// that was most recently collected. This allows the generation to
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -37,8 +37,8 @@ class OopStorageSet : public AllStatic {

public:
// Must be updated when new OopStorages are introduced
static const uint strong_count = 2;
static const uint weak_count = 4;
static const uint strong_count = 4 JVMTI_ONLY(+ 1);
static const uint weak_count = 4 JFR_ONLY(+ 1);
static const uint all_count = strong_count + weak_count;

private:
@ -27,19 +27,15 @@
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
#if INCLUDE_JFR
|
||||
#include "jfr/jfr.hpp"
|
||||
#endif // INCLUDE_JFR
|
||||
|
||||
#if INCLUDE_JVMTI
|
||||
#include "prims/jvmtiExport.hpp"
|
||||
#endif // INCLUDE_JVMTI
|
||||
|
||||
// serial_phase_count is 0 if JFR and JVMTI are both not built,
|
||||
// serial_phase_count is 0 if JVMTI is not built,
|
||||
// requiring some code to be careful to avoid tautological checks
|
||||
// that some compilers warn about.
|
||||
|
||||
#define HAVE_SERIAL_PHASES (INCLUDE_JVMTI || INCLUDE_JFR)
|
||||
#define HAVE_SERIAL_PHASES INCLUDE_JVMTI
|
||||
|
||||
WeakProcessorPhases::Phase WeakProcessorPhases::serial_phase(uint value) {
|
||||
#if HAVE_SERIAL_PHASES
|
||||
@ -109,7 +105,6 @@ void WeakProcessorPhases::Iterator::verify_dereferenceable() const {
|
||||
const char* WeakProcessorPhases::description(Phase phase) {
|
||||
switch (phase) {
|
||||
JVMTI_ONLY(case jvmti: return "JVMTI weak processing";)
|
||||
JFR_ONLY(case jfr: return "JFR weak processing";)
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
return "Invalid serial weak processing phase";
|
||||
@ -119,7 +114,6 @@ const char* WeakProcessorPhases::description(Phase phase) {
|
||||
WeakProcessorPhases::Processor WeakProcessorPhases::processor(Phase phase) {
|
||||
switch (phase) {
|
||||
JVMTI_ONLY(case jvmti: return &JvmtiExport::weak_oops_do;)
|
||||
JFR_ONLY(case jfr: return &Jfr::weak_oops_do;)
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
|
@ -41,15 +41,14 @@ public:
|
||||
typedef void (*Processor)(BoolObjectClosure*, OopClosure*);
|
||||
|
||||
enum Phase {
|
||||
// Serial phases.
|
||||
JVMTI_ONLY(jvmti JFR_ONLY(COMMA))
|
||||
JFR_ONLY(jfr)
|
||||
// Serial phase.
|
||||
JVMTI_ONLY(jvmti)
|
||||
|
||||
// Additional implicit phase values follow for oopstorages.
|
||||
};
|
||||
|
||||
static const uint serial_phase_start = 0;
|
||||
static const uint serial_phase_count = 0 JVMTI_ONLY(+ 1) JFR_ONLY(+ 1);
|
||||
static const uint serial_phase_count = 0 JVMTI_ONLY(+ 1);
|
||||
static const uint oopstorage_phase_start = serial_phase_count;
|
||||
static const uint oopstorage_phase_count = OopStorageSet::weak_count;
|
||||
static const uint phase_count = serial_phase_count + oopstorage_phase_count;
|
||||
|
@ -50,7 +50,7 @@ ShenandoahControlThread::ShenandoahControlThread() :
|
||||
_allocs_seen(0) {
|
||||
|
||||
reset_gc_id();
|
||||
create_and_start(ShenandoahCriticalControlThreadPriority ? CriticalPriority : NearMaxPriority);
|
||||
create_and_start();
|
||||
_periodic_task.enroll();
|
||||
_periodic_satb_flush_task.enroll();
|
||||
if (ShenandoahPacing) {
|
||||
@ -235,6 +235,9 @@ void ShenandoahControlThread::run_service() {
|
||||
// global soft refs policy, and we better report it every time heap
|
||||
// usage goes down.
|
||||
Universe::update_heap_info_at_gc();
|
||||
|
||||
// Signal that we have completed a visit to all live objects.
|
||||
Universe::heap()->record_whole_heap_examined_timestamp();
|
||||
}
|
||||
|
||||
// Disable forced counters update, and update counters one more time
|
||||
|
@ -82,20 +82,6 @@
|
||||
#include "services/mallocTracker.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
#ifdef ASSERT
|
||||
template <class T>
|
||||
void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
|
||||
T o = RawAccess<>::oop_load(p);
|
||||
if (! CompressedOops::is_null(o)) {
|
||||
oop obj = CompressedOops::decode_not_null(o);
|
||||
shenandoah_assert_not_forwarded(p, obj);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
|
||||
void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_work(p); }
|
||||
#endif
|
||||
|
||||
class ShenandoahPretouchHeapTask : public AbstractGangTask {
|
||||
private:
|
||||
ShenandoahRegionIterator _regions;
|
||||
@ -1193,12 +1179,6 @@ bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
|
||||
return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
|
||||
}
|
||||
|
||||
jlong ShenandoahHeap::millis_since_last_gc() {
|
||||
double v = heuristics()->time_since_last_gc() * 1000;
|
||||
assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
|
||||
return (jlong)v;
|
||||
}
|
||||
|
||||
void ShenandoahHeap::prepare_for_verify() {
|
||||
if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
|
||||
labs_make_parsable();
|
||||
|
@ -105,17 +105,6 @@ public:
|
||||
virtual bool is_thread_safe() { return false; }
|
||||
};
|
||||
|
||||
#ifdef ASSERT
|
||||
class ShenandoahAssertToSpaceClosure : public OopClosure {
|
||||
private:
|
||||
template <class T>
|
||||
void do_oop_work(T* p);
|
||||
public:
|
||||
void do_oop(narrowOop* p);
|
||||
void do_oop(oop* p);
|
||||
};
|
||||
#endif
|
||||
|
||||
typedef ShenandoahLock ShenandoahHeapLock;
|
||||
typedef ShenandoahLocker ShenandoahHeapLocker;
|
||||
|
||||
@ -556,9 +545,6 @@ public:
|
||||
// Keep alive an object that was loaded with AS_NO_KEEPALIVE.
|
||||
void keep_alive(oop obj);
|
||||
|
||||
// Used by RMI
|
||||
jlong millis_since_last_gc();
|
||||
|
||||
// ---------- Safepoint interface hooks
|
||||
//
|
||||
public:
|
||||
|
@ -37,13 +37,11 @@ class outputStream;
|
||||
f(CNT_PREFIX ## TotalWork, DESC_PREFIX "<total>") \
|
||||
f(CNT_PREFIX ## ThreadRoots, DESC_PREFIX "Thread Roots") \
|
||||
f(CNT_PREFIX ## CodeCacheRoots, DESC_PREFIX "Code Cache Roots") \
|
||||
f(CNT_PREFIX ## UniverseRoots, DESC_PREFIX "Universe Roots") \
|
||||
f(CNT_PREFIX ## VMStrongRoots, DESC_PREFIX "VM Strong Roots") \
|
||||
f(CNT_PREFIX ## VMWeakRoots, DESC_PREFIX "VM Weak Roots") \
|
||||
f(CNT_PREFIX ## ObjectSynchronizerRoots, DESC_PREFIX "Synchronizer Roots") \
|
||||
f(CNT_PREFIX ## CLDGRoots, DESC_PREFIX "CLDG Roots") \
|
||||
f(CNT_PREFIX ## JVMTIWeakRoots, DESC_PREFIX "JVMTI Weak Roots") \
|
||||
f(CNT_PREFIX ## JFRWeakRoots, DESC_PREFIX "JFR Weak Roots") \
|
||||
f(CNT_PREFIX ## StringDedupTableRoots, DESC_PREFIX "Dedup Table Roots") \
|
||||
f(CNT_PREFIX ## StringDedupQueueRoots, DESC_PREFIX "Dedup Queue Roots") \
|
||||
f(CNT_PREFIX ## FinishQueues, DESC_PREFIX "Finish Queues") \
|
||||
|
@ -35,10 +35,8 @@
|
||||
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
|
||||
#include "gc/shenandoah/shenandoahStringDedup.hpp"
|
||||
#include "gc/shenandoah/shenandoahVMOperations.hpp"
|
||||
#include "jfr/jfr.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
|
||||
ShenandoahSerialRoot::ShenandoahSerialRoot(ShenandoahSerialRoot::OopsDo oops_do,
|
||||
@ -54,12 +52,10 @@ void ShenandoahSerialRoot::oops_do(OopClosure* cl, uint worker_id) {
|
||||
}
|
||||
|
||||
ShenandoahSerialRoots::ShenandoahSerialRoots(ShenandoahPhaseTimings::Phase phase) :
|
||||
_universe_root(&Universe::oops_do, phase, ShenandoahPhaseTimings::UniverseRoots),
|
||||
_object_synchronizer_root(&ObjectSynchronizer::oops_do, phase, ShenandoahPhaseTimings::ObjectSynchronizerRoots) {
|
||||
}
|
||||
|
||||
void ShenandoahSerialRoots::oops_do(OopClosure* cl, uint worker_id) {
|
||||
_universe_root.oops_do(cl, worker_id);
|
||||
_object_synchronizer_root.oops_do(cl, worker_id);
|
||||
}
|
||||
|
||||
@ -81,15 +77,8 @@ ShenandoahJVMTIWeakRoot::ShenandoahJVMTIWeakRoot(ShenandoahPhaseTimings::Phase p
|
||||
}
|
||||
#endif // INCLUDE_JVMTI
|
||||
|
||||
#if INCLUDE_JFR
|
||||
ShenandoahJFRWeakRoot::ShenandoahJFRWeakRoot(ShenandoahPhaseTimings::Phase phase) :
|
||||
ShenandoahWeakSerialRoot(&Jfr::weak_oops_do, phase, ShenandoahPhaseTimings::JFRWeakRoots) {
|
||||
}
|
||||
#endif // INCLUDE_JFR
|
||||
|
||||
void ShenandoahSerialWeakRoots::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id) {
|
||||
JVMTI_ONLY(_jvmti_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);)
|
||||
JFR_ONLY(_jfr_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);)
|
||||
}
|
||||
|
||||
void ShenandoahSerialWeakRoots::weak_oops_do(OopClosure* cl, uint worker_id) {
|
||||
|
@ -51,7 +51,6 @@ public:
|
||||
|
||||
class ShenandoahSerialRoots {
|
||||
private:
|
||||
ShenandoahSerialRoot _universe_root;
|
||||
ShenandoahSerialRoot _object_synchronizer_root;
|
||||
public:
|
||||
ShenandoahSerialRoots(ShenandoahPhaseTimings::Phase phase);
|
||||
@ -79,21 +78,12 @@ public:
|
||||
};
|
||||
#endif // INCLUDE_JVMTI
|
||||
|
||||
#if INCLUDE_JFR
|
||||
class ShenandoahJFRWeakRoot : public ShenandoahWeakSerialRoot {
|
||||
public:
|
||||
ShenandoahJFRWeakRoot(ShenandoahPhaseTimings::Phase phase);
|
||||
};
|
||||
#endif // INCLUDE_JFR
|
||||
|
||||
class ShenandoahSerialWeakRoots {
|
||||
private:
|
||||
JVMTI_ONLY(ShenandoahJVMTIWeakRoot _jvmti_weak_roots;)
|
||||
JFR_ONLY(ShenandoahJFRWeakRoot _jfr_weak_roots;)
|
||||
public:
|
||||
ShenandoahSerialWeakRoots(ShenandoahPhaseTimings::Phase phase)
|
||||
JVMTI_ONLY(: _jvmti_weak_roots(phase))
|
||||
JFR_ONLY(NOT_JVMTI(:) JVMTI_ONLY(COMMA) _jfr_weak_roots(phase))
|
||||
{};
|
||||
|
||||
void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id);
|
||||
|
@ -37,7 +37,6 @@
|
||||
#include "gc/shared/oopStorage.inline.hpp"
|
||||
#include "gc/shared/oopStorageSet.hpp"
|
||||
#include "gc/shared/weakProcessor.inline.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
@ -75,7 +74,6 @@ void ShenandoahRootVerifier::oops_do(OopClosure* oops) {
|
||||
|
||||
if (verify(SerialRoots)) {
|
||||
shenandoah_assert_safepoint();
|
||||
Universe::oops_do(oops);
|
||||
ObjectSynchronizer::oops_do(oops);
|
||||
}
|
||||
|
||||
@ -119,7 +117,6 @@ void ShenandoahRootVerifier::roots_do(OopClosure* oops) {
|
||||
CLDToOopClosure clds(oops, ClassLoaderData::_claim_none);
|
||||
ClassLoaderDataGraph::cld_do(&clds);
|
||||
|
||||
Universe::oops_do(oops);
|
||||
JNIHandles::oops_do(oops);
|
||||
ObjectSynchronizer::oops_do(oops);
|
||||
Universe::vm_global()->oops_do(oops);
|
||||
@ -145,7 +142,6 @@ void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) {
|
||||
CLDToOopClosure clds(oops, ClassLoaderData::_claim_none);
|
||||
ClassLoaderDataGraph::roots_cld_do(&clds, NULL);
|
||||
|
||||
Universe::oops_do(oops);
|
||||
JNIHandles::oops_do(oops);
|
||||
ObjectSynchronizer::oops_do(oops);
|
||||
Universe::vm_global()->oops_do(oops);
|
||||
|
@ -191,9 +191,6 @@
|
||||
"adjustment. Lower values make adjustments faster, at the " \
|
||||
"expense of higher perf overhead. Time is in milliseconds.") \
|
||||
\
|
||||
experimental(bool, ShenandoahCriticalControlThreadPriority, false, \
|
||||
"Run control thread runs at critical scheduling priority.") \
|
||||
\
|
||||
diagnostic(bool, ShenandoahVerify, false, \
|
||||
"Enable internal verification. This would catch many GC bugs, " \
|
||||
"but it would also stall the collector during the verification, " \
|
||||
|
@ -99,18 +99,10 @@ void ZArguments::initialize() {
|
||||
FLAG_SET_DEFAULT(VerifyDuringStartup, false);
|
||||
FLAG_SET_DEFAULT(VerifyBeforeExit, false);
|
||||
|
||||
// Verification before heap iteration not (yet) supported, for the
|
||||
// same reason we need fixup_partial_loads
|
||||
FLAG_SET_DEFAULT(VerifyBeforeIteration, false);
|
||||
|
||||
if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
|
||||
FLAG_SET_DEFAULT(ZVerifyRoots, true);
|
||||
FLAG_SET_DEFAULT(ZVerifyObjects, true);
|
||||
}
|
||||
|
||||
// Verification of stacks not (yet) supported, for the same reason
|
||||
// we need fixup_partial_loads
|
||||
DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));
|
||||
}
|
||||
|
||||
size_t ZArguments::conservative_max_heap_alignment() {
|
||||
|
@ -277,10 +277,6 @@ WorkGang* ZCollectedHeap::get_safepoint_workers() {
|
||||
return _runtime_workers.workers();
|
||||
}
|
||||
|
||||
jlong ZCollectedHeap::millis_since_last_gc() {
|
||||
return ZStatCycle::time_since_last() / MILLIUNITS;
|
||||
}
|
||||
|
||||
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
|
||||
tc->do_thread(_director);
|
||||
tc->do_thread(_driver);
|
||||
|
@@ -107,8 +107,6 @@ public:

  virtual WorkGang* get_safepoint_workers();

  virtual jlong millis_since_last_gc();

  virtual void gc_threads_do(ThreadClosure* tc) const;

  virtual VirtualSpaceSummary create_heap_space_summary();
@@ -381,6 +381,9 @@ public:

    // Update data used by soft reference policy
    Universe::update_heap_info_at_gc();

    // Signal that we have completed a visit to all live objects
    Universe::heap()->record_whole_heap_examined_timestamp();
  }
};
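The hunks above replace a per-heap millis_since_last_gc() query with a timestamp that is recorded once a collection has visited every live object (record_whole_heap_examined_timestamp). A minimal standalone sketch of that idea, with hypothetical names rather than the HotSpot interfaces:

// whole_heap_timestamp_sketch.cpp (illustrative only)
#include <chrono>
#include <cstdint>

class WholeHeapTimestamp {
  using Clock = std::chrono::steady_clock;
  Clock::time_point _last_examined = Clock::now();   // when the whole heap was last examined

public:
  // Called by a collector after it has visited all live objects.
  void record_whole_heap_examined() { _last_examined = Clock::now(); }

  // Derived on demand instead of being maintained by every collector.
  int64_t millis_since_whole_heap_examined() const {
    return std::chrono::duration_cast<std::chrono::milliseconds>(Clock::now() - _last_examined).count();
  }
};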
@@ -64,7 +64,6 @@ private:
  void flip_to_remapped();

  void out_of_memory();
  void fixup_partial_loads();

public:
  static ZHeap* heap();
@@ -50,14 +50,10 @@
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/debug.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif

static const ZStatSubPhase ZSubPhasePauseRootsSetup("Pause Roots Setup");
static const ZStatSubPhase ZSubPhasePauseRoots("Pause Roots");
static const ZStatSubPhase ZSubPhasePauseRootsTeardown("Pause Roots Teardown");
static const ZStatSubPhase ZSubPhasePauseRootsUniverse("Pause Roots Universe");
static const ZStatSubPhase ZSubPhasePauseRootsObjectSynchronizer("Pause Roots ObjectSynchronizer");
static const ZStatSubPhase ZSubPhasePauseRootsJVMTIWeakExport("Pause Roots JVMTIWeakExport");
static const ZStatSubPhase ZSubPhasePauseRootsVMThread("Pause Roots VM Thread");

@@ -74,7 +70,6 @@ static const ZStatSubPhase ZSubPhasePauseWeakRootsSetup("Pause Weak Roots Setup"
static const ZStatSubPhase ZSubPhasePauseWeakRoots("Pause Weak Roots");
static const ZStatSubPhase ZSubPhasePauseWeakRootsTeardown("Pause Weak Roots Teardown");
static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTIWeakExport("Pause Weak Roots JVMTIWeakExport");
static const ZStatSubPhase ZSubPhasePauseWeakRootsJFRWeak("Pause Weak Roots JFRWeak");

static const ZStatSubPhase ZSubPhaseConcurrentWeakRoots("Concurrent Weak Roots");
static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsOopStorageSet("Concurrent Weak Roots OopStorageSet");

@@ -189,7 +184,6 @@ void ZJavaThreadsIterator::threads_do(ThreadClosure* cl) {
ZRootsIterator::ZRootsIterator(bool visit_jvmti_weak_export) :
    _visit_jvmti_weak_export(visit_jvmti_weak_export),
    _java_threads_iter(),
    _universe(this),
    _object_synchronizer(this),
    _jvmti_weak_export(this),
    _vm_thread(this),

@@ -217,11 +211,6 @@ ZRootsIterator::~ZRootsIterator() {
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());
}

void ZRootsIterator::do_universe(ZRootsIteratorClosure* cl) {
  ZStatTimer timer(ZSubPhasePauseRootsUniverse);
  Universe::oops_do(cl);
}

void ZRootsIterator::do_object_synchronizer(ZRootsIteratorClosure* cl) {
  ZStatTimer timer(ZSubPhasePauseRootsObjectSynchronizer);
  ObjectSynchronizer::oops_do(cl);

@@ -252,7 +241,6 @@ void ZRootsIterator::do_code_cache(ZRootsIteratorClosure* cl) {

void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
  ZStatTimer timer(ZSubPhasePauseRoots);
  _universe.oops_do(cl);
  _object_synchronizer.oops_do(cl);
  _vm_thread.oops_do(cl);
  _java_threads.oops_do(cl);
@@ -295,8 +283,7 @@ void ZConcurrentRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
 }
 
 ZWeakRootsIterator::ZWeakRootsIterator() :
-    _jvmti_weak_export(this),
-    _jfr_weak(this) {
+    _jvmti_weak_export(this) {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
   ZStatTimer timer(ZSubPhasePauseWeakRootsSetup);
 }
@@ -310,17 +297,9 @@ void ZWeakRootsIterator::do_jvmti_weak_export(BoolObjectClosure* is_alive, ZRoot
   JvmtiExport::weak_oops_do(is_alive, cl);
 }
 
-void ZWeakRootsIterator::do_jfr_weak(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl) {
-#if INCLUDE_JFR
-  ZStatTimer timer(ZSubPhasePauseWeakRootsJFRWeak);
-  Jfr::weak_oops_do(is_alive, cl);
-#endif
-}
-
 void ZWeakRootsIterator::weak_oops_do(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseWeakRoots);
   _jvmti_weak_export.weak_oops_do(is_alive, cl);
-  _jfr_weak.weak_oops_do(is_alive, cl);
 }
 
 void ZWeakRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
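The ZWeakRootsIterator functions changed above follow the usual weak-root protocol: a liveness closure decides whether each weak referent survived, and only surviving slots are visited (and possibly updated), while dead ones are cleared. A minimal standalone sketch of that protocol, using hypothetical types rather than the real BoolObjectClosure and OopClosure:

// weak_roots_sketch.cpp (illustrative only)
#include <functional>
#include <vector>

struct Object { bool marked = false; };

void weak_oops_do(std::vector<Object*>& weak_slots,
                  const std::function<bool(Object*)>& is_alive,
                  const std::function<void(Object*&)>& keep_alive) {
  for (Object*& slot : weak_slots) {
    if (slot == nullptr) continue;
    if (is_alive(slot)) {
      keep_alive(slot);   // referent survives; the closure may also fix up the slot
    } else {
      slot = nullptr;     // referent is dead; clear the weak reference
    }
  }
}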
@@ -110,14 +110,12 @@ private:
  const bool _visit_jvmti_weak_export;
  ZJavaThreadsIterator _java_threads_iter;

  void do_universe(ZRootsIteratorClosure* cl);
  void do_object_synchronizer(ZRootsIteratorClosure* cl);
  void do_jvmti_weak_export(ZRootsIteratorClosure* cl);
  void do_vm_thread(ZRootsIteratorClosure* cl);
  void do_java_threads(ZRootsIteratorClosure* cl);
  void do_code_cache(ZRootsIteratorClosure* cl);

  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_universe> _universe;
  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_object_synchronizer> _object_synchronizer;
  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jvmti_weak_export> _jvmti_weak_export;
  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_vm_thread> _vm_thread;
@@ -170,10 +168,8 @@ public:
class ZWeakRootsIterator {
private:
  void do_jvmti_weak_export(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl);
  void do_jfr_weak(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl);

  ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jvmti_weak_export> _jvmti_weak_export;
  ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jfr_weak> _jfr_weak;

public:
  ZWeakRootsIterator();
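The ZSerialOopsDo and ZSerialWeakOopsDo members above each wrap one do_* function, presumably so that each serial root group is processed exactly once even when several GC workers drive the iterator. A standalone sketch of that claim-once idea follows; it is an assumption about the wrapper's purpose, not the actual HotSpot implementation.

// serial_once_sketch.cpp (illustrative only)
#include <atomic>

// Runs the wrapped callback at most once, no matter how many workers call oops_do().
template <typename Closure, typename Fn>
class SerialOnce {
  std::atomic<bool> _claimed{false};
  Fn _fn;
public:
  explicit SerialOnce(Fn fn) : _fn(fn) {}

  void oops_do(Closure* cl) {
    bool expected = false;
    if (_claimed.compare_exchange_strong(expected, true)) {
      _fn(cl);   // executed by exactly one worker; the others skip it
    }
  }
};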
@@ -328,7 +328,7 @@ Method* LinkResolver::lookup_method_in_klasses(const LinkInfo& link_info,
   Symbol* signature = link_info.signature();
 
   // Ignore overpasses so statics can be found during resolution
-  Method* result = klass->uncached_lookup_method(name, signature, Klass::skip_overpass);
+  Method* result = klass->uncached_lookup_method(name, signature, Klass::OverpassLookupMode::skip);
 
   if (klass->is_array_klass()) {
     // Only consider klass and super klass for arrays
@@ -377,11 +377,11 @@ Method* LinkResolver::lookup_instance_method_in_klasses(Klass* klass,
                                                         Symbol* name,
                                                         Symbol* signature,
                                                         Klass::PrivateLookupMode private_mode, TRAPS) {
-  Method* result = klass->uncached_lookup_method(name, signature, Klass::find_overpass, private_mode);
+  Method* result = klass->uncached_lookup_method(name, signature, Klass::OverpassLookupMode::find, private_mode);
 
   while (result != NULL && result->is_static() && result->method_holder()->super() != NULL) {
     Klass* super_klass = result->method_holder()->super();
-    result = super_klass->uncached_lookup_method(name, signature, Klass::find_overpass, private_mode);
+    result = super_klass->uncached_lookup_method(name, signature, Klass::OverpassLookupMode::find, private_mode);
   }
 
   if (klass->is_array_klass()) {
@@ -410,8 +410,10 @@ int LinkResolver::vtable_index_of_interface_method(Klass* klass,
   // First check in default method array
   if (!resolved_method->is_abstract() && ik->default_methods() != NULL) {
     int index = InstanceKlass::find_method_index(ik->default_methods(),
-                                                 name, signature, Klass::find_overpass,
-                                                 Klass::find_static, Klass::find_private);
+                                                 name, signature,
+                                                 Klass::OverpassLookupMode::find,
+                                                 Klass::StaticLookupMode::find,
+                                                 Klass::PrivateLookupMode::find);
     if (index >= 0 ) {
       vtable_index = ik->default_vtable_indices()->at(index);
     }
@@ -430,7 +432,7 @@ Method* LinkResolver::lookup_method_in_interfaces(const LinkInfo& cp_info) {
   // Specify 'true' in order to skip default methods when searching the
   // interfaces. Function lookup_method_in_klasses() already looked for
   // the method in the default methods table.
-  return ik->lookup_method_in_all_interfaces(cp_info.name(), cp_info.signature(), Klass::skip_defaults);
+  return ik->lookup_method_in_all_interfaces(cp_info.name(), cp_info.signature(), Klass::DefaultsLookupMode::skip);
 }
 
 Method* LinkResolver::lookup_polymorphic_method(const LinkInfo& link_info,
@@ -1087,7 +1089,7 @@ void LinkResolver::resolve_static_call(CallInfo& result,
     // Use updated LinkInfo to reresolve with resolved method holder
     LinkInfo new_info(resolved_klass, link_info.name(), link_info.signature(),
                       link_info.current_klass(),
-                      link_info.check_access() ? LinkInfo::needs_access_check : LinkInfo::skip_access_check);
+                      link_info.check_access() ? LinkInfo::AccessCheck::required : LinkInfo::AccessCheck::skip);
     resolved_method = linktime_resolve_static_method(new_info, CHECK);
   }
 
@@ -1236,7 +1238,7 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result,
       Method* instance_method = lookup_instance_method_in_klasses(super_klass,
                                                                   resolved_method->name(),
                                                                   resolved_method->signature(),
-                                                                  Klass::find_private, CHECK);
+                                                                  Klass::PrivateLookupMode::find, CHECK);
       sel_method = methodHandle(THREAD, instance_method);
 
       // check if found
@@ -1478,7 +1480,7 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result,
     Method* method = lookup_instance_method_in_klasses(recv_klass,
                                                        resolved_method->name(),
                                                        resolved_method->signature(),
-                                                       Klass::skip_private, CHECK);
+                                                       Klass::PrivateLookupMode::skip, CHECK);
     selected_method = methodHandle(THREAD, method);
 
     if (selected_method.is_null() && !check_null_and_abstract) {
@@ -144,28 +144,25 @@ class LinkInfo : public StackObj {
   constantTag _tag;
 
  public:
-  enum AccessCheck {
-    needs_access_check,
-    skip_access_check
-  };
+  enum class AccessCheck { required, skip };
 
   LinkInfo(const constantPoolHandle& pool, int index, const methodHandle& current_method, TRAPS);
   LinkInfo(const constantPoolHandle& pool, int index, TRAPS);
 
   // Condensed information from other call sites within the vm.
   LinkInfo(Klass* resolved_klass, Symbol* name, Symbol* signature, Klass* current_klass,
-           AccessCheck check_access = needs_access_check,
+           AccessCheck check_access = AccessCheck::required,
            constantTag tag = JVM_CONSTANT_Invalid) :
     _name(name),
     _signature(signature), _resolved_klass(resolved_klass), _current_klass(current_klass), _current_method(methodHandle()),
-    _check_access(check_access == needs_access_check), _tag(tag) {}
+    _check_access(check_access == AccessCheck::required), _tag(tag) {}
 
   LinkInfo(Klass* resolved_klass, Symbol* name, Symbol* signature, const methodHandle& current_method,
-           AccessCheck check_access = needs_access_check,
+           AccessCheck check_access = AccessCheck::required,
            constantTag tag = JVM_CONSTANT_Invalid) :
     _name(name),
     _signature(signature), _resolved_klass(resolved_klass), _current_klass(current_method->method_holder()), _current_method(current_method),
-    _check_access(check_access == needs_access_check), _tag(tag) {}
+    _check_access(check_access == AccessCheck::required), _tag(tag) {}
 
   // Case where we just find the method and don't check access against the current class
   LinkInfo(Klass* resolved_klass, Symbol*name, Symbol* signature) :
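The LinkInfo hunk above, like the Klass::OverpassLookupMode, StaticLookupMode, PrivateLookupMode and DefaultsLookupMode changes in linkResolver.cpp, converts a plain enum into a C++11 scoped enum: the enumerators no longer spill into the enclosing scope, call sites must qualify them (AccessCheck::required), and implicit conversion to int goes away. A small standalone illustration of the same migration, not taken from HotSpot:

// scoped_enum_sketch.cpp (illustrative only)
enum class AccessCheck { required, skip };   // was: enum AccessCheck { needs_access_check, skip_access_check };

struct Resolver {
  bool _check_access;
  explicit Resolver(AccessCheck check = AccessCheck::required)
    : _check_access(check == AccessCheck::required) {}   // explicit comparison; no implicit int conversion
};

int main() {
  Resolver with_checks;                        // defaults to AccessCheck::required
  Resolver without_checks(AccessCheck::skip);  // callers must name the enum's scope
  return (with_checks._check_access && !without_checks._check_access) ? 0 : 1;
}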
@@ -102,12 +102,6 @@ void Jfr::on_vm_error_report(outputStream* st) {
   }
 }
 
-void Jfr::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
-  if (LeakProfiler::is_running()) {
-    LeakProfiler::weak_oops_do(is_alive, f);
-  }
-}
-
 bool Jfr::on_flight_recorder_option(const JavaVMOption** option, char* delimiter) {
   return JfrOptionSet::parse_flight_recorder_option(option, delimiter);
 }
@@ -28,9 +28,7 @@
 #include "jni.h"
 #include "memory/allocation.hpp"
 
-class BoolObjectClosure;
 class JavaThread;
-class OopClosure;
 class Thread;
 
 extern "C" void JNICALL jfr_register_natives(JNIEnv*, jclass);
@@ -53,7 +51,6 @@ class Jfr : AllStatic {
   static bool on_flight_recorder_option(const JavaVMOption** option, char* delimiter);
   static bool on_start_flight_recording_option(const JavaVMOption** option, char* delimiter);
   static void on_vm_error_report(outputStream* st);
-  static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
   static void exclude_thread(Thread* thread);
   static bool is_excluded(Thread* thread);
   static void include_thread(Thread* thread);
@@ -34,7 +34,6 @@
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/synchronizer.hpp"

@@ -73,7 +72,6 @@ void RootSetClosure<Delegate>::process() {
  // We don't follow code blob oops, because they have misaligned oops.
  Threads::oops_do(this, NULL);
  ObjectSynchronizer::oops_do(this);
  Universe::oops_do(this);
  OopStorageSet::strong_oops_do(this);
  AOTLoader::oops_do(this);
}
Some files were not shown because too many files have changed in this diff