diff --git a/common/doc/building.md b/common/doc/building.md
index 101d041103b..5767ee6f64c 100644
--- a/common/doc/building.md
+++ b/common/doc/building.md
@@ -648,19 +648,6 @@ Hotspot.
Use `--with-libffi=` if `configure` does not properly locate your libffi
files.
-### libelf
-
-libelf from the [elfutils project](http://sourceware.org/elfutils) is required
-when building the AOT feature of Hotspot.
-
- * To install on an apt-based Linux, try running `sudo apt-get install
- libelf-dev`.
- * To install on an rpm-based Linux, try running `sudo yum install
- elfutils-libelf-devel`.
-
-Use `--with-libelf=` if `configure` does not properly locate your libelf
-files.
-
## Other Tooling Requirements
### GNU Make
@@ -813,7 +800,6 @@ features, use `bash configure --help=short` instead.)
* `--with-x=` - Set the path to [X11](#x11)
* `--with-alsa=` - Set the path to [ALSA](#alsa)
* `--with-libffi=` - Set the path to [libffi](#libffi)
- * `--with-libelf=` - Set the path to [libelf](#libelf)
* `--with-jtreg=` - Set the path to JTReg. See [Running Tests](
#running-tests)
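For context on the flag list above: these options are passed straight to the top-level `configure` script. A minimal illustrative invocation (the paths below are placeholders, not values taken from this patch) might look like:

    bash configure --with-jtreg=/opt/jtreg --with-libffi=/usr/local/libffi

Substitute whichever locations `configure` fails to detect on its own; flags for libraries it already finds can be omitted.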
diff --git a/corba/.hgtags b/corba/.hgtags
index 3955e7c2528..c1ceb6e6be3 100644
--- a/corba/.hgtags
+++ b/corba/.hgtags
@@ -444,3 +444,4 @@ a923b3f30e7bddb4f960059ddfc7978fc63e2e6e jdk-10+18
28488561cfbcfa4d0d9c489e8afe0155f4231360 jdk-10+19
6ce6cb8ff41c71c49f23b15e0f0468aca5d52b17 jdk-9+180
ba71941ad9dba53b8fffb30602ef673eee88696c jdk-9+181
+7a54ec280513a33e49e60546c0cf9ca573925a43 jdk-10+20
diff --git a/hotspot/.hgtags b/hotspot/.hgtags
index 282c0940830..005d486ea58 100644
--- a/hotspot/.hgtags
+++ b/hotspot/.hgtags
@@ -604,3 +604,4 @@ c9d3317623d48da3327232c81e3f8cfc0d29d888 jdk-10+18
33b74e13c1457f36041addb8b850831f81ca6e9f jdk-10+19
d7baadc223e790c08bc69bf7e553bce65b4e7e40 jdk-9+180
4a443796f6f57842d6a0434ac27ca3d1033ccc20 jdk-9+181
+e93ed1a092409351c90b3a76d80b9aa8b44d5e6a jdk-10+20
diff --git a/hotspot/make/CompileTools.gmk b/hotspot/make/CompileTools.gmk
index c80632226cf..309d9c9aaf6 100644
--- a/hotspot/make/CompileTools.gmk
+++ b/hotspot/make/CompileTools.gmk
@@ -47,11 +47,10 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
- $(SRC_DIR)/org.graalvm.compiler.common/src \
+ $(SRC_DIR)/org.graalvm.word/src \
$(SRC_DIR)/org.graalvm.compiler.core/src \
$(SRC_DIR)/org.graalvm.compiler.core.common/src \
$(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
- $(SRC_DIR)/org.graalvm.compiler.api.collections/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
$(SRC_DIR)/org.graalvm.compiler.asm/src \
$(SRC_DIR)/org.graalvm.compiler.bytecode/src \
@@ -68,6 +67,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.phases.common/src \
$(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
$(SRC_DIR)/org.graalvm.compiler.virtual/src \
+ $(SRC_DIR)/org.graalvm.util/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \
@@ -102,6 +102,7 @@ ifeq ($(INCLUDE_GRAAL), true)
SRC := \
$(SRC_DIR)/org.graalvm.compiler.options/src \
$(SRC_DIR)/org.graalvm.compiler.options.processor/src \
+ $(SRC_DIR)/org.graalvm.util/src \
, \
BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor, \
JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar, \
@@ -114,9 +115,8 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
- $(SRC_DIR)/org.graalvm.compiler.common/src \
+ $(SRC_DIR)/org.graalvm.word/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
- $(SRC_DIR)/org.graalvm.compiler.api.collections/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
$(SRC_DIR)/org.graalvm.compiler.code/src \
$(SRC_DIR)/org.graalvm.compiler.core.common/src \
@@ -125,6 +125,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.nodeinfo/src \
$(SRC_DIR)/org.graalvm.compiler.options/src \
$(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
+ $(SRC_DIR)/org.graalvm.util/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \
diff --git a/hotspot/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk b/hotspot/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk
index a8e60309b1c..623430abab6 100644
--- a/hotspot/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk
+++ b/hotspot/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk
@@ -37,7 +37,6 @@ SRC_DIR := $(HOTSPOT_TOPDIR)/src/$(MODULE)/share/classes
PROC_SRC_SUBDIRS := \
org.graalvm.compiler.code \
- org.graalvm.compiler.common \
org.graalvm.compiler.core \
org.graalvm.compiler.core.aarch64 \
org.graalvm.compiler.core.amd64 \
diff --git a/hotspot/make/gensrc/GensrcAdlc.gmk b/hotspot/make/gensrc/GensrcAdlc.gmk
index 3153476960b..ef3b1cf9a12 100644
--- a/hotspot/make/gensrc/GensrcAdlc.gmk
+++ b/hotspot/make/gensrc/GensrcAdlc.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,7 @@ ifeq ($(call check-jvm-feature, compiler2), true)
OUTPUT_DIR := $(JVM_VARIANT_OUTPUTDIR)/tools/adlc, \
PROGRAM := adlc, \
DEBUG_SYMBOLS := false, \
- DISABLED_WARNINGS_clang := parentheses tautological-compare, \
+ DISABLED_WARNINGS_clang := tautological-compare, \
DISABLED_WARNINGS_solstudio := notemsource, \
))
diff --git a/hotspot/make/lib/CompileDtracePostJvm.gmk b/hotspot/make/lib/CompileDtracePostJvm.gmk
index 77a4e30f4d9..1051cafe8dd 100644
--- a/hotspot/make/lib/CompileDtracePostJvm.gmk
+++ b/hotspot/make/lib/CompileDtracePostJvm.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@ ifeq ($(call check-jvm-feature, dtrace), true)
CXX := $(BUILD_CXX), \
LDEXE := $(BUILD_CXX), \
generateJvmOffsets.cpp_CXXFLAGS := $(JVM_CFLAGS) -mt -xnolib -norunpath, \
- generateJvmOffsetsMain.c_CFLAGS := -library=%none -mt -m64 -norunpath -z nodefs, \
+ generateJvmOffsetsMain.c_CFLAGS := -mt -m64 -norunpath -z nodefs, \
LDFLAGS := -m64, \
LIBS := -lc, \
OBJECT_DIR := $(JVM_VARIANT_OUTPUTDIR)/tools/dtrace-gen-offsets/objs, \
diff --git a/hotspot/make/lib/CompileGtest.gmk b/hotspot/make/lib/CompileGtest.gmk
index 29209432f41..10e3f14de0d 100644
--- a/hotspot/make/lib/CompileGtest.gmk
+++ b/hotspot/make/lib/CompileGtest.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,8 @@ else
$(call create-mapfile)
endif
-# Disabling switch warning for clang because of test source.
+# Disabling undef, switch, format-nonliteral and tautological-undefined-compare
+# warnings for clang because of test source.
# Note: On AIX, the gtest test classes linked into the libjvm.so push the TOC
# size beyond 64k, so we need to link with bigtoc. However, this means that
diff --git a/hotspot/make/lib/CompileJvm.gmk b/hotspot/make/lib/CompileJvm.gmk
index 4eb2cbc1013..d5ecb2555f3 100644
--- a/hotspot/make/lib/CompileJvm.gmk
+++ b/hotspot/make/lib/CompileJvm.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -69,6 +69,7 @@ JVM_CFLAGS_TARGET_DEFINES += \
-DTARGET_ARCH_$(HOTSPOT_TARGET_CPU_ARCH) \
-DINCLUDE_SUFFIX_OS=_$(HOTSPOT_TARGET_OS) \
-DINCLUDE_SUFFIX_CPU=_$(HOTSPOT_TARGET_CPU_ARCH) \
+ -DINCLUDE_SUFFIX_COMPILER=_$(HOTSPOT_TOOLCHAIN_TYPE) \
-DTARGET_COMPILER_$(HOTSPOT_TOOLCHAIN_TYPE) \
-D$(HOTSPOT_TARGET_CPU_DEFINE) \
-DHOTSPOT_LIB_ARCH='"$(OPENJDK_TARGET_CPU_LEGACY_LIB)"' \
@@ -217,9 +218,7 @@ $(eval $(call SetupNativeCompilation, BUILD_LIBJVM, \
CFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
CXXFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
- DISABLED_WARNINGS_clang := delete-non-virtual-dtor dynamic-class-memaccess \
- empty-body format logical-op-parentheses parentheses \
- parentheses-equality switch tautological-compare, \
+ DISABLED_WARNINGS_clang := tautological-compare, \
DISABLED_WARNINGS_xlc := 1540-0216 1540-0198 1540-1090 1540-1639 \
1540-1088 1500-010, \
ASFLAGS := $(JVM_ASFLAGS), \
diff --git a/hotspot/make/lib/Lib-jdk.aot.gmk b/hotspot/make/lib/Lib-jdk.aot.gmk
deleted file mode 100644
index d799fa45286..00000000000
--- a/hotspot/make/lib/Lib-jdk.aot.gmk
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-include $(SPEC)
-include NativeCompilation.gmk
-
-$(eval $(call IncludeCustomExtension, hotspot, lib/Lib-jdk.aot.gmk))
-
-##############################################################################
-# Build libjelfshim only when AOT is enabled.
-ifeq ($(ENABLE_AOT), true)
- JELFSHIM_NAME := jelfshim
-
- $(eval $(call SetupNativeCompilation, BUILD_LIBJELFSHIM, \
- TOOLCHAIN := TOOLCHAIN_DEFAULT, \
- OPTIMIZATION := LOW, \
- LIBRARY := $(JELFSHIM_NAME), \
- OUTPUT_DIR := $(call FindLibDirForModule, $(MODULE)), \
- SRC := $(HOTSPOT_TOPDIR)/src/jdk.aot/unix/native/libjelfshim, \
- CFLAGS := $(CFLAGS_JDKLIB) $(ELF_CFLAGS) \
- -DAOT_VERSION_STRING='"$(VERSION_STRING)"' \
- -I$(SUPPORT_OUTPUTDIR)/headers/$(MODULE), \
- LDFLAGS := $(LDFLAGS_JDKLIB), \
- OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/lib$(JELFSHIM_NAME), \
- LIBS := $(ELF_LIBS) $(LIBS_JDKLIB), \
- ))
-
- TARGETS += $(BUILD_LIBJELFSHIM)
-endif
-
-##############################################################################
diff --git a/hotspot/make/test/JtregNative.gmk b/hotspot/make/test/JtregNative.gmk
index 19faa10ad3d..3f1d3fd46be 100644
--- a/hotspot/make/test/JtregNative.gmk
+++ b/hotspot/make/test/JtregNative.gmk
@@ -35,12 +35,17 @@ include $(SPEC)
include MakeBase.gmk
include TestFilesCompilation.gmk
+$(eval $(call IncludeCustomExtension, hotspot, test/JtregNative.gmk))
+
################################################################################
# Targets for building the native tests themselves.
################################################################################
# Add more directories here when needed.
-BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
+BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
+ $(HOTSPOT_TOPDIR)/test/gc/g1/TestJNIWeakG1 \
+ $(HOTSPOT_TOPDIR)/test/gc/stress/gclocker \
+ $(HOTSPOT_TOPDIR)/test/gc/cslocker \
$(HOTSPOT_TOPDIR)/test/native_sanity \
$(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
$(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
@@ -53,8 +58,10 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
$(HOTSPOT_TOPDIR)/test/runtime/modules/getModuleJNI \
$(HOTSPOT_TOPDIR)/test/runtime/SameObject \
$(HOTSPOT_TOPDIR)/test/runtime/BoolReturn \
+ $(HOTSPOT_TOPDIR)/test/runtime/noClassDefFoundMsg \
$(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
$(HOTSPOT_TOPDIR)/test/compiler/calls \
+ $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetOwnedMonitorInfo \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/IsModifiableModule \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/AddModuleReads \
@@ -66,6 +73,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ClassFileLoadHook \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ClassLoadPrepare \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ThreadStart \
+ $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/StartPhase/AllowedFunctions \
#
# Add conditional directories here when needed.
@@ -85,6 +93,7 @@ endif
ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_liboverflow := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libSimpleClassFileLoadHook := -lc
+ BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetOwnedMonitorInfoTest := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetNamedModuleTest := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libIsModifiableModuleTest := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libAddModuleReadsTest := -lc
@@ -93,6 +102,7 @@ ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassFileLoadHook := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassLoadPrepare := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAThreadStart := -lc
+ BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
endif
ifeq ($(OPENJDK_TARGET_OS), linux)
diff --git a/hotspot/src/cpu/aarch64/vm/aarch64.ad b/hotspot/src/cpu/aarch64/vm/aarch64.ad
index cd2174a67c3..a4a8be37022 100644
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@@ -3564,7 +3564,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
}
// Vector ideal reg.
-const int Matcher::vector_ideal_reg(int len) {
+const uint Matcher::vector_ideal_reg(int len) {
switch(len) {
case 8: return Op_VecD;
case 16: return Op_VecX;
@@ -3573,7 +3573,7 @@ const int Matcher::vector_ideal_reg(int len) {
return 0;
}
-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
return Op_VecX;
}
@@ -5218,7 +5218,7 @@ frame %{
// ppc port uses 0 but we definitely need to allow for fixed_slots
// which folds in the space used for monitors
return_addr(STACK - 2 +
- round_to((Compile::current()->in_preserve_stack_slots() +
+ align_up((Compile::current()->in_preserve_stack_slots() +
Compile::current()->fixed_slots()),
stack_alignment_in_slots()));
@@ -5343,6 +5343,17 @@ operand immI_M1()
interface(CONST_INTER);
%}
+// Shift values for add/sub extension shift
+operand immIExt()
+%{
+ predicate(0 <= n->get_int() && (n->get_int() <= 4));
+ match(ConI);
+
+ op_cost(0);
+ format %{ %}
+ interface(CONST_INTER);
+%}
+
operand immI_le_4()
%{
predicate(n->get_int() <= 4);
@@ -5423,6 +5434,16 @@ operand immI_56()
interface(CONST_INTER);
%}
+operand immI_63()
+%{
+ predicate(n->get_int() == 63);
+ match(ConI);
+
+ op_cost(0);
+ format %{ %}
+ interface(CONST_INTER);
+%}
+
operand immI_64()
%{
predicate(n->get_int() == 64);
@@ -5453,20 +5474,10 @@ operand immI_65535()
interface(CONST_INTER);
%}
-operand immL_63()
-%{
- predicate(n->get_int() == 63);
- match(ConI);
-
- op_cost(0);
- format %{ %}
- interface(CONST_INTER);
-%}
-
operand immL_255()
%{
- predicate(n->get_int() == 255);
- match(ConI);
+ predicate(n->get_long() == 255L);
+ match(ConL);
op_cost(0);
format %{ %}
@@ -10951,7 +10962,7 @@ instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
// Long Negation
-instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
+instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
match(Set dst (SubL zero src));
ins_cost(INSN_COST);
@@ -11146,7 +11157,7 @@ instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
ins_pipe(ldiv_reg_reg);
%}
-instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
+instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
match(Set dst (URShiftL (RShiftL src1 div1) div2));
ins_cost(INSN_COST);
format %{ "lsr $dst, $src1, $div1" %}
@@ -11156,7 +11167,7 @@ instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
ins_pipe(ialu_reg_shift);
%}
-instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
+instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
ins_cost(INSN_COST);
format %{ "add $dst, $src, $div1" %}
@@ -12789,7 +12800,7 @@ instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
match(Set dst (AddL src1 (ConvI2L src2)));
ins_cost(INSN_COST);
- format %{ "add $dst, $src1, sxtw $src2" %}
+ format %{ "add $dst, $src1, $src2, sxtw" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12802,7 +12813,7 @@ instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
match(Set dst (SubL src1 (ConvI2L src2)));
ins_cost(INSN_COST);
- format %{ "sub $dst, $src1, sxtw $src2" %}
+ format %{ "sub $dst, $src1, $src2, sxtw" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12816,7 +12827,7 @@ instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 l
%{
match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
- format %{ "add $dst, $src1, sxth $src2" %}
+ format %{ "add $dst, $src1, $src2, sxth" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12829,7 +12840,7 @@ instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 l
%{
match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
- format %{ "add $dst, $src1, sxtb $src2" %}
+ format %{ "add $dst, $src1, $src2, sxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12842,7 +12853,7 @@ instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 l
%{
match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
- format %{ "add $dst, $src1, uxtb $src2" %}
+ format %{ "add $dst, $src1, $src2, uxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12855,7 +12866,7 @@ instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, imm
%{
match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
- format %{ "add $dst, $src1, sxth $src2" %}
+ format %{ "add $dst, $src1, $src2, sxth" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12868,7 +12879,7 @@ instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, imm
%{
match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
- format %{ "add $dst, $src1, sxtw $src2" %}
+ format %{ "add $dst, $src1, $src2, sxtw" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12881,7 +12892,7 @@ instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, imm
%{
match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
- format %{ "add $dst, $src1, sxtb $src2" %}
+ format %{ "add $dst, $src1, $src2, sxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12894,7 +12905,7 @@ instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, imm
%{
match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
- format %{ "add $dst, $src1, uxtb $src2" %}
+ format %{ "add $dst, $src1, $src2, uxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -13034,6 +13045,294 @@ instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295
ins_pipe(ialu_reg_reg);
%}
+
+instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
+%{
+ match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "add $dst, $src1, $src2, sxtb #lshift2" %}
+
+ ins_encode %{
+ __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
+%{
+ match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "add $dst, $src1, $src2, sxth #lshift2" %}
+
+ ins_encode %{
+ __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
+%{
+ match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "add $dst, $src1, $src2, sxtw #lshift2" %}
+
+ ins_encode %{
+ __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
+%{
+ match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "sub $dst, $src1, $src2, sxtb #lshift2" %}
+
+ ins_encode %{
+ __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
+%{
+ match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "sub $dst, $src1, $src2, sxth #lshift2" %}
+
+ ins_encode %{
+ __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
+%{
+ match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "sub $dst, $src1, $src2, sxtw #lshift2" %}
+
+ ins_encode %{
+ __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
+%{
+ match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "addw $dst, $src1, $src2, sxtb #lshift2" %}
+
+ ins_encode %{
+ __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
+%{
+ match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "addw $dst, $src1, $src2, sxth #lshift2" %}
+
+ ins_encode %{
+ __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
+%{
+ match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "subw $dst, $src1, $src2, sxtb #lshift2" %}
+
+ ins_encode %{
+ __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
+%{
+ match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "subw $dst, $src1, $src2, sxth #lshift2" %}
+
+ ins_encode %{
+ __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+
+instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "add $dst, $src1, $src2, sxtw #lshift" %}
+
+ ins_encode %{
+ __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%};
+
+instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "sub $dst, $src1, $src2, sxtw #lshift" %}
+
+ ins_encode %{
+ __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%};
+
+
+instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "add $dst, $src1, $src2, uxtb #lshift" %}
+
+ ins_encode %{
+ __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "add $dst, $src1, $src2, uxth #lshift" %}
+
+ ins_encode %{
+ __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "add $dst, $src1, $src2, uxtw #lshift" %}
+
+ ins_encode %{
+ __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "sub $dst, $src1, $src2, uxtb #lshift" %}
+
+ ins_encode %{
+ __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "sub $dst, $src1, $src2, uxth #lshift" %}
+
+ ins_encode %{
+ __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "sub $dst, $src1, $src2, uxtw #lshift" %}
+
+ ins_encode %{
+ __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "addw $dst, $src1, $src2, uxtb #lshift" %}
+
+ ins_encode %{
+ __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "addw $dst, $src1, $src2, uxth #lshift" %}
+
+ ins_encode %{
+ __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "subw $dst, $src1, $src2, uxtb #lshift" %}
+
+ ins_encode %{
+ __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "subw $dst, $src1, $src2, uxth #lshift" %}
+
+ ins_encode %{
+ __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}
// END This section of the file is automatically generated. Do not edit --------------
// ============================================================================
@@ -15443,9 +15742,9 @@ instruct ShouldNotReachHere() %{
format %{ "ShouldNotReachHere" %}
ins_encode %{
- // TODO
- // implement proper trap call here
- __ brk(999);
+ // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
+ // return true
+    __ dcps1(0xdead + 1);
%}
ins_pipe(pipe_class_default);
@@ -15803,6 +16102,16 @@ instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
ins_pipe(pipe_class_memory);
%}
+instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
+%{
+ match(Set result (HasNegatives ary1 len));
+ effect(USE_KILL ary1, USE_KILL len, KILL cr);
+ format %{ "has negatives byte[] $ary1,$len -> $result" %}
+ ins_encode %{
+ __ has_negatives($ary1$$Register, $len$$Register, $result$$Register);
+ %}
+ ins_pipe( pipe_slow );
+%}
// fast char[] to byte[] compression
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
@@ -16833,6 +17142,48 @@ instruct vmla4I(vecX dst, vecX src1, vecX src2)
ins_pipe(vmla128);
%}
+// dst + src1 * src2
+instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
+ predicate(UseFMA && n->as_Vector()->length() == 2);
+ match(Set dst (FmaVF dst (Binary src1 src2)));
+ format %{ "fmla $dst,$src1,$src2\t# vector (2S)" %}
+ ins_cost(INSN_COST);
+ ins_encode %{
+ __ fmla(as_FloatRegister($dst$$reg), __ T2S,
+ as_FloatRegister($src1$$reg),
+ as_FloatRegister($src2$$reg));
+ %}
+ ins_pipe(vmuldiv_fp64);
+%}
+
+// dst + src1 * src2
+instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
+ predicate(UseFMA && n->as_Vector()->length() == 4);
+ match(Set dst (FmaVF dst (Binary src1 src2)));
+ format %{ "fmla $dst,$src1,$src2\t# vector (4S)" %}
+ ins_cost(INSN_COST);
+ ins_encode %{
+ __ fmla(as_FloatRegister($dst$$reg), __ T4S,
+ as_FloatRegister($src1$$reg),
+ as_FloatRegister($src2$$reg));
+ %}
+ ins_pipe(vmuldiv_fp128);
+%}
+
+// dst + src1 * src2
+instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
+ predicate(UseFMA && n->as_Vector()->length() == 2);
+ match(Set dst (FmaVD dst (Binary src1 src2)));
+ format %{ "fmla $dst,$src1,$src2\t# vector (2D)" %}
+ ins_cost(INSN_COST);
+ ins_encode %{
+ __ fmla(as_FloatRegister($dst$$reg), __ T2D,
+ as_FloatRegister($src1$$reg),
+ as_FloatRegister($src2$$reg));
+ %}
+ ins_pipe(vmuldiv_fp128);
+%}
+
// --------------------------------- MLS --------------------------------------
instruct vmls4S(vecD dst, vecD src1, vecD src2)
@@ -16892,6 +17243,51 @@ instruct vmls4I(vecX dst, vecX src1, vecX src2)
ins_pipe(vmla128);
%}
+// dst - src1 * src2
+instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
+ predicate(UseFMA && n->as_Vector()->length() == 2);
+ match(Set dst (FmaVF dst (Binary (NegVF src1) src2)));
+ match(Set dst (FmaVF dst (Binary src1 (NegVF src2))));
+ format %{ "fmls $dst,$src1,$src2\t# vector (2S)" %}
+ ins_cost(INSN_COST);
+ ins_encode %{
+ __ fmls(as_FloatRegister($dst$$reg), __ T2S,
+ as_FloatRegister($src1$$reg),
+ as_FloatRegister($src2$$reg));
+ %}
+ ins_pipe(vmuldiv_fp64);
+%}
+
+// dst - src1 * src2
+instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
+ predicate(UseFMA && n->as_Vector()->length() == 4);
+ match(Set dst (FmaVF dst (Binary (NegVF src1) src2)));
+ match(Set dst (FmaVF dst (Binary src1 (NegVF src2))));
+ format %{ "fmls $dst,$src1,$src2\t# vector (4S)" %}
+ ins_cost(INSN_COST);
+ ins_encode %{
+ __ fmls(as_FloatRegister($dst$$reg), __ T4S,
+ as_FloatRegister($src1$$reg),
+ as_FloatRegister($src2$$reg));
+ %}
+ ins_pipe(vmuldiv_fp128);
+%}
+
+// dst - src1 * src2
+instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
+ predicate(UseFMA && n->as_Vector()->length() == 2);
+ match(Set dst (FmaVD dst (Binary (NegVD src1) src2)));
+ match(Set dst (FmaVD dst (Binary src1 (NegVD src2))));
+ format %{ "fmls $dst,$src1,$src2\t# vector (2D)" %}
+ ins_cost(INSN_COST);
+ ins_encode %{
+ __ fmls(as_FloatRegister($dst$$reg), __ T2D,
+ as_FloatRegister($src1$$reg),
+ as_FloatRegister($src2$$reg));
+ %}
+ ins_pipe(vmuldiv_fp128);
+%}
+
// --------------------------------- DIV --------------------------------------
instruct vdiv2F(vecD dst, vecD src1, vecD src2)
diff --git a/hotspot/src/cpu/aarch64/vm/aarch64_ad.m4 b/hotspot/src/cpu/aarch64/vm/aarch64_ad.m4
index cc2e748dd66..b2a4d379eb9 100644
--- a/hotspot/src/cpu/aarch64/vm/aarch64_ad.m4
+++ b/hotspot/src/cpu/aarch64/vm/aarch64_ad.m4
@@ -268,21 +268,21 @@ instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
ins_pipe(ialu_reg_reg_vshift);
%}')dnl
define(ROL_INSN, `
-instruct $3$1_rReg_Var_C$2(iRegLNoSp dst, iRegL src, iRegI shift, immI$2 c$2, rFlagsReg cr)
+instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));
expand %{
- $3L_rReg(dst, src, shift, cr);
+ $3$1_rReg(dst, src, shift, cr);
%}
%}')dnl
define(ROR_INSN, `
-instruct $3$1_rReg_Var_C$2(iRegLNoSp dst, iRegL src, iRegI shift, immI$2 c$2, rFlagsReg cr)
+instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));
expand %{
- $3L_rReg(dst, src, shift, cr);
+ $3$1_rReg(dst, src, shift, cr);
%}
%}')dnl
ROL_EXPAND(L, rol, rorv)
@@ -305,7 +305,7 @@ instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2,
%{
match(Set dst ($3$2 src1 (ConvI2L src2)));
ins_cost(INSN_COST);
- format %{ "$4 $dst, $src1, $5 $src2" %}
+ format %{ "$4 $dst, $src1, $src2, $5" %}
ins_encode %{
__ $4(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -321,7 +321,7 @@ instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) sr
%{
match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
ins_cost(INSN_COST);
- format %{ "$5 $dst, $src1, $6 $src2" %}
+ format %{ "$5 $dst, $src1, $src2, $6" %}
ins_encode %{
__ $5(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -363,5 +363,82 @@ ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
+dnl
+dnl ADD_SUB_EXTENDED_SHIFT(mode, size, add node, shift node, insn, ext type, word size)
+define(`ADD_SUB_EXTENDED_SHIFT', `
+instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
+%{
+ match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "$5 $dst, $src1, $src2, $6 #lshift2" %}
+ ins_encode %{
+ __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}')
+dnl $1 $2 $3 $4 $5 $6 $7
+ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
+ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
+ADD_SUB_EXTENDED_SHIFT(L,32,Add,RShift,add,sxtw,64)
+dnl
+ADD_SUB_EXTENDED_SHIFT(L,8,Sub,RShift,sub,sxtb,64)
+ADD_SUB_EXTENDED_SHIFT(L,16,Sub,RShift,sub,sxth,64)
+ADD_SUB_EXTENDED_SHIFT(L,32,Sub,RShift,sub,sxtw,64)
+dnl
+ADD_SUB_EXTENDED_SHIFT(I,8,Add,RShift,addw,sxtb,32)
+ADD_SUB_EXTENDED_SHIFT(I,16,Add,RShift,addw,sxth,32)
+dnl
+ADD_SUB_EXTENDED_SHIFT(I,8,Sub,RShift,subw,sxtb,32)
+ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
+dnl
+dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
+define(`ADD_SUB_CONV_SHIFT', `
+instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "$3 $dst, $src1, $src2, $4 #lshift" %}
+
+ ins_encode %{
+ __ $3(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::$4, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}')
+dnl
+ADD_SUB_CONV_SHIFT(L,Add,add,sxtw);
+ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw);
+dnl
+dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, size, add node, insn, ext type)
+define(`ADD_SUB_ZERO_EXTEND_SHIFT', `
+instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
+%{
+ match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "$4 $dst, $src1, $src2, $5 #lshift" %}
+
+ ins_encode %{
+ __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
+ as_Register($src2$$reg), ext::$5, ($lshift$$constant));
+ %}
+ ins_pipe(ialu_reg_reg_shift);
+%}')
+dnl
+dnl $1 $2 $3 $4 $5
+ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
+ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
+ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Add,add,uxtw)
+dnl
+ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Sub,sub,uxtb)
+ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Sub,sub,uxth)
+ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Sub,sub,uxtw)
+dnl
+ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Add,addw,uxtb)
+ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Add,addw,uxth)
+dnl
+ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
+ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
+dnl
// END This section of the file is automatically generated. Do not edit --------------
diff --git a/hotspot/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp
index 49a266e5480..e73f1521e32 100644
--- a/hotspot/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -28,6 +28,7 @@
#include "oops/constMethod.hpp"
#include "oops/method.hpp"
#include "runtime/frame.inline.hpp"
+#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
@@ -53,27 +54,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
- switch (method_kind(m)) {
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : // fall thru
- case Interpreter::java_lang_math_pow : // fall thru
- case Interpreter::java_lang_math_exp : // fall thru
- case Interpreter::java_lang_math_fmaD : // fall thru
- case Interpreter::java_lang_math_fmaF :
- return false;
- default:
- return true;
- }
-}
-
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
const int entry_size = frame::interpreter_frame_monitor_size();
@@ -121,7 +101,7 @@ int AbstractInterpreter::size_activation(int max_stack,
// On AArch64 we always keep the stack pointer 16-aligned, so we
// must round up here.
- size = round_to(size, 2);
+ size = align_up(size, 2);
return size;
}
diff --git a/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp
index 2f97a3e11d3..6dc260f5a54 100644
--- a/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp
@@ -2201,6 +2201,8 @@ public:
INSN(fdiv, 1, 0, 0b111111);
INSN(fmul, 1, 0, 0b110111);
INSN(fsub, 0, 1, 0b110101);
+ INSN(fmla, 0, 0, 0b110011);
+ INSN(fmls, 0, 1, 0b110011);
#undef INSN
diff --git a/hotspot/src/cpu/aarch64/vm/bytes_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/bytes_aarch64.hpp
index fc7890e945b..701d3af7fc0 100644
--- a/hotspot/src/cpu/aarch64/vm/bytes_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/bytes_aarch64.hpp
@@ -30,12 +30,6 @@
class Bytes: AllStatic {
public:
- // Returns true if the byte ordering used by Java is different from the native byte ordering
- // of the underlying machine. For example, this is true for Intel x86, but false for Solaris
- // on Sparc.
- static inline bool is_Java_byte_ordering_different(){ return true; }
-
-
// Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
// (no special code is needed since x86 CPUs can access unaligned data)
static inline u2 get_native_u2(address p) { return *(u2*)p; }
diff --git a/hotspot/src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.cpp
index 2db73cd1523..3e213003947 100644
--- a/hotspot/src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -23,12 +23,6 @@
*
*/
-#include "precompiled.hpp"
-#include "c1/c1_FpuStackSim.hpp"
-#include "c1/c1_FrameMap.hpp"
-#include "utilities/array.hpp"
-#include "utilities/ostream.hpp"
-
//--------------------------------------------------------
// FpuStackSim
//--------------------------------------------------------
diff --git a/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
index 0016aa9ba6a..e6d9de99685 100644
--- a/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
@@ -2740,8 +2740,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
// set already but no need to check.
__ cbz(rscratch1, next);
- __ andr(rscratch1, tmp, TypeEntries::type_unknown);
- __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
+ __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
if (TypeEntries::is_type_none(current_klass)) {
__ cbz(rscratch2, none);
@@ -2761,8 +2760,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
__ ldr(tmp, mdo_addr);
- __ andr(rscratch1, tmp, TypeEntries::type_unknown);
- __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
+ __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
}
// different than before. Cannot keep accurate profile.
@@ -2812,8 +2810,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
__ ldr(tmp, mdo_addr);
- __ andr(rscratch1, tmp, TypeEntries::type_unknown);
- __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
+ __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
__ orr(tmp, tmp, TypeEntries::type_unknown);
__ str(tmp, mdo_addr);
diff --git a/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp
index fe48df281b1..ab53d646d80 100644
--- a/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -784,6 +784,8 @@ extern "C" void pm(unsigned long fp, unsigned long bcx) {
frame::frame(void* sp, void* fp, void* pc) {
init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
+
+void frame::pd_ps() {}
#endif
void JavaFrameAnchor::make_walkable(JavaThread* thread) {
diff --git a/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp
index 06ec817cd9b..ba6e3eb637f 100644
--- a/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp
@@ -154,8 +154,11 @@ define_pd_global(intx, InlineSmallCode, 1000);
product(intx, BlockZeroingLowLimit, 256, \
"Minimum size in bytes when block zeroing will be used") \
range(1, max_jint) \
- product(bool, TraceTraps, false, "Trace all traps the signal handler")
-
+ product(bool, TraceTraps, false, "Trace all traps the signal handler")\
+ product(int, SoftwarePrefetchHintDistance, -1, \
+ "Use prfm hint with specified distance in compiled code." \
+ "Value -1 means off.") \
+ range(-1, 32760)
#endif
diff --git a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
index 81636fb8ea1..480a6435f3d 100644
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -270,7 +270,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
get_constant_pool(result);
// load pointer for resolved_references[] objArray
- ldr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
+ ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
+ ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
// JNIHandles::resolve(obj);
ldr(result, Address(result, 0));
// Add in the index
@@ -278,6 +279,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
+void InterpreterMacroAssembler::load_resolved_klass_at_offset(
+ Register cpool, Register index, Register klass, Register temp) {
+ add(temp, cpool, index, LSL, LogBytesPerWord);
+ ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
+ ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
+ add(klass, klass, temp, LSL, LogBytesPerWord);
+  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
+}
+
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
@@ -682,7 +692,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
}
// Load (object->mark() | 1) into swap_reg
- ldr(rscratch1, Address(obj_reg, 0));
+ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
orr(swap_reg, rscratch1, 1);
// Save (object->mark() | 1) into BasicLock's displaced header
@@ -694,14 +704,14 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
Label fail;
if (PrintBiasedLockingStatistics) {
Label fast;
- cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
+ cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
bind(fast);
atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
rscratch2, rscratch1, tmp);
b(done);
bind(fail);
} else {
- cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
+ cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
}
// Test if the oopMark is an obvious stack pointer, i.e.,
@@ -791,7 +801,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
cbz(header_reg, done);
// Atomic swap back the old header
- cmpxchgptr(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
+ cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
// Call the runtime routine for slow case.
str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
@@ -1744,8 +1754,7 @@ void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register t
// Load the offset of the area within the MDO used for
// parameters. If it's negative we're not profiling any parameters
ldr(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
- cmp(tmp1, 0u);
- br(Assembler::LT, profile_continue);
+ tbnz(tmp1, 63, profile_continue); // i.e. sign bit set
// Compute a pointer to the area for parameters from the offset
// and move the pointer to the slot for the last
diff --git a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
index 925690b0d80..637ae481f5b 100644
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -54,9 +54,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
int number_of_arguments,
bool check_exceptions);
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
-
// base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true);
@@ -67,6 +64,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void jump_to_entry(address entry);
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
+
// Interpreter-specific registers
void save_bcp() {
str(rbcp, Address(rfp, frame::interpreter_frame_bcp_offset * wordSize));
@@ -123,6 +123,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
+ // load cpool->resolved_klass_at(index);
+ void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp);
+
void pop_ptr(Register r = r0);
void pop_i(Register r = r0);
void pop_l(Register r = r0);
diff --git a/hotspot/src/cpu/aarch64/vm/interpreterRT_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/interpreterRT_aarch64.cpp
index 3914f4e4020..f174b67d146 100644
--- a/hotspot/src/cpu/aarch64/vm/interpreterRT_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/interpreterRT_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -369,7 +369,7 @@ class SlowSignatureHandler
}
public:
- SlowSignatureHandler(methodHandle method, address from, intptr_t* to)
+ SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to)
: NativeSignatureIterator(method)
{
_from = from;
diff --git a/hotspot/src/cpu/aarch64/vm/interpreterRT_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/interpreterRT_aarch64.hpp
index 10c92c3b2ed..e0b1ecbba17 100644
--- a/hotspot/src/cpu/aarch64/vm/interpreterRT_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/interpreterRT_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -47,7 +47,7 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
public:
// Creation
- SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
_num_int_args = (method->is_static() ? 1 : 0);
_num_fp_args = 0;
diff --git a/hotspot/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp
index a09c5230dc4..b0f9acee633 100644
--- a/hotspot/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp
@@ -76,8 +76,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
SafepointSynchronize::safepoint_counter_addr(), offset);
Address safepoint_counter_addr(rcounter_addr, offset);
__ ldrw(rcounter, safepoint_counter_addr);
- __ andw(rscratch1, rcounter, 1);
- __ cbnzw(rscratch1, slow);
+ __ tbnz(rcounter, 0, slow);
__ eor(robj, c_rarg1, rcounter);
__ eor(robj, robj, rcounter); // obj, since
// robj ^ rcounter ^ rcounter == robj
diff --git a/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp
index f9cc50fe4d0..bebd5ae8ca9 100644
--- a/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
}
}
#endif // ASSERT
- Handle obj = HotSpotObjectConstantImpl::object(constant);
+ Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj());
MacroAssembler::patch_oop(pc, (address)obj());
int oop_index = _oop_recorder->find_index(value);
diff --git a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
index 303c2f5eb8e..d5deb80ed9a 100644
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -38,6 +38,7 @@
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/node.hpp"
+#include "prims/jvm.h"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
@@ -515,7 +516,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
andr(swap_reg, swap_reg, rscratch1);
orr(tmp_reg, swap_reg, rthread);
- cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
+ cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
@@ -542,7 +543,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
Label here;
load_prototype_header(tmp_reg, obj_reg);
orr(tmp_reg, rthread, tmp_reg);
- cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
+ cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
// If the biasing toward our thread failed, then another thread
// succeeded in biasing it toward itself and we need to revoke that
// bias. The revocation will occur in the runtime in the slow case.
@@ -569,7 +570,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
{
Label here, nope;
load_prototype_header(tmp_reg, obj_reg);
- cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
+ cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
bind(here);
// Fall through to the normal CAS-based lock, because no matter what
@@ -2011,6 +2012,12 @@ void MacroAssembler::stop(const char* msg) {
hlt(0);
}
+void MacroAssembler::unimplemented(const char* what) {
+ char* b = new char[1024];
+ jio_snprintf(b, 1024, "unimplemented: %s", what);
+ stop(b);
+}
+
// If a constant does not fit in an immediate field, generate some
// number of MOV instructions and then perform the operation.
void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
@@ -2141,6 +2148,12 @@ void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Reg
b(*fail);
}
+void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
+ Label &succeed, Label *fail) {
+ assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
+ cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
+}
+
void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
Label &succeed, Label *fail) {
// oldv holds comparison value
@@ -4816,6 +4829,62 @@ void MacroAssembler::string_compare(Register str1, Register str2,
BLOCK_COMMENT("} string_compare");
}
+// This method checks if provided byte array contains byte with highest bit set.
+void MacroAssembler::has_negatives(Register ary1, Register len, Register result) {
+ // Simple and most common case of aligned small array which is not at the
+ // end of memory page is placed here. All other cases are in stub.
+ Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
+ const uint64_t UPPER_BIT_MASK=0x8080808080808080;
+ assert_different_registers(ary1, len, result);
+
+ cmpw(len, 0);
+ br(LE, SET_RESULT);
+ cmpw(len, 4 * wordSize);
+ br(GE, STUB_LONG); // size > 32 then go to stub
+
+ int shift = 64 - exact_log2(os::vm_page_size());
+ lsl(rscratch1, ary1, shift);
+ mov(rscratch2, (size_t)(4 * wordSize) << shift);
+ adds(rscratch2, rscratch1, rscratch2); // At end of page?
+ br(CS, STUB); // at the end of page then go to stub
+ subs(len, len, wordSize);
+ br(LT, END);
+
+ BIND(LOOP);
+ ldr(rscratch1, Address(post(ary1, wordSize)));
+ tst(rscratch1, UPPER_BIT_MASK);
+ br(NE, SET_RESULT);
+ subs(len, len, wordSize);
+ br(GE, LOOP);
+ cmpw(len, -wordSize);
+ br(EQ, SET_RESULT);
+
+ BIND(END);
+ ldr(result, Address(ary1));
+ sub(len, zr, len, LSL, 3); // LSL 3 is to get bits from bytes
+ lslv(result, result, len);
+ tst(result, UPPER_BIT_MASK);
+ b(SET_RESULT);
+
+ BIND(STUB);
+ RuntimeAddress has_neg = RuntimeAddress(StubRoutines::aarch64::has_negatives());
+ assert(has_neg.target() != NULL, "has_negatives stub has not been generated");
+ trampoline_call(has_neg);
+ b(DONE);
+
+ BIND(STUB_LONG);
+ RuntimeAddress has_neg_long = RuntimeAddress(
+ StubRoutines::aarch64::has_negatives_long());
+ assert(has_neg_long.target() != NULL, "has_negatives_long stub has not been generated");
+ trampoline_call(has_neg_long);
+ b(DONE);
+
+ BIND(SET_RESULT);
+ cset(result, NE); // set true or false
+
+ BIND(DONE);
+}
+
// Compare Strings or char/byte arrays.
// is_string is true iff this is a string comparison.
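For readers following the new has_negatives intrinsic above: it reports whether any byte in the array has its high bit set (is negative as a jbyte), reading eight bytes per iteration and testing every lane against a 0x80 mask; small arrays that do not run up against a page boundary are handled inline, everything else is sent to the stubs registered later in stubGenerator_aarch64.cpp. A plain C++ sketch of the computed result, illustrative only and not the HotSpot code:

  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  // Scalar reference for the assumed semantics of MacroAssembler::has_negatives.
  static bool has_negatives_sketch(const uint8_t* ary, size_t len) {
    const uint64_t UPPER_BIT_MASK = 0x8080808080808080ULL;
    size_t i = 0;
    for (; i + 8 <= len; i += 8) {          // 8-byte strides, like the ldr/tst loop
      uint64_t w;
      std::memcpy(&w, ary + i, sizeof w);
      if (w & UPPER_BIT_MASK) return true;
    }
    for (; i < len; i++) {                  // byte tail, done with a shifted load in the intrinsic
      if (ary[i] & 0x80) return true;
    }
    return false;
  }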
diff --git a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
index 59171e5dd19..158e83c3cdb 100644
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -77,12 +77,6 @@ class MacroAssembler: public Assembler {
bool check_exceptions // whether to check for pending exceptions after return
);
- // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
- // The implementation is only non-empty for the InterpreterMacroAssembler,
- // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
-
void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
// Maximum size of class area in Metaspace when compressed
@@ -97,6 +91,12 @@ class MacroAssembler: public Assembler {
> (1u << log2_intptr(CompressedClassSpaceSize))));
}
+ // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
+ // The implementation is only non-empty for the InterpreterMacroAssembler,
+ // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
+
// Biased locking support
// lock_reg and obj_reg must be loaded up with the appropriate values.
// swap_reg is killed.
@@ -169,6 +169,7 @@ class MacroAssembler: public Assembler {
template<class T>
inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
+ // imm is limited to 12 bits.
inline void cmp(Register Rd, unsigned imm) { subs(zr, Rd, imm); }
inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
@@ -941,7 +942,7 @@ public:
void untested() { stop("untested"); }
- void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
+ void unimplemented(const char* what = "");
void should_not_reach_here() { stop("should not reach here"); }
@@ -949,8 +950,8 @@ public:
void bang_stack_with_offset(int offset) {
// stack grows down, caller passes positive offset
assert(offset > 0, "must bang with negative offset");
- mov(rscratch2, -offset);
- str(zr, Address(sp, rscratch2));
+ sub(rscratch2, sp, offset);
+ str(zr, Address(rscratch2));
}
// Writes to stack successive pages until offset reached to check for
@@ -974,6 +975,8 @@ public:
// Various forms of CAS
+ void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
+ Label &suceed, Label *fail);
void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
Label &suceed, Label *fail);
@@ -1207,6 +1210,8 @@ public:
Register tmp1,
FloatRegister vtmp, FloatRegister vtmpZ, int ae);
+ void has_negatives(Register ary1, Register len, Register result);
+
void arrays_equals(Register a1, Register a2,
Register result, Register cnt1,
int elem_size, bool is_string);
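The bang_stack_with_offset change above first materializes the banged address in rscratch2 and then stores through it; the effect is unchanged. As a sketch, assuming only that the stack grows down and offset is positive:

  #include <cstdint>

  // Illustrative only: both the old and the new sequence write a zero word at sp - offset.
  static inline void bang_stack_sketch(char* sp, int offset) {
    *reinterpret_cast<intptr_t*>(sp - offset) = 0;   // touch the page 'offset' bytes below sp
  }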
diff --git a/hotspot/src/cpu/aarch64/vm/metaspaceShared_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/metaspaceShared_aarch64.cpp
deleted file mode 100644
index 72af9a84f04..00000000000
--- a/hotspot/src/cpu/aarch64/vm/metaspaceShared_aarch64.cpp
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "memory/metaspaceShared.hpp"
-
-// Generate the self-patching vtable method:
-//
-// This method will be called (as any other Klass virtual method) with
-// the Klass itself as the first argument. Example:
-//
-// oop obj;
-// int size = obj->klass()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-
-//=====================================================================
-
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no relationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-
-#define __ masm->
-
-extern "C" {
- void aarch64_prolog(void);
-}
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
- void** vtable,
- char** md_top,
- char* md_end,
- char** mc_top,
- char* mc_end) {
-
-#ifdef BUILTIN_SIM
- // Write a dummy word to the writable shared metaspace.
- // MetaspaceShared::initialize_shared_spaces will fill it with the
- // address of aarch64_prolog().
- address *prolog_ptr = (address*)*md_top;
- *(intptr_t *)(*md_top) = (intptr_t)0;
- (*md_top) += sizeof(intptr_t);
-#endif
-
- intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
- *(intptr_t *)(*md_top) = vtable_bytes;
- *md_top += sizeof(intptr_t);
- void** dummy_vtable = (void**)*md_top;
- *vtable = dummy_vtable;
- *md_top += vtable_bytes;
-
- // Get ready to generate dummy methods.
-
- CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
- MacroAssembler* masm = new MacroAssembler(&cb);
-
- Label common_code;
- for (int i = 0; i < vtbl_list_size; ++i) {
- for (int j = 0; j < num_virtuals; ++j) {
- dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
-
- // We're called directly from C code.
-#ifdef BUILTIN_SIM
- __ c_stub_prolog(8, 0, MacroAssembler::ret_type_integral, prolog_ptr);
-#endif
- // Load rscratch1 with a value indicating vtable/offset pair.
- // -- bits[ 7..0] (8 bits) which virtual method in table?
- // -- bits[12..8] (5 bits) which virtual method table?
- __ mov(rscratch1, (i << 8) + j);
- __ b(common_code);
- }
- }
-
- __ bind(common_code);
-
- Register tmp0 = r10, tmp1 = r11; // AAPCS64 temporary registers
- __ enter();
- __ lsr(tmp0, rscratch1, 8); // isolate vtable identifier.
- __ mov(tmp1, (address)vtbl_list); // address of list of vtable pointers.
- __ ldr(tmp1, Address(tmp1, tmp0, Address::lsl(LogBytesPerWord))); // get correct vtable pointer.
- __ str(tmp1, Address(c_rarg0)); // update vtable pointer in obj.
- __ add(rscratch1, tmp1, rscratch1, ext::uxtb, LogBytesPerWord); // address of real method pointer.
- __ ldr(rscratch1, Address(rscratch1)); // get real method pointer.
- __ blrt(rscratch1, 8, 0, 1); // jump to the real method.
- __ leave();
- __ ret(lr);
-
- *mc_top = (char*)__ pc();
-}
-
-#ifdef BUILTIN_SIM
-void MetaspaceShared::relocate_vtbl_list(char **buffer) {
- void **sim_entry = (void**)*buffer;
- *sim_entry = (void*)aarch64_prolog;
- *buffer += sizeof(intptr_t);
-}
-#endif
diff --git a/hotspot/src/cpu/aarch64/vm/methodHandles_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/methodHandles_aarch64.cpp
index dedabb6729b..cc837ee83af 100644
--- a/hotspot/src/cpu/aarch64/vm/methodHandles_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/methodHandles_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -63,7 +63,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
- KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+ Klass* klass = SystemDictionary::well_known_klass(klass_id);
Register temp = rscratch2;
Register temp2 = rscratch1; // used by MacroAssembler::cmpptr
Label L_ok, L_bad;
@@ -137,8 +137,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
__ verify_oop(method_temp);
- // the following assumes that a Method* is normally compressed in the vmtarget field:
- __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
+ __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
+ __ verify_oop(method_temp);
+ __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
@@ -282,7 +283,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
- Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
+ Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
+ Address vmtarget_method( rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
@@ -335,14 +337,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
- __ ldr(rmethod, member_vmtarget);
+ __ load_heap_oop(rmethod, member_vmtarget);
+ __ ldr(rmethod, vmtarget_method);
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
- __ ldr(rmethod, member_vmtarget);
+ __ load_heap_oop(rmethod, member_vmtarget);
+ __ ldr(rmethod, vmtarget_method);
break;
case vmIntrinsics::_linkToVirtual:
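The methodHandles changes track the reworked MemberName layout: the Method* is no longer stored directly in MemberName.vmtarget but reached through a java.lang.invoke.ResolvedMethodName object, hence the extra load_heap_oop before the final ldr. A hedged sketch of the new chain; the struct shapes and names below are hypothetical stand-ins, the real offsets come from javaClasses:

  // Hypothetical shapes, for illustration only.
  struct ResolvedMethodName_sketch { void* vmtarget; };                    // holds the native Method*
  struct MemberName_sketch         { ResolvedMethodName_sketch* method; }; // holds the ResolvedMethodName oop

  static void* vmtarget_sketch(const MemberName_sketch* mn) {
    return mn->method->vmtarget;   // two dependent loads now, where one used to suffice
  }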
diff --git a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
index a286102e7b1..ac55da31ea9 100644
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
@@ -36,6 +36,7 @@
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@@ -123,7 +124,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
- int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
+ int frame_size_in_bytes = align_up(additional_frame_words*wordSize +
reg_save_size*BytesPerInt, 16);
// OopMap frame size is in compiler stack slots (jint's) not bytes or words
int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
@@ -190,7 +191,7 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
__ ldr(r0, Address(sp, r0_offset_in_bytes()));
// Pop all of the register save are off the stack
- __ add(sp, sp, round_to(return_offset_in_bytes(), 16));
+ __ add(sp, sp, align_up(return_offset_in_bytes(), 16));
}
// Is vector's size (in bytes) bigger than a size saved by default?
@@ -317,7 +318,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
}
- return round_to(stk_args, 2);
+ return align_up(stk_args, 2);
}
// Patch the callers callsite with entry to compiled code if it exists.
@@ -375,7 +376,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ mov(r13, sp);
// stack is aligned, keep it that way
- extraspace = round_to(extraspace, 2*wordSize);
+ extraspace = align_up(extraspace, 2*wordSize);
if (extraspace)
__ sub(sp, sp, extraspace);
@@ -547,7 +548,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
}
// Cut-out for having no stack args.
- int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+ int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
if (comp_args_on_stack) {
__ sub(rscratch1, sp, comp_words_on_stack * wordSize);
__ andr(sp, rscratch1, -16);
@@ -1206,7 +1207,7 @@ static void rt_call(MacroAssembler* masm, address dest, int gpargs, int fpargs,
}
static void verify_oop_args(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = r19; // not part of any compiled calling seq
@@ -1228,7 +1229,7 @@ static void verify_oop_args(MacroAssembler* masm,
}
static void gen_special_dispatch(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
@@ -1486,7 +1487,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
total_save_slots = double_slots * 2 + single_slots;
// align the save area
if (double_slots != 0) {
- stack_slots = round_to(stack_slots, 2);
+ stack_slots = align_up(stack_slots, 2);
}
}
@@ -1543,7 +1544,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now compute actual number of stack words we need rounding to make
// stack properly aligned.
- stack_slots = round_to(stack_slots, StackAlignmentInSlots);
+ stack_slots = align_up(stack_slots, StackAlignmentInSlots);
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
@@ -1842,7 +1843,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
}
// Load (object->mark() | 1) into swap_reg %r0
- __ ldr(rscratch1, Address(obj_reg, 0));
+ __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ orr(swap_reg, rscratch1, 1);
// Save (object->mark() | 1) into BasicLock's displaced header
@@ -1850,7 +1851,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// src -> dest iff dest == r0 else r0 <- dest
{ Label here;
- __ cmpxchgptr(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
+ __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
}
// Hmm should this move to the slow path code area???
@@ -2029,7 +2030,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Atomic swap old header if oop still contains the stack lock
Label succeed;
- __ cmpxchgptr(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
+ __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
__ bind(succeed);
// slow path re-enters here
@@ -2293,7 +2294,7 @@ int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals)
return 0; // No adjustment for negative locals
int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
// diff is counted in stack words
- return round_to(diff, 2);
+ return align_up(diff, 2);
}
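The round_to/round_down calls in this file become align_up/align_down from the newly included utilities/align.hpp. For a power-of-two alignment the semantics can be sketched as follows; the real implementations are templates in that header:

  #include <cstdint>

  static inline intptr_t align_up_sketch(intptr_t x, intptr_t alignment) {
    return (x + alignment - 1) & ~(alignment - 1);   // alignment must be a power of two
  }
  static inline intptr_t align_down_sketch(intptr_t x, intptr_t alignment) {
    return x & ~(alignment - 1);
  }
  // e.g. align_up_sketch(13, 16) == 16, align_down_sketch(13, 16) == 0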
diff --git a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
index 650c374371d..e6e79a468d2 100644
--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
@@ -39,6 +39,7 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
+#include "utilities/align.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
@@ -619,19 +620,21 @@ class StubGenerator: public StubCodeGenerator {
// Generate code for an array write pre barrier
//
- // addr - starting address
- // count - element count
- // tmp - scratch register
+ // addr - starting address
+ // count - element count
+ // tmp - scratch register
+ // saved_regs - registers to be saved before calling static_write_ref_array_pre
//
- // Destroy no registers except rscratch1 and rscratch2
+ // Callers must specify which registers to preserve in saved_regs.
+ // Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
//
- void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
+ void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized, RegSet saved_regs) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1SATBCTLogging:
// With G1, don't generate the call if we statically know that the target in uninitialized
if (!dest_uninitialized) {
- __ push_call_clobbered_registers();
+ __ push(saved_regs, sp);
if (count == c_rarg0) {
if (addr == c_rarg1) {
// exactly backwards!!
@@ -647,7 +650,7 @@ class StubGenerator: public StubCodeGenerator {
__ mov(c_rarg1, count);
}
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
- __ pop_call_clobbered_registers();
+ __ pop(saved_regs, sp);
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableExtension:
@@ -664,20 +667,23 @@ class StubGenerator: public StubCodeGenerator {
// Generate code for an array write post barrier
//
// Input:
- // start - register containing starting address of destination array
- // end - register containing ending address of destination array
- // scratch - scratch register
+ // start - register containing starting address of destination array
+ // end - register containing ending address of destination array
+ // scratch - scratch register
+ // saved_regs - registers to be saved before calling static_write_ref_array_post
//
// The input registers are overwritten.
// The ending address is inclusive.
- void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
+ // Callers must specify which registers to preserve in saved_regs.
+ // Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
+ void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch, RegSet saved_regs) {
assert_different_registers(start, end, scratch);
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1SATBCTLogging:
{
- __ push_call_clobbered_registers();
+ __ push(saved_regs, sp);
// must compute element count unless barrier set interface is changed (other platforms supply count)
assert_different_registers(start, end, scratch);
__ lea(scratch, Address(end, BytesPerHeapOop));
@@ -686,7 +692,7 @@ class StubGenerator: public StubCodeGenerator {
__ mov(c_rarg0, start);
__ mov(c_rarg1, scratch);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
- __ pop_call_clobbered_registers();
+ __ pop(saved_regs, sp);
}
break;
case BarrierSet::CardTableForRS:
@@ -758,7 +764,7 @@ class StubGenerator: public StubCodeGenerator {
// alignment.
Label small;
int low_limit = MAX2(zva_length * 2, (int)BlockZeroingLowLimit);
- __ cmp(cnt, low_limit >> 3);
+ __ subs(rscratch1, cnt, low_limit >> 3);
__ br(Assembler::LT, small);
__ zero_dcache_blocks(base, cnt);
__ bind(small);
@@ -821,7 +827,7 @@ class StubGenerator: public StubCodeGenerator {
Label again, drain;
const char *stub_name;
if (direction == copy_forwards)
- stub_name = "foward_copy_longs";
+ stub_name = "forward_copy_longs";
else
stub_name = "backward_copy_longs";
StubCodeMark mark(this, "StubRoutines", stub_name);
@@ -1438,6 +1444,7 @@ class StubGenerator: public StubCodeGenerator {
address generate_disjoint_copy(size_t size, bool aligned, bool is_oop, address *entry,
const char *name, bool dest_uninitialized = false) {
Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
+ RegSet saved_reg = RegSet::of(s, d, count);
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
@@ -1450,9 +1457,9 @@ class StubGenerator: public StubCodeGenerator {
}
if (is_oop) {
+ gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_reg);
+ // save regs before copy_memory
__ push(RegSet::of(d, count), sp);
- // no registers are destroyed by this call
- gen_write_ref_array_pre_barrier(d, count, dest_uninitialized);
}
copy_memory(aligned, s, d, count, rscratch1, size);
if (is_oop) {
@@ -1461,7 +1468,7 @@ class StubGenerator: public StubCodeGenerator {
verify_oop_array(size, d, count, r16);
__ sub(count, count, 1); // make an inclusive end pointer
__ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
- gen_write_ref_array_post_barrier(d, count, rscratch1);
+ gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
}
__ leave();
__ mov(r0, zr); // return 0
@@ -1494,7 +1501,7 @@ class StubGenerator: public StubCodeGenerator {
address *entry, const char *name,
bool dest_uninitialized = false) {
Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
-
+ RegSet saved_regs = RegSet::of(s, d, count);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
__ enter();
@@ -1511,9 +1518,9 @@ class StubGenerator: public StubCodeGenerator {
__ br(Assembler::HS, nooverlap_target);
if (is_oop) {
+ gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_regs);
+ // save regs before copy_memory
__ push(RegSet::of(d, count), sp);
- // no registers are destroyed by this call
- gen_write_ref_array_pre_barrier(d, count, dest_uninitialized);
}
copy_memory(aligned, s, d, count, rscratch1, -size);
if (is_oop) {
@@ -1522,7 +1529,7 @@ class StubGenerator: public StubCodeGenerator {
verify_oop_array(size, d, count, r16);
__ sub(count, count, 1); // make an inclusive end pointer
__ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
- gen_write_ref_array_post_barrier(d, count, rscratch1);
+ gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
}
__ leave();
__ mov(r0, zr); // return 0
@@ -1804,6 +1811,9 @@ class StubGenerator: public StubCodeGenerator {
const Register ckoff = c_rarg3; // super_check_offset
const Register ckval = c_rarg4; // super_klass
+ RegSet wb_pre_saved_regs = RegSet::range(c_rarg0, c_rarg4);
+ RegSet wb_post_saved_regs = RegSet::of(count);
+
// Registers used as temps (r18, r19, r20 are save-on-entry)
const Register count_save = r21; // orig elementscount
const Register start_to = r20; // destination array start address
@@ -1861,7 +1871,7 @@ class StubGenerator: public StubCodeGenerator {
}
#endif //ASSERT
- gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
+ gen_write_ref_array_pre_barrier(to, count, dest_uninitialized, wb_pre_saved_regs);
// save the original count
__ mov(count_save, count);
@@ -1905,7 +1915,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_do_card_marks);
__ add(to, to, -heapOopSize); // make an inclusive end pointer
- gen_write_ref_array_post_barrier(start_to, to, rscratch1);
+ gen_write_ref_array_post_barrier(start_to, to, rscratch1, wb_post_saved_regs);
__ bind(L_done_pop);
__ pop(RegSet::of(r18, r19, r20, r21), sp);
@@ -3660,6 +3670,167 @@ class StubGenerator: public StubCodeGenerator {
__ eor(result, __ T16B, lo, t0);
}
+ address generate_has_negatives(address &has_negatives_long) {
+ StubCodeMark mark(this, "StubRoutines", "has_negatives");
+ const int large_loop_size = 64;
+ const uint64_t UPPER_BIT_MASK=0x8080808080808080;
+ int dcache_line = VM_Version::dcache_line_size();
+
+ Register ary1 = r1, len = r2, result = r0;
+
+ __ align(CodeEntryAlignment);
+ address entry = __ pc();
+
+ __ enter();
+
+ Label RET_TRUE, RET_TRUE_NO_POP, RET_FALSE, ALIGNED, LOOP16, CHECK_16, DONE,
+ LARGE_LOOP, POST_LOOP16, LEN_OVER_15, LEN_OVER_8, POST_LOOP16_LOAD_TAIL;
+
+ __ cmp(len, 15);
+ __ br(Assembler::GT, LEN_OVER_15);
+ // The only case when execution falls into this code is when pointer is near
+ // the end of memory page and we have to avoid reading next page
+ __ add(ary1, ary1, len);
+ __ subs(len, len, 8);
+ __ br(Assembler::GT, LEN_OVER_8);
+ __ ldr(rscratch2, Address(ary1, -8));
+ __ sub(rscratch1, zr, len, __ LSL, 3); // LSL 3 is to get bits from bytes.
+ __ lsrv(rscratch2, rscratch2, rscratch1);
+ __ tst(rscratch2, UPPER_BIT_MASK);
+ __ cset(result, Assembler::NE);
+ __ leave();
+ __ ret(lr);
+ __ bind(LEN_OVER_8);
+ __ ldp(rscratch1, rscratch2, Address(ary1, -16));
+ __ sub(len, len, 8); // no data dep., then sub can be executed while loading
+ __ tst(rscratch2, UPPER_BIT_MASK);
+ __ br(Assembler::NE, RET_TRUE_NO_POP);
+ __ sub(rscratch2, zr, len, __ LSL, 3); // LSL 3 is to get bits from bytes
+ __ lsrv(rscratch1, rscratch1, rscratch2);
+ __ tst(rscratch1, UPPER_BIT_MASK);
+ __ cset(result, Assembler::NE);
+ __ leave();
+ __ ret(lr);
+
+ Register tmp1 = r3, tmp2 = r4, tmp3 = r5, tmp4 = r6, tmp5 = r7, tmp6 = r10;
+ const RegSet spilled_regs = RegSet::range(tmp1, tmp5) + tmp6;
+
+ has_negatives_long = __ pc(); // 2nd entry point
+
+ __ enter();
+
+ __ bind(LEN_OVER_15);
+ __ push(spilled_regs, sp);
+ __ andr(rscratch2, ary1, 15); // check pointer for 16-byte alignment
+ __ cbz(rscratch2, ALIGNED);
+ __ ldp(tmp6, tmp1, Address(ary1));
+ __ mov(tmp5, 16);
+ __ sub(rscratch1, tmp5, rscratch2); // amount of bytes until aligned address
+ __ add(ary1, ary1, rscratch1);
+ __ sub(len, len, rscratch1);
+ __ orr(tmp6, tmp6, tmp1);
+ __ tst(tmp6, UPPER_BIT_MASK);
+ __ br(Assembler::NE, RET_TRUE);
+
+ __ bind(ALIGNED);
+ __ cmp(len, large_loop_size);
+ __ br(Assembler::LT, CHECK_16);
+ // Perform 16-byte load as early return in pre-loop to handle situation
+ // when initially aligned large array has negative values at starting bytes,
+ // so LARGE_LOOP would do 4 reads instead of 1 (in worst case), which is
+ // slower. Cases with negative bytes further ahead won't be affected that
+ // much. In fact, it'll be faster due to early loads, less instructions and
+ // less branches in LARGE_LOOP.
+ __ ldp(tmp6, tmp1, Address(__ post(ary1, 16)));
+ __ sub(len, len, 16);
+ __ orr(tmp6, tmp6, tmp1);
+ __ tst(tmp6, UPPER_BIT_MASK);
+ __ br(Assembler::NE, RET_TRUE);
+ __ cmp(len, large_loop_size);
+ __ br(Assembler::LT, CHECK_16);
+
+ if (SoftwarePrefetchHintDistance >= 0
+ && SoftwarePrefetchHintDistance >= dcache_line) {
+ // initial prefetch
+ __ prfm(Address(ary1, SoftwarePrefetchHintDistance - dcache_line));
+ }
+ __ bind(LARGE_LOOP);
+ if (SoftwarePrefetchHintDistance >= 0) {
+ __ prfm(Address(ary1, SoftwarePrefetchHintDistance));
+ }
+ // Issue load instructions first, since it can save few CPU/MEM cycles, also
+ // instead of 4 triples of "orr(...), addr(...);cbnz(...);" (for each ldp)
+ // better generate 7 * orr(...) + 1 andr(...) + 1 cbnz(...) which saves 3
+ // instructions per cycle and have less branches, but this approach disables
+ // early return, thus, all 64 bytes are loaded and checked every time.
+ __ ldp(tmp2, tmp3, Address(ary1));
+ __ ldp(tmp4, tmp5, Address(ary1, 16));
+ __ ldp(rscratch1, rscratch2, Address(ary1, 32));
+ __ ldp(tmp6, tmp1, Address(ary1, 48));
+ __ add(ary1, ary1, large_loop_size);
+ __ sub(len, len, large_loop_size);
+ __ orr(tmp2, tmp2, tmp3);
+ __ orr(tmp4, tmp4, tmp5);
+ __ orr(rscratch1, rscratch1, rscratch2);
+ __ orr(tmp6, tmp6, tmp1);
+ __ orr(tmp2, tmp2, tmp4);
+ __ orr(rscratch1, rscratch1, tmp6);
+ __ orr(tmp2, tmp2, rscratch1);
+ __ tst(tmp2, UPPER_BIT_MASK);
+ __ br(Assembler::NE, RET_TRUE);
+ __ cmp(len, large_loop_size);
+ __ br(Assembler::GE, LARGE_LOOP);
+
+ __ bind(CHECK_16); // small 16-byte load pre-loop
+ __ cmp(len, 16);
+ __ br(Assembler::LT, POST_LOOP16);
+
+ __ bind(LOOP16); // small 16-byte load loop
+ __ ldp(tmp2, tmp3, Address(__ post(ary1, 16)));
+ __ sub(len, len, 16);
+ __ orr(tmp2, tmp2, tmp3);
+ __ tst(tmp2, UPPER_BIT_MASK);
+ __ br(Assembler::NE, RET_TRUE);
+ __ cmp(len, 16);
+ __ br(Assembler::GE, LOOP16); // 16-byte load loop end
+
+ __ bind(POST_LOOP16); // 16-byte aligned, so we can read unconditionally
+ __ cmp(len, 8);
+ __ br(Assembler::LE, POST_LOOP16_LOAD_TAIL);
+ __ ldr(tmp3, Address(__ post(ary1, 8)));
+ __ sub(len, len, 8);
+ __ tst(tmp3, UPPER_BIT_MASK);
+ __ br(Assembler::NE, RET_TRUE);
+
+ __ bind(POST_LOOP16_LOAD_TAIL);
+ __ cbz(len, RET_FALSE); // Can't shift left by 64 when len==0
+ __ ldr(tmp1, Address(ary1));
+ __ mov(tmp2, 64);
+ __ sub(tmp4, tmp2, len, __ LSL, 3);
+ __ lslv(tmp1, tmp1, tmp4);
+ __ tst(tmp1, UPPER_BIT_MASK);
+ __ br(Assembler::NE, RET_TRUE);
+ // Fallthrough
+
+ __ bind(RET_FALSE);
+ __ pop(spilled_regs, sp);
+ __ leave();
+ __ mov(result, zr);
+ __ ret(lr);
+
+ __ bind(RET_TRUE);
+ __ pop(spilled_regs, sp);
+ __ bind(RET_TRUE_NO_POP);
+ __ leave();
+ __ mov(result, 1);
+ __ ret(lr);
+
+ __ bind(DONE);
+ __ pop(spilled_regs, sp);
+ __ leave();
+ __ ret(lr);
+ return entry;
+ }
/**
* Arguments:
*
@@ -4676,6 +4847,7 @@ class StubGenerator: public StubCodeGenerator {
// }
};
+
// Initialization
void generate_initial() {
// Generate initial stubs and initializes the entry points
@@ -4734,6 +4906,9 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
+ // has negatives stub for large arrays.
+ StubRoutines::aarch64::_has_negatives = generate_has_negatives(StubRoutines::aarch64::_has_negatives_long);
+
if (UseMultiplyToLenIntrinsic) {
StubRoutines::_multiplyToLen = generate_multiplyToLen();
}
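With the new saved_regs parameter, the G1 pre/post write-barrier helpers no longer blanket-save every call-clobbered register; each caller names exactly the registers that must survive the runtime call. The pattern, condensed from generate_disjoint_copy above purely for orientation:

  // RegSet saved_reg = RegSet::of(s, d, count);                        // still needed after the call
  // gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_reg);
  // ... copy_memory(...) ...
  // gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());   // nothing live afterwards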
diff --git a/hotspot/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp
index 9bcbe1e1e1d..1313166ca3a 100644
--- a/hotspot/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp
@@ -44,6 +44,8 @@ address StubRoutines::aarch64::_float_sign_flip = NULL;
address StubRoutines::aarch64::_double_sign_mask = NULL;
address StubRoutines::aarch64::_double_sign_flip = NULL;
address StubRoutines::aarch64::_zero_blocks = NULL;
+address StubRoutines::aarch64::_has_negatives = NULL;
+address StubRoutines::aarch64::_has_negatives_long = NULL;
bool StubRoutines::aarch64::_completed = false;
/**
diff --git a/hotspot/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp
index fc86d04a648..e7a6bc3c850 100644
--- a/hotspot/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp
@@ -62,6 +62,9 @@ class aarch64 {
static address _double_sign_flip;
static address _zero_blocks;
+
+ static address _has_negatives;
+ static address _has_negatives_long;
static bool _completed;
public:
@@ -120,6 +123,14 @@ class aarch64 {
return _zero_blocks;
}
+ static address has_negatives() {
+ return _has_negatives;
+ }
+
+ static address has_negatives_long() {
+ return _has_negatives_long;
+ }
+
static bool complete() {
return _completed;
}
diff --git a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp
index 6f44292c55a..f22f8d874aa 100644
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp
@@ -402,14 +402,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
return entry;
}
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
- address entry = __ pc();
- // NULL last_sp until next java call
- __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
- __ dispatch_next(state);
- return entry;
-}
-
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();
@@ -444,6 +436,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ notify(Assembler::method_reentry);
}
#endif
+
+ __ check_and_handle_popframe(rthread);
+ __ check_and_handle_earlyret(rthread);
+
__ get_dispatch();
__ dispatch_next(state, step);
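The separate continuation entry is gone; the common return entry now lets the interpreter-specific assembler deal with pending JVMTI requests itself. Roughly, and only as a sketch of intent:

  // check_and_handle_popframe(rthread): if a PopFrame request is pending, jump to the
  //   entry that removes the current activation while preserving its arguments.
  // check_and_handle_earlyret(rthread): if ForceEarlyReturn is pending, jump to the
  //   entry that removes the activation and returns the forced value.
  // The MacroAssembler base versions are empty; only InterpreterMacroAssembler emits code.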
diff --git a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp
index f0f0f5a6ee5..e110035d31a 100644
--- a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -246,8 +246,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
assert(load_bc_into_bc_reg, "we use bc_reg as temp");
__ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
__ movw(bc_reg, bc);
- __ cmpw(temp_reg, (unsigned) 0);
- __ br(Assembler::EQ, L_patch_done); // don't patch
+ __ cbzw(temp_reg, L_patch_done); // don't patch
}
break;
default:
@@ -3418,8 +3417,7 @@ void TemplateTable::_new() {
__ br(Assembler::NE, slow_case);
// get InstanceKlass
- __ lea(r4, Address(r4, r3, Address::lsl(3)));
- __ ldr(r4, Address(r4, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
@@ -3572,8 +3570,7 @@ void TemplateTable::checkcast()
// Get superklass in r0 and subklass in r3
__ bind(quicked);
__ mov(r3, r0); // Save object in r3; r0 needed for subtype check
- __ lea(r0, Address(r2, r19, Address::lsl(3)));
- __ ldr(r0, Address(r0, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
__ bind(resolved);
__ load_klass(r19, r3);
@@ -3629,8 +3626,7 @@ void TemplateTable::instanceof() {
// Get superklass in r0 and subklass in r3
__ bind(quicked);
__ load_klass(r3, r0);
- __ lea(r0, Address(r2, r19, Address::lsl(3)));
- __ ldr(r0, Address(r0, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);
__ bind(resolved);
diff --git a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp
index 33cb9496a91..722ebbba322 100644
--- a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp
@@ -137,6 +137,8 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 3*dcache_line);
if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes))
FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 3*dcache_line);
+ if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance))
+ FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, 3*dcache_line);
if (PrefetchCopyIntervalInBytes != -1 &&
((PrefetchCopyIntervalInBytes & 7) || (PrefetchCopyIntervalInBytes >= 32768))) {
@@ -146,6 +148,12 @@ void VM_Version::get_processor_features() {
PrefetchCopyIntervalInBytes = 32760;
}
+ if (SoftwarePrefetchHintDistance != -1 &&
+ (SoftwarePrefetchHintDistance & 7)) {
+ warning("SoftwarePrefetchHintDistance must be -1, or a multiple of 8");
+ SoftwarePrefetchHintDistance &= ~7;
+ }
+
unsigned long auxv = getauxval(AT_HWCAP);
char buf[512];
diff --git a/hotspot/src/cpu/arm/vm/abstractInterpreter_arm.cpp b/hotspot/src/cpu/arm/vm/abstractInterpreter_arm.cpp
index 7899c102e5b..efc233a562e 100644
--- a/hotspot/src/cpu/arm/vm/abstractInterpreter_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/abstractInterpreter_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/synchronizer.hpp"
+#include "utilities/align.hpp"
#include "utilities/macros.hpp"
int AbstractInterpreter::BasicType_as_index(BasicType type) {
@@ -68,23 +69,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
- switch (method_kind(m)) {
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt :
- return false;
- default:
- return true;
- }
-}
-
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
const int stub_code = AARCH64_ONLY(24) NOT_AARCH64(12); // see generate_call_stub
@@ -125,7 +109,7 @@ int AbstractInterpreter::size_activation(int max_stack,
tempcount*Interpreter::stackElementWords + extra_args;
#ifdef AARCH64
- size = round_to(size, StackAlignmentInBytes/BytesPerWord);
+ size = align_up(size, StackAlignmentInBytes/BytesPerWord);
#endif // AARCH64
return size;
@@ -206,7 +190,7 @@ void AbstractInterpreter::layout_activation(Method* method,
}
if (caller->is_interpreted_frame()) {
intptr_t* locals_base = (locals - method->max_locals()*Interpreter::stackElementWords + 1);
- locals_base = (intptr_t*)round_down((intptr_t)locals_base, StackAlignmentInBytes);
+ locals_base = align_down(locals_base, StackAlignmentInBytes);
assert(interpreter_frame->sender_sp() <= locals_base, "interpreter-to-interpreter frame chaining");
} else if (caller->is_compiled_frame()) {
@@ -234,10 +218,17 @@ void AbstractInterpreter::layout_activation(Method* method,
#ifdef AARCH64
interpreter_frame->interpreter_frame_set_stack_top(stack_top);
+ // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
+ // none of which are at the same time, so we just need to make sure there is enough room
+ // for the biggest user:
+ // -reserved slot for exception handler
+ // -reserved slots for JSR292. Method::extra_stack_entries() is the size.
+ // -3 reserved slots so get_method_counters() can save some registers before call_VM().
+ int max_stack = method->constMethod()->max_stack() + MAX2(3, Method::extra_stack_entries());
intptr_t* extended_sp = (intptr_t*) monbot -
- (method->max_stack() + 1) * Interpreter::stackElementWords - // +1 is reserved slot for exception handler
+ (max_stack * Interpreter::stackElementWords) -
popframe_extra_args;
- extended_sp = (intptr_t*)round_down((intptr_t)extended_sp, StackAlignmentInBytes);
+ extended_sp = align_down(extended_sp, StackAlignmentInBytes);
interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);
#else
interpreter_frame->interpreter_frame_set_last_sp(stack_top);
@@ -249,7 +240,7 @@ void AbstractInterpreter::layout_activation(Method* method,
#ifdef AARCH64
if (caller->is_interpreted_frame()) {
- intptr_t* sender_sp = (intptr_t*)round_down((intptr_t)caller->interpreter_frame_tos_address(), StackAlignmentInBytes);
+ intptr_t* sender_sp = align_down(caller->interpreter_frame_tos_address(), StackAlignmentInBytes);
interpreter_frame->set_interpreter_frame_sender_sp(sender_sp);
} else {
diff --git a/hotspot/src/cpu/arm/vm/arm.ad b/hotspot/src/cpu/arm/vm/arm.ad
index f4e5bd324cd..7cc789f599d 100644
--- a/hotspot/src/cpu/arm/vm/arm.ad
+++ b/hotspot/src/cpu/arm/vm/arm.ad
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -1122,7 +1122,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg corresponding to specified size in bytes
-const int Matcher::vector_ideal_reg(int size) {
+const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize >= size, "");
switch(size) {
case 8: return Op_VecD;
@@ -1132,7 +1132,7 @@ const int Matcher::vector_ideal_reg(int size) {
return 0;
}
-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
return vector_ideal_reg(size);
}
@@ -1881,7 +1881,7 @@ frame %{
// Ret Addr is on stack in slot 0 if no locks or verification or alignment.
// Otherwise, it is above the locks and verification slot and alignment word
return_addr(STACK - 1*VMRegImpl::slots_per_word +
- round_to((Compile::current()->in_preserve_stack_slots() +
+ align_up((Compile::current()->in_preserve_stack_slots() +
Compile::current()->fixed_slots()),
stack_alignment_in_slots()));
@@ -11968,9 +11968,13 @@ instruct ShouldNotReachHere( )
size(4);
// Use the following format syntax
- format %{ "breakpoint ; ShouldNotReachHere" %}
+ format %{ "ShouldNotReachHere" %}
ins_encode %{
- __ breakpoint();
+#ifdef AARCH64
+ __ dpcs1(0xdead);
+#else
+ __ udf(0xdead);
+#endif
%}
ins_pipe(tail_call);
%}
diff --git a/hotspot/src/cpu/arm/vm/assembler_arm_32.hpp b/hotspot/src/cpu/arm/vm/assembler_arm_32.hpp
index e32f6a98f29..95e6568832e 100644
--- a/hotspot/src/cpu/arm/vm/assembler_arm_32.hpp
+++ b/hotspot/src/cpu/arm/vm/assembler_arm_32.hpp
@@ -578,6 +578,11 @@ class Assembler : public AbstractAssembler {
F(bl, 0xb)
#undef F
+ void udf(int imm_16) {
+ assert((imm_16 >> 16) == 0, "encoding constraint");
+ emit_int32(0xe7f000f0 | (imm_16 & 0xfff0) << 8 | (imm_16 & 0xf));
+ }
+
// ARMv7 instructions
#define F(mnemonic, wt) \
diff --git a/hotspot/src/cpu/arm/vm/assembler_arm_64.hpp b/hotspot/src/cpu/arm/vm/assembler_arm_64.hpp
index 44d0504036c..9c6cd14c3f8 100644
--- a/hotspot/src/cpu/arm/vm/assembler_arm_64.hpp
+++ b/hotspot/src/cpu/arm/vm/assembler_arm_64.hpp
@@ -1083,6 +1083,7 @@ class Assembler : public AbstractAssembler {
F(brk, 0b001, 0b000, 0b00)
F(hlt, 0b010, 0b000, 0b00)
+ F(dpcs1, 0b101, 0b000, 0b01)
#undef F
enum SystemRegister { // o0<1> op1<3> CRn<4> CRm<4> op2<3>
diff --git a/hotspot/src/cpu/arm/vm/bytes_arm.hpp b/hotspot/src/cpu/arm/vm/bytes_arm.hpp
index 0cf7e2a77c9..0e5a894050a 100644
--- a/hotspot/src/cpu/arm/vm/bytes_arm.hpp
+++ b/hotspot/src/cpu/arm/vm/bytes_arm.hpp
@@ -35,12 +35,6 @@
class Bytes: AllStatic {
public:
- // Returns true if the byte ordering used by Java is different from the native byte ordering
- // of the underlying machine.
- static inline bool is_Java_byte_ordering_different() {
- return VM_LITTLE_ENDIAN != 0;
- }
-
static inline u2 get_Java_u2(address p) {
return (u2(p[0]) << 8) | u2(p[1]);
}
diff --git a/hotspot/src/cpu/arm/vm/c1_FpuStackSim_arm.cpp b/hotspot/src/cpu/arm/vm/c1_FpuStackSim_arm.cpp
index ac826b491a2..287f4e412d7 100644
--- a/hotspot/src/cpu/arm/vm/c1_FpuStackSim_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/c1_FpuStackSim_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,10 +22,4 @@
*
*/
-#include "precompiled.hpp"
-#include "c1/c1_FpuStackSim.hpp"
-#include "c1/c1_FrameMap.hpp"
-#include "utilities/array.hpp"
-#include "utilities/ostream.hpp"
-
// Nothing needed here
diff --git a/hotspot/src/cpu/arm/vm/c1_Runtime1_arm.cpp b/hotspot/src/cpu/arm/vm/c1_Runtime1_arm.cpp
index 5e793e16bd7..a4a969c87fa 100644
--- a/hotspot/src/cpu/arm/vm/c1_Runtime1_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/c1_Runtime1_arm.cpp
@@ -37,6 +37,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
@@ -250,7 +251,7 @@ static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers
__ sub(SP, SP, (reg_save_size - 2) * wordSize);
- for (int i = 0; i < round_down(number_of_saved_gprs, 2); i += 2) {
+ for (int i = 0; i < align_down((int)number_of_saved_gprs, 2); i += 2) {
__ stp(as_Register(i), as_Register(i+1), Address(SP, (R0_offset + i) * wordSize));
}
diff --git a/hotspot/src/cpu/arm/vm/frame_arm.cpp b/hotspot/src/cpu/arm/vm/frame_arm.cpp
index defe4a45a4a..ae40ec808ff 100644
--- a/hotspot/src/cpu/arm/vm/frame_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/frame_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -621,6 +621,8 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
frame::frame(void* sp, void* fp, void* pc) {
init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
+
+void frame::pd_ps() {}
#endif
intptr_t *frame::initial_deoptimization_info() {
diff --git a/hotspot/src/cpu/arm/vm/interp_masm_arm.cpp b/hotspot/src/cpu/arm/vm/interp_masm_arm.cpp
index 96df37c275e..364ce6e854c 100644
--- a/hotspot/src/cpu/arm/vm/interp_masm_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/interp_masm_arm.cpp
@@ -298,7 +298,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register cache = result;
// load pointer for resolved_references[] objArray
- ldr(cache, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
+ ldr(cache, Address(result, ConstantPool::cache_offset_in_bytes()));
+ ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
// JNIHandles::resolve(result)
ldr(cache, Address(cache, 0));
// Add in the index
@@ -308,6 +309,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
load_heap_oop(result, Address(cache, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
+void InterpreterMacroAssembler::load_resolved_klass_at_offset(
+ Register Rcpool, Register Rindex, Register Rklass) {
+ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
+ ldrh(Rtemp, Address(Rtemp, sizeof(ConstantPool))); // Rtemp = resolved_klass_index
+ ldr(Rklass, Address(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes())); // Rklass = cpool->_resolved_klasses
+ add(Rklass, Rklass, AsmOperand(Rtemp, lsl, LogBytesPerWord));
+ ldr(Rklass, Address(Rklass, Array<Klass*>::base_offset_in_bytes()));
+}
+
// Generate a subtype check: branch to not_subtype if sub_klass is
// not a subtype of super_klass.
// Profiling code for the subtype check failure (profile_typecheck_failed)
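The new load_resolved_klass_at_offset above follows the constant-pool layout in which a resolved class entry stores a 16-bit index into the pool's _resolved_klasses array instead of the Klass* itself, which is what the ldrh followed by the two ldr instructions walk through. A C++-style sketch of that lookup, with hypothetical field shapes:

  #include <cstdint>

  // Hypothetical layout, for illustration only.
  struct ConstantPoolSketch {
    const uint16_t* slots;             // per-entry data; a resolved class slot holds a small index
    void* const*    resolved_klasses;  // the pool's _resolved_klasses array
  };

  static void* resolved_klass_sketch(const ConstantPoolSketch& cp, int cp_index) {
    uint16_t rk_index = cp.slots[cp_index];   // the ldrh: read the resolved_klass_index
    return cp.resolved_klasses[rk_index];     // the indexed ldr into _resolved_klasses
  }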
@@ -2016,75 +2026,42 @@ void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
void InterpreterMacroAssembler::get_method_counters(Register method,
Register Rcounters,
- Label& skip) {
+ Label& skip,
+ bool saveRegs,
+ Register reg1,
+ Register reg2,
+ Register reg3) {
const Address method_counters(method, Method::method_counters_offset());
Label has_counters;
ldr(Rcounters, method_counters);
cbnz(Rcounters, has_counters);
+ if (saveRegs) {
+ // Save and restore in use caller-saved registers since they will be trashed by call_VM
+ assert(reg1 != noreg, "must specify reg1");
+ assert(reg2 != noreg, "must specify reg2");
#ifdef AARCH64
- const Register tmp = Rcounters;
- const int saved_regs_size = 20*wordSize;
-
- // Note: call_VM will cut SP according to Rstack_top value before call, and restore SP to
- // extended_sp value from frame after the call.
- // So make sure there is enough stack space to save registers and adjust Rstack_top accordingly.
- {
- Label enough_stack_space;
- check_extended_sp(tmp);
- sub(Rstack_top, Rstack_top, saved_regs_size);
- cmp(SP, Rstack_top);
- b(enough_stack_space, ls);
-
- align_reg(tmp, Rstack_top, StackAlignmentInBytes);
- mov(SP, tmp);
- str(tmp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
-
- bind(enough_stack_space);
- check_stack_top();
-
- int offset = 0;
- stp(R0, R1, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R2, R3, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R4, R5, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R6, R7, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R8, R9, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R10, R11, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R12, R13, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R14, R15, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R16, R17, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R18, LR, Address(Rstack_top, offset)); offset += 2*wordSize;
- assert (offset == saved_regs_size, "should be");
- }
+ assert(reg3 != noreg, "must specify reg3");
+ stp(reg1, reg2, Address(Rstack_top, -2*wordSize, pre_indexed));
+ stp(reg3, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
#else
- push(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(R14));
-#endif // AARCH64
+ assert(reg3 == noreg, "must not specify reg3");
+ push(RegisterSet(reg1) | RegisterSet(reg2));
+#endif
+ }
mov(R1, method);
- call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::build_method_counters), R1);
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);
+ if (saveRegs) {
#ifdef AARCH64
- {
- int offset = 0;
- ldp(R0, R1, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R2, R3, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R4, R5, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R6, R7, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R8, R9, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R10, R11, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R12, R13, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R14, R15, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R16, R17, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R18, LR, Address(Rstack_top, offset)); offset += 2*wordSize;
- assert (offset == saved_regs_size, "should be");
-
- add(Rstack_top, Rstack_top, saved_regs_size);
- }
+ ldp(reg3, ZR, Address(Rstack_top, 2*wordSize, post_indexed));
+ ldp(reg1, reg2, Address(Rstack_top, 2*wordSize, post_indexed));
#else
- pop(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(R14));
-#endif // AARCH64
+ pop(RegisterSet(reg1) | RegisterSet(reg2));
+#endif
+ }
ldr(Rcounters, method_counters);
cbz(Rcounters, skip); // No MethodCounters created, OutOfMemory
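A note on the new save/restore path in get_method_counters() above: AArch64 stp/ldp always transfer a register pair, so the odd third register is paired with the zero register and the pops mirror the pushes in reverse order. Below is a minimal, illustrative C++ model of that symmetry only; plain integers stand in for register values and none of these names are HotSpot APIs.

    #include <cassert>
    #include <utility>
    #include <vector>

    int main() {
      long reg1 = 11, reg2 = 22, reg3 = 33;
      std::vector<std::pair<long, long> > frames;     // models the interpreter stack

      frames.push_back(std::make_pair(reg1, reg2));   // stp(reg1, reg2, pre_indexed)
      frames.push_back(std::make_pair(reg3, 0L));     // stp(reg3, ZR,  pre_indexed)
      reg1 = reg2 = reg3 = -1;                        // trashed by call_VM

      reg3 = frames.back().first;  frames.pop_back(); // ldp(reg3, ZR,  post_indexed)
      reg1 = frames.back().first;
      reg2 = frames.back().second; frames.pop_back(); // ldp(reg1, reg2, post_indexed)

      assert(reg1 == 11 && reg2 == 22 && reg3 == 33); // values survive the VM call
      return 0;
    }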
diff --git a/hotspot/src/cpu/arm/vm/interp_masm_arm.hpp b/hotspot/src/cpu/arm/vm/interp_masm_arm.hpp
index 39e60226bf6..434f501bf74 100644
--- a/hotspot/src/cpu/arm/vm/interp_masm_arm.hpp
+++ b/hotspot/src/cpu/arm/vm/interp_masm_arm.hpp
@@ -53,9 +53,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Template interpreter specific version of call_VM_helper
virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
- virtual void check_and_handle_popframe();
- virtual void check_and_handle_earlyret();
-
// base routine for all dispatches
typedef enum { DispatchDefault, DispatchNormal } DispatchTableMode;
void dispatch_base(TosState state, DispatchTableMode table_mode, bool verifyoop = true);
@@ -63,6 +60,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
public:
InterpreterMacroAssembler(CodeBuffer* code);
+ virtual void check_and_handle_popframe();
+ virtual void check_and_handle_earlyret();
+
// Interpreter-specific registers
#if defined(AARCH64) && defined(ASSERT)
@@ -141,6 +141,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Load object from cpool->resolved_references(*bcp+1)
void load_resolved_reference_at_index(Register result, Register tmp);
+ // load cpool->resolved_klass_at(index); Rtemp is corrupted upon return
+ void load_resolved_klass_at_offset(Register Rcpool, Register Rindex, Register Rklass);
+
void store_check_part1(Register card_table_base); // Sets card_table_base register.
void store_check_part2(Register obj, Register card_table_base, Register tmp);
@@ -328,7 +331,13 @@ class InterpreterMacroAssembler: public MacroAssembler {
void trace_state(const char* msg) PRODUCT_RETURN;
- void get_method_counters(Register method, Register Rcounters, Label& skip);
+ void get_method_counters(Register method,
+ Register Rcounters,
+ Label& skip,
+ bool saveRegs = false,
+ Register reg1 = noreg,
+ Register reg2 = noreg,
+ Register reg3 = noreg);
};
#endif // CPU_ARM_VM_INTERP_MASM_ARM_HPP
diff --git a/hotspot/src/cpu/arm/vm/interpreterRT_arm.cpp b/hotspot/src/cpu/arm/vm/interpreterRT_arm.cpp
index 4925f630c8d..af895ad91b4 100644
--- a/hotspot/src/cpu/arm/vm/interpreterRT_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/interpreterRT_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -422,7 +422,7 @@ class SlowSignatureHandler: public NativeSignatureIterator {
#endif // !__ABI_HARD__
public:
- SlowSignatureHandler(methodHandle method, address from, intptr_t* to) :
+ SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to) :
NativeSignatureIterator(method) {
_from = from;
diff --git a/hotspot/src/cpu/arm/vm/interpreterRT_arm.hpp b/hotspot/src/cpu/arm/vm/interpreterRT_arm.hpp
index fa1e0dce678..28c8b3c7db2 100644
--- a/hotspot/src/cpu/arm/vm/interpreterRT_arm.hpp
+++ b/hotspot/src/cpu/arm/vm/interpreterRT_arm.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
#endif
public:
// Creation
- SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
_abi_offset = 0;
_ireg = is_static() ? 2 : 1;
diff --git a/hotspot/src/cpu/arm/vm/macroAssembler_arm.hpp b/hotspot/src/cpu/arm/vm/macroAssembler_arm.hpp
index e6f73353cb9..b0710a1518a 100644
--- a/hotspot/src/cpu/arm/vm/macroAssembler_arm.hpp
+++ b/hotspot/src/cpu/arm/vm/macroAssembler_arm.hpp
@@ -206,6 +206,9 @@ protected:
// may customize this version by overriding it for its purposes (e.g., to save/restore
// additional registers when doing a VM call).
virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
+public:
+
+ MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
@@ -213,10 +216,6 @@ protected:
virtual void check_and_handle_popframe() {}
virtual void check_and_handle_earlyret() {}
-public:
-
- MacroAssembler(CodeBuffer* code) : Assembler(code) {}
-
// By default, we do not need relocation information for non
// patchable absolute addresses. However, when needed by some
// extensions, ignore_non_patchable_relocations can be modified,
diff --git a/hotspot/src/cpu/arm/vm/metaspaceShared_arm.cpp b/hotspot/src/cpu/arm/vm/metaspaceShared_arm.cpp
deleted file mode 100644
index 1cda4dff24e..00000000000
--- a/hotspot/src/cpu/arm/vm/metaspaceShared_arm.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "assembler_arm.inline.hpp"
-#include "memory/metaspaceShared.hpp"
-
-// Generate the self-patching vtable method:
-//
-// This method will be called (as any other Klass virtual method) with
-// the Klass itself as the first argument. Example:
-//
-// oop obj;
-// int size = obj->klass()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-
-//=====================================================================
-
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no relationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-
-#define __ masm->
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
- void** vtable,
- char** md_top,
- char* md_end,
- char** mc_top,
- char* mc_end) {
- intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
- *(intptr_t *)(*md_top) = vtable_bytes;
- *md_top += sizeof(intptr_t);
- void** dummy_vtable = (void**)*md_top;
- *vtable = dummy_vtable;
- *md_top += vtable_bytes;
-
- CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
- MacroAssembler* masm = new MacroAssembler(&cb);
-
- for (int i = 0; i < vtbl_list_size; ++i) {
- Label common_code;
- for (int j = 0; j < num_virtuals; ++j) {
- dummy_vtable[num_virtuals * i + j] = (void*) __ pc();
- __ mov(Rtemp, j); // Rtemp contains an index of a virtual method in the table
- __ b(common_code);
- }
-
- InlinedAddress vtable_address((address)&vtbl_list[i]);
- __ bind(common_code);
- const Register tmp2 = AARCH64_ONLY(Rtemp2) NOT_AARCH64(R4);
- assert_different_registers(Rtemp, tmp2);
-#ifndef AARCH64
- __ push(tmp2);
-#endif // !AARCH64
- // Do not use ldr_global since the code must be portable across all ARM architectures
- __ ldr_literal(tmp2, vtable_address);
- __ ldr(tmp2, Address(tmp2)); // get correct vtable address
- __ ldr(Rtemp, Address::indexed_ptr(tmp2, Rtemp)); // get real method pointer
- __ str(tmp2, Address(R0)); // update vtable. R0 = "this"
-#ifndef AARCH64
- __ pop(tmp2);
-#endif // !AARCH64
- __ jump(Rtemp);
- __ bind_literal(vtable_address);
- }
-
- __ flush();
- *mc_top = (char*) __ pc();
-}
diff --git a/hotspot/src/cpu/arm/vm/methodHandles_arm.cpp b/hotspot/src/cpu/arm/vm/methodHandles_arm.cpp
index 15317969319..be9dac4de4f 100644
--- a/hotspot/src/cpu/arm/vm/methodHandles_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/methodHandles_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "prims/methodHandles.hpp"
#define __ _masm->
@@ -67,7 +68,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, Register temp1, Register temp2, SystemDictionary::WKID klass_id,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
- KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+ Klass* klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj);
@@ -157,8 +158,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ load_heap_oop(tmp, Address(tmp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
__ verify_oop(tmp);
- // the following assumes that a Method* is normally compressed in the vmtarget field:
- __ ldr(Rmethod, Address(tmp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
+ __ load_heap_oop(Rmethod, Address(tmp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
+ __ verify_oop(Rmethod);
+ __ ldr(Rmethod, Address(Rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
@@ -320,7 +322,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
- Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
+ Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
+ Address vmtarget_method(Rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
@@ -375,14 +378,17 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
- __ ldr(Rmethod, member_vmtarget);
+ __ load_heap_oop(Rmethod, member_vmtarget);
+ __ ldr(Rmethod, vmtarget_method);
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
- __ ldr(Rmethod, member_vmtarget);
+ __ load_heap_oop(Rmethod, member_vmtarget);
+ __ ldr(Rmethod, vmtarget_method);
+ break;
break;
case vmIntrinsics::_linkToVirtual:
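The MemberName changes above add one level of indirection: the Method* is no longer read straight out of the MemberName, but out of the vmtarget field of the ResolvedMethodName object referenced by its method field. A rough structural sketch of that layout follows; the structs are illustrative, not the real VM types.

    #include <cassert>

    struct Method             { int id; };
    struct ResolvedMethodName { Method* vmtarget; };       // java_lang_invoke_ResolvedMethodName
    struct MemberName         { ResolvedMethodName* method; };  // java_lang_invoke_MemberName

    // Mirrors the two loads emitted above: member->method, then method->vmtarget.
    static Method* resolve_target(const MemberName* member) {
      ResolvedMethodName* rmn = member->method;   // load_heap_oop(Rmethod, member_vmtarget)
      return rmn->vmtarget;                       // ldr(Rmethod, vmtarget_method)
    }

    int main() {
      Method m = { 7 };
      ResolvedMethodName rmn = { &m };
      MemberName member = { &rmn };
      assert(resolve_target(&member)->id == 7);
      return 0;
    }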
diff --git a/hotspot/src/cpu/arm/vm/sharedRuntime_arm.cpp b/hotspot/src/cpu/arm/vm/sharedRuntime_arm.cpp
index 48f096473c3..59bc2e5b5d8 100644
--- a/hotspot/src/cpu/arm/vm/sharedRuntime_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/sharedRuntime_arm.cpp
@@ -34,6 +34,7 @@
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@@ -747,7 +748,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
assert_different_registers(tmp, R0, R1, R2, R3, R4, R5, R6, R7, Rsender_sp, Rparams);
if (comp_args_on_stack) {
- __ sub_slow(SP, SP, round_to(comp_args_on_stack * VMRegImpl::stack_slot_size, StackAlignmentInBytes));
+ __ sub_slow(SP, SP, align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, StackAlignmentInBytes));
}
for (int i = 0; i < total_args_passed; i++) {
@@ -870,7 +871,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
#ifdef AARCH64
- int extraspace = round_to(total_args_passed * Interpreter::stackElementSize, StackAlignmentInBytes);
+ int extraspace = align_up(total_args_passed * Interpreter::stackElementSize, StackAlignmentInBytes);
if (extraspace) {
__ sub(SP, SP, extraspace);
}
@@ -1023,7 +1024,7 @@ static int reg2offset_out(VMReg r) {
static void verify_oop_args(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = Rmethod; // not part of any compiled calling seq
@@ -1044,7 +1045,7 @@ static void verify_oop_args(MacroAssembler* masm,
}
static void gen_special_dispatch(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
@@ -1181,7 +1182,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
stack_slots += 2 * VMRegImpl::slots_per_word;
// Calculate the final stack size taking account of alignment
- stack_slots = round_to(stack_slots, StackAlignmentInBytes / VMRegImpl::stack_slot_size);
+ stack_slots = align_up(stack_slots, StackAlignmentInBytes / VMRegImpl::stack_slot_size);
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
int lock_slot_fp_offset = stack_size - 2 * wordSize -
lock_slot_offset * VMRegImpl::stack_slot_size;
@@ -1851,7 +1852,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
int extra_locals_size = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
#ifdef AARCH64
- extra_locals_size = round_to(extra_locals_size, StackAlignmentInBytes/BytesPerWord);
+ extra_locals_size = align_up(extra_locals_size, StackAlignmentInBytes/BytesPerWord);
#endif // AARCH64
return extra_locals_size;
}
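The round_to -> align_up renaming above is purely a call-site change; for reference, here is a minimal sketch of the rounding those call sites rely on, assuming a power-of-two alignment (the real implementation lives in utilities/align.hpp and may differ in detail).

    #include <cassert>
    #include <cstddef>

    // Round size up to the next multiple of alignment (alignment is a power of two).
    static inline size_t align_up_sketch(size_t size, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(align_up_sketch(20, 16) == 32); // e.g. stack bytes padded to StackAlignmentInBytes
      assert(align_up_sketch(32, 16) == 32); // already-aligned values are unchanged
      return 0;
    }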
diff --git a/hotspot/src/cpu/arm/vm/stubGenerator_arm.cpp b/hotspot/src/cpu/arm/vm/stubGenerator_arm.cpp
index 7d8c8d49927..d5ca6401fa4 100644
--- a/hotspot/src/cpu/arm/vm/stubGenerator_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/stubGenerator_arm.cpp
@@ -37,6 +37,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
+#include "utilities/align.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
@@ -2876,7 +2877,7 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("PreBarrier");
#ifdef AARCH64
- callee_saved_regs = round_to(callee_saved_regs, 2);
+ callee_saved_regs = align_up(callee_saved_regs, 2);
for (int i = 0; i < callee_saved_regs; i += 2) {
__ raw_push(as_Register(i), as_Register(i+1));
}
diff --git a/hotspot/src/cpu/arm/vm/templateInterpreterGenerator_arm.cpp b/hotspot/src/cpu/arm/vm/templateInterpreterGenerator_arm.cpp
index 7fda747ad48..1c2e706a26c 100644
--- a/hotspot/src/cpu/arm/vm/templateInterpreterGenerator_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/templateInterpreterGenerator_arm.cpp
@@ -45,6 +45,7 @@
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
@@ -270,12 +271,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry;
}
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
- // Not used.
- STOP("generate_continuation_for");
- return NULL;
-}
-
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();
@@ -310,6 +305,9 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ convert_retval_to_tos(state);
#endif // !AARCH64
+ __ check_and_handle_popframe();
+ __ check_and_handle_earlyret();
+
__ dispatch_next(state, step);
return entry;
@@ -678,7 +676,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// Rstack_top & RextendedSP
__ sub(Rstack_top, SP, 10*wordSize);
if (native_call) {
- __ sub(RextendedSP, Rstack_top, round_to(wordSize, StackAlignmentInBytes)); // reserve 1 slot for exception handling
+ __ sub(RextendedSP, Rstack_top, align_up(wordSize, StackAlignmentInBytes)); // reserve 1 slot for exception handling
} else {
__ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
__ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes);
@@ -1098,7 +1096,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Allocate more stack space to accommodate all arguments passed on GP and FP registers:
// 8 * wordSize for GPRs
// 8 * wordSize for FPRs
- int reg_arguments = round_to(8*wordSize + 8*wordSize, StackAlignmentInBytes);
+ int reg_arguments = align_up(8*wordSize + 8*wordSize, StackAlignmentInBytes);
#else
// C functions need aligned stack
@@ -1111,7 +1109,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Allocate more stack space to accommodate all GP as well as FP registers:
// 4 * wordSize
// 8 * BytesPerLong
- int reg_arguments = round_to((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
+ int reg_arguments = align_up((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
#else
// Reserve at least 4 words on the stack for loading
// of parameters passed on registers (R0-R3).
@@ -1401,7 +1399,13 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
#ifdef AARCH64
// setup RmaxStack
__ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
- __ add(RmaxStack, RmaxStack, MAX2(1, Method::extra_stack_entries())); // reserve slots for exception handler and JSR292 appendix argument
+ // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
+ // none of which are active at the same time, so we just need to make sure there is enough room
+ // for the biggest user:
+ //  - a reserved slot for exception handling
+ //  - reserved slots for the JSR292 appendix; Method::extra_stack_entries() is the size
+ //  - 3 reserved slots so get_method_counters() can save some registers before call_VM()
+ __ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries()));
#endif // AARCH64
// see if we've got enough room on the stack for locals plus overhead.
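The comment above argues that the three users of the extra expression-stack slots are never active at the same time, so the reservation only needs to cover the largest one rather than their sum, hence MAX2 rather than addition. A small illustrative model of that reasoning follows; the slot counts come from the comment and extra_stack_entries() is modeled as a plain parameter.

    #include <algorithm>
    #include <cassert>

    static int reserved_slots(int jsr292_extra_stack_entries) {
      const int exception_handler_slots   = 1;  // reserved slot for exception handling
      const int get_method_counters_slots = 3;  // register-save slots before call_VM()
      return std::max(std::max(exception_handler_slots, jsr292_extra_stack_entries),
                      get_method_counters_slots);
    }

    int main() {
      assert(reserved_slots(0)  == 3);   // the register-save slots dominate
      assert(reserved_slots(10) == 10);  // a large JSR292 appendix dominates
      return 0;
    }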
diff --git a/hotspot/src/cpu/arm/vm/templateTable_arm.cpp b/hotspot/src/cpu/arm/vm/templateTable_arm.cpp
index c8711b830eb..7fd60ce8d2d 100644
--- a/hotspot/src/cpu/arm/vm/templateTable_arm.cpp
+++ b/hotspot/src/cpu/arm/vm/templateTable_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2286,13 +2286,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
}
__ bind(no_mdo);
// Increment backedge counter in MethodCounters*
- __ get_method_counters(Rmethod, Rcounters, dispatch);
+ // Note: Rbumped_taken_count is a callee-saved register on ARM32, but caller-saved on ARM64
+ __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
+ Rdisp, R3_bytecode,
+ AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
Rcnt, R4_tmp, eq, &backedge_counter_overflow);
} else {
- // increment counter
- __ get_method_counters(Rmethod, Rcounters, dispatch);
+ // Increment backedge counter in MethodCounters*
+ __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
+ Rdisp, R3_bytecode,
+ AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
__ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter
__ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter
__ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter
@@ -4367,10 +4372,9 @@ void TemplateTable::_new() {
#endif // AARCH64
// get InstanceKlass
- __ add(Rklass, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
- __ ldr(Rklass, Address(Rklass, sizeof(ConstantPool)));
__ cmp(Rtemp, JVM_CONSTANT_Class);
__ b(slow_case, ne);
+ __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
@@ -4642,8 +4646,7 @@ void TemplateTable::checkcast() {
// Get superklass in Rsuper and subklass in Rsub
__ bind(quicked);
- __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
- __ ldr(Rsuper, Address(Rtemp, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
__ bind(resolved);
__ load_klass(Rsub, Robj);
@@ -4716,8 +4719,7 @@ void TemplateTable::instanceof() {
// Get superklass in Rsuper and subklass in Rsub
__ bind(quicked);
- __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
- __ ldr(Rsuper, Address(Rtemp, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
__ bind(resolved);
__ load_klass(Rsub, Robj);
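The _new/checkcast/instanceof changes above switch to load_resolved_klass_at_offset(): instead of reading a Klass* stored directly in the constant pool slot, the slot now holds a small index into a separate resolved-klasses side table, and the Klass* is read from there. A conceptual sketch of that two-step lookup, with illustrative types and names:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Klass { int id; };

    struct ConstantPoolSketch {
      std::vector<uint16_t> resolved_klass_index;  // narrow index stored in the cp entry
      std::vector<Klass*>   resolved_klasses;      // side table (ConstantPool::_resolved_klasses)

      Klass* resolved_klass_at(int cp_index) const {
        uint16_t rk_index = resolved_klass_index[cp_index]; // the ldrh of the entry
        return resolved_klasses[rk_index];                  // the ldr from the Array<Klass*> base
      }
    };

    int main() {
      Klass k = { 42 };
      ConstantPoolSketch cp;
      cp.resolved_klass_index.push_back(0);  // cp entry 0 -> resolved_klasses[0]
      cp.resolved_klass_index.push_back(0);  // cp entry 1 -> resolved_klasses[0]
      cp.resolved_klasses.push_back(&k);
      assert(cp.resolved_klass_at(1)->id == 42);
      return 0;
    }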
diff --git a/hotspot/src/cpu/arm/vm/vm_version_arm_32.cpp b/hotspot/src/cpu/arm/vm/vm_version_arm_32.cpp
index 10a2408772d..2d1d4949860 100644
--- a/hotspot/src/cpu/arm/vm/vm_version_arm_32.cpp
+++ b/hotspot/src/cpu/arm/vm/vm_version_arm_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
@@ -256,7 +257,9 @@ void VM_Version::initialize() {
}
}
- AllocatePrefetchDistance = 128;
+ if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+ FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128);
+ }
#ifdef COMPILER2
FLAG_SET_DEFAULT(UseFPUForSpilling, true);
diff --git a/hotspot/src/cpu/arm/vm/vm_version_arm_64.cpp b/hotspot/src/cpu/arm/vm/vm_version_arm_64.cpp
index 7f25255d72f..55091befc78 100644
--- a/hotspot/src/cpu/arm/vm/vm_version_arm_64.cpp
+++ b/hotspot/src/cpu/arm/vm/vm_version_arm_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
@@ -201,7 +202,9 @@ void VM_Version::initialize() {
}
}
- AllocatePrefetchDistance = 128;
+ if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+ FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128);
+ }
#ifdef COMPILER2
FLAG_SET_DEFAULT(UseFPUForSpilling, true);
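The AllocatePrefetchDistance change (in both vm_version files) follows the usual pattern of installing a platform default only when the user has not set the flag on the command line. A sketch of that pattern with illustrative names, not the real FLAG_* macros:

    #include <cassert>

    struct Flag {
      long value;
      bool set_by_user;  // true if the flag came from the command line
    };

    // Only install the platform default if the user left the flag untouched.
    static void set_platform_default(Flag& f, long platform_default) {
      if (!f.set_by_user) {          // FLAG_IS_DEFAULT(AllocatePrefetchDistance)
        f.value = platform_default;  // FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128)
      }
    }

    int main() {
      Flag untouched = { 0, false };
      Flag user_set  = { 256, true };
      set_platform_default(untouched, 128);
      set_platform_default(user_set, 128);
      assert(untouched.value == 128);  // platform default applied
      assert(user_set.value == 256);   // explicit -XX: value preserved
      return 0;
    }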
diff --git a/hotspot/src/cpu/ppc/vm/abstractInterpreter_ppc.cpp b/hotspot/src/cpu/ppc/vm/abstractInterpreter_ppc.cpp
index 9aa1370a362..44dd1608d65 100644
--- a/hotspot/src/cpu/ppc/vm/abstractInterpreter_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/abstractInterpreter_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -51,27 +51,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
- switch (method_kind(m)) {
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : // fall thru
- case Interpreter::java_lang_math_pow : // fall thru
- case Interpreter::java_lang_math_exp : // fall thru
- case Interpreter::java_lang_math_fmaD : // fall thru
- case Interpreter::java_lang_math_fmaF :
- return false;
- default:
- return true;
- }
-}
-
// How much stack a method activation needs in stack slots.
// We must calc this exactly like in generate_fixed_frame.
// Note: This returns the conservative size assuming maximum alignment.
diff --git a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp
index 09217a42bb5..26b77b8cbc3 100644
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -376,10 +376,12 @@ class Assembler : public AbstractAssembler {
STWX_OPCODE = (31u << OPCODE_SHIFT | 151u << 1),
STWU_OPCODE = (37u << OPCODE_SHIFT),
STWUX_OPCODE = (31u << OPCODE_SHIFT | 183u << 1),
+ STWBRX_OPCODE = (31u << OPCODE_SHIFT | 662u << 1),
STH_OPCODE = (44u << OPCODE_SHIFT),
STHX_OPCODE = (31u << OPCODE_SHIFT | 407u << 1),
STHU_OPCODE = (45u << OPCODE_SHIFT),
+ STHBRX_OPCODE = (31u << OPCODE_SHIFT | 918u << 1),
STB_OPCODE = (38u << OPCODE_SHIFT),
STBX_OPCODE = (31u << OPCODE_SHIFT | 215u << 1),
@@ -401,11 +403,13 @@ class Assembler : public AbstractAssembler {
LD_OPCODE = (58u << OPCODE_SHIFT | 0u << XO_30_31_SHIFT), // DS-FORM
LDU_OPCODE = (58u << OPCODE_SHIFT | 1u << XO_30_31_SHIFT), // DS-FORM
LDX_OPCODE = (31u << OPCODE_SHIFT | 21u << XO_21_30_SHIFT), // X-FORM
+ LDBRX_OPCODE = (31u << OPCODE_SHIFT | 532u << 1), // X-FORM
STD_OPCODE = (62u << OPCODE_SHIFT | 0u << XO_30_31_SHIFT), // DS-FORM
STDU_OPCODE = (62u << OPCODE_SHIFT | 1u << XO_30_31_SHIFT), // DS-FORM
- STDUX_OPCODE = (31u << OPCODE_SHIFT | 181u << 1), // X-FORM
+ STDUX_OPCODE = (31u << OPCODE_SHIFT | 181u << 1), // X-FORM
STDX_OPCODE = (31u << OPCODE_SHIFT | 149u << XO_21_30_SHIFT), // X-FORM
+ STDBRX_OPCODE = (31u << OPCODE_SHIFT | 660u << 1), // X-FORM
RLDICR_OPCODE = (30u << OPCODE_SHIFT | 1u << XO_27_29_SHIFT), // MD-FORM
RLDICL_OPCODE = (30u << OPCODE_SHIFT | 0u << XO_27_29_SHIFT), // MD-FORM
@@ -506,7 +510,13 @@ class Assembler : public AbstractAssembler {
LXVD2X_OPCODE = (31u << OPCODE_SHIFT | 844u << 1),
STXVD2X_OPCODE = (31u << OPCODE_SHIFT | 972u << 1),
MTVSRD_OPCODE = (31u << OPCODE_SHIFT | 179u << 1),
+ MTVSRWZ_OPCODE = (31u << OPCODE_SHIFT | 243u << 1),
MFVSRD_OPCODE = (31u << OPCODE_SHIFT | 51u << 1),
+ MTVSRWA_OPCODE = (31u << OPCODE_SHIFT | 211u << 1),
+ MFVSRWZ_OPCODE = (31u << OPCODE_SHIFT | 115u << 1),
+ XXPERMDI_OPCODE= (60u << OPCODE_SHIFT | 10u << 3),
+ XXMRGHW_OPCODE = (60u << OPCODE_SHIFT | 18u << 3),
+ XXMRGLW_OPCODE = (60u << OPCODE_SHIFT | 50u << 3),
// Vector Permute and Formatting
VPKPX_OPCODE = (4u << OPCODE_SHIFT | 782u ),
@@ -556,6 +566,7 @@ class Assembler : public AbstractAssembler {
VADDUBM_OPCODE = (4u << OPCODE_SHIFT | 0u ),
VADDUWM_OPCODE = (4u << OPCODE_SHIFT | 128u ),
VADDUHM_OPCODE = (4u << OPCODE_SHIFT | 64u ),
+ VADDUDM_OPCODE = (4u << OPCODE_SHIFT | 192u ),
VADDUBS_OPCODE = (4u << OPCODE_SHIFT | 512u ),
VADDUWS_OPCODE = (4u << OPCODE_SHIFT | 640u ),
VADDUHS_OPCODE = (4u << OPCODE_SHIFT | 576u ),
@@ -1094,16 +1105,19 @@ class Assembler : public AbstractAssembler {
static int vrs( VectorRegister r) { return vrs(r->encoding());}
static int vrt( VectorRegister r) { return vrt(r->encoding());}
+ // Only used on SHA sigma instructions (VX-form)
+ static int vst( int x) { return opp_u_field(x, 16, 16); }
+ static int vsix( int x) { return opp_u_field(x, 20, 17); }
+
// Support Vector-Scalar (VSX) instructions.
- static int vsra( int x) { return opp_u_field(x, 15, 11); }
- static int vsrb( int x) { return opp_u_field(x, 20, 16); }
- static int vsrc( int x) { return opp_u_field(x, 25, 21); }
- static int vsrs( int x) { return opp_u_field(x, 10, 6); }
- static int vsrt( int x) { return opp_u_field(x, 10, 6); }
+ static int vsra( int x) { return opp_u_field(x & 0x1F, 15, 11) | opp_u_field((x & 0x20) >> 5, 29, 29); }
+ static int vsrb( int x) { return opp_u_field(x & 0x1F, 20, 16) | opp_u_field((x & 0x20) >> 5, 30, 30); }
+ static int vsrs( int x) { return opp_u_field(x & 0x1F, 10, 6) | opp_u_field((x & 0x20) >> 5, 31, 31); }
+ static int vsrt( int x) { return vsrs(x); }
+ static int vsdm( int x) { return opp_u_field(x, 23, 22); }
static int vsra( VectorSRegister r) { return vsra(r->encoding());}
static int vsrb( VectorSRegister r) { return vsrb(r->encoding());}
- static int vsrc( VectorSRegister r) { return vsrc(r->encoding());}
static int vsrs( VectorSRegister r) { return vsrs(r->encoding());}
static int vsrt( VectorSRegister r) { return vsrt(r->encoding());}
@@ -1552,6 +1566,9 @@ class Assembler : public AbstractAssembler {
inline void ld( Register d, int si16, Register s1);
inline void ldu( Register d, int si16, Register s1);
+ // 8 bytes reversed
+ inline void ldbrx( Register d, Register s1, Register s2);
+
// For convenience. Load pointer into d from b+s1.
inline void ld_ptr(Register d, int b, Register s1);
DEBUG_ONLY(inline void ld_ptr(Register d, ByteSize b, Register s1);)
@@ -1560,10 +1577,12 @@ class Assembler : public AbstractAssembler {
inline void stwx( Register d, Register s1, Register s2);
inline void stw( Register d, int si16, Register s1);
inline void stwu( Register d, int si16, Register s1);
+ inline void stwbrx( Register d, Register s1, Register s2);
inline void sthx( Register d, Register s1, Register s2);
inline void sth( Register d, int si16, Register s1);
inline void sthu( Register d, int si16, Register s1);
+ inline void sthbrx( Register d, Register s1, Register s2);
inline void stbx( Register d, Register s1, Register s2);
inline void stb( Register d, int si16, Register s1);
@@ -1573,6 +1592,7 @@ class Assembler : public AbstractAssembler {
inline void std( Register d, int si16, Register s1);
inline void stdu( Register d, int si16, Register s1);
inline void stdux(Register s, Register a, Register b);
+ inline void stdbrx( Register d, Register s1, Register s2);
inline void st_ptr(Register d, int si16, Register s1);
DEBUG_ONLY(inline void st_ptr(Register d, ByteSize b, Register s1);)
@@ -2016,7 +2036,7 @@ class Assembler : public AbstractAssembler {
inline void vperm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
inline void vsel( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
inline void vsl( VectorRegister d, VectorRegister a, VectorRegister b);
- inline void vsldoi( VectorRegister d, VectorRegister a, VectorRegister b, int si4);
+ inline void vsldoi( VectorRegister d, VectorRegister a, VectorRegister b, int ui4);
inline void vslo( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vsr( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vsro( VectorRegister d, VectorRegister a, VectorRegister b);
@@ -2027,6 +2047,7 @@ class Assembler : public AbstractAssembler {
inline void vaddubm( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vadduwm( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vadduhm( VectorRegister d, VectorRegister a, VectorRegister b);
+ inline void vaddudm( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vaddubs( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vadduws( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vadduhs( VectorRegister d, VectorRegister a, VectorRegister b);
@@ -2102,6 +2123,7 @@ class Assembler : public AbstractAssembler {
inline void vandc( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vnor( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vor( VectorRegister d, VectorRegister a, VectorRegister b);
+ inline void vmr( VectorRegister d, VectorRegister a);
inline void vxor( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vrld( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vrlb( VectorRegister d, VectorRegister a, VectorRegister b);
@@ -2125,8 +2147,24 @@ class Assembler : public AbstractAssembler {
inline void lxvd2x( VectorSRegister d, Register a, Register b);
inline void stxvd2x( VectorSRegister d, Register a);
inline void stxvd2x( VectorSRegister d, Register a, Register b);
+ inline void mtvrwz( VectorRegister d, Register a);
+ inline void mfvrwz( Register a, VectorRegister d);
inline void mtvrd( VectorRegister d, Register a);
inline void mfvrd( Register a, VectorRegister d);
+ inline void xxpermdi( VectorSRegister d, VectorSRegister a, VectorSRegister b, int dm);
+ inline void xxmrghw( VectorSRegister d, VectorSRegister a, VectorSRegister b);
+ inline void xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b);
+
+ // VSX Extended Mnemonics
+ inline void xxspltd( VectorSRegister d, VectorSRegister a, int x);
+ inline void xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b);
+ inline void xxmrgld( VectorSRegister d, VectorSRegister a, VectorSRegister b);
+ inline void xxswapd( VectorSRegister d, VectorSRegister a);
+
+ // Vector-Scalar (VSX) instructions.
+ inline void mtfprd( FloatRegister d, Register a);
+ inline void mtfprwa( FloatRegister d, Register a);
+ inline void mffprd( Register a, FloatRegister d);
// AES (introduced with Power 8)
inline void vcipher( VectorRegister d, VectorRegister a, VectorRegister b);
@@ -2182,14 +2220,18 @@ class Assembler : public AbstractAssembler {
inline void lbz( Register d, int si16);
inline void ldx( Register d, Register s2);
inline void ld( Register d, int si16);
+ inline void ldbrx(Register d, Register s2);
inline void stwx( Register d, Register s2);
inline void stw( Register d, int si16);
+ inline void stwbrx( Register d, Register s2);
inline void sthx( Register d, Register s2);
inline void sth( Register d, int si16);
+ inline void sthbrx( Register d, Register s2);
inline void stbx( Register d, Register s2);
inline void stb( Register d, int si16);
inline void stdx( Register d, Register s2);
inline void std( Register d, int si16);
+ inline void stdbrx( Register d, Register s2);
// PPC 2, section 3.2.1 Instruction Cache Instructions
inline void icbi( Register s2);
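For orientation, the new *_OPCODE constants above combine the 6-bit primary opcode with an extended opcode placed just above the record bit, which is why they are written as (31u << OPCODE_SHIFT | 662u << 1) and similar. The check below assumes OPCODE_SHIFT expands to 26 (primary opcode in the top six bits of the 32-bit word); that value is an assumption for illustration, not quoted from the header.

    #include <cassert>
    #include <cstdint>

    static const unsigned kOpcodeShift = 26;  // assumed value of OPCODE_SHIFT

    int main() {
      const uint32_t stwbrx = (31u << kOpcodeShift) | (662u << 1);  // STWBRX_OPCODE pattern
      assert((stwbrx >> kOpcodeShift) == 31u);   // primary opcode 31 (X-form)
      assert(((stwbrx >> 1) & 0x3FFu) == 662u);  // 10-bit extended opcode 662
      return 0;
    }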
diff --git a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp
index 7f42d6b6050..d21ffaf8fcb 100644
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -327,6 +327,7 @@ inline void Assembler::lbzu( Register d, int si16, Register s1) { assert(d !=
inline void Assembler::ld( Register d, int si16, Register s1) { emit_int32(LD_OPCODE | rt(d) | ds(si16) | ra0mem(s1));}
inline void Assembler::ldx( Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::ldu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
+inline void Assembler::ldbrx( Register d, Register s1, Register s2) { emit_int32(LDBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::ld_ptr(Register d, int b, Register s1) { ld(d, b, s1); }
DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) { ld(d, in_bytes(b), s1); })
@@ -335,10 +336,12 @@ DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) {
inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stw( Register d, int si16, Register s1) { emit_int32(STW_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::stwu( Register d, int si16, Register s1) { emit_int32(STWU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
+inline void Assembler::stwbrx( Register d, Register s1, Register s2) { emit_int32(STWBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::sthx( Register d, Register s1, Register s2) { emit_int32(STHX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::sth( Register d, int si16, Register s1) { emit_int32(STH_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::sthu( Register d, int si16, Register s1) { emit_int32(STHU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
+inline void Assembler::sthbrx( Register d, Register s1, Register s2) { emit_int32(STHBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stbx( Register d, Register s1, Register s2) { emit_int32(STBX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stb( Register d, int si16, Register s1) { emit_int32(STB_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
@@ -348,6 +351,7 @@ inline void Assembler::std( Register d, int si16, Register s1) { emit_int32(
inline void Assembler::stdx( Register d, Register s1, Register s2) { emit_int32(STDX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stdu( Register d, int si16, Register s1) { emit_int32(STDU_OPCODE | rs(d) | ds(si16) | rta0mem(s1));}
inline void Assembler::stdux(Register s, Register a, Register b) { emit_int32(STDUX_OPCODE| rs(s) | rta0mem(a) | rb(b));}
+inline void Assembler::stdbrx( Register d, Register s1, Register s2) { emit_int32(STDBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::st_ptr(Register d, int b, Register s1) { std(d, b, s1); }
DEBUG_ONLY(inline void Assembler::st_ptr(Register d, ByteSize b, Register s1) { std(d, in_bytes(b), s1); })
@@ -754,12 +758,28 @@ inline void Assembler::lvsl( VectorRegister d, Register s1, Register s2) { emit
inline void Assembler::lvsr( VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
// Vector-Scalar (VSX) instructions.
-inline void Assembler::lxvd2x (VectorSRegister d, Register s1) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
-inline void Assembler::lxvd2x (VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
-inline void Assembler::stxvd2x(VectorSRegister d, Register s1) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
-inline void Assembler::stxvd2x(VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
-inline void Assembler::mtvrd( VectorRegister d, Register a) { emit_int32( MTVSRD_OPCODE | vrt(d) | ra(a) | 1u); } // 1u: d is treated as Vector (VMX/Altivec).
-inline void Assembler::mfvrd( Register a, VectorRegister d) { emit_int32( MFVSRD_OPCODE | vrt(d) | ra(a) | 1u); } // 1u: d is treated as Vector (VMX/Altivec).
+inline void Assembler::lxvd2x( VectorSRegister d, Register s1) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
+inline void Assembler::lxvd2x( VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stxvd2x( VectorSRegister d, Register s1) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
+inline void Assembler::stxvd2x( VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::mtvrd( VectorRegister d, Register a) { emit_int32( MTVSRD_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
+inline void Assembler::mfvrd( Register a, VectorRegister d) { emit_int32( MFVSRD_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
+inline void Assembler::mtvrwz( VectorRegister d, Register a) { emit_int32( MTVSRWZ_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
+inline void Assembler::mfvrwz( Register a, VectorRegister d) { emit_int32( MFVSRWZ_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
+inline void Assembler::xxpermdi(VectorSRegister d, VectorSRegister a, VectorSRegister b, int dm) { emit_int32( XXPERMDI_OPCODE | vsrt(d) | vsra(a) | vsrb(b) | vsdm(dm)); }
+inline void Assembler::xxmrghw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGHW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
+inline void Assembler::xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGLW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
+
+// VSX Extended Mnemonics
+inline void Assembler::xxspltd( VectorSRegister d, VectorSRegister a, int x) { xxpermdi(d, a, a, x ? 3 : 0); }
+inline void Assembler::xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 0); }
+inline void Assembler::xxmrgld( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 3); }
+inline void Assembler::xxswapd( VectorSRegister d, VectorSRegister a) { xxpermdi(d, a, a, 2); }
+
+// Vector-Scalar (VSX) instructions.
+inline void Assembler::mtfprd( FloatRegister d, Register a) { emit_int32( MTVSRD_OPCODE | frt(d) | ra(a)); }
+inline void Assembler::mtfprwa( FloatRegister d, Register a) { emit_int32( MTVSRWA_OPCODE | frt(d) | ra(a)); }
+inline void Assembler::mffprd( Register a, FloatRegister d) { emit_int32( MFVSRD_OPCODE | frt(d) | ra(a)); }
inline void Assembler::vpkpx( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKPX_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkshss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@@ -791,7 +811,7 @@ inline void Assembler::vspltisw(VectorRegister d, int si5)
inline void Assembler::vperm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VPERM_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
inline void Assembler::vsel( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VSEL_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
inline void Assembler::vsl( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSL_OPCODE | vrt(d) | vra(a) | vrb(b)); }
-inline void Assembler::vsldoi( VectorRegister d, VectorRegister a, VectorRegister b, int si4) { emit_int32( VSLDOI_OPCODE| vrt(d) | vra(a) | vrb(b) | vsldoi_shb(simm(si4,4))); }
+inline void Assembler::vsldoi( VectorRegister d, VectorRegister a, VectorRegister b, int ui4) { emit_int32( VSLDOI_OPCODE| vrt(d) | vra(a) | vrb(b) | vsldoi_shb(uimm(ui4,4))); }
inline void Assembler::vslo( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLO_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsr( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsro( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRO_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@@ -802,6 +822,7 @@ inline void Assembler::vaddsws( VectorRegister d, VectorRegister a, VectorRegist
inline void Assembler::vaddubm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddudm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUDM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddubs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@@ -878,6 +899,7 @@ inline void Assembler::vand( VectorRegister d, VectorRegister a, VectorRegist
inline void Assembler::vandc( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VANDC_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vnor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmr( VectorRegister d, VectorRegister a) { emit_int32( VOR_OPCODE | vrt(d) | vra(a) | vrb(a)); }
inline void Assembler::vxor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VXOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrld( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLD_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@@ -944,14 +966,18 @@ inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE
inline void Assembler::lbz( Register d, int si16 ) { emit_int32( LBZ_OPCODE | rt(d) | d1(si16));}
inline void Assembler::ld( Register d, int si16 ) { emit_int32( LD_OPCODE | rt(d) | ds(si16));}
inline void Assembler::ldx( Register d, Register s2) { emit_int32( LDX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::ldbrx(Register d, Register s2) { emit_int32( LDBRX_OPCODE| rt(d) | rb(s2));}
inline void Assembler::stwx( Register d, Register s2) { emit_int32( STWX_OPCODE | rs(d) | rb(s2));}
inline void Assembler::stw( Register d, int si16 ) { emit_int32( STW_OPCODE | rs(d) | d1(si16));}
+inline void Assembler::stwbrx(Register d, Register s2){ emit_int32(STWBRX_OPCODE| rs(d) | rb(s2));}
inline void Assembler::sthx( Register d, Register s2) { emit_int32( STHX_OPCODE | rs(d) | rb(s2));}
inline void Assembler::sth( Register d, int si16 ) { emit_int32( STH_OPCODE | rs(d) | d1(si16));}
+inline void Assembler::sthbrx(Register d, Register s2){ emit_int32(STHBRX_OPCODE| rs(d) | rb(s2));}
inline void Assembler::stbx( Register d, Register s2) { emit_int32( STBX_OPCODE | rs(d) | rb(s2));}
inline void Assembler::stb( Register d, int si16 ) { emit_int32( STB_OPCODE | rs(d) | d1(si16));}
inline void Assembler::std( Register d, int si16 ) { emit_int32( STD_OPCODE | rs(d) | ds(si16));}
inline void Assembler::stdx( Register d, Register s2) { emit_int32( STDX_OPCODE | rs(d) | rb(s2));}
+inline void Assembler::stdbrx(Register d, Register s2){ emit_int32(STDBRX_OPCODE| rs(d) | rb(s2));}
// ra0 version
inline void Assembler::icbi( Register s2) { emit_int32( ICBI_OPCODE | rb(s2) ); }
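The new ldbrx/stwbrx/sthbrx/stdbrx emitters produce the byte-reversed load/store forms, which behave like an ordinary access combined with a byte swap (handy for little-endian data on a big-endian machine and vice versa). A conceptual C++ model of the 8-byte case follows; GCC/Clang builtins are used purely for illustration, this is not the emitted instruction.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Conceptual model of an 8-byte byte-reversed load: read the doubleword,
    // then swap its byte order.
    static inline uint64_t load8_byte_reversed(const void* p) {
      uint64_t v;
      std::memcpy(&v, p, sizeof(v));
      return __builtin_bswap64(v);
    }

    int main() {
      const uint8_t bytes[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
      uint64_t native;
      std::memcpy(&native, bytes, sizeof(native));
      assert(load8_byte_reversed(bytes) == __builtin_bswap64(native)); // same data, reversed order
      return 0;
    }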
diff --git a/hotspot/src/cpu/ppc/vm/bytes_ppc.hpp b/hotspot/src/cpu/ppc/vm/bytes_ppc.hpp
index b75b0f0fa5d..ac16dcb872d 100644
--- a/hotspot/src/cpu/ppc/vm/bytes_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/bytes_ppc.hpp
@@ -37,10 +37,6 @@ class Bytes: AllStatic {
#if defined(VM_LITTLE_ENDIAN)
- // Returns true, if the byte ordering used by Java is different from the native byte ordering
- // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
- static inline bool is_Java_byte_ordering_different() { return true; }
-
// Forward declarations of the compiler-dependent implementation
static inline u2 swap_u2(u2 x);
static inline u4 swap_u4(u4 x);
@@ -155,10 +151,6 @@ class Bytes: AllStatic {
#else // !defined(VM_LITTLE_ENDIAN)
- // Returns true, if the byte ordering used by Java is different from the nativ byte ordering
- // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
- static inline bool is_Java_byte_ordering_different() { return false; }
-
// Thus, a swap between native and Java ordering is always a no-op:
static inline u2 swap_u2(u2 x) { return x; }
static inline u4 swap_u4(u4 x) { return x; }
diff --git a/hotspot/src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp b/hotspot/src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp
index 127403fde2f..62be4f4820b 100644
--- a/hotspot/src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -514,25 +514,48 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
}
case Bytecodes::_i2d:
case Bytecodes::_l2d: {
- __ fcfid(dst->as_double_reg(), src->as_double_reg()); // via mem
+ bool src_in_memory = !VM_Version::has_mtfprd();
+ FloatRegister rdst = dst->as_double_reg();
+ FloatRegister rsrc;
+ if (src_in_memory) {
+ rsrc = src->as_double_reg(); // via mem
+ } else {
+ // move src to dst register
+ if (code == Bytecodes::_i2d) {
+ __ mtfprwa(rdst, src->as_register());
+ } else {
+ __ mtfprd(rdst, src->as_register_lo());
+ }
+ rsrc = rdst;
+ }
+ __ fcfid(rdst, rsrc);
break;
}
- case Bytecodes::_i2f: {
+ case Bytecodes::_i2f:
+ case Bytecodes::_l2f: {
+ bool src_in_memory = !VM_Version::has_mtfprd();
FloatRegister rdst = dst->as_float_reg();
- FloatRegister rsrc = src->as_double_reg(); // via mem
+ FloatRegister rsrc;
+ if (src_in_memory) {
+ rsrc = src->as_double_reg(); // via mem
+ } else {
+ // move src to dst register
+ if (code == Bytecodes::_i2f) {
+ __ mtfprwa(rdst, src->as_register());
+ } else {
+ __ mtfprd(rdst, src->as_register_lo());
+ }
+ rsrc = rdst;
+ }
if (VM_Version::has_fcfids()) {
__ fcfids(rdst, rsrc);
} else {
+ assert(code == Bytecodes::_i2f, "fcfid+frsp needs fixup code to avoid rounding incompatibility");
__ fcfid(rdst, rsrc);
__ frsp(rdst, rdst);
}
break;
}
- case Bytecodes::_l2f: { // >= Power7
- assert(VM_Version::has_fcfids(), "fcfid+frsp needs fixup code to avoid rounding incompatibility");
- __ fcfids(dst->as_float_reg(), src->as_double_reg()); // via mem
- break;
- }
case Bytecodes::_f2d: {
__ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
break;
@@ -543,31 +566,49 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
}
case Bytecodes::_d2i:
case Bytecodes::_f2i: {
+ bool dst_in_memory = !VM_Version::has_mtfprd();
FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
- Address addr = frame_map()->address_for_slot(dst->double_stack_ix());
+ Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
Label L;
// Result must be 0 if value is NaN; test by comparing value to itself.
__ fcmpu(CCR0, rsrc, rsrc);
- __ li(R0, 0); // 0 in case of NAN
- __ std(R0, addr.disp(), addr.base());
+ if (dst_in_memory) {
+ __ li(R0, 0); // 0 in case of NAN
+ __ std(R0, addr.disp(), addr.base());
+ } else {
+ __ li(dst->as_register(), 0);
+ }
__ bso(CCR0, L);
__ fctiwz(rsrc, rsrc); // USE_KILL
- __ stfd(rsrc, addr.disp(), addr.base());
+ if (dst_in_memory) {
+ __ stfd(rsrc, addr.disp(), addr.base());
+ } else {
+ __ mffprd(dst->as_register(), rsrc);
+ }
__ bind(L);
break;
}
case Bytecodes::_d2l:
case Bytecodes::_f2l: {
+ bool dst_in_memory = !VM_Version::has_mtfprd();
FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
- Address addr = frame_map()->address_for_slot(dst->double_stack_ix());
+ Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
Label L;
// Result must be 0 if value is NaN; test by comparing value to itself.
__ fcmpu(CCR0, rsrc, rsrc);
- __ li(R0, 0); // 0 in case of NAN
- __ std(R0, addr.disp(), addr.base());
+ if (dst_in_memory) {
+ __ li(R0, 0); // 0 in case of NAN
+ __ std(R0, addr.disp(), addr.base());
+ } else {
+ __ li(dst->as_register_lo(), 0);
+ }
__ bso(CCR0, L);
__ fctidz(rsrc, rsrc); // USE_KILL
- __ stfd(rsrc, addr.disp(), addr.base());
+ if (dst_in_memory) {
+ __ stfd(rsrc, addr.disp(), addr.base());
+ } else {
+ __ mffprd(dst->as_register_lo(), rsrc);
+ }
__ bind(L);
break;
}
@@ -3177,9 +3218,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
assert_different_registers(val, crc, res);
__ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
- __ nand(crc, crc, crc); // ~crc
- __ update_byte_crc32(crc, val, res);
- __ nand(res, crc, crc); // ~crc
+ __ kernel_crc32_singleByteReg(crc, val, res, true);
+ __ mr(res, crc);
}
#undef __
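The conversion changes above add a direct GPR<->FPR path (mtfprd/mtfprwa/mffprd) when the hardware supports it, instead of bouncing the value through a spill slot; both paths hand fcfid/fctidz the same bit pattern and differ only in memory traffic. A small illustrative model of that equivalence, with plain memcpy standing in for both the store/reload and the direct move:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Old path: store the long to a spill slot, then reload it into an FPR.
    static double bits_via_memory(int64_t v) {
      int64_t spill_slot = v;               // std to the stack
      double d;
      std::memcpy(&d, &spill_slot, 8);      // lfd from the stack
      return d;
    }

    // New path: move the raw bits directly, as mtfprd does in hardware.
    static double bits_via_direct_move(int64_t v) {
      double d;
      std::memcpy(&d, &v, 8);
      return d;
    }

    int main() {
      const int64_t bits_of_100 = 0x4059000000000000LL;  // IEEE-754 pattern of 100.0
      assert(bits_via_memory(bits_of_100) == bits_via_direct_move(bits_of_100));
      assert(bits_via_direct_move(bits_of_100) == 100.0);
      return 0;
    }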
diff --git a/hotspot/src/cpu/ppc/vm/c1_LIRGenerator_ppc.cpp b/hotspot/src/cpu/ppc/vm/c1_LIRGenerator_ppc.cpp
index 5bcd457e7e0..1a54ff4250d 100644
--- a/hotspot/src/cpu/ppc/vm/c1_LIRGenerator_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/c1_LIRGenerator_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,18 +63,6 @@ void LIRItem::load_nonconstant() {
}
-inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
- LIR_Opr r = li.value()->operand();
- if (r->is_register()) {
- LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
- ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
- } else {
- // Constants or memory get loaded with sign extend on this platform.
- ll->move(li.result(), dst);
- }
-}
-
-
//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------
@@ -883,81 +871,91 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
- switch (x->op()) {
+ if (!VM_Version::has_mtfprd()) {
+ switch (x->op()) {
- // int -> float: force spill
- case Bytecodes::_l2f: {
- if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
- // fcfid+frsp needs fixup code to avoid rounding incompatibility.
- address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
- LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
- set_result(x, result);
+ // int -> float: force spill
+ case Bytecodes::_l2f: {
+ if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
+ // fcfid+frsp needs fixup code to avoid rounding incompatibility.
+ address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
+ LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
+ set_result(x, result);
+ return;
+ } // else fallthru
+ }
+ case Bytecodes::_l2d: {
+ LIRItem value(x->value(), this);
+ LIR_Opr reg = rlock_result(x);
+ value.load_item();
+ LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
+ __ convert(x->op(), tmp, reg);
+ return;
+ }
+ case Bytecodes::_i2f:
+ case Bytecodes::_i2d: {
+ LIRItem value(x->value(), this);
+ LIR_Opr reg = rlock_result(x);
+ value.load_item();
+ // Convert i2l first.
+ LIR_Opr tmp1 = new_register(T_LONG);
+ __ convert(Bytecodes::_i2l, value.result(), tmp1);
+ LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
+ __ convert(x->op(), tmp2, reg);
+ return;
+ }
+
+ // float -> int: result will be stored
+ case Bytecodes::_f2l:
+ case Bytecodes::_d2l: {
+ LIRItem value(x->value(), this);
+ LIR_Opr reg = rlock_result(x);
+ value.set_destroys_register(); // USE_KILL
+ value.load_item();
+ set_vreg_flag(reg, must_start_in_memory);
+ __ convert(x->op(), value.result(), reg);
+ return;
+ }
+ case Bytecodes::_f2i:
+ case Bytecodes::_d2i: {
+ LIRItem value(x->value(), this);
+ LIR_Opr reg = rlock_result(x);
+ value.set_destroys_register(); // USE_KILL
+ value.load_item();
+ // Convert l2i afterwards.
+ LIR_Opr tmp1 = new_register(T_LONG);
+ set_vreg_flag(tmp1, must_start_in_memory);
+ __ convert(x->op(), value.result(), tmp1);
+ __ convert(Bytecodes::_l2i, tmp1, reg);
+ return;
+ }
+
+ // Within same category: just register conversions.
+ case Bytecodes::_i2b:
+ case Bytecodes::_i2c:
+ case Bytecodes::_i2s:
+ case Bytecodes::_i2l:
+ case Bytecodes::_l2i:
+ case Bytecodes::_f2d:
+ case Bytecodes::_d2f:
break;
- } // else fallthru
- }
- case Bytecodes::_l2d: {
- LIRItem value(x->value(), this);
- LIR_Opr reg = rlock_result(x);
- value.load_item();
- LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
- __ convert(x->op(), tmp, reg);
- break;
- }
- case Bytecodes::_i2f:
- case Bytecodes::_i2d: {
- LIRItem value(x->value(), this);
- LIR_Opr reg = rlock_result(x);
- value.load_item();
- // Convert i2l first.
- LIR_Opr tmp1 = new_register(T_LONG);
- __ convert(Bytecodes::_i2l, value.result(), tmp1);
- LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
- __ convert(x->op(), tmp2, reg);
- break;
- }
- // float -> int: result will be stored
- case Bytecodes::_f2l:
- case Bytecodes::_d2l: {
- LIRItem value(x->value(), this);
- LIR_Opr reg = rlock_result(x);
- value.set_destroys_register(); // USE_KILL
- value.load_item();
- set_vreg_flag(reg, must_start_in_memory);
- __ convert(x->op(), value.result(), reg);
- break;
+ default: ShouldNotReachHere();
}
- case Bytecodes::_f2i:
- case Bytecodes::_d2i: {
- LIRItem value(x->value(), this);
- LIR_Opr reg = rlock_result(x);
- value.set_destroys_register(); // USE_KILL
- value.load_item();
- // Convert l2i afterwards.
- LIR_Opr tmp1 = new_register(T_LONG);
- set_vreg_flag(tmp1, must_start_in_memory);
- __ convert(x->op(), value.result(), tmp1);
- __ convert(Bytecodes::_l2i, tmp1, reg);
- break;
- }
-
- // Within same category: just register conversions.
- case Bytecodes::_i2b:
- case Bytecodes::_i2c:
- case Bytecodes::_i2s:
- case Bytecodes::_i2l:
- case Bytecodes::_l2i:
- case Bytecodes::_f2d:
- case Bytecodes::_d2f: {
- LIRItem value(x->value(), this);
- LIR_Opr reg = rlock_result(x);
- value.load_item();
- __ convert(x->op(), value.result(), reg);
- break;
- }
-
- default: ShouldNotReachHere();
}
+
+ // Register conversion.
+ LIRItem value(x->value(), this);
+ LIR_Opr reg = rlock_result(x);
+ value.load_item();
+ switch (x->op()) {
+ case Bytecodes::_f2l:
+ case Bytecodes::_d2l:
+ case Bytecodes::_f2i:
+ case Bytecodes::_d2i: value.set_destroys_register(); break; // USE_KILL
+ default: break;
+ }
+ __ convert(x->op(), value.result(), reg);
}
@@ -1426,10 +1424,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
arg2 = cc->at(1),
arg3 = cc->at(2);
- // CCallingConventionRequiresIntsAsLongs
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
- load_int_as_long(gen()->lir(), len, arg3);
+  len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
@@ -1441,6 +1438,76 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}
}
+void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
+ assert(UseCRC32CIntrinsics, "or should not be here");
+ LIR_Opr result = rlock_result(x);
+
+ switch (x->id()) {
+ case vmIntrinsics::_updateBytesCRC32C:
+ case vmIntrinsics::_updateDirectByteBufferCRC32C: {
+ bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
+
+ LIRItem crc(x->argument_at(0), this);
+ LIRItem buf(x->argument_at(1), this);
+ LIRItem off(x->argument_at(2), this);
+ LIRItem end(x->argument_at(3), this);
+ buf.load_item();
+ off.load_nonconstant();
+ end.load_nonconstant();
+
+ // len = end - off
+ LIR_Opr len = end.result();
+ LIR_Opr tmpA = new_register(T_INT);
+ LIR_Opr tmpB = new_register(T_INT);
+ __ move(end.result(), tmpA);
+ __ move(off.result(), tmpB);
+ __ sub(tmpA, tmpB, tmpA);
+ len = tmpA;
+
+ LIR_Opr index = off.result();
+ int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
+ if (off.result()->is_constant()) {
+ index = LIR_OprFact::illegalOpr;
+ offset += off.result()->as_jint();
+ }
+ LIR_Opr base_op = buf.result();
+ LIR_Address* a = NULL;
+
+ if (index->is_valid()) {
+ LIR_Opr tmp = new_register(T_LONG);
+ __ convert(Bytecodes::_i2l, index, tmp);
+ index = tmp;
+ __ add(index, LIR_OprFact::intptrConst(offset), index);
+ a = new LIR_Address(base_op, index, T_BYTE);
+ } else {
+ a = new LIR_Address(base_op, offset, T_BYTE);
+ }
+
+ BasicTypeList signature(3);
+ signature.append(T_INT);
+ signature.append(T_ADDRESS);
+ signature.append(T_INT);
+ CallingConvention* cc = frame_map()->c_calling_convention(&signature);
+ const LIR_Opr result_reg = result_register_for(x->type());
+
+ LIR_Opr arg1 = cc->at(0),
+ arg2 = cc->at(1),
+ arg3 = cc->at(2);
+
+ crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
+ __ leal(LIR_OprFact::address(a), arg2);
+ __ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.
+
+ __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
+ __ move(result_reg, result);
+ break;
+ }
+ default: {
+ ShouldNotReachHere();
+ }
+ }
+}
+
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
assert(x->number_of_arguments() == 3, "wrong type");
assert(UseFMA, "Needs FMA instructions support.");
@@ -1467,7 +1534,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
-
-void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
- Unimplemented();
-}
diff --git a/hotspot/src/cpu/ppc/vm/c1_MacroAssembler_ppc.cpp b/hotspot/src/cpu/ppc/vm/c1_MacroAssembler_ppc.cpp
index 377680a47eb..631b20b81a5 100644
--- a/hotspot/src/cpu/ppc/vm/c1_MacroAssembler_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/c1_MacroAssembler_ppc.cpp
@@ -36,6 +36,7 @@
#include "runtime/os.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "utilities/align.hpp"
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
@@ -340,7 +341,7 @@ void C1_MacroAssembler::allocate_array(
// Check for negative or excessive length.
size_t max_length = max_array_allocation_length >> log2_elt_size;
if (UseTLAB) {
- size_t max_tlab = align_size_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
+ size_t max_tlab = align_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
if (max_tlab < max_length) { max_length = max_tlab; }
}
load_const_optimized(t1, max_length);
diff --git a/hotspot/src/cpu/ppc/vm/c1_Runtime1_ppc.cpp b/hotspot/src/cpu/ppc/vm/c1_Runtime1_ppc.cpp
index 3562a79825f..0c68f5a4c8b 100644
--- a/hotspot/src/cpu/ppc/vm/c1_Runtime1_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/c1_Runtime1_ppc.cpp
@@ -36,6 +36,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
@@ -251,7 +252,7 @@ void Runtime1::initialize_pd() {
fpu_reg_save_offsets[i] = sp_offset;
sp_offset += BytesPerWord;
}
- frame_size_in_bytes = align_size_up(sp_offset, frame::alignment_in_bytes);
+ frame_size_in_bytes = align_up(sp_offset, frame::alignment_in_bytes);
}
@@ -275,7 +276,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm, address target,
int stack_parms) {
// Make a frame and preserve the caller's caller-save registers.
- const int parm_size_in_bytes = align_size_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
+ const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);
@@ -287,6 +288,7 @@ static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm,
__ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
case 1:
__ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
+ case 0:
call_offset = __ call_RT(noreg, noreg, target);
break;
default: Unimplemented(); break;
@@ -325,7 +327,7 @@ OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, ad
static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register result, address target,
int stack_parms, bool do_return = true) {
// Make a frame and preserve the caller's caller-save registers.
- const int parm_size_in_bytes = align_size_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
+ const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);
@@ -337,6 +339,7 @@ static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register resul
__ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
case 1:
__ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
+ case 0:
call_offset = __ call_RT(result, noreg, target);
break;
default: Unimplemented(); break;
diff --git a/hotspot/src/cpu/ppc/vm/frame_ppc.cpp b/hotspot/src/cpu/ppc/vm/frame_ppc.cpp
index b6a538681f6..7437d33fc5c 100644
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.cpp
@@ -244,4 +244,6 @@ intptr_t *frame::initial_deoptimization_info() {
frame::frame(void* sp, void* fp, void* pc) : _sp((intptr_t*)sp), _unextended_sp((intptr_t*)sp) {
find_codeblob_and_set_pc_and_deopt_state((address)pc); // also sets _fp and adjusts _unextended_sp
}
+
+void frame::pd_ps() {}
#endif
diff --git a/hotspot/src/cpu/ppc/vm/frame_ppc.hpp b/hotspot/src/cpu/ppc/vm/frame_ppc.hpp
index ccec598c1ad..6023aa85066 100644
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.hpp
@@ -82,13 +82,7 @@
public:
// C frame layout
-
- enum {
- // stack alignment
- alignment_in_bytes = 16,
- // log_2(16*8 bits) = 7.
- log_2_of_alignment_in_bits = 7
- };
+ static const int alignment_in_bytes = 16;
// ABI_MINFRAME:
struct abi_minframe {
diff --git a/hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp b/hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp
index d22f5b564e8..15c4bce9dc4 100644
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp
@@ -28,6 +28,7 @@
#include "code/codeCache.hpp"
#include "code/vmreg.inline.hpp"
+#include "utilities/align.hpp"
// Inline functions for ppc64 frames:
@@ -193,7 +194,7 @@ inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
inline int frame::interpreter_frame_monitor_size() {
// Number of stack slots for a monitor.
- return round_to(BasicObjectLock::size(), // number of stack slots
+ return align_up(BasicObjectLock::size(), // number of stack slots
WordsPerLong); // number of stack slots for a Java long
}
diff --git a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp
index bf4ca17df09..0bc041e9188 100644
--- a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -164,7 +164,7 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
product(bool, ZapMemory, false, "Write 0x0101... to empty memory." \
" Use this to ease debugging.") \
\
- /* Use Restricted Transactional Memory for lock eliding */ \
+ /* Use Restricted Transactional Memory for lock elision */ \
product(bool, UseRTMLocking, false, \
"Enable RTM lock eliding for inflated locks in compiled code") \
\
@@ -174,24 +174,31 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
product(bool, UseRTMDeopt, false, \
"Perform deopt and recompilation based on RTM abort ratio") \
\
- product(uintx, RTMRetryCount, 5, \
+ product(int, RTMRetryCount, 5, \
"Number of RTM retries on lock abort or busy") \
+ range(0, max_jint) \
\
- experimental(intx, RTMSpinLoopCount, 100, \
+ experimental(int, RTMSpinLoopCount, 100, \
"Spin count for lock to become free before RTM retry") \
+ range(0, 32767) /* immediate operand limit on ppc */ \
\
- experimental(intx, RTMAbortThreshold, 1000, \
+ experimental(int, RTMAbortThreshold, 1000, \
"Calculate abort ratio after this number of aborts") \
+ range(0, max_jint) \
\
- experimental(intx, RTMLockingThreshold, 10000, \
+ experimental(int, RTMLockingThreshold, 10000, \
"Lock count at which to do RTM lock eliding without " \
"abort ratio calculation") \
+ range(0, max_jint) \
\
- experimental(intx, RTMAbortRatio, 50, \
+ experimental(int, RTMAbortRatio, 50, \
"Lock abort ratio at which to stop use RTM lock eliding") \
+ range(0, 100) /* natural range */ \
\
- experimental(intx, RTMTotalCountIncrRate, 64, \
+ experimental(int, RTMTotalCountIncrRate, 64, \
"Increment total RTM attempted lock count once every n times") \
+ range(1, 32767) /* immediate operand limit on ppc */ \
+ constraint(RTMTotalCountIncrRateConstraintFunc,AfterErgo) \
\
experimental(intx, RTMLockingCalculationDelay, 0, \
"Number of milliseconds to wait before start calculating aborts " \
diff --git a/hotspot/src/cpu/ppc/vm/interp_masm_ppc.hpp b/hotspot/src/cpu/ppc/vm/interp_masm_ppc.hpp
index d2085ea4e78..b49f8bb424f 100644
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,8 +45,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
+ virtual void check_and_handle_popframe(Register scratch_reg);
+ virtual void check_and_handle_earlyret(Register scratch_reg);
// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);
@@ -79,6 +79,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Load object from cpool->resolved_references(index).
void load_resolved_reference_at_index(Register result, Register index, Label *is_null = NULL);
+ // load cpool->resolved_klass_at(index)
+ void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);
+
void load_receiver(Register Rparam_count, Register Rrecv_dst);
// helpers for expression stack
@@ -96,8 +99,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void push_2ptrs(Register first, Register second);
- void push_l_pop_d(Register l = R17_tos, FloatRegister d = F15_ftos);
- void push_d_pop_l(FloatRegister d = F15_ftos, Register l = R17_tos);
+ void move_l_to_d(Register l = R17_tos, FloatRegister d = F15_ftos);
+ void move_d_to_l(FloatRegister d = F15_ftos, Register l = R17_tos);
void pop (TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos
diff --git a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp
index 95234115319..1715ddc3fa0 100644
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -284,14 +284,22 @@ void InterpreterMacroAssembler::push_2ptrs(Register first, Register second) {
addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
}
-void InterpreterMacroAssembler::push_l_pop_d(Register l, FloatRegister d) {
- std(l, 0, R15_esp);
- lfd(d, 0, R15_esp);
+void InterpreterMacroAssembler::move_l_to_d(Register l, FloatRegister d) {
+ if (VM_Version::has_mtfprd()) {
+ mtfprd(d, l);
+ } else {
+ std(l, 0, R15_esp);
+ lfd(d, 0, R15_esp);
+ }
}
-void InterpreterMacroAssembler::push_d_pop_l(FloatRegister d, Register l) {
- stfd(d, 0, R15_esp);
- ld(l, 0, R15_esp);
+void InterpreterMacroAssembler::move_d_to_l(FloatRegister d, Register l) {
+ if (VM_Version::has_mtfprd()) {
+ mffprd(l, d);
+ } else {
+ stfd(d, 0, R15_esp);
+ ld(l, 0, R15_esp);
+ }
}
void InterpreterMacroAssembler::push(TosState state) {
@@ -454,7 +462,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
Register tmp = index; // reuse
sldi(tmp, index, LogBytesPerHeapOop);
// Load pointer for resolved_references[] objArray.
- ld(result, ConstantPool::resolved_references_offset_in_bytes(), result);
+ ld(result, ConstantPool::cache_offset_in_bytes(), result);
+ ld(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
ld(result, 0, result);
#ifdef ASSERT
@@ -471,6 +480,25 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result, is_null);
}
+// load cpool->resolved_klass_at(index)
+void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass) {
+ // int value = *(Rcpool->int_at_addr(which));
+ // int resolved_klass_index = extract_low_short_from_int(value);
+ add(Roffset, Rcpool, Roffset);
+#if defined(VM_LITTLE_ENDIAN)
+ lhz(Roffset, sizeof(ConstantPool), Roffset); // Roffset = resolved_klass_index
+#else
+ lhz(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
+#endif
+
+ ld(Rklass, ConstantPool::resolved_klasses_offset_in_bytes(), Rcpool); // Rklass = Rcpool->_resolved_klasses
+
+ sldi(Roffset, Roffset, LogBytesPerWord);
+  addi(Roffset, Roffset, Array<Klass*>::base_offset_in_bytes());
+ isync(); // Order load of instance Klass wrt. tags.
+ ldx(Rklass, Rklass, Roffset);
+}
+
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,
diff --git a/hotspot/src/cpu/ppc/vm/interpreterRT_ppc.hpp b/hotspot/src/cpu/ppc/vm/interpreterRT_ppc.hpp
index d71781cbc83..a3e8fa632bf 100644
--- a/hotspot/src/cpu/ppc/vm/interpreterRT_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/interpreterRT_ppc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -45,7 +45,7 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
public:
// Creation
- SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
_num_used_fp_arg_regs = 0;
}
diff --git a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp
index 6eb27c78f17..fa4b2fe2427 100644
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2498,14 +2498,20 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
// All transactions = total_count * RTMTotalCountIncrRate
// Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
ld(R0, RTMLockingCounters::abort_count_offset(), rtm_counters_Reg);
- cmpdi(CCR0, R0, RTMAbortThreshold);
- blt(CCR0, L_check_always_rtm2);
+ if (is_simm(RTMAbortThreshold, 16)) { // cmpdi can handle 16bit immediate only.
+ cmpdi(CCR0, R0, RTMAbortThreshold);
+ blt(CCR0, L_check_always_rtm2); // reload of rtm_counters_Reg not necessary
+ } else {
+ load_const_optimized(rtm_counters_Reg, RTMAbortThreshold);
+ cmpd(CCR0, R0, rtm_counters_Reg);
+ blt(CCR0, L_check_always_rtm1); // reload of rtm_counters_Reg required
+ }
mulli(R0, R0, 100);
const Register tmpReg = rtm_counters_Reg;
ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
- mulli(tmpReg, tmpReg, RTMTotalCountIncrRate);
- mulli(tmpReg, tmpReg, RTMAbortRatio);
+ mulli(tmpReg, tmpReg, RTMTotalCountIncrRate); // allowable range: int16
+ mulli(tmpReg, tmpReg, RTMAbortRatio); // allowable range: int16
cmpd(CCR0, R0, tmpReg);
blt(CCR0, L_check_always_rtm1); // jump to reload
if (method_data != NULL) {
@@ -2521,7 +2527,13 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload
bind(L_check_always_rtm2);
ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
- cmpdi(CCR0, tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
+ int64_t thresholdValue = RTMLockingThreshold / RTMTotalCountIncrRate;
+ if (is_simm(thresholdValue, 16)) { // cmpdi can handle 16bit immediate only.
+ cmpdi(CCR0, tmpReg, thresholdValue);
+ } else {
+ load_const_optimized(R0, thresholdValue);
+ cmpd(CCR0, tmpReg, R0);
+ }
blt(CCR0, L_done);
if (method_data != NULL) {
// Set rtm_state to "always rtm" in MDO.
@@ -2620,7 +2632,7 @@ void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
Label L_noincrement;
if (RTMTotalCountIncrRate > 1) {
- branch_on_random_using_tb(tmp, (int)RTMTotalCountIncrRate, L_noincrement);
+ branch_on_random_using_tb(tmp, RTMTotalCountIncrRate, L_noincrement);
}
assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
load_const_optimized(tmp, (address)stack_rtm_counters->total_count_addr(), R0);
@@ -2687,7 +2699,7 @@ void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
Label L_noincrement;
if (RTMTotalCountIncrRate > 1) {
- branch_on_random_using_tb(R0, (int)RTMTotalCountIncrRate, L_noincrement);
+ branch_on_random_using_tb(R0, RTMTotalCountIncrRate, L_noincrement);
}
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
load_const(R0, (address)rtm_counters->total_count_addr(), tmpReg);
@@ -4120,7 +4132,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
* @param table register pointing to CRC table
*/
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
- Register data, bool loopAlignment, bool invertCRC) {
+ Register data, bool loopAlignment) {
assert_different_registers(crc, buf, len, table, data);
Label L_mainLoop, L_done;
@@ -4131,10 +4143,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
clrldi_(len, len, 32); // Enforce 32 bit. Anything to do?
beq(CCR0, L_done);
- if (invertCRC) {
- nand(crc, crc, crc); // ~c
- }
-
mtctr(len);
align(mainLoop_alignment);
BIND(L_mainLoop);
@@ -4143,10 +4151,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
update_byte_crc32(crc, data, table);
bdnz(L_mainLoop); // Iterate.
- if (invertCRC) {
- nand(crc, crc, crc); // ~c
- }
-
bind(L_done);
}
@@ -4203,7 +4207,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
*/
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
- Register tc0, Register tc1, Register tc2, Register tc3) {
+ Register tc0, Register tc1, Register tc2, Register tc3,
+ bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@@ -4217,14 +4222,16 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
const int complexThreshold = 2*mainLoop_stepping;
// Don't test for len <= 0 here. This pathological case should not occur anyway.
- // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
- // The situation itself is detected and handled correctly by the conditional branches
- // following aghi(len, -stepping) and aghi(len, +stepping).
+ // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
+ // for all well-behaved cases. The situation itself is detected and handled correctly
+ // within update_byteLoop_crc32.
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
BLOCK_COMMENT("kernel_crc32_2word {");
- nand(crc, crc, crc); // ~c
+ if (invertCRC) {
+ nand(crc, crc, crc); // 1s complement of crc
+ }
   // Check for short (<mainLoop_stepping) buffer.
}
- update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
+ update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
}
srdi(tmp2, len, log_stepping); // #iterations for mainLoop
@@ -4281,9 +4288,11 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
   // Process last few (<complexThreshold) bytes of buffer.
}
- update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
+ update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
}
srdi(tmp2, len, log_stepping); // #iterations for mainLoop
@@ -4374,9 +4386,11 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
   // Process last few (<complexThreshold) bytes of buffer.
-//   obj->klass()->klass_part()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-
-//=====================================================================
-
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no releationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-
-#define __ masm->
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
- void** vtable,
- char** md_top,
- char* md_end,
- char** mc_top,
- char* mc_end) {
- intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
- *(intptr_t *)(*md_top) = vtable_bytes;
- *md_top += sizeof(intptr_t);
- void** dummy_vtable = (void**)*md_top;
- *vtable = dummy_vtable;
- *md_top += vtable_bytes;
-
- // Get ready to generate dummy methods.
-
- CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
- MacroAssembler* masm = new MacroAssembler(&cb);
-
- // There are more general problems with CDS on ppc, so I can not
- // really test this. But having this instead of Unimplementd() allows
- // us to pass TestOptionsWithRanges.java.
- __ unimplemented();
-}
-
diff --git a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp
index a4bb111d9e0..ff0fcb85dcb 100644
--- a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "prims/methodHandles.hpp"
#define __ _masm->
@@ -71,7 +72,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register temp_reg, Register temp2_reg,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
- KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+ Klass* klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj_reg);
@@ -174,8 +175,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp, temp2);
__ verify_oop(method_temp);
- // The following assumes that a Method* is normally compressed in the vmtarget field:
- __ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);
+ __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), method_temp);
+ __ verify_oop(method_temp);
+ __ ld(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), method_temp);
if (VerifyMethodHandles && !for_compiler_entry) {
// Make sure recv is already on stack.
@@ -361,14 +363,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
}
- __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
+ __ load_heap_oop(R19_method, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), member_reg);
+ __ ld(R19_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), R19_method);
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
}
- __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
+ __ load_heap_oop(R19_method, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), member_reg);
+ __ ld(R19_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), R19_method);
break;
case vmIntrinsics::_linkToVirtual:
diff --git a/hotspot/src/cpu/ppc/vm/ppc.ad b/hotspot/src/cpu/ppc/vm/ppc.ad
index f9534b65bff..de0d6088460 100644
--- a/hotspot/src/cpu/ppc/vm/ppc.ad
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad
@@ -1,6 +1,6 @@
//
-// Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+// Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2012, 2017 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -2053,12 +2053,12 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg.
-const int Matcher::vector_ideal_reg(int size) {
+const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8 && size == 8, "");
return Op_RegL;
}
-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported");
return Node::NotAMachineReg;
}
@@ -3079,6 +3079,17 @@ encode %{
__ bind(done);
%}
+ enc_class enc_cmove_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+
+ MacroAssembler _masm(&cbuf);
+ Label done;
+ __ bso($crx$$CondRegister, done);
+ __ mffprd($dst$$Register, $src$$FloatRegister);
+ // TODO PPC port __ endgroup_if_needed(_size == 12);
+ __ bind(done);
+ %}
+
enc_class enc_bc(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
// TODO: PPC port $archOpcode(ppc64Opcode_bc);
@@ -5842,6 +5853,16 @@ instruct loadConN_lo(iRegNdst dst, iRegNsrc src1, immN src2) %{
ins_pipe(pipe_class_default);
%}
+instruct rldicl(iRegLdst dst, iRegLsrc src, immI16 shift, immI16 mask_begin) %{
+ effect(DEF dst, USE src, USE shift, USE mask_begin);
+
+ size(4);
+ ins_encode %{
+ __ rldicl($dst$$Register, $src$$Register, $shift$$constant, $mask_begin$$constant);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
// Needed to postalloc expand loadConN: ConN is loaded as ConI
// leaving the upper 32 bits with sign-extension bits.
// This clears these bits: dst = src & 0xFFFFFFFF.
@@ -9306,6 +9327,44 @@ instruct shrP_convP2X_reg_imm6(iRegLdst dst, iRegP_N2P src1, uimmI6 src2) %{
ins_pipe(pipe_class_default);
%}
+// Bitfield Extract: URShiftI + AndI
+instruct andI_urShiftI_regI_immI_immIpow2minus1(iRegIdst dst, iRegIsrc src1, immI src2, immIpow2minus1 src3) %{
+ match(Set dst (AndI (URShiftI src1 src2) src3));
+
+ format %{ "EXTRDI $dst, $src1, shift=$src2, mask=$src3 \t// int bitfield extract" %}
+ size(4);
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+ int rshift = ($src2$$constant) & 0x1f;
+ int length = log2_long(((jlong) $src3$$constant) + 1);
+ if (rshift + length > 32) {
+ // if necessary, adjust mask to omit rotated bits.
+ length = 32 - rshift;
+ }
+ __ extrdi($dst$$Register, $src1$$Register, length, 64 - (rshift + length));
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+// Bitfield Extract: URShiftL + AndL
+instruct andL_urShiftL_regL_immI_immLpow2minus1(iRegLdst dst, iRegLsrc src1, immI src2, immLpow2minus1 src3) %{
+ match(Set dst (AndL (URShiftL src1 src2) src3));
+
+ format %{ "EXTRDI $dst, $src1, shift=$src2, mask=$src3 \t// long bitfield extract" %}
+ size(4);
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+ int rshift = ($src2$$constant) & 0x3f;
+ int length = log2_long(((jlong) $src3$$constant) + 1);
+ if (rshift + length > 64) {
+ // if necessary, adjust mask to omit rotated bits.
+ length = 64 - rshift;
+ }
+ __ extrdi($dst$$Register, $src1$$Register, length, 64 - (rshift + length));
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
instruct sxtI_reg(iRegIdst dst, iRegIsrc src) %{
match(Set dst (ConvL2I (ConvI2L src)));
@@ -10078,9 +10137,36 @@ instruct andcL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
// float intBitsToFloat(int bits)
//
// Notes on the implementation on ppc64:
-// We only provide rules which move between a register and a stack-location,
-// because we always have to go through memory when moving between a float
-// register and an integer register.
+// For Power7 and earlier, the rules are limited to those which move between a
+// register and a stack-location, because we always have to go through memory
+// when moving between a float register and an integer register.
+// This restriction is removed in Power8 with the introduction of the mtfprd
+// and mffprd instructions.
+
+instruct moveL2D_reg(regD dst, iRegLsrc src) %{
+ match(Set dst (MoveL2D src));
+ predicate(VM_Version::has_mtfprd());
+
+ format %{ "MTFPRD $dst, $src" %}
+ size(4);
+ ins_encode %{
+ __ mtfprd($dst$$FloatRegister, $src$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+instruct moveI2D_reg(regD dst, iRegIsrc src) %{
+ // no match-rule, false predicate
+ effect(DEF dst, USE src);
+ predicate(false);
+
+ format %{ "MTFPRWA $dst, $src" %}
+ size(4);
+ ins_encode %{
+ __ mtfprwa($dst$$FloatRegister, $src$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
//---------- Chain stack slots between similar types --------
@@ -10519,6 +10605,16 @@ instruct convB2I_reg(iRegIdst dst, iRegIsrc src, immI_24 amount) %{
ins_pipe(pipe_class_default);
%}
+instruct extsh(iRegIdst dst, iRegIsrc src) %{
+ effect(DEF dst, USE src);
+
+ size(4);
+ ins_encode %{
+ __ extsh($dst$$Register, $src$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
// LShiftI 16 + RShiftI 16 converts short to int.
instruct convS2I_reg(iRegIdst dst, iRegIsrc src, immI_16 amount) %{
match(Set dst (RShiftI (LShiftI src amount) amount));
@@ -10583,6 +10679,20 @@ instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_pipe(pipe_class_default);
%}
+instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
+ // no match-rule, false predicate
+ effect(DEF dst, USE crx, USE src);
+ predicate(false);
+
+ ins_variable_size_depending_on_alignment(true);
+
+ format %{ "cmovI $crx, $dst, $src" %}
+ // Worst case is branch + move + stop, no stop without scheduler.
+ size(false /* TODO: PPC PORT(InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling())*/ ? 12 : 8);
+ ins_encode( enc_cmove_bso_reg(dst, crx, src) );
+ ins_pipe(pipe_class_default);
+%}
+
instruct cmovI_bso_stackSlotL_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, stackSlotL mem) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE mem);
@@ -10637,9 +10747,64 @@ instruct cmovI_bso_stackSlotL_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, stack
%}
%}
+instruct cmovI_bso_reg_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, regD src) %{
+ // no match-rule, false predicate
+ effect(DEF dst, USE crx, USE src);
+ predicate(false);
+
+ format %{ "CmovI $dst, $crx, $src \t// postalloc expanded" %}
+ postalloc_expand %{
+ //
+ // replaces
+ //
+ // region dst crx src
+ // \ | | /
+ // dst=cmovI_bso_reg_conLvalue0
+ //
+ // with
+ //
+ // region dst
+ // \ /
+ // dst=loadConI16(0)
+ // |
+ // ^ region dst crx src
+ // | \ | | /
+ // dst=cmovI_bso_reg
+ //
+
+ // Create new nodes.
+ MachNode *m1 = new loadConI16Node();
+ MachNode *m2 = new cmovI_bso_regNode();
+
+ // inputs for new nodes
+ m1->add_req(n_region);
+ m2->add_req(n_region, n_crx, n_src);
+
+ // precedences for new nodes
+ m2->add_prec(m1);
+
+ // operands for new nodes
+ m1->_opnds[0] = op_dst;
+ m1->_opnds[1] = new immI16Oper(0);
+
+ m2->_opnds[0] = op_dst;
+ m2->_opnds[1] = op_crx;
+ m2->_opnds[2] = op_src;
+
+ // registers for new nodes
+ ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
+ ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
+
+ // Insert new nodes.
+ nodes->push(m1);
+ nodes->push(m2);
+ %}
+%}
+
// Double to Int conversion, NaN is mapped to 0.
instruct convD2I_reg_ExEx(iRegIdst dst, regD src) %{
match(Set dst (ConvD2I src));
+ predicate(!VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10653,6 +10818,21 @@ instruct convD2I_reg_ExEx(iRegIdst dst, regD src) %{
%}
%}
+// Double to Int conversion, NaN is mapped to 0. Special version for Power8.
+instruct convD2I_reg_mffprd_ExEx(iRegIdst dst, regD src) %{
+ match(Set dst (ConvD2I src));
+ predicate(VM_Version::has_mtfprd());
+ ins_cost(DEFAULT_COST);
+
+ expand %{
+ regD tmpD;
+ flagsReg crx;
+ cmpDUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
+ convD2IRaw_regD(tmpD, src); // Convert float to int (speculated).
+ cmovI_bso_reg_conLvalue0_Ex(dst, crx, tmpD); // Cmove based on NaN check.
+ %}
+%}
+
instruct convF2IRaw_regF(regF dst, regF src) %{
// no match-rule, false predicate
effect(DEF dst, USE src);
@@ -10670,6 +10850,7 @@ instruct convF2IRaw_regF(regF dst, regF src) %{
// Float to Int conversion, NaN is mapped to 0.
instruct convF2I_regF_ExEx(iRegIdst dst, regF src) %{
match(Set dst (ConvF2I src));
+ predicate(!VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10683,6 +10864,21 @@ instruct convF2I_regF_ExEx(iRegIdst dst, regF src) %{
%}
%}
+// Float to Int conversion, NaN is mapped to 0. Special version for Power8.
+instruct convF2I_regF_mffprd_ExEx(iRegIdst dst, regF src) %{
+ match(Set dst (ConvF2I src));
+ predicate(VM_Version::has_mtfprd());
+ ins_cost(DEFAULT_COST);
+
+ expand %{
+ regF tmpF;
+ flagsReg crx;
+ cmpFUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
+ convF2IRaw_regF(tmpF, src); // Convert float to int (speculated).
+ cmovI_bso_reg_conLvalue0_Ex(dst, crx, tmpF); // Cmove based on NaN check.
+ %}
+%}
+
// Convert to Long
instruct convI2L_reg(iRegLdst dst, iRegIsrc src) %{
@@ -10752,6 +10948,20 @@ instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
ins_pipe(pipe_class_default);
%}
+instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
+ // no match-rule, false predicate
+ effect(DEF dst, USE crx, USE src);
+ predicate(false);
+
+ ins_variable_size_depending_on_alignment(true);
+
+ format %{ "cmovL $crx, $dst, $src" %}
+ // Worst case is branch + move + stop, no stop without scheduler.
+ size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
+ ins_encode( enc_cmove_bso_reg(dst, crx, src) );
+ ins_pipe(pipe_class_default);
+%}
+
instruct cmovL_bso_stackSlotL_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, stackSlotL mem) %{
// no match-rule, false predicate
effect(DEF dst, USE crx, USE mem);
@@ -10803,9 +11013,61 @@ instruct cmovL_bso_stackSlotL_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, stack
%}
%}
+instruct cmovL_bso_reg_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, regD src) %{
+ // no match-rule, false predicate
+ effect(DEF dst, USE crx, USE src);
+ predicate(false);
+
+ format %{ "CmovL $dst, $crx, $src \t// postalloc expanded" %}
+ postalloc_expand %{
+ //
+ // replaces
+ //
+ // region dst crx src
+ // \ | | /
+ // dst=cmovL_bso_reg_conLvalue0
+ //
+ // with
+ //
+ // region dst
+ // \ /
+ // dst=loadConL16(0)
+ // |
+ // ^ region dst crx src
+ // | \ | | /
+ // dst=cmovL_bso_reg
+ //
+
+ // Create new nodes.
+ MachNode *m1 = new loadConL16Node();
+ MachNode *m2 = new cmovL_bso_regNode();
+
+ // inputs for new nodes
+ m1->add_req(n_region);
+ m2->add_req(n_region, n_crx, n_src);
+ m2->add_prec(m1);
+
+ // operands for new nodes
+ m1->_opnds[0] = op_dst;
+ m1->_opnds[1] = new immL16Oper(0);
+ m2->_opnds[0] = op_dst;
+ m2->_opnds[1] = op_crx;
+ m2->_opnds[2] = op_src;
+
+ // registers for new nodes
+ ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
+ ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
+
+ // Insert new nodes.
+ nodes->push(m1);
+ nodes->push(m2);
+ %}
+%}
+
// Float to Long conversion, NaN is mapped to 0.
instruct convF2L_reg_ExEx(iRegLdst dst, regF src) %{
match(Set dst (ConvF2L src));
+ predicate(!VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10819,6 +11081,21 @@ instruct convF2L_reg_ExEx(iRegLdst dst, regF src) %{
%}
%}
+// Float to Long conversion, NaN is mapped to 0. Special version for Power8.
+instruct convF2L_reg_mffprd_ExEx(iRegLdst dst, regF src) %{
+ match(Set dst (ConvF2L src));
+ predicate(VM_Version::has_mtfprd());
+ ins_cost(DEFAULT_COST);
+
+ expand %{
+ regF tmpF;
+ flagsReg crx;
+ cmpFUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
+ convF2LRaw_regF(tmpF, src); // Convert float to long (speculated).
+ cmovL_bso_reg_conLvalue0_Ex(dst, crx, tmpF); // Cmove based on NaN check.
+ %}
+%}
+
instruct convD2LRaw_regD(regD dst, regD src) %{
// no match-rule, false predicate
effect(DEF dst, USE src);
@@ -10836,6 +11113,7 @@ instruct convD2LRaw_regD(regD dst, regD src) %{
// Double to Long conversion, NaN is mapped to 0.
instruct convD2L_reg_ExEx(iRegLdst dst, regD src) %{
match(Set dst (ConvD2L src));
+ predicate(!VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10849,6 +11127,21 @@ instruct convD2L_reg_ExEx(iRegLdst dst, regD src) %{
%}
%}
+// Double to Long conversion, NaN is mapped to 0. Special version for Power8.
+instruct convD2L_reg_mffprd_ExEx(iRegLdst dst, regD src) %{
+ match(Set dst (ConvD2L src));
+ predicate(VM_Version::has_mtfprd());
+ ins_cost(DEFAULT_COST);
+
+ expand %{
+ regD tmpD;
+ flagsReg crx;
+ cmpDUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
+ convD2LRaw_regD(tmpD, src); // Convert float to long (speculated).
+ cmovL_bso_reg_conLvalue0_Ex(dst, crx, tmpD); // Cmove based on NaN check.
+ %}
+%}
+
// Convert to Float
// Placed here as needed in expand.
@@ -10914,7 +11207,7 @@ instruct convL2FRaw_regF(regF dst, regD src) %{
// Integer to Float conversion. Special version for Power7.
instruct convI2F_ireg_fcfids_Ex(regF dst, iRegIsrc src) %{
match(Set dst (ConvI2F src));
- predicate(VM_Version::has_fcfids());
+ predicate(VM_Version::has_fcfids() && !VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10928,10 +11221,23 @@ instruct convI2F_ireg_fcfids_Ex(regF dst, iRegIsrc src) %{
%}
%}
+// Integer to Float conversion. Special version for Power8.
+instruct convI2F_ireg_mtfprd_Ex(regF dst, iRegIsrc src) %{
+ match(Set dst (ConvI2F src));
+ predicate(VM_Version::has_fcfids() && VM_Version::has_mtfprd());
+ ins_cost(DEFAULT_COST);
+
+ expand %{
+ regD tmpD;
+ moveI2D_reg(tmpD, src);
+ convL2FRaw_regF(dst, tmpD); // Convert to float.
+ %}
+%}
+
// L2F to avoid runtime call.
instruct convL2F_ireg_fcfids_Ex(regF dst, iRegLsrc src) %{
match(Set dst (ConvL2F src));
- predicate(VM_Version::has_fcfids());
+ predicate(VM_Version::has_fcfids() && !VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10943,6 +11249,19 @@ instruct convL2F_ireg_fcfids_Ex(regF dst, iRegLsrc src) %{
%}
%}
+// L2F to avoid runtime call. Special version for Power8.
+instruct convL2F_ireg_mtfprd_Ex(regF dst, iRegLsrc src) %{
+ match(Set dst (ConvL2F src));
+ predicate(VM_Version::has_fcfids() && VM_Version::has_mtfprd());
+ ins_cost(DEFAULT_COST);
+
+ expand %{
+ regD tmpD;
+ moveL2D_reg(tmpD, src);
+ convL2FRaw_regF(dst, tmpD); // Convert to float.
+ %}
+%}
+
// Moved up as used in expand.
//instruct convD2F_reg(regF dst, regD src) %{%}
@@ -10951,6 +11270,7 @@ instruct convL2F_ireg_fcfids_Ex(regF dst, iRegLsrc src) %{
// Integer to Double conversion.
instruct convI2D_reg_Ex(regD dst, iRegIsrc src) %{
match(Set dst (ConvI2D src));
+ predicate(!VM_Version::has_mtfprd());
ins_cost(DEFAULT_COST);
expand %{
@@ -10964,6 +11284,19 @@ instruct convI2D_reg_Ex(regD dst, iRegIsrc src) %{
%}
%}
+// Integer to Double conversion. Special version for Power8.
+instruct convI2D_reg_mtfprd_Ex(regD dst, iRegIsrc src) %{
+ match(Set dst (ConvI2D src));
+ predicate(VM_Version::has_mtfprd());
+ ins_cost(DEFAULT_COST);
+
+ expand %{
+ regD tmpD;
+ moveI2D_reg(tmpD, src);
+ convL2DRaw_regD(dst, tmpD); // Convert to double.
+ %}
+%}
+
// Long to Double conversion
instruct convL2D_reg_Ex(regD dst, stackSlotL src) %{
match(Set dst (ConvL2D src));
@@ -10976,6 +11309,19 @@ instruct convL2D_reg_Ex(regD dst, stackSlotL src) %{
%}
%}
+// Long to Double conversion. Special version for Power8.
+instruct convL2D_reg_mtfprd_Ex(regD dst, iRegLsrc src) %{
+ match(Set dst (ConvL2D src));
+ predicate(VM_Version::has_mtfprd());
+ ins_cost(DEFAULT_COST);
+
+ expand %{
+ regD tmpD;
+ moveL2D_reg(tmpD, src);
+ convL2DRaw_regD(dst, tmpD); // Convert to double.
+ %}
+%}
+
instruct convF2D_reg(regD dst, regF src) %{
match(Set dst (ConvF2D src));
format %{ "FMR $dst, $src \t// float->double" %}
@@ -12705,8 +13051,7 @@ instruct insrwi(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
// Just slightly faster than java implementation.
instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
match(Set dst (ReverseBytesI src));
- predicate(UseCountLeadingZerosInstructionsPPC64);
- ins_cost(DEFAULT_COST);
+ ins_cost(7*DEFAULT_COST);
expand %{
immI16 imm24 %{ (int) 24 %}
@@ -12728,6 +13073,172 @@ instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
%}
%}
+instruct bytes_reverse_long_Ex(iRegLdst dst, iRegLsrc src) %{
+ match(Set dst (ReverseBytesL src));
+ ins_cost(15*DEFAULT_COST);
+
+ expand %{
+ immI16 imm56 %{ (int) 56 %}
+ immI16 imm48 %{ (int) 48 %}
+ immI16 imm40 %{ (int) 40 %}
+ immI16 imm32 %{ (int) 32 %}
+ immI16 imm24 %{ (int) 24 %}
+ immI16 imm16 %{ (int) 16 %}
+ immI16 imm8 %{ (int) 8 %}
+ immI16 imm0 %{ (int) 0 %}
+ iRegLdst tmpL1;
+ iRegLdst tmpL2;
+ iRegLdst tmpL3;
+ iRegLdst tmpL4;
+ iRegLdst tmpL5;
+ iRegLdst tmpL6;
+
+ // src : |a|b|c|d|e|f|g|h|
+ rldicl(tmpL1, src, imm8, imm24); // tmpL1 : | | | |e|f|g|h|a|
+ rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |a| | | |e|
+ rldicl(tmpL3, tmpL2, imm32, imm0); // tmpL3 : | | | |e| | | |a|
+ rldicl(tmpL1, src, imm16, imm24); // tmpL1 : | | | |f|g|h|a|b|
+ rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |b| | | |f|
+ rldicl(tmpL4, tmpL2, imm40, imm0); // tmpL4 : | | |f| | | |b| |
+ orL_reg_reg(tmpL5, tmpL3, tmpL4); // tmpL5 : | | |f|e| | |b|a|
+ rldicl(tmpL1, src, imm24, imm24); // tmpL1 : | | | |g|h|a|b|c|
+ rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |c| | | |g|
+ rldicl(tmpL3, tmpL2, imm48, imm0); // tmpL3 : | |g| | | |c| | |
+ rldicl(tmpL1, src, imm32, imm24); // tmpL1 : | | | |h|a|b|c|d|
+ rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |d| | | |h|
+ rldicl(tmpL4, tmpL2, imm56, imm0); // tmpL4 : |h| | | |d| | | |
+ orL_reg_reg(tmpL6, tmpL3, tmpL4); // tmpL6 : |h|g| | |d|c| | |
+ orL_reg_reg(dst, tmpL5, tmpL6); // dst : |h|g|f|e|d|c|b|a|
+ %}
+%}
+
+instruct bytes_reverse_ushort_Ex(iRegIdst dst, iRegIsrc src) %{
+ match(Set dst (ReverseBytesUS src));
+ ins_cost(2*DEFAULT_COST);
+
+ expand %{
+ immI16 imm16 %{ (int) 16 %}
+ immI16 imm8 %{ (int) 8 %}
+
+ urShiftI_reg_imm(dst, src, imm8);
+ insrwi(dst, src, imm16, imm8);
+ %}
+%}
+
+instruct bytes_reverse_short_Ex(iRegIdst dst, iRegIsrc src) %{
+ match(Set dst (ReverseBytesS src));
+ ins_cost(3*DEFAULT_COST);
+
+ expand %{
+ immI16 imm16 %{ (int) 16 %}
+ immI16 imm8 %{ (int) 8 %}
+ iRegLdst tmpI1;
+
+ urShiftI_reg_imm(tmpI1, src, imm8);
+ insrwi(tmpI1, src, imm16, imm8);
+ extsh(dst, tmpI1);
+ %}
+%}
+
+// Load Integer reversed byte order
+instruct loadI_reversed(iRegIdst dst, indirect mem) %{
+ match(Set dst (ReverseBytesI (LoadI mem)));
+ ins_cost(MEMORY_REF_COST);
+
+ size(4);
+ ins_encode %{
+ __ lwbrx($dst$$Register, $mem$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+// Load Long - aligned and reversed
+instruct loadL_reversed(iRegLdst dst, indirect mem) %{
+ match(Set dst (ReverseBytesL (LoadL mem)));
+ predicate(VM_Version::has_ldbrx());
+ ins_cost(MEMORY_REF_COST);
+
+ size(4);
+ ins_encode %{
+ __ ldbrx($dst$$Register, $mem$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+// Load unsigned short / char reversed byte order
+instruct loadUS_reversed(iRegIdst dst, indirect mem) %{
+ match(Set dst (ReverseBytesUS (LoadUS mem)));
+ ins_cost(MEMORY_REF_COST);
+
+ size(4);
+ ins_encode %{
+ __ lhbrx($dst$$Register, $mem$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+// Load short reversed byte order
+instruct loadS_reversed(iRegIdst dst, indirect mem) %{
+ match(Set dst (ReverseBytesS (LoadS mem)));
+ ins_cost(MEMORY_REF_COST + DEFAULT_COST);
+
+ size(8);
+ ins_encode %{
+ __ lhbrx($dst$$Register, $mem$$Register);
+ __ extsh($dst$$Register, $dst$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+// Store Integer reversed byte order
+instruct storeI_reversed(iRegIsrc src, indirect mem) %{
+ match(Set mem (StoreI mem (ReverseBytesI src)));
+ ins_cost(MEMORY_REF_COST);
+
+ size(4);
+ ins_encode %{
+ __ stwbrx($src$$Register, $mem$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+// Store Long reversed byte order
+instruct storeL_reversed(iRegLsrc src, indirect mem) %{
+ match(Set mem (StoreL mem (ReverseBytesL src)));
+ predicate(VM_Version::has_stdbrx());
+ ins_cost(MEMORY_REF_COST);
+
+ size(4);
+ ins_encode %{
+ __ stdbrx($src$$Register, $mem$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+// Store unsigned short / char reversed byte order
+instruct storeUS_reversed(iRegIsrc src, indirect mem) %{
+ match(Set mem (StoreC mem (ReverseBytesUS src)));
+ ins_cost(MEMORY_REF_COST);
+
+ size(4);
+ ins_encode %{
+ __ sthbrx($src$$Register, $mem$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+// Store short reversed byte order
+instruct storeS_reversed(iRegIsrc src, indirect mem) %{
+ match(Set mem (StoreC mem (ReverseBytesS src)));
+ ins_cost(MEMORY_REF_COST);
+
+ size(4);
+ ins_encode %{
+ __ sthbrx($src$$Register, $mem$$Register);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
//---------- Replicate Vector Instructions ------------------------------------
// Insrdi does replicate if src == dst.
diff --git a/hotspot/src/cpu/ppc/vm/register_ppc.cpp b/hotspot/src/cpu/ppc/vm/register_ppc.cpp
index 8dcd325ad75..1428fef704a 100644
--- a/hotspot/src/cpu/ppc/vm/register_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/register_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -81,8 +81,17 @@ const char* VectorSRegisterImpl::name() const {
"VSR0", "VSR1", "VSR2", "VSR3", "VSR4", "VSR5", "VSR6", "VSR7",
"VSR8", "VSR9", "VSR10", "VSR11", "VSR12", "VSR13", "VSR14", "VSR15",
"VSR16", "VSR17", "VSR18", "VSR19", "VSR20", "VSR21", "VSR22", "VSR23",
- "VSR24", "VSR25", "VSR26", "VSR27", "VSR28", "VSR29", "VSR30", "VSR31"
+ "VSR24", "VSR25", "VSR26", "VSR27", "VSR28", "VSR29", "VSR30", "VSR31",
+ "VSR32", "VSR33", "VSR34", "VSR35", "VSR36", "VSR37", "VSR38", "VSR39",
+ "VSR40", "VSR41", "VSR42", "VSR43", "VSR44", "VSR45", "VSR46", "VSR47",
+ "VSR48", "VSR49", "VSR50", "VSR51", "VSR52", "VSR53", "VSR54", "VSR55",
+ "VSR56", "VSR57", "VSR58", "VSR59", "VSR60", "VSR61", "VSR62", "VSR63"
};
return is_valid() ? names[encoding()] : "vsnoreg";
}
+// Method to convert a VectorRegister to a Vector-Scalar Register (VectorSRegister)
+VectorSRegister VectorRegisterImpl::to_vsr() const {
+ if (this == vnoreg) { return vsnoregi; }
+ return as_VectorSRegister(encoding() + 32);
+}
diff --git a/hotspot/src/cpu/ppc/vm/register_ppc.hpp b/hotspot/src/cpu/ppc/vm/register_ppc.hpp
index e73dc18a30a..c554f88619d 100644
--- a/hotspot/src/cpu/ppc/vm/register_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/register_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -398,6 +398,11 @@ inline VectorRegister as_VectorRegister(int encoding) {
return (VectorRegister)(intptr_t)encoding;
}
+// Forward declaration
+// Use VectorSRegister as a shortcut.
+class VectorSRegisterImpl;
+typedef VectorSRegisterImpl* VectorSRegister;
+
// The implementation of vector registers for the Power architecture
class VectorRegisterImpl: public AbstractRegisterImpl {
public:
@@ -415,6 +420,9 @@ class VectorRegisterImpl: public AbstractRegisterImpl {
bool is_valid() const { return 0 <= value() && value() < number_of_registers; }
const char* name() const;
+
+ // convert to VSR
+ VectorSRegister to_vsr() const;
};
// The Vector registers of the Power architecture
@@ -491,10 +499,6 @@ CONSTANT_REGISTER_DECLARATION(VectorRegister, VR31, (31));
#endif // DONT_USE_REGISTER_DEFINES
-// Use VectorSRegister as a shortcut.
-class VectorSRegisterImpl;
-typedef VectorSRegisterImpl* VectorSRegister;
-
inline VectorSRegister as_VectorSRegister(int encoding) {
return (VectorSRegister)(intptr_t)encoding;
}
@@ -503,7 +507,7 @@ inline VectorSRegister as_VectorSRegister(int encoding) {
class VectorSRegisterImpl: public AbstractRegisterImpl {
public:
enum {
- number_of_registers = 32
+ number_of_registers = 64
};
// construction
@@ -554,6 +558,38 @@ CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR28, (28));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR29, (29));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR30, (30));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR31, (31));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR32, (32));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR33, (33));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR34, (34));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR35, (35));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR36, (36));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR37, (37));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR38, (38));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR39, (39));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR40, (40));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR41, (41));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR42, (42));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR43, (43));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR44, (44));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR45, (45));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR46, (46));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR47, (47));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR48, (48));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR49, (49));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR50, (50));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR51, (51));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR52, (52));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR53, (53));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR54, (54));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR55, (55));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR56, (56));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR57, (57));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR58, (58));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR59, (59));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR60, (60));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR61, (61));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR62, (62));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR63, (63));
#ifndef DONT_USE_REGISTER_DEFINES
#define vsnoregi ((VectorSRegister)(vsnoreg_VectorSRegisterEnumValue))
@@ -589,6 +625,38 @@ CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR31, (31));
#define VSR29 ((VectorSRegister)( VSR29_VectorSRegisterEnumValue))
#define VSR30 ((VectorSRegister)( VSR30_VectorSRegisterEnumValue))
#define VSR31 ((VectorSRegister)( VSR31_VectorSRegisterEnumValue))
+#define VSR32 ((VectorSRegister)( VSR32_VectorSRegisterEnumValue))
+#define VSR33 ((VectorSRegister)( VSR33_VectorSRegisterEnumValue))
+#define VSR34 ((VectorSRegister)( VSR34_VectorSRegisterEnumValue))
+#define VSR35 ((VectorSRegister)( VSR35_VectorSRegisterEnumValue))
+#define VSR36 ((VectorSRegister)( VSR36_VectorSRegisterEnumValue))
+#define VSR37 ((VectorSRegister)( VSR37_VectorSRegisterEnumValue))
+#define VSR38 ((VectorSRegister)( VSR38_VectorSRegisterEnumValue))
+#define VSR39 ((VectorSRegister)( VSR39_VectorSRegisterEnumValue))
+#define VSR40 ((VectorSRegister)( VSR40_VectorSRegisterEnumValue))
+#define VSR41 ((VectorSRegister)( VSR41_VectorSRegisterEnumValue))
+#define VSR42 ((VectorSRegister)( VSR42_VectorSRegisterEnumValue))
+#define VSR43 ((VectorSRegister)( VSR43_VectorSRegisterEnumValue))
+#define VSR44 ((VectorSRegister)( VSR44_VectorSRegisterEnumValue))
+#define VSR45 ((VectorSRegister)( VSR45_VectorSRegisterEnumValue))
+#define VSR46 ((VectorSRegister)( VSR46_VectorSRegisterEnumValue))
+#define VSR47 ((VectorSRegister)( VSR47_VectorSRegisterEnumValue))
+#define VSR48 ((VectorSRegister)( VSR48_VectorSRegisterEnumValue))
+#define VSR49 ((VectorSRegister)( VSR49_VectorSRegisterEnumValue))
+#define VSR50 ((VectorSRegister)( VSR50_VectorSRegisterEnumValue))
+#define VSR51 ((VectorSRegister)( VSR51_VectorSRegisterEnumValue))
+#define VSR52 ((VectorSRegister)( VSR52_VectorSRegisterEnumValue))
+#define VSR53 ((VectorSRegister)( VSR53_VectorSRegisterEnumValue))
+#define VSR54 ((VectorSRegister)( VSR54_VectorSRegisterEnumValue))
+#define VSR55 ((VectorSRegister)( VSR55_VectorSRegisterEnumValue))
+#define VSR56 ((VectorSRegister)( VSR56_VectorSRegisterEnumValue))
+#define VSR57 ((VectorSRegister)( VSR57_VectorSRegisterEnumValue))
+#define VSR58 ((VectorSRegister)( VSR58_VectorSRegisterEnumValue))
+#define VSR59 ((VectorSRegister)( VSR59_VectorSRegisterEnumValue))
+#define VSR60 ((VectorSRegister)( VSR60_VectorSRegisterEnumValue))
+#define VSR61 ((VectorSRegister)( VSR61_VectorSRegisterEnumValue))
+#define VSR62 ((VectorSRegister)( VSR62_VectorSRegisterEnumValue))
+#define VSR63 ((VectorSRegister)( VSR63_VectorSRegisterEnumValue))
#endif // DONT_USE_REGISTER_DEFINES
// Maximum number of incoming arguments that can be passed in i registers.
@@ -609,7 +677,7 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
* 2 // register halves
+ ConditionRegisterImpl::number_of_registers // condition code registers
+ SpecialRegisterImpl::number_of_registers // special registers
- + VectorRegisterImpl::number_of_registers // vector registers
+ + VectorRegisterImpl::number_of_registers // VSX registers
};
static const int max_gpr;
diff --git a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
index dc36aa77da2..20a1a963abc 100644
--- a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
@@ -35,6 +35,7 @@
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@@ -221,7 +222,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
sizeof(RegisterSaver::LiveRegType);
const int register_save_size = regstosave_num * reg_size;
- const int frame_size_in_bytes = round_to(register_save_size, frame::alignment_in_bytes)
+ const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes)
+ frame::abi_reg_args_size;
*out_frame_size_in_bytes = frame_size_in_bytes;
const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
@@ -658,7 +659,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
ShouldNotReachHere();
}
}
- return round_to(stk, 2);
+ return align_up(stk, 2);
}
#if defined(COMPILER1) || defined(COMPILER2)
@@ -845,7 +846,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
}
}
- return round_to(stk, 2);
+ return align_up(stk, 2);
}
#endif // COMPILER2
@@ -873,7 +874,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
// Adapter needs TOP_IJAVA_FRAME_ABI.
const int adapter_size = frame::top_ijava_frame_abi_size +
- round_to(total_args_passed * wordSize, frame::alignment_in_bytes);
+ align_up(total_args_passed * wordSize, frame::alignment_in_bytes);
// regular (verified) c2i entry point
c2i_entrypoint = __ pc();
@@ -1022,9 +1023,9 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// number (all values in registers) or the maximum stack slot accessed.
// Convert 4-byte c2 stack slots to words.
- comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+ comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
// Round up to miminum stack alignment, in wordSize.
- comp_words_on_stack = round_to(comp_words_on_stack, 2);
+ comp_words_on_stack = align_up(comp_words_on_stack, 2);
__ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
}
@@ -1609,7 +1610,7 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
}
static void verify_oop_args(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = R19_method; // not part of any compiled calling seq
@@ -1631,7 +1632,7 @@ static void verify_oop_args(MacroAssembler* masm,
}
static void gen_special_dispatch(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
@@ -1918,7 +1919,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
}
}
}
- total_save_slots = double_slots * 2 + round_to(single_slots, 2); // round to even
+ total_save_slots = double_slots * 2 + align_up(single_slots, 2); // round to even
}
int oop_handle_slot_offset = stack_slots;
@@ -1945,7 +1946,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Now compute actual number of stack words we need.
// Rounding to make stack properly aligned.
- stack_slots = round_to(stack_slots, // 7)
+ stack_slots = align_up(stack_slots, // 7)
frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
@@ -2203,8 +2204,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// disallows any pending_exception.
// Save argument registers and leave room for C-compatible ABI_REG_ARGS.
- int frame_size = frame::abi_reg_args_size +
- round_to(total_c_args * wordSize, frame::alignment_in_bytes);
+ int frame_size = frame::abi_reg_args_size + align_up(total_c_args * wordSize, frame::alignment_in_bytes);
__ mr(R11_scratch1, R1_SP);
RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);
@@ -2570,7 +2570,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// This function returns the adjust size (in number of words) to a c2i adapter
// activation for use during deoptimization.
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
- return round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
+ return align_up((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
}
uint SharedRuntime::out_preserve_stack_slots() {
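The changes above are a bulk rename from `round_to` to `align_up` (declared in the newly included `utilities/align.hpp`); both round a byte or slot count up to the next multiple of a power-of-two alignment. A minimal standalone sketch of that arithmetic, assuming a power-of-two alignment (not HotSpot's actual implementation):

```cpp
#include <cassert>
#include <cstddef>

// Round size up to the next multiple of alignment (alignment must be a
// power of two). This mirrors the semantics of the align_up() calls above.
static size_t align_up_sketch(size_t size, size_t alignment) {
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(align_up_sketch(0, 16)  == 0);
  assert(align_up_sketch(1, 16)  == 16);
  assert(align_up_sketch(17, 16) == 32);
  assert(align_up_sketch(5, 2)   == 6);  // e.g. rounding stack words to pairs
  return 0;
}
```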
diff --git a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp
index 4bd0c833ab8..339152c5ff3 100644
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
+#include "utilities/align.hpp"
#define __ _masm->
@@ -626,7 +627,7 @@ class StubGenerator: public StubCodeGenerator {
int spill_slots = 3;
if (preserve1 != noreg) { spill_slots++; }
if (preserve2 != noreg) { spill_slots++; }
- const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
+ const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
Label filtered;
// Is marking active?
@@ -687,7 +688,7 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCTLogging:
{
int spill_slots = (preserve != noreg) ? 1 : 0;
- const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
+ const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
__ save_LR_CR(R0);
__ push_frame(frame_size, R0);
@@ -2728,7 +2729,7 @@ class StubGenerator: public StubCodeGenerator {
__ vspltisb (vTmp2, -16);
__ vrld (keyPerm, keyPerm, vTmp2);
__ vrld (keyPerm, keyPerm, vTmp2);
- __ vsldoi (keyPerm, keyPerm, keyPerm, -8);
+ __ vsldoi (keyPerm, keyPerm, keyPerm, 8);
// load the 1st round key to vKey1
__ li (keypos, 0);
@@ -2928,7 +2929,7 @@ class StubGenerator: public StubCodeGenerator {
__ vspltisb (vTmp2, -16);
__ vrld (keyPerm, keyPerm, vTmp2);
__ vrld (keyPerm, keyPerm, vTmp2);
- __ vsldoi (keyPerm, keyPerm, keyPerm, -8);
+ __ vsldoi (keyPerm, keyPerm, keyPerm, 8);
__ cmpwi (CCR0, keylen, 44);
__ beq (CCR0, L_do44);
@@ -3276,6 +3277,36 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
+
+ // Compute CRC32/CRC32C function.
+ void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
+
+ // arguments to kernel_crc32:
+ const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
+ const Register data = R4_ARG2; // source byte array
+ const Register dataLen = R5_ARG3; // #bytes to process
+
+ const Register t0 = R2;
+ const Register t1 = R7;
+ const Register t2 = R8;
+ const Register t3 = R9;
+ const Register tc0 = R10;
+ const Register tc1 = R11;
+ const Register tc2 = R12;
+
+ BLOCK_COMMENT("Stub body {");
+ assert_different_registers(crc, data, dataLen, table);
+
+ __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
+
+ BLOCK_COMMENT("return");
+ __ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
+ __ blr();
+
+ BLOCK_COMMENT("} Stub body");
+ }
+
+
/**
* Arguments:
*
@@ -3296,14 +3327,14 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); // Remember stub start address (is rtn value).
+ const Register table = R6; // crc table address
+
+#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process
- const Register table = R6; // crc table address
-
-#ifdef VM_LITTLE_ENDIAN
if (VM_Version::has_vpmsumb()) {
const Register constants = R2; // constants address
const Register bconstants = R8; // barret table address
@@ -3321,7 +3352,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);
- __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4);
+ __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
@@ -3331,31 +3362,79 @@ class StubGenerator: public StubCodeGenerator {
} else
#endif
{
- const Register t0 = R2;
- const Register t1 = R7;
- const Register t2 = R8;
- const Register t3 = R9;
- const Register tc0 = R10;
- const Register tc1 = R11;
- const Register tc2 = R12;
+ StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
+ generate_CRC_updateBytes(name, table, true);
+ }
+
+ return start;
+ }
+
+
+ /**
+ * Arguments:
+ *
+ * Inputs:
+ * R3_ARG1 - int crc
+ * R4_ARG2 - byte* buf
+ * R5_ARG3 - int length (of buffer)
+ *
+ * scratch:
+ * R2, R6-R12
+ *
+ * Output:
+ * R3_RET - int crc result
+ */
+ // Compute CRC32C function.
+ address generate_CRC32C_updateBytes(const char* name) {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", name);
+ address start = __ function_entry(); // Remember stub start address (is rtn value).
+
+ const Register table = R6; // crc table address
+
+#if 0 // no vector support yet for CRC32C
+#ifdef VM_LITTLE_ENDIAN
+ // arguments to kernel_crc32:
+ const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
+ const Register data = R4_ARG2; // source byte array
+ const Register dataLen = R5_ARG3; // #bytes to process
+
+ if (VM_Version::has_vpmsumb()) {
+ const Register constants = R2; // constants address
+ const Register bconstants = R8; // barret table address
+
+ const Register t0 = R9;
+ const Register t1 = R10;
+ const Register t2 = R11;
+ const Register t3 = R12;
+ const Register t4 = R7;
BLOCK_COMMENT("Stub body {");
assert_different_registers(crc, data, dataLen, table);
- StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
+ StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
+ StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
+ StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);
- __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table);
+ __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr();
BLOCK_COMMENT("} Stub body");
+ } else
+#endif
+#endif
+ {
+ StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
+ generate_CRC_updateBytes(name, table, false);
}
return start;
}
+
// Initialization
void generate_initial() {
// Generates all stubs and initializes the entry points
@@ -3383,6 +3462,12 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_crc_table_adr = (address)StubRoutines::ppc64::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
}
+
+ // CRC32C Intrinsics.
+ if (UseCRC32CIntrinsics) {
+ StubRoutines::_crc32c_table_addr = (address)StubRoutines::ppc64::_crc32c_table;
+ StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
+ }
}
void generate_all() {
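The new `generate_CRC_updateBytes` helper and the CRC32C stub registration above, together with the `_crc32c_table` added below, all build on the classic table-driven CRC algorithm; `kernel_crc32_1word` is an unrolled multi-table variant of it, and the `invertCRC` argument controls whether the checksum is bit-inverted on entry to and exit from the kernel. As a hedged illustration of the underlying byte-at-a-time idea only — not the stub's actual unrolled kernel — here is a standalone C++ sketch:

```cpp
#include <cstddef>
#include <cstdint>

// Build a 256-entry lookup table for a reflected CRC-32 polynomial.
// 0xEDB88320 is the reflected zlib CRC32 polynomial; CRC32C uses the
// reflected Castagnoli polynomial 0x82F63B78 -- the table contents are
// the only difference between the two checksums here.
static void make_crc_table(uint32_t table[256], uint32_t poly) {
  for (uint32_t i = 0; i < 256; i++) {
    uint32_t c = i;
    for (int k = 0; k < 8; k++) {
      c = (c & 1) ? (poly ^ (c >> 1)) : (c >> 1);
    }
    table[i] = c;
  }
}

// Byte-at-a-time update. The leading/trailing bit inversion corresponds to
// the invertCRC handling in the stub above.
static uint32_t crc_update(const uint32_t table[256], uint32_t crc,
                           const uint8_t* buf, size_t len) {
  crc = ~crc;
  for (size_t i = 0; i < len; i++) {
    crc = table[(crc ^ buf[i]) & 0xFFu] ^ (crc >> 8);
  }
  return ~crc;
}
```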
diff --git a/hotspot/src/cpu/ppc/vm/stubRoutines_ppc.hpp b/hotspot/src/cpu/ppc/vm/stubRoutines_ppc.hpp
index 5b5b2c270dd..dcda6be8a76 100644
--- a/hotspot/src/cpu/ppc/vm/stubRoutines_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/stubRoutines_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,13 +55,16 @@ class ppc64 {
// CRC32 Intrinsics.
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
+ static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint* _constants;
static juint* _barret_constants;
public:
// CRC32 Intrinsics.
+ static void generate_load_table_addr(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
+ static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_constants_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_barret_constants_addr(MacroAssembler* masm, Register table);
static juint* generate_crc_constants();
diff --git a/hotspot/src/cpu/ppc/vm/stubRoutines_ppc_64.cpp b/hotspot/src/cpu/ppc/vm/stubRoutines_ppc_64.cpp
index 088d63a2f79..f5591669bcd 100644
--- a/hotspot/src/cpu/ppc/vm/stubRoutines_ppc_64.cpp
+++ b/hotspot/src/cpu/ppc/vm/stubRoutines_ppc_64.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,11 @@
#define __ masm->
-// CRC32 Intrinsics.
+// CRC32(C) Intrinsics.
+void StubRoutines::ppc64::generate_load_crc32c_table_addr(MacroAssembler* masm, Register table) {
+ __ load_const_optimized(table, StubRoutines::_crc32c_table_addr, R0);
+}
+
void StubRoutines::ppc64::generate_load_crc_table_addr(MacroAssembler* masm, Register table) {
__ load_const_optimized(table, StubRoutines::_crc_table_adr, R0);
}
@@ -347,441 +351,592 @@ juint* StubRoutines::ppc64::generate_crc_barret_constants() {
* crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.h
*/
juint StubRoutines::ppc64::_crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE] = {
- {
- 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
- 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
- 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
- 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
- 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
- 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
- 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
- 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
- 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
- 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
- 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
- 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
- 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
- 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
- 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
- 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
- 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
- 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
- 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
- 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
- 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
- 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
- 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
- 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
- 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
- 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
- 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
- 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
- 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
- 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
- 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
- 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
- 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
- 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
- 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
- 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
- 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
- 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
- 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
- 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
- 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
- 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
- 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
- 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
- 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
- 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
- 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
- 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
- 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
- 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
- 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
- 0x2d02ef8dUL
-#ifdef CRC32_BYFOUR
- },
- {
- 0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL,
- 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL,
- 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL,
- 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL,
- 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL,
- 0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL,
- 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL,
- 0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL,
- 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL,
- 0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL,
- 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL,
- 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL,
- 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL,
- 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL,
- 0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL,
- 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL,
- 0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL,
- 0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL,
- 0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL,
- 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL,
- 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL,
- 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL,
- 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL,
- 0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL,
- 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL,
- 0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL,
- 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL,
- 0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL,
- 0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL,
- 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL,
- 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL,
- 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL,
- 0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL,
- 0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL,
- 0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL,
- 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL,
- 0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL,
- 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL,
- 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL,
- 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL,
- 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL,
- 0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL,
- 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL,
- 0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL,
- 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL,
- 0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL,
- 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL,
- 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL,
- 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL,
- 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL,
- 0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL,
- 0x9324fd72UL
- },
- {
- 0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL,
- 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL,
- 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL,
- 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL,
- 0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL,
- 0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL,
- 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL,
- 0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL,
- 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL,
- 0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL,
- 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL,
- 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL,
- 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL,
- 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL,
- 0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL,
- 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL,
- 0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL,
- 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL,
- 0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL,
- 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL,
- 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL,
- 0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL,
- 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL,
- 0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL,
- 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL,
- 0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL,
- 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL,
- 0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL,
- 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL,
- 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL,
- 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL,
- 0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL,
- 0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL,
- 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL,
- 0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL,
- 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL,
- 0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL,
- 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL,
- 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL,
- 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL,
- 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL,
- 0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL,
- 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL,
- 0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL,
- 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL,
- 0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL,
- 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL,
- 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL,
- 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL,
- 0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL,
- 0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL,
- 0xbe9834edUL
- },
- {
- 0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL,
- 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL,
- 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL,
- 0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL,
- 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL,
- 0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL,
- 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL,
- 0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL,
- 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL,
- 0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL,
- 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL,
- 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL,
- 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL,
- 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL,
- 0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL,
- 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL,
- 0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL,
- 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL,
- 0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL,
- 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL,
- 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL,
- 0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL,
- 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL,
- 0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL,
- 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL,
- 0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL,
- 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL,
- 0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL,
- 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL,
- 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL,
- 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL,
- 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL,
- 0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL,
- 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL,
- 0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL,
- 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL,
- 0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL,
- 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL,
- 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL,
- 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL,
- 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL,
- 0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL,
- 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL,
- 0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL,
- 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL,
- 0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL,
- 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL,
- 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL,
- 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL,
- 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL,
- 0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL,
- 0xde0506f1UL
- },
- {
- 0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL,
- 0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL,
- 0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL,
- 0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL,
- 0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL,
- 0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL,
- 0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL,
- 0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL,
- 0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL,
- 0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL,
- 0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL,
- 0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL,
- 0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL,
- 0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL,
- 0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL,
- 0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL,
- 0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL,
- 0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL,
- 0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL,
- 0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL,
- 0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL,
- 0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL,
- 0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL,
- 0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL,
- 0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL,
- 0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL,
- 0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL,
- 0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL,
- 0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL,
- 0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL,
- 0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL,
- 0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL,
- 0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL,
- 0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL,
- 0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL,
- 0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL,
- 0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL,
- 0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL,
- 0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL,
- 0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL,
- 0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL,
- 0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL,
- 0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL,
- 0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL,
- 0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL,
- 0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL,
- 0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL,
- 0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL,
- 0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL,
- 0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL,
- 0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL,
- 0x8def022dUL
- },
- {
- 0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL,
- 0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL,
- 0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL,
- 0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL,
- 0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL,
- 0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL,
- 0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL,
- 0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL,
- 0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL,
- 0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL,
- 0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL,
- 0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL,
- 0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL,
- 0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL,
- 0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL,
- 0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL,
- 0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL,
- 0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL,
- 0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL,
- 0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL,
- 0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL,
- 0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL,
- 0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL,
- 0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL,
- 0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL,
- 0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL,
- 0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL,
- 0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL,
- 0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL,
- 0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL,
- 0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL,
- 0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL,
- 0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL,
- 0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL,
- 0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL,
- 0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL,
- 0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL,
- 0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL,
- 0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL,
- 0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL,
- 0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL,
- 0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL,
- 0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL,
- 0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL,
- 0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL,
- 0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL,
- 0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL,
- 0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL,
- 0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL,
- 0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL,
- 0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL,
- 0x72fd2493UL
- },
- {
- 0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL,
- 0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL,
- 0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL,
- 0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL,
- 0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL,
- 0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL,
- 0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL,
- 0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL,
- 0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL,
- 0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL,
- 0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL,
- 0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL,
- 0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL,
- 0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL,
- 0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL,
- 0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL,
- 0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL,
- 0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL,
- 0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL,
- 0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL,
- 0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL,
- 0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL,
- 0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL,
- 0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL,
- 0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL,
- 0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL,
- 0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL,
- 0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL,
- 0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL,
- 0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL,
- 0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL,
- 0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL,
- 0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL,
- 0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL,
- 0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL,
- 0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL,
- 0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL,
- 0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL,
- 0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL,
- 0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL,
- 0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL,
- 0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL,
- 0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL,
- 0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL,
- 0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL,
- 0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL,
- 0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL,
- 0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL,
- 0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL,
- 0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL,
- 0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL,
- 0xed3498beUL
- },
- {
- 0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL,
- 0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL,
- 0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL,
- 0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL,
- 0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL,
- 0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL,
- 0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL,
- 0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL,
- 0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL,
- 0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL,
- 0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL,
- 0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL,
- 0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL,
- 0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL,
- 0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL,
- 0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL,
- 0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL,
- 0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL,
- 0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL,
- 0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL,
- 0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL,
- 0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL,
- 0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL,
- 0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL,
- 0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL,
- 0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL,
- 0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL,
- 0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL,
- 0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL,
- 0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL,
- 0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL,
- 0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL,
- 0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL,
- 0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL,
- 0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL,
- 0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL,
- 0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL,
- 0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL,
- 0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL,
- 0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL,
- 0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL,
- 0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL,
- 0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL,
- 0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL,
- 0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL,
- 0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL,
- 0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL,
- 0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL,
- 0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL,
- 0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL,
- 0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL,
- 0xf10605deUL
-#endif
- }
-};
+ /* polyBits = 7976584769 0x00000001db710641L, shifted = 0xedb88320 */
+ /* CRC32 table for single bytes, auto-generated. DO NOT MODIFY! */
+ /* CRC32 table 0 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x77073096U, 0xee0e612cU, 0x990951baU, 0x076dc419U, 0x706af48fU, 0xe963a535U, 0x9e6495a3U
+ /* 8 */ , 0x0edb8832U, 0x79dcb8a4U, 0xe0d5e91eU, 0x97d2d988U, 0x09b64c2bU, 0x7eb17cbdU, 0xe7b82d07U, 0x90bf1d91U
+ /* 16 */ , 0x1db71064U, 0x6ab020f2U, 0xf3b97148U, 0x84be41deU, 0x1adad47dU, 0x6ddde4ebU, 0xf4d4b551U, 0x83d385c7U
+ /* 24 */ , 0x136c9856U, 0x646ba8c0U, 0xfd62f97aU, 0x8a65c9ecU, 0x14015c4fU, 0x63066cd9U, 0xfa0f3d63U, 0x8d080df5U
+ /* 32 */ , 0x3b6e20c8U, 0x4c69105eU, 0xd56041e4U, 0xa2677172U, 0x3c03e4d1U, 0x4b04d447U, 0xd20d85fdU, 0xa50ab56bU
+ /* 40 */ , 0x35b5a8faU, 0x42b2986cU, 0xdbbbc9d6U, 0xacbcf940U, 0x32d86ce3U, 0x45df5c75U, 0xdcd60dcfU, 0xabd13d59U
+ /* 48 */ , 0x26d930acU, 0x51de003aU, 0xc8d75180U, 0xbfd06116U, 0x21b4f4b5U, 0x56b3c423U, 0xcfba9599U, 0xb8bda50fU
+ /* 56 */ , 0x2802b89eU, 0x5f058808U, 0xc60cd9b2U, 0xb10be924U, 0x2f6f7c87U, 0x58684c11U, 0xc1611dabU, 0xb6662d3dU
+ /* 64 */ , 0x76dc4190U, 0x01db7106U, 0x98d220bcU, 0xefd5102aU, 0x71b18589U, 0x06b6b51fU, 0x9fbfe4a5U, 0xe8b8d433U
+ /* 72 */ , 0x7807c9a2U, 0x0f00f934U, 0x9609a88eU, 0xe10e9818U, 0x7f6a0dbbU, 0x086d3d2dU, 0x91646c97U, 0xe6635c01U
+ /* 80 */ , 0x6b6b51f4U, 0x1c6c6162U, 0x856530d8U, 0xf262004eU, 0x6c0695edU, 0x1b01a57bU, 0x8208f4c1U, 0xf50fc457U
+ /* 88 */ , 0x65b0d9c6U, 0x12b7e950U, 0x8bbeb8eaU, 0xfcb9887cU, 0x62dd1ddfU, 0x15da2d49U, 0x8cd37cf3U, 0xfbd44c65U
+ /* 96 */ , 0x4db26158U, 0x3ab551ceU, 0xa3bc0074U, 0xd4bb30e2U, 0x4adfa541U, 0x3dd895d7U, 0xa4d1c46dU, 0xd3d6f4fbU
+ /* 104 */ , 0x4369e96aU, 0x346ed9fcU, 0xad678846U, 0xda60b8d0U, 0x44042d73U, 0x33031de5U, 0xaa0a4c5fU, 0xdd0d7cc9U
+ /* 112 */ , 0x5005713cU, 0x270241aaU, 0xbe0b1010U, 0xc90c2086U, 0x5768b525U, 0x206f85b3U, 0xb966d409U, 0xce61e49fU
+ /* 120 */ , 0x5edef90eU, 0x29d9c998U, 0xb0d09822U, 0xc7d7a8b4U, 0x59b33d17U, 0x2eb40d81U, 0xb7bd5c3bU, 0xc0ba6cadU
+ /* 128 */ , 0xedb88320U, 0x9abfb3b6U, 0x03b6e20cU, 0x74b1d29aU, 0xead54739U, 0x9dd277afU, 0x04db2615U, 0x73dc1683U
+ /* 136 */ , 0xe3630b12U, 0x94643b84U, 0x0d6d6a3eU, 0x7a6a5aa8U, 0xe40ecf0bU, 0x9309ff9dU, 0x0a00ae27U, 0x7d079eb1U
+ /* 144 */ , 0xf00f9344U, 0x8708a3d2U, 0x1e01f268U, 0x6906c2feU, 0xf762575dU, 0x806567cbU, 0x196c3671U, 0x6e6b06e7U
+ /* 152 */ , 0xfed41b76U, 0x89d32be0U, 0x10da7a5aU, 0x67dd4accU, 0xf9b9df6fU, 0x8ebeeff9U, 0x17b7be43U, 0x60b08ed5U
+ /* 160 */ , 0xd6d6a3e8U, 0xa1d1937eU, 0x38d8c2c4U, 0x4fdff252U, 0xd1bb67f1U, 0xa6bc5767U, 0x3fb506ddU, 0x48b2364bU
+ /* 168 */ , 0xd80d2bdaU, 0xaf0a1b4cU, 0x36034af6U, 0x41047a60U, 0xdf60efc3U, 0xa867df55U, 0x316e8eefU, 0x4669be79U
+ /* 176 */ , 0xcb61b38cU, 0xbc66831aU, 0x256fd2a0U, 0x5268e236U, 0xcc0c7795U, 0xbb0b4703U, 0x220216b9U, 0x5505262fU
+ /* 184 */ , 0xc5ba3bbeU, 0xb2bd0b28U, 0x2bb45a92U, 0x5cb36a04U, 0xc2d7ffa7U, 0xb5d0cf31U, 0x2cd99e8bU, 0x5bdeae1dU
+ /* 192 */ , 0x9b64c2b0U, 0xec63f226U, 0x756aa39cU, 0x026d930aU, 0x9c0906a9U, 0xeb0e363fU, 0x72076785U, 0x05005713U
+ /* 200 */ , 0x95bf4a82U, 0xe2b87a14U, 0x7bb12baeU, 0x0cb61b38U, 0x92d28e9bU, 0xe5d5be0dU, 0x7cdcefb7U, 0x0bdbdf21U
+ /* 208 */ , 0x86d3d2d4U, 0xf1d4e242U, 0x68ddb3f8U, 0x1fda836eU, 0x81be16cdU, 0xf6b9265bU, 0x6fb077e1U, 0x18b74777U
+ /* 216 */ , 0x88085ae6U, 0xff0f6a70U, 0x66063bcaU, 0x11010b5cU, 0x8f659effU, 0xf862ae69U, 0x616bffd3U, 0x166ccf45U
+ /* 224 */ , 0xa00ae278U, 0xd70dd2eeU, 0x4e048354U, 0x3903b3c2U, 0xa7672661U, 0xd06016f7U, 0x4969474dU, 0x3e6e77dbU
+ /* 232 */ , 0xaed16a4aU, 0xd9d65adcU, 0x40df0b66U, 0x37d83bf0U, 0xa9bcae53U, 0xdebb9ec5U, 0x47b2cf7fU, 0x30b5ffe9U
+ /* 240 */ , 0xbdbdf21cU, 0xcabac28aU, 0x53b39330U, 0x24b4a3a6U, 0xbad03605U, 0xcdd70693U, 0x54de5729U, 0x23d967bfU
+ /* 248 */ , 0xb3667a2eU, 0xc4614ab8U, 0x5d681b02U, 0x2a6f2b94U, 0xb40bbe37U, 0xc30c8ea1U, 0x5a05df1bU, 0x2d02ef8dU
+ }
+ #ifdef CRC32_BYFOUR
+ ,
+ /* CRC32 table 1 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x191b3141U, 0x32366282U, 0x2b2d53c3U, 0x646cc504U, 0x7d77f445U, 0x565aa786U, 0x4f4196c7U
+ /* 8 */ , 0xc8d98a08U, 0xd1c2bb49U, 0xfaefe88aU, 0xe3f4d9cbU, 0xacb54f0cU, 0xb5ae7e4dU, 0x9e832d8eU, 0x87981ccfU
+ /* 16 */ , 0x4ac21251U, 0x53d92310U, 0x78f470d3U, 0x61ef4192U, 0x2eaed755U, 0x37b5e614U, 0x1c98b5d7U, 0x05838496U
+ /* 24 */ , 0x821b9859U, 0x9b00a918U, 0xb02dfadbU, 0xa936cb9aU, 0xe6775d5dU, 0xff6c6c1cU, 0xd4413fdfU, 0xcd5a0e9eU
+ /* 32 */ , 0x958424a2U, 0x8c9f15e3U, 0xa7b24620U, 0xbea97761U, 0xf1e8e1a6U, 0xe8f3d0e7U, 0xc3de8324U, 0xdac5b265U
+ /* 40 */ , 0x5d5daeaaU, 0x44469febU, 0x6f6bcc28U, 0x7670fd69U, 0x39316baeU, 0x202a5aefU, 0x0b07092cU, 0x121c386dU
+ /* 48 */ , 0xdf4636f3U, 0xc65d07b2U, 0xed705471U, 0xf46b6530U, 0xbb2af3f7U, 0xa231c2b6U, 0x891c9175U, 0x9007a034U
+ /* 56 */ , 0x179fbcfbU, 0x0e848dbaU, 0x25a9de79U, 0x3cb2ef38U, 0x73f379ffU, 0x6ae848beU, 0x41c51b7dU, 0x58de2a3cU
+ /* 64 */ , 0xf0794f05U, 0xe9627e44U, 0xc24f2d87U, 0xdb541cc6U, 0x94158a01U, 0x8d0ebb40U, 0xa623e883U, 0xbf38d9c2U
+ /* 72 */ , 0x38a0c50dU, 0x21bbf44cU, 0x0a96a78fU, 0x138d96ceU, 0x5ccc0009U, 0x45d73148U, 0x6efa628bU, 0x77e153caU
+ /* 80 */ , 0xbabb5d54U, 0xa3a06c15U, 0x888d3fd6U, 0x91960e97U, 0xded79850U, 0xc7cca911U, 0xece1fad2U, 0xf5facb93U
+ /* 88 */ , 0x7262d75cU, 0x6b79e61dU, 0x4054b5deU, 0x594f849fU, 0x160e1258U, 0x0f152319U, 0x243870daU, 0x3d23419bU
+ /* 96 */ , 0x65fd6ba7U, 0x7ce65ae6U, 0x57cb0925U, 0x4ed03864U, 0x0191aea3U, 0x188a9fe2U, 0x33a7cc21U, 0x2abcfd60U
+ /* 104 */ , 0xad24e1afU, 0xb43fd0eeU, 0x9f12832dU, 0x8609b26cU, 0xc94824abU, 0xd05315eaU, 0xfb7e4629U, 0xe2657768U
+ /* 112 */ , 0x2f3f79f6U, 0x362448b7U, 0x1d091b74U, 0x04122a35U, 0x4b53bcf2U, 0x52488db3U, 0x7965de70U, 0x607eef31U
+ /* 120 */ , 0xe7e6f3feU, 0xfefdc2bfU, 0xd5d0917cU, 0xcccba03dU, 0x838a36faU, 0x9a9107bbU, 0xb1bc5478U, 0xa8a76539U
+ /* 128 */ , 0x3b83984bU, 0x2298a90aU, 0x09b5fac9U, 0x10aecb88U, 0x5fef5d4fU, 0x46f46c0eU, 0x6dd93fcdU, 0x74c20e8cU
+ /* 136 */ , 0xf35a1243U, 0xea412302U, 0xc16c70c1U, 0xd8774180U, 0x9736d747U, 0x8e2de606U, 0xa500b5c5U, 0xbc1b8484U
+ /* 144 */ , 0x71418a1aU, 0x685abb5bU, 0x4377e898U, 0x5a6cd9d9U, 0x152d4f1eU, 0x0c367e5fU, 0x271b2d9cU, 0x3e001cddU
+ /* 152 */ , 0xb9980012U, 0xa0833153U, 0x8bae6290U, 0x92b553d1U, 0xddf4c516U, 0xc4eff457U, 0xefc2a794U, 0xf6d996d5U
+ /* 160 */ , 0xae07bce9U, 0xb71c8da8U, 0x9c31de6bU, 0x852aef2aU, 0xca6b79edU, 0xd37048acU, 0xf85d1b6fU, 0xe1462a2eU
+ /* 168 */ , 0x66de36e1U, 0x7fc507a0U, 0x54e85463U, 0x4df36522U, 0x02b2f3e5U, 0x1ba9c2a4U, 0x30849167U, 0x299fa026U
+ /* 176 */ , 0xe4c5aeb8U, 0xfdde9ff9U, 0xd6f3cc3aU, 0xcfe8fd7bU, 0x80a96bbcU, 0x99b25afdU, 0xb29f093eU, 0xab84387fU
+ /* 184 */ , 0x2c1c24b0U, 0x350715f1U, 0x1e2a4632U, 0x07317773U, 0x4870e1b4U, 0x516bd0f5U, 0x7a468336U, 0x635db277U
+ /* 192 */ , 0xcbfad74eU, 0xd2e1e60fU, 0xf9ccb5ccU, 0xe0d7848dU, 0xaf96124aU, 0xb68d230bU, 0x9da070c8U, 0x84bb4189U
+ /* 200 */ , 0x03235d46U, 0x1a386c07U, 0x31153fc4U, 0x280e0e85U, 0x674f9842U, 0x7e54a903U, 0x5579fac0U, 0x4c62cb81U
+ /* 208 */ , 0x8138c51fU, 0x9823f45eU, 0xb30ea79dU, 0xaa1596dcU, 0xe554001bU, 0xfc4f315aU, 0xd7626299U, 0xce7953d8U
+ /* 216 */ , 0x49e14f17U, 0x50fa7e56U, 0x7bd72d95U, 0x62cc1cd4U, 0x2d8d8a13U, 0x3496bb52U, 0x1fbbe891U, 0x06a0d9d0U
+ /* 224 */ , 0x5e7ef3ecU, 0x4765c2adU, 0x6c48916eU, 0x7553a02fU, 0x3a1236e8U, 0x230907a9U, 0x0824546aU, 0x113f652bU
+ /* 232 */ , 0x96a779e4U, 0x8fbc48a5U, 0xa4911b66U, 0xbd8a2a27U, 0xf2cbbce0U, 0xebd08da1U, 0xc0fdde62U, 0xd9e6ef23U
+ /* 240 */ , 0x14bce1bdU, 0x0da7d0fcU, 0x268a833fU, 0x3f91b27eU, 0x70d024b9U, 0x69cb15f8U, 0x42e6463bU, 0x5bfd777aU
+ /* 248 */ , 0xdc656bb5U, 0xc57e5af4U, 0xee530937U, 0xf7483876U, 0xb809aeb1U, 0xa1129ff0U, 0x8a3fcc33U, 0x9324fd72U
+ }
+ ,
+ /* CRC32 table 2 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x01c26a37U, 0x0384d46eU, 0x0246be59U, 0x0709a8dcU, 0x06cbc2ebU, 0x048d7cb2U, 0x054f1685U
+ /* 8 */ , 0x0e1351b8U, 0x0fd13b8fU, 0x0d9785d6U, 0x0c55efe1U, 0x091af964U, 0x08d89353U, 0x0a9e2d0aU, 0x0b5c473dU
+ /* 16 */ , 0x1c26a370U, 0x1de4c947U, 0x1fa2771eU, 0x1e601d29U, 0x1b2f0bacU, 0x1aed619bU, 0x18abdfc2U, 0x1969b5f5U
+ /* 24 */ , 0x1235f2c8U, 0x13f798ffU, 0x11b126a6U, 0x10734c91U, 0x153c5a14U, 0x14fe3023U, 0x16b88e7aU, 0x177ae44dU
+ /* 32 */ , 0x384d46e0U, 0x398f2cd7U, 0x3bc9928eU, 0x3a0bf8b9U, 0x3f44ee3cU, 0x3e86840bU, 0x3cc03a52U, 0x3d025065U
+ /* 40 */ , 0x365e1758U, 0x379c7d6fU, 0x35dac336U, 0x3418a901U, 0x3157bf84U, 0x3095d5b3U, 0x32d36beaU, 0x331101ddU
+ /* 48 */ , 0x246be590U, 0x25a98fa7U, 0x27ef31feU, 0x262d5bc9U, 0x23624d4cU, 0x22a0277bU, 0x20e69922U, 0x2124f315U
+ /* 56 */ , 0x2a78b428U, 0x2bbade1fU, 0x29fc6046U, 0x283e0a71U, 0x2d711cf4U, 0x2cb376c3U, 0x2ef5c89aU, 0x2f37a2adU
+ /* 64 */ , 0x709a8dc0U, 0x7158e7f7U, 0x731e59aeU, 0x72dc3399U, 0x7793251cU, 0x76514f2bU, 0x7417f172U, 0x75d59b45U
+ /* 72 */ , 0x7e89dc78U, 0x7f4bb64fU, 0x7d0d0816U, 0x7ccf6221U, 0x798074a4U, 0x78421e93U, 0x7a04a0caU, 0x7bc6cafdU
+ /* 80 */ , 0x6cbc2eb0U, 0x6d7e4487U, 0x6f38fadeU, 0x6efa90e9U, 0x6bb5866cU, 0x6a77ec5bU, 0x68315202U, 0x69f33835U
+ /* 88 */ , 0x62af7f08U, 0x636d153fU, 0x612bab66U, 0x60e9c151U, 0x65a6d7d4U, 0x6464bde3U, 0x662203baU, 0x67e0698dU
+ /* 96 */ , 0x48d7cb20U, 0x4915a117U, 0x4b531f4eU, 0x4a917579U, 0x4fde63fcU, 0x4e1c09cbU, 0x4c5ab792U, 0x4d98dda5U
+ /* 104 */ , 0x46c49a98U, 0x4706f0afU, 0x45404ef6U, 0x448224c1U, 0x41cd3244U, 0x400f5873U, 0x4249e62aU, 0x438b8c1dU
+ /* 112 */ , 0x54f16850U, 0x55330267U, 0x5775bc3eU, 0x56b7d609U, 0x53f8c08cU, 0x523aaabbU, 0x507c14e2U, 0x51be7ed5U
+ /* 120 */ , 0x5ae239e8U, 0x5b2053dfU, 0x5966ed86U, 0x58a487b1U, 0x5deb9134U, 0x5c29fb03U, 0x5e6f455aU, 0x5fad2f6dU
+ /* 128 */ , 0xe1351b80U, 0xe0f771b7U, 0xe2b1cfeeU, 0xe373a5d9U, 0xe63cb35cU, 0xe7fed96bU, 0xe5b86732U, 0xe47a0d05U
+ /* 136 */ , 0xef264a38U, 0xeee4200fU, 0xeca29e56U, 0xed60f461U, 0xe82fe2e4U, 0xe9ed88d3U, 0xebab368aU, 0xea695cbdU
+ /* 144 */ , 0xfd13b8f0U, 0xfcd1d2c7U, 0xfe976c9eU, 0xff5506a9U, 0xfa1a102cU, 0xfbd87a1bU, 0xf99ec442U, 0xf85cae75U
+ /* 152 */ , 0xf300e948U, 0xf2c2837fU, 0xf0843d26U, 0xf1465711U, 0xf4094194U, 0xf5cb2ba3U, 0xf78d95faU, 0xf64fffcdU
+ /* 160 */ , 0xd9785d60U, 0xd8ba3757U, 0xdafc890eU, 0xdb3ee339U, 0xde71f5bcU, 0xdfb39f8bU, 0xddf521d2U, 0xdc374be5U
+ /* 168 */ , 0xd76b0cd8U, 0xd6a966efU, 0xd4efd8b6U, 0xd52db281U, 0xd062a404U, 0xd1a0ce33U, 0xd3e6706aU, 0xd2241a5dU
+ /* 176 */ , 0xc55efe10U, 0xc49c9427U, 0xc6da2a7eU, 0xc7184049U, 0xc25756ccU, 0xc3953cfbU, 0xc1d382a2U, 0xc011e895U
+ /* 184 */ , 0xcb4dafa8U, 0xca8fc59fU, 0xc8c97bc6U, 0xc90b11f1U, 0xcc440774U, 0xcd866d43U, 0xcfc0d31aU, 0xce02b92dU
+ /* 192 */ , 0x91af9640U, 0x906dfc77U, 0x922b422eU, 0x93e92819U, 0x96a63e9cU, 0x976454abU, 0x9522eaf2U, 0x94e080c5U
+ /* 200 */ , 0x9fbcc7f8U, 0x9e7eadcfU, 0x9c381396U, 0x9dfa79a1U, 0x98b56f24U, 0x99770513U, 0x9b31bb4aU, 0x9af3d17dU
+ /* 208 */ , 0x8d893530U, 0x8c4b5f07U, 0x8e0de15eU, 0x8fcf8b69U, 0x8a809decU, 0x8b42f7dbU, 0x89044982U, 0x88c623b5U
+ /* 216 */ , 0x839a6488U, 0x82580ebfU, 0x801eb0e6U, 0x81dcdad1U, 0x8493cc54U, 0x8551a663U, 0x8717183aU, 0x86d5720dU
+ /* 224 */ , 0xa9e2d0a0U, 0xa820ba97U, 0xaa6604ceU, 0xaba46ef9U, 0xaeeb787cU, 0xaf29124bU, 0xad6fac12U, 0xacadc625U
+ /* 232 */ , 0xa7f18118U, 0xa633eb2fU, 0xa4755576U, 0xa5b73f41U, 0xa0f829c4U, 0xa13a43f3U, 0xa37cfdaaU, 0xa2be979dU
+ /* 240 */ , 0xb5c473d0U, 0xb40619e7U, 0xb640a7beU, 0xb782cd89U, 0xb2cddb0cU, 0xb30fb13bU, 0xb1490f62U, 0xb08b6555U
+ /* 248 */ , 0xbbd72268U, 0xba15485fU, 0xb853f606U, 0xb9919c31U, 0xbcde8ab4U, 0xbd1ce083U, 0xbf5a5edaU, 0xbe9834edU
+ }
+ ,
+ /* CRC32 table 3 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0xb8bc6765U, 0xaa09c88bU, 0x12b5afeeU, 0x8f629757U, 0x37def032U, 0x256b5fdcU, 0x9dd738b9U
+ /* 8 */ , 0xc5b428efU, 0x7d084f8aU, 0x6fbde064U, 0xd7018701U, 0x4ad6bfb8U, 0xf26ad8ddU, 0xe0df7733U, 0x58631056U
+ /* 16 */ , 0x5019579fU, 0xe8a530faU, 0xfa109f14U, 0x42acf871U, 0xdf7bc0c8U, 0x67c7a7adU, 0x75720843U, 0xcdce6f26U
+ /* 24 */ , 0x95ad7f70U, 0x2d111815U, 0x3fa4b7fbU, 0x8718d09eU, 0x1acfe827U, 0xa2738f42U, 0xb0c620acU, 0x087a47c9U
+ /* 32 */ , 0xa032af3eU, 0x188ec85bU, 0x0a3b67b5U, 0xb28700d0U, 0x2f503869U, 0x97ec5f0cU, 0x8559f0e2U, 0x3de59787U
+ /* 40 */ , 0x658687d1U, 0xdd3ae0b4U, 0xcf8f4f5aU, 0x7733283fU, 0xeae41086U, 0x525877e3U, 0x40edd80dU, 0xf851bf68U
+ /* 48 */ , 0xf02bf8a1U, 0x48979fc4U, 0x5a22302aU, 0xe29e574fU, 0x7f496ff6U, 0xc7f50893U, 0xd540a77dU, 0x6dfcc018U
+ /* 56 */ , 0x359fd04eU, 0x8d23b72bU, 0x9f9618c5U, 0x272a7fa0U, 0xbafd4719U, 0x0241207cU, 0x10f48f92U, 0xa848e8f7U
+ /* 64 */ , 0x9b14583dU, 0x23a83f58U, 0x311d90b6U, 0x89a1f7d3U, 0x1476cf6aU, 0xaccaa80fU, 0xbe7f07e1U, 0x06c36084U
+ /* 72 */ , 0x5ea070d2U, 0xe61c17b7U, 0xf4a9b859U, 0x4c15df3cU, 0xd1c2e785U, 0x697e80e0U, 0x7bcb2f0eU, 0xc377486bU
+ /* 80 */ , 0xcb0d0fa2U, 0x73b168c7U, 0x6104c729U, 0xd9b8a04cU, 0x446f98f5U, 0xfcd3ff90U, 0xee66507eU, 0x56da371bU
+ /* 88 */ , 0x0eb9274dU, 0xb6054028U, 0xa4b0efc6U, 0x1c0c88a3U, 0x81dbb01aU, 0x3967d77fU, 0x2bd27891U, 0x936e1ff4U
+ /* 96 */ , 0x3b26f703U, 0x839a9066U, 0x912f3f88U, 0x299358edU, 0xb4446054U, 0x0cf80731U, 0x1e4da8dfU, 0xa6f1cfbaU
+ /* 104 */ , 0xfe92dfecU, 0x462eb889U, 0x549b1767U, 0xec277002U, 0x71f048bbU, 0xc94c2fdeU, 0xdbf98030U, 0x6345e755U
+ /* 112 */ , 0x6b3fa09cU, 0xd383c7f9U, 0xc1366817U, 0x798a0f72U, 0xe45d37cbU, 0x5ce150aeU, 0x4e54ff40U, 0xf6e89825U
+ /* 120 */ , 0xae8b8873U, 0x1637ef16U, 0x048240f8U, 0xbc3e279dU, 0x21e91f24U, 0x99557841U, 0x8be0d7afU, 0x335cb0caU
+ /* 128 */ , 0xed59b63bU, 0x55e5d15eU, 0x47507eb0U, 0xffec19d5U, 0x623b216cU, 0xda874609U, 0xc832e9e7U, 0x708e8e82U
+ /* 136 */ , 0x28ed9ed4U, 0x9051f9b1U, 0x82e4565fU, 0x3a58313aU, 0xa78f0983U, 0x1f336ee6U, 0x0d86c108U, 0xb53aa66dU
+ /* 144 */ , 0xbd40e1a4U, 0x05fc86c1U, 0x1749292fU, 0xaff54e4aU, 0x322276f3U, 0x8a9e1196U, 0x982bbe78U, 0x2097d91dU
+ /* 152 */ , 0x78f4c94bU, 0xc048ae2eU, 0xd2fd01c0U, 0x6a4166a5U, 0xf7965e1cU, 0x4f2a3979U, 0x5d9f9697U, 0xe523f1f2U
+ /* 160 */ , 0x4d6b1905U, 0xf5d77e60U, 0xe762d18eU, 0x5fdeb6ebU, 0xc2098e52U, 0x7ab5e937U, 0x680046d9U, 0xd0bc21bcU
+ /* 168 */ , 0x88df31eaU, 0x3063568fU, 0x22d6f961U, 0x9a6a9e04U, 0x07bda6bdU, 0xbf01c1d8U, 0xadb46e36U, 0x15080953U
+ /* 176 */ , 0x1d724e9aU, 0xa5ce29ffU, 0xb77b8611U, 0x0fc7e174U, 0x9210d9cdU, 0x2aacbea8U, 0x38191146U, 0x80a57623U
+ /* 184 */ , 0xd8c66675U, 0x607a0110U, 0x72cfaefeU, 0xca73c99bU, 0x57a4f122U, 0xef189647U, 0xfdad39a9U, 0x45115eccU
+ /* 192 */ , 0x764dee06U, 0xcef18963U, 0xdc44268dU, 0x64f841e8U, 0xf92f7951U, 0x41931e34U, 0x5326b1daU, 0xeb9ad6bfU
+ /* 200 */ , 0xb3f9c6e9U, 0x0b45a18cU, 0x19f00e62U, 0xa14c6907U, 0x3c9b51beU, 0x842736dbU, 0x96929935U, 0x2e2efe50U
+ /* 208 */ , 0x2654b999U, 0x9ee8defcU, 0x8c5d7112U, 0x34e11677U, 0xa9362eceU, 0x118a49abU, 0x033fe645U, 0xbb838120U
+ /* 216 */ , 0xe3e09176U, 0x5b5cf613U, 0x49e959fdU, 0xf1553e98U, 0x6c820621U, 0xd43e6144U, 0xc68bceaaU, 0x7e37a9cfU
+ /* 224 */ , 0xd67f4138U, 0x6ec3265dU, 0x7c7689b3U, 0xc4caeed6U, 0x591dd66fU, 0xe1a1b10aU, 0xf3141ee4U, 0x4ba87981U
+ /* 232 */ , 0x13cb69d7U, 0xab770eb2U, 0xb9c2a15cU, 0x017ec639U, 0x9ca9fe80U, 0x241599e5U, 0x36a0360bU, 0x8e1c516eU
+ /* 240 */ , 0x866616a7U, 0x3eda71c2U, 0x2c6fde2cU, 0x94d3b949U, 0x090481f0U, 0xb1b8e695U, 0xa30d497bU, 0x1bb12e1eU
+ /* 248 */ , 0x43d23e48U, 0xfb6e592dU, 0xe9dbf6c3U, 0x516791a6U, 0xccb0a91fU, 0x740cce7aU, 0x66b96194U, 0xde0506f1U
+ }
+ ,
+ /* CRC32 table 4 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x96300777U, 0x2c610eeeU, 0xba510999U, 0x19c46d07U, 0x8ff46a70U, 0x35a563e9U, 0xa395649eU
+ /* 8 */ , 0x3288db0eU, 0xa4b8dc79U, 0x1ee9d5e0U, 0x88d9d297U, 0x2b4cb609U, 0xbd7cb17eU, 0x072db8e7U, 0x911dbf90U
+ /* 16 */ , 0x6410b71dU, 0xf220b06aU, 0x4871b9f3U, 0xde41be84U, 0x7dd4da1aU, 0xebe4dd6dU, 0x51b5d4f4U, 0xc785d383U
+ /* 24 */ , 0x56986c13U, 0xc0a86b64U, 0x7af962fdU, 0xecc9658aU, 0x4f5c0114U, 0xd96c0663U, 0x633d0ffaU, 0xf50d088dU
+ /* 32 */ , 0xc8206e3bU, 0x5e10694cU, 0xe44160d5U, 0x727167a2U, 0xd1e4033cU, 0x47d4044bU, 0xfd850dd2U, 0x6bb50aa5U
+ /* 40 */ , 0xfaa8b535U, 0x6c98b242U, 0xd6c9bbdbU, 0x40f9bcacU, 0xe36cd832U, 0x755cdf45U, 0xcf0dd6dcU, 0x593dd1abU
+ /* 48 */ , 0xac30d926U, 0x3a00de51U, 0x8051d7c8U, 0x1661d0bfU, 0xb5f4b421U, 0x23c4b356U, 0x9995bacfU, 0x0fa5bdb8U
+ /* 56 */ , 0x9eb80228U, 0x0888055fU, 0xb2d90cc6U, 0x24e90bb1U, 0x877c6f2fU, 0x114c6858U, 0xab1d61c1U, 0x3d2d66b6U
+ /* 64 */ , 0x9041dc76U, 0x0671db01U, 0xbc20d298U, 0x2a10d5efU, 0x8985b171U, 0x1fb5b606U, 0xa5e4bf9fU, 0x33d4b8e8U
+ /* 72 */ , 0xa2c90778U, 0x34f9000fU, 0x8ea80996U, 0x18980ee1U, 0xbb0d6a7fU, 0x2d3d6d08U, 0x976c6491U, 0x015c63e6U
+ /* 80 */ , 0xf4516b6bU, 0x62616c1cU, 0xd8306585U, 0x4e0062f2U, 0xed95066cU, 0x7ba5011bU, 0xc1f40882U, 0x57c40ff5U
+ /* 88 */ , 0xc6d9b065U, 0x50e9b712U, 0xeab8be8bU, 0x7c88b9fcU, 0xdf1ddd62U, 0x492dda15U, 0xf37cd38cU, 0x654cd4fbU
+ /* 96 */ , 0x5861b24dU, 0xce51b53aU, 0x7400bca3U, 0xe230bbd4U, 0x41a5df4aU, 0xd795d83dU, 0x6dc4d1a4U, 0xfbf4d6d3U
+ /* 104 */ , 0x6ae96943U, 0xfcd96e34U, 0x468867adU, 0xd0b860daU, 0x732d0444U, 0xe51d0333U, 0x5f4c0aaaU, 0xc97c0dddU
+ /* 112 */ , 0x3c710550U, 0xaa410227U, 0x10100bbeU, 0x86200cc9U, 0x25b56857U, 0xb3856f20U, 0x09d466b9U, 0x9fe461ceU
+ /* 120 */ , 0x0ef9de5eU, 0x98c9d929U, 0x2298d0b0U, 0xb4a8d7c7U, 0x173db359U, 0x810db42eU, 0x3b5cbdb7U, 0xad6cbac0U
+ /* 128 */ , 0x2083b8edU, 0xb6b3bf9aU, 0x0ce2b603U, 0x9ad2b174U, 0x3947d5eaU, 0xaf77d29dU, 0x1526db04U, 0x8316dc73U
+ /* 136 */ , 0x120b63e3U, 0x843b6494U, 0x3e6a6d0dU, 0xa85a6a7aU, 0x0bcf0ee4U, 0x9dff0993U, 0x27ae000aU, 0xb19e077dU
+ /* 144 */ , 0x44930ff0U, 0xd2a30887U, 0x68f2011eU, 0xfec20669U, 0x5d5762f7U, 0xcb676580U, 0x71366c19U, 0xe7066b6eU
+ /* 152 */ , 0x761bd4feU, 0xe02bd389U, 0x5a7ada10U, 0xcc4add67U, 0x6fdfb9f9U, 0xf9efbe8eU, 0x43beb717U, 0xd58eb060U
+ /* 160 */ , 0xe8a3d6d6U, 0x7e93d1a1U, 0xc4c2d838U, 0x52f2df4fU, 0xf167bbd1U, 0x6757bca6U, 0xdd06b53fU, 0x4b36b248U
+ /* 168 */ , 0xda2b0dd8U, 0x4c1b0aafU, 0xf64a0336U, 0x607a0441U, 0xc3ef60dfU, 0x55df67a8U, 0xef8e6e31U, 0x79be6946U
+ /* 176 */ , 0x8cb361cbU, 0x1a8366bcU, 0xa0d26f25U, 0x36e26852U, 0x95770cccU, 0x03470bbbU, 0xb9160222U, 0x2f260555U
+ /* 184 */ , 0xbe3bbac5U, 0x280bbdb2U, 0x925ab42bU, 0x046ab35cU, 0xa7ffd7c2U, 0x31cfd0b5U, 0x8b9ed92cU, 0x1daede5bU
+ /* 192 */ , 0xb0c2649bU, 0x26f263ecU, 0x9ca36a75U, 0x0a936d02U, 0xa906099cU, 0x3f360eebU, 0x85670772U, 0x13570005U
+ /* 200 */ , 0x824abf95U, 0x147ab8e2U, 0xae2bb17bU, 0x381bb60cU, 0x9b8ed292U, 0x0dbed5e5U, 0xb7efdc7cU, 0x21dfdb0bU
+ /* 208 */ , 0xd4d2d386U, 0x42e2d4f1U, 0xf8b3dd68U, 0x6e83da1fU, 0xcd16be81U, 0x5b26b9f6U, 0xe177b06fU, 0x7747b718U
+ /* 216 */ , 0xe65a0888U, 0x706a0fffU, 0xca3b0666U, 0x5c0b0111U, 0xff9e658fU, 0x69ae62f8U, 0xd3ff6b61U, 0x45cf6c16U
+ /* 224 */ , 0x78e20aa0U, 0xeed20dd7U, 0x5483044eU, 0xc2b30339U, 0x612667a7U, 0xf71660d0U, 0x4d476949U, 0xdb776e3eU
+ /* 232 */ , 0x4a6ad1aeU, 0xdc5ad6d9U, 0x660bdf40U, 0xf03bd837U, 0x53aebca9U, 0xc59ebbdeU, 0x7fcfb247U, 0xe9ffb530U
+ /* 240 */ , 0x1cf2bdbdU, 0x8ac2bacaU, 0x3093b353U, 0xa6a3b424U, 0x0536d0baU, 0x9306d7cdU, 0x2957de54U, 0xbf67d923U
+ /* 248 */ , 0x2e7a66b3U, 0xb84a61c4U, 0x021b685dU, 0x942b6f2aU, 0x37be0bb4U, 0xa18e0cc3U, 0x1bdf055aU, 0x8def022dU
+ }
+ ,
+ /* CRC32 table 5 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x41311b19U, 0x82623632U, 0xc3532d2bU, 0x04c56c64U, 0x45f4777dU, 0x86a75a56U, 0xc796414fU
+ /* 8 */ , 0x088ad9c8U, 0x49bbc2d1U, 0x8ae8effaU, 0xcbd9f4e3U, 0x0c4fb5acU, 0x4d7eaeb5U, 0x8e2d839eU, 0xcf1c9887U
+ /* 16 */ , 0x5112c24aU, 0x1023d953U, 0xd370f478U, 0x9241ef61U, 0x55d7ae2eU, 0x14e6b537U, 0xd7b5981cU, 0x96848305U
+ /* 24 */ , 0x59981b82U, 0x18a9009bU, 0xdbfa2db0U, 0x9acb36a9U, 0x5d5d77e6U, 0x1c6c6cffU, 0xdf3f41d4U, 0x9e0e5acdU
+ /* 32 */ , 0xa2248495U, 0xe3159f8cU, 0x2046b2a7U, 0x6177a9beU, 0xa6e1e8f1U, 0xe7d0f3e8U, 0x2483dec3U, 0x65b2c5daU
+ /* 40 */ , 0xaaae5d5dU, 0xeb9f4644U, 0x28cc6b6fU, 0x69fd7076U, 0xae6b3139U, 0xef5a2a20U, 0x2c09070bU, 0x6d381c12U
+ /* 48 */ , 0xf33646dfU, 0xb2075dc6U, 0x715470edU, 0x30656bf4U, 0xf7f32abbU, 0xb6c231a2U, 0x75911c89U, 0x34a00790U
+ /* 56 */ , 0xfbbc9f17U, 0xba8d840eU, 0x79dea925U, 0x38efb23cU, 0xff79f373U, 0xbe48e86aU, 0x7d1bc541U, 0x3c2ade58U
+ /* 64 */ , 0x054f79f0U, 0x447e62e9U, 0x872d4fc2U, 0xc61c54dbU, 0x018a1594U, 0x40bb0e8dU, 0x83e823a6U, 0xc2d938bfU
+ /* 72 */ , 0x0dc5a038U, 0x4cf4bb21U, 0x8fa7960aU, 0xce968d13U, 0x0900cc5cU, 0x4831d745U, 0x8b62fa6eU, 0xca53e177U
+ /* 80 */ , 0x545dbbbaU, 0x156ca0a3U, 0xd63f8d88U, 0x970e9691U, 0x5098d7deU, 0x11a9ccc7U, 0xd2fae1ecU, 0x93cbfaf5U
+ /* 88 */ , 0x5cd76272U, 0x1de6796bU, 0xdeb55440U, 0x9f844f59U, 0x58120e16U, 0x1923150fU, 0xda703824U, 0x9b41233dU
+ /* 96 */ , 0xa76bfd65U, 0xe65ae67cU, 0x2509cb57U, 0x6438d04eU, 0xa3ae9101U, 0xe29f8a18U, 0x21cca733U, 0x60fdbc2aU
+ /* 104 */ , 0xafe124adU, 0xeed03fb4U, 0x2d83129fU, 0x6cb20986U, 0xab2448c9U, 0xea1553d0U, 0x29467efbU, 0x687765e2U
+ /* 112 */ , 0xf6793f2fU, 0xb7482436U, 0x741b091dU, 0x352a1204U, 0xf2bc534bU, 0xb38d4852U, 0x70de6579U, 0x31ef7e60U
+ /* 120 */ , 0xfef3e6e7U, 0xbfc2fdfeU, 0x7c91d0d5U, 0x3da0cbccU, 0xfa368a83U, 0xbb07919aU, 0x7854bcb1U, 0x3965a7a8U
+ /* 128 */ , 0x4b98833bU, 0x0aa99822U, 0xc9fab509U, 0x88cbae10U, 0x4f5def5fU, 0x0e6cf446U, 0xcd3fd96dU, 0x8c0ec274U
+ /* 136 */ , 0x43125af3U, 0x022341eaU, 0xc1706cc1U, 0x804177d8U, 0x47d73697U, 0x06e62d8eU, 0xc5b500a5U, 0x84841bbcU
+ /* 144 */ , 0x1a8a4171U, 0x5bbb5a68U, 0x98e87743U, 0xd9d96c5aU, 0x1e4f2d15U, 0x5f7e360cU, 0x9c2d1b27U, 0xdd1c003eU
+ /* 152 */ , 0x120098b9U, 0x533183a0U, 0x9062ae8bU, 0xd153b592U, 0x16c5f4ddU, 0x57f4efc4U, 0x94a7c2efU, 0xd596d9f6U
+ /* 160 */ , 0xe9bc07aeU, 0xa88d1cb7U, 0x6bde319cU, 0x2aef2a85U, 0xed796bcaU, 0xac4870d3U, 0x6f1b5df8U, 0x2e2a46e1U
+ /* 168 */ , 0xe136de66U, 0xa007c57fU, 0x6354e854U, 0x2265f34dU, 0xe5f3b202U, 0xa4c2a91bU, 0x67918430U, 0x26a09f29U
+ /* 176 */ , 0xb8aec5e4U, 0xf99fdefdU, 0x3accf3d6U, 0x7bfde8cfU, 0xbc6ba980U, 0xfd5ab299U, 0x3e099fb2U, 0x7f3884abU
+ /* 184 */ , 0xb0241c2cU, 0xf1150735U, 0x32462a1eU, 0x73773107U, 0xb4e17048U, 0xf5d06b51U, 0x3683467aU, 0x77b25d63U
+ /* 192 */ , 0x4ed7facbU, 0x0fe6e1d2U, 0xccb5ccf9U, 0x8d84d7e0U, 0x4a1296afU, 0x0b238db6U, 0xc870a09dU, 0x8941bb84U
+ /* 200 */ , 0x465d2303U, 0x076c381aU, 0xc43f1531U, 0x850e0e28U, 0x42984f67U, 0x03a9547eU, 0xc0fa7955U, 0x81cb624cU
+ /* 208 */ , 0x1fc53881U, 0x5ef42398U, 0x9da70eb3U, 0xdc9615aaU, 0x1b0054e5U, 0x5a314ffcU, 0x996262d7U, 0xd85379ceU
+ /* 216 */ , 0x174fe149U, 0x567efa50U, 0x952dd77bU, 0xd41ccc62U, 0x138a8d2dU, 0x52bb9634U, 0x91e8bb1fU, 0xd0d9a006U
+ /* 224 */ , 0xecf37e5eU, 0xadc26547U, 0x6e91486cU, 0x2fa05375U, 0xe836123aU, 0xa9070923U, 0x6a542408U, 0x2b653f11U
+ /* 232 */ , 0xe479a796U, 0xa548bc8fU, 0x661b91a4U, 0x272a8abdU, 0xe0bccbf2U, 0xa18dd0ebU, 0x62defdc0U, 0x23efe6d9U
+ /* 240 */ , 0xbde1bc14U, 0xfcd0a70dU, 0x3f838a26U, 0x7eb2913fU, 0xb924d070U, 0xf815cb69U, 0x3b46e642U, 0x7a77fd5bU
+ /* 248 */ , 0xb56b65dcU, 0xf45a7ec5U, 0x370953eeU, 0x763848f7U, 0xb1ae09b8U, 0xf09f12a1U, 0x33cc3f8aU, 0x72fd2493U
+ }
+ ,
+ /* CRC32 table 6 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x376ac201U, 0x6ed48403U, 0x59be4602U, 0xdca80907U, 0xebc2cb06U, 0xb27c8d04U, 0x85164f05U
+ /* 8 */ , 0xb851130eU, 0x8f3bd10fU, 0xd685970dU, 0xe1ef550cU, 0x64f91a09U, 0x5393d808U, 0x0a2d9e0aU, 0x3d475c0bU
+ /* 16 */ , 0x70a3261cU, 0x47c9e41dU, 0x1e77a21fU, 0x291d601eU, 0xac0b2f1bU, 0x9b61ed1aU, 0xc2dfab18U, 0xf5b56919U
+ /* 24 */ , 0xc8f23512U, 0xff98f713U, 0xa626b111U, 0x914c7310U, 0x145a3c15U, 0x2330fe14U, 0x7a8eb816U, 0x4de47a17U
+ /* 32 */ , 0xe0464d38U, 0xd72c8f39U, 0x8e92c93bU, 0xb9f80b3aU, 0x3cee443fU, 0x0b84863eU, 0x523ac03cU, 0x6550023dU
+ /* 40 */ , 0x58175e36U, 0x6f7d9c37U, 0x36c3da35U, 0x01a91834U, 0x84bf5731U, 0xb3d59530U, 0xea6bd332U, 0xdd011133U
+ /* 48 */ , 0x90e56b24U, 0xa78fa925U, 0xfe31ef27U, 0xc95b2d26U, 0x4c4d6223U, 0x7b27a022U, 0x2299e620U, 0x15f32421U
+ /* 56 */ , 0x28b4782aU, 0x1fdeba2bU, 0x4660fc29U, 0x710a3e28U, 0xf41c712dU, 0xc376b32cU, 0x9ac8f52eU, 0xada2372fU
+ /* 64 */ , 0xc08d9a70U, 0xf7e75871U, 0xae591e73U, 0x9933dc72U, 0x1c259377U, 0x2b4f5176U, 0x72f11774U, 0x459bd575U
+ /* 72 */ , 0x78dc897eU, 0x4fb64b7fU, 0x16080d7dU, 0x2162cf7cU, 0xa4748079U, 0x931e4278U, 0xcaa0047aU, 0xfdcac67bU
+ /* 80 */ , 0xb02ebc6cU, 0x87447e6dU, 0xdefa386fU, 0xe990fa6eU, 0x6c86b56bU, 0x5bec776aU, 0x02523168U, 0x3538f369U
+ /* 88 */ , 0x087faf62U, 0x3f156d63U, 0x66ab2b61U, 0x51c1e960U, 0xd4d7a665U, 0xe3bd6464U, 0xba032266U, 0x8d69e067U
+ /* 96 */ , 0x20cbd748U, 0x17a11549U, 0x4e1f534bU, 0x7975914aU, 0xfc63de4fU, 0xcb091c4eU, 0x92b75a4cU, 0xa5dd984dU
+ /* 104 */ , 0x989ac446U, 0xaff00647U, 0xf64e4045U, 0xc1248244U, 0x4432cd41U, 0x73580f40U, 0x2ae64942U, 0x1d8c8b43U
+ /* 112 */ , 0x5068f154U, 0x67023355U, 0x3ebc7557U, 0x09d6b756U, 0x8cc0f853U, 0xbbaa3a52U, 0xe2147c50U, 0xd57ebe51U
+ /* 120 */ , 0xe839e25aU, 0xdf53205bU, 0x86ed6659U, 0xb187a458U, 0x3491eb5dU, 0x03fb295cU, 0x5a456f5eU, 0x6d2fad5fU
+ /* 128 */ , 0x801b35e1U, 0xb771f7e0U, 0xeecfb1e2U, 0xd9a573e3U, 0x5cb33ce6U, 0x6bd9fee7U, 0x3267b8e5U, 0x050d7ae4U
+ /* 136 */ , 0x384a26efU, 0x0f20e4eeU, 0x569ea2ecU, 0x61f460edU, 0xe4e22fe8U, 0xd388ede9U, 0x8a36abebU, 0xbd5c69eaU
+ /* 144 */ , 0xf0b813fdU, 0xc7d2d1fcU, 0x9e6c97feU, 0xa90655ffU, 0x2c101afaU, 0x1b7ad8fbU, 0x42c49ef9U, 0x75ae5cf8U
+ /* 152 */ , 0x48e900f3U, 0x7f83c2f2U, 0x263d84f0U, 0x115746f1U, 0x944109f4U, 0xa32bcbf5U, 0xfa958df7U, 0xcdff4ff6U
+ /* 160 */ , 0x605d78d9U, 0x5737bad8U, 0x0e89fcdaU, 0x39e33edbU, 0xbcf571deU, 0x8b9fb3dfU, 0xd221f5ddU, 0xe54b37dcU
+ /* 168 */ , 0xd80c6bd7U, 0xef66a9d6U, 0xb6d8efd4U, 0x81b22dd5U, 0x04a462d0U, 0x33cea0d1U, 0x6a70e6d3U, 0x5d1a24d2U
+ /* 176 */ , 0x10fe5ec5U, 0x27949cc4U, 0x7e2adac6U, 0x494018c7U, 0xcc5657c2U, 0xfb3c95c3U, 0xa282d3c1U, 0x95e811c0U
+ /* 184 */ , 0xa8af4dcbU, 0x9fc58fcaU, 0xc67bc9c8U, 0xf1110bc9U, 0x740744ccU, 0x436d86cdU, 0x1ad3c0cfU, 0x2db902ceU
+ /* 192 */ , 0x4096af91U, 0x77fc6d90U, 0x2e422b92U, 0x1928e993U, 0x9c3ea696U, 0xab546497U, 0xf2ea2295U, 0xc580e094U
+ /* 200 */ , 0xf8c7bc9fU, 0xcfad7e9eU, 0x9613389cU, 0xa179fa9dU, 0x246fb598U, 0x13057799U, 0x4abb319bU, 0x7dd1f39aU
+ /* 208 */ , 0x3035898dU, 0x075f4b8cU, 0x5ee10d8eU, 0x698bcf8fU, 0xec9d808aU, 0xdbf7428bU, 0x82490489U, 0xb523c688U
+ /* 216 */ , 0x88649a83U, 0xbf0e5882U, 0xe6b01e80U, 0xd1dadc81U, 0x54cc9384U, 0x63a65185U, 0x3a181787U, 0x0d72d586U
+ /* 224 */ , 0xa0d0e2a9U, 0x97ba20a8U, 0xce0466aaU, 0xf96ea4abU, 0x7c78ebaeU, 0x4b1229afU, 0x12ac6fadU, 0x25c6adacU
+ /* 232 */ , 0x1881f1a7U, 0x2feb33a6U, 0x765575a4U, 0x413fb7a5U, 0xc429f8a0U, 0xf3433aa1U, 0xaafd7ca3U, 0x9d97bea2U
+ /* 240 */ , 0xd073c4b5U, 0xe71906b4U, 0xbea740b6U, 0x89cd82b7U, 0x0cdbcdb2U, 0x3bb10fb3U, 0x620f49b1U, 0x55658bb0U
+ /* 248 */ , 0x6822d7bbU, 0x5f4815baU, 0x06f653b8U, 0x319c91b9U, 0xb48adebcU, 0x83e01cbdU, 0xda5e5abfU, 0xed3498beU
+ }
+ ,
+ /* CRC32 table 7 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x6567bcb8U, 0x8bc809aaU, 0xeeafb512U, 0x5797628fU, 0x32f0de37U, 0xdc5f6b25U, 0xb938d79dU
+ /* 8 */ , 0xef28b4c5U, 0x8a4f087dU, 0x64e0bd6fU, 0x018701d7U, 0xb8bfd64aU, 0xddd86af2U, 0x3377dfe0U, 0x56106358U
+ /* 16 */ , 0x9f571950U, 0xfa30a5e8U, 0x149f10faU, 0x71f8ac42U, 0xc8c07bdfU, 0xada7c767U, 0x43087275U, 0x266fcecdU
+ /* 24 */ , 0x707fad95U, 0x1518112dU, 0xfbb7a43fU, 0x9ed01887U, 0x27e8cf1aU, 0x428f73a2U, 0xac20c6b0U, 0xc9477a08U
+ /* 32 */ , 0x3eaf32a0U, 0x5bc88e18U, 0xb5673b0aU, 0xd00087b2U, 0x6938502fU, 0x0c5fec97U, 0xe2f05985U, 0x8797e53dU
+ /* 40 */ , 0xd1878665U, 0xb4e03addU, 0x5a4f8fcfU, 0x3f283377U, 0x8610e4eaU, 0xe3775852U, 0x0dd8ed40U, 0x68bf51f8U
+ /* 48 */ , 0xa1f82bf0U, 0xc49f9748U, 0x2a30225aU, 0x4f579ee2U, 0xf66f497fU, 0x9308f5c7U, 0x7da740d5U, 0x18c0fc6dU
+ /* 56 */ , 0x4ed09f35U, 0x2bb7238dU, 0xc518969fU, 0xa07f2a27U, 0x1947fdbaU, 0x7c204102U, 0x928ff410U, 0xf7e848a8U
+ /* 64 */ , 0x3d58149bU, 0x583fa823U, 0xb6901d31U, 0xd3f7a189U, 0x6acf7614U, 0x0fa8caacU, 0xe1077fbeU, 0x8460c306U
+ /* 72 */ , 0xd270a05eU, 0xb7171ce6U, 0x59b8a9f4U, 0x3cdf154cU, 0x85e7c2d1U, 0xe0807e69U, 0x0e2fcb7bU, 0x6b4877c3U
+ /* 80 */ , 0xa20f0dcbU, 0xc768b173U, 0x29c70461U, 0x4ca0b8d9U, 0xf5986f44U, 0x90ffd3fcU, 0x7e5066eeU, 0x1b37da56U
+ /* 88 */ , 0x4d27b90eU, 0x284005b6U, 0xc6efb0a4U, 0xa3880c1cU, 0x1ab0db81U, 0x7fd76739U, 0x9178d22bU, 0xf41f6e93U
+ /* 96 */ , 0x03f7263bU, 0x66909a83U, 0x883f2f91U, 0xed589329U, 0x546044b4U, 0x3107f80cU, 0xdfa84d1eU, 0xbacff1a6U
+ /* 104 */ , 0xecdf92feU, 0x89b82e46U, 0x67179b54U, 0x027027ecU, 0xbb48f071U, 0xde2f4cc9U, 0x3080f9dbU, 0x55e74563U
+ /* 112 */ , 0x9ca03f6bU, 0xf9c783d3U, 0x176836c1U, 0x720f8a79U, 0xcb375de4U, 0xae50e15cU, 0x40ff544eU, 0x2598e8f6U
+ /* 120 */ , 0x73888baeU, 0x16ef3716U, 0xf8408204U, 0x9d273ebcU, 0x241fe921U, 0x41785599U, 0xafd7e08bU, 0xcab05c33U
+ /* 128 */ , 0x3bb659edU, 0x5ed1e555U, 0xb07e5047U, 0xd519ecffU, 0x6c213b62U, 0x094687daU, 0xe7e932c8U, 0x828e8e70U
+ /* 136 */ , 0xd49eed28U, 0xb1f95190U, 0x5f56e482U, 0x3a31583aU, 0x83098fa7U, 0xe66e331fU, 0x08c1860dU, 0x6da63ab5U
+ /* 144 */ , 0xa4e140bdU, 0xc186fc05U, 0x2f294917U, 0x4a4ef5afU, 0xf3762232U, 0x96119e8aU, 0x78be2b98U, 0x1dd99720U
+ /* 152 */ , 0x4bc9f478U, 0x2eae48c0U, 0xc001fdd2U, 0xa566416aU, 0x1c5e96f7U, 0x79392a4fU, 0x97969f5dU, 0xf2f123e5U
+ /* 160 */ , 0x05196b4dU, 0x607ed7f5U, 0x8ed162e7U, 0xebb6de5fU, 0x528e09c2U, 0x37e9b57aU, 0xd9460068U, 0xbc21bcd0U
+ /* 168 */ , 0xea31df88U, 0x8f566330U, 0x61f9d622U, 0x049e6a9aU, 0xbda6bd07U, 0xd8c101bfU, 0x366eb4adU, 0x53090815U
+ /* 176 */ , 0x9a4e721dU, 0xff29cea5U, 0x11867bb7U, 0x74e1c70fU, 0xcdd91092U, 0xa8beac2aU, 0x46111938U, 0x2376a580U
+ /* 184 */ , 0x7566c6d8U, 0x10017a60U, 0xfeaecf72U, 0x9bc973caU, 0x22f1a457U, 0x479618efU, 0xa939adfdU, 0xcc5e1145U
+ /* 192 */ , 0x06ee4d76U, 0x6389f1ceU, 0x8d2644dcU, 0xe841f864U, 0x51792ff9U, 0x341e9341U, 0xdab12653U, 0xbfd69aebU
+ /* 200 */ , 0xe9c6f9b3U, 0x8ca1450bU, 0x620ef019U, 0x07694ca1U, 0xbe519b3cU, 0xdb362784U, 0x35999296U, 0x50fe2e2eU
+ /* 208 */ , 0x99b95426U, 0xfcdee89eU, 0x12715d8cU, 0x7716e134U, 0xce2e36a9U, 0xab498a11U, 0x45e63f03U, 0x208183bbU
+ /* 216 */ , 0x7691e0e3U, 0x13f65c5bU, 0xfd59e949U, 0x983e55f1U, 0x2106826cU, 0x44613ed4U, 0xaace8bc6U, 0xcfa9377eU
+ /* 224 */ , 0x38417fd6U, 0x5d26c36eU, 0xb389767cU, 0xd6eecac4U, 0x6fd61d59U, 0x0ab1a1e1U, 0xe41e14f3U, 0x8179a84bU
+ /* 232 */ , 0xd769cb13U, 0xb20e77abU, 0x5ca1c2b9U, 0x39c67e01U, 0x80fea99cU, 0xe5991524U, 0x0b36a036U, 0x6e511c8eU
+ /* 240 */ , 0xa7166686U, 0xc271da3eU, 0x2cde6f2cU, 0x49b9d394U, 0xf0810409U, 0x95e6b8b1U, 0x7b490da3U, 0x1e2eb11bU
+ /* 248 */ , 0x483ed243U, 0x2d596efbU, 0xc3f6dbe9U, 0xa6916751U, 0x1fa9b0ccU, 0x7ace0c74U, 0x9461b966U, 0xf10605deU
+ }
+ #endif
+ };
+
+juint StubRoutines::ppc64::_crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE] = {
+ /* polyBits = 4394350321 0x0000000105ec76f1L, shifted = 0x82f63b78 */
+ /* CRC32C table for single bytes, auto-generated. DO NOT MODIFY! */
+ /* CRC32C table 0 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0xf26b8303U, 0xe13b70f7U, 0x1350f3f4U, 0xc79a971fU, 0x35f1141cU, 0x26a1e7e8U, 0xd4ca64ebU
+ /* 8 */ , 0x8ad958cfU, 0x78b2dbccU, 0x6be22838U, 0x9989ab3bU, 0x4d43cfd0U, 0xbf284cd3U, 0xac78bf27U, 0x5e133c24U
+ /* 16 */ , 0x105ec76fU, 0xe235446cU, 0xf165b798U, 0x030e349bU, 0xd7c45070U, 0x25afd373U, 0x36ff2087U, 0xc494a384U
+ /* 24 */ , 0x9a879fa0U, 0x68ec1ca3U, 0x7bbcef57U, 0x89d76c54U, 0x5d1d08bfU, 0xaf768bbcU, 0xbc267848U, 0x4e4dfb4bU
+ /* 32 */ , 0x20bd8edeU, 0xd2d60dddU, 0xc186fe29U, 0x33ed7d2aU, 0xe72719c1U, 0x154c9ac2U, 0x061c6936U, 0xf477ea35U
+ /* 40 */ , 0xaa64d611U, 0x580f5512U, 0x4b5fa6e6U, 0xb93425e5U, 0x6dfe410eU, 0x9f95c20dU, 0x8cc531f9U, 0x7eaeb2faU
+ /* 48 */ , 0x30e349b1U, 0xc288cab2U, 0xd1d83946U, 0x23b3ba45U, 0xf779deaeU, 0x05125dadU, 0x1642ae59U, 0xe4292d5aU
+ /* 56 */ , 0xba3a117eU, 0x4851927dU, 0x5b016189U, 0xa96ae28aU, 0x7da08661U, 0x8fcb0562U, 0x9c9bf696U, 0x6ef07595U
+ /* 64 */ , 0x417b1dbcU, 0xb3109ebfU, 0xa0406d4bU, 0x522bee48U, 0x86e18aa3U, 0x748a09a0U, 0x67dafa54U, 0x95b17957U
+ /* 72 */ , 0xcba24573U, 0x39c9c670U, 0x2a993584U, 0xd8f2b687U, 0x0c38d26cU, 0xfe53516fU, 0xed03a29bU, 0x1f682198U
+ /* 80 */ , 0x5125dad3U, 0xa34e59d0U, 0xb01eaa24U, 0x42752927U, 0x96bf4dccU, 0x64d4cecfU, 0x77843d3bU, 0x85efbe38U
+ /* 88 */ , 0xdbfc821cU, 0x2997011fU, 0x3ac7f2ebU, 0xc8ac71e8U, 0x1c661503U, 0xee0d9600U, 0xfd5d65f4U, 0x0f36e6f7U
+ /* 96 */ , 0x61c69362U, 0x93ad1061U, 0x80fde395U, 0x72966096U, 0xa65c047dU, 0x5437877eU, 0x4767748aU, 0xb50cf789U
+ /* 104 */ , 0xeb1fcbadU, 0x197448aeU, 0x0a24bb5aU, 0xf84f3859U, 0x2c855cb2U, 0xdeeedfb1U, 0xcdbe2c45U, 0x3fd5af46U
+ /* 112 */ , 0x7198540dU, 0x83f3d70eU, 0x90a324faU, 0x62c8a7f9U, 0xb602c312U, 0x44694011U, 0x5739b3e5U, 0xa55230e6U
+ /* 120 */ , 0xfb410cc2U, 0x092a8fc1U, 0x1a7a7c35U, 0xe811ff36U, 0x3cdb9bddU, 0xceb018deU, 0xdde0eb2aU, 0x2f8b6829U
+ /* 128 */ , 0x82f63b78U, 0x709db87bU, 0x63cd4b8fU, 0x91a6c88cU, 0x456cac67U, 0xb7072f64U, 0xa457dc90U, 0x563c5f93U
+ /* 136 */ , 0x082f63b7U, 0xfa44e0b4U, 0xe9141340U, 0x1b7f9043U, 0xcfb5f4a8U, 0x3dde77abU, 0x2e8e845fU, 0xdce5075cU
+ /* 144 */ , 0x92a8fc17U, 0x60c37f14U, 0x73938ce0U, 0x81f80fe3U, 0x55326b08U, 0xa759e80bU, 0xb4091bffU, 0x466298fcU
+ /* 152 */ , 0x1871a4d8U, 0xea1a27dbU, 0xf94ad42fU, 0x0b21572cU, 0xdfeb33c7U, 0x2d80b0c4U, 0x3ed04330U, 0xccbbc033U
+ /* 160 */ , 0xa24bb5a6U, 0x502036a5U, 0x4370c551U, 0xb11b4652U, 0x65d122b9U, 0x97baa1baU, 0x84ea524eU, 0x7681d14dU
+ /* 168 */ , 0x2892ed69U, 0xdaf96e6aU, 0xc9a99d9eU, 0x3bc21e9dU, 0xef087a76U, 0x1d63f975U, 0x0e330a81U, 0xfc588982U
+ /* 176 */ , 0xb21572c9U, 0x407ef1caU, 0x532e023eU, 0xa145813dU, 0x758fe5d6U, 0x87e466d5U, 0x94b49521U, 0x66df1622U
+ /* 184 */ , 0x38cc2a06U, 0xcaa7a905U, 0xd9f75af1U, 0x2b9cd9f2U, 0xff56bd19U, 0x0d3d3e1aU, 0x1e6dcdeeU, 0xec064eedU
+ /* 192 */ , 0xc38d26c4U, 0x31e6a5c7U, 0x22b65633U, 0xd0ddd530U, 0x0417b1dbU, 0xf67c32d8U, 0xe52cc12cU, 0x1747422fU
+ /* 200 */ , 0x49547e0bU, 0xbb3ffd08U, 0xa86f0efcU, 0x5a048dffU, 0x8ecee914U, 0x7ca56a17U, 0x6ff599e3U, 0x9d9e1ae0U
+ /* 208 */ , 0xd3d3e1abU, 0x21b862a8U, 0x32e8915cU, 0xc083125fU, 0x144976b4U, 0xe622f5b7U, 0xf5720643U, 0x07198540U
+ /* 216 */ , 0x590ab964U, 0xab613a67U, 0xb831c993U, 0x4a5a4a90U, 0x9e902e7bU, 0x6cfbad78U, 0x7fab5e8cU, 0x8dc0dd8fU
+ /* 224 */ , 0xe330a81aU, 0x115b2b19U, 0x020bd8edU, 0xf0605beeU, 0x24aa3f05U, 0xd6c1bc06U, 0xc5914ff2U, 0x37faccf1U
+ /* 232 */ , 0x69e9f0d5U, 0x9b8273d6U, 0x88d28022U, 0x7ab90321U, 0xae7367caU, 0x5c18e4c9U, 0x4f48173dU, 0xbd23943eU
+ /* 240 */ , 0xf36e6f75U, 0x0105ec76U, 0x12551f82U, 0xe03e9c81U, 0x34f4f86aU, 0xc69f7b69U, 0xd5cf889dU, 0x27a40b9eU
+ /* 248 */ , 0x79b737baU, 0x8bdcb4b9U, 0x988c474dU, 0x6ae7c44eU, 0xbe2da0a5U, 0x4c4623a6U, 0x5f16d052U, 0xad7d5351U
+ }
+ #ifdef CRC32_BYFOUR
+ ,
+ /* CRC32C table 1 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x13a29877U, 0x274530eeU, 0x34e7a899U, 0x4e8a61dcU, 0x5d28f9abU, 0x69cf5132U, 0x7a6dc945U
+ /* 8 */ , 0x9d14c3b8U, 0x8eb65bcfU, 0xba51f356U, 0xa9f36b21U, 0xd39ea264U, 0xc03c3a13U, 0xf4db928aU, 0xe7790afdU
+ /* 16 */ , 0x3fc5f181U, 0x2c6769f6U, 0x1880c16fU, 0x0b225918U, 0x714f905dU, 0x62ed082aU, 0x560aa0b3U, 0x45a838c4U
+ /* 24 */ , 0xa2d13239U, 0xb173aa4eU, 0x859402d7U, 0x96369aa0U, 0xec5b53e5U, 0xfff9cb92U, 0xcb1e630bU, 0xd8bcfb7cU
+ /* 32 */ , 0x7f8be302U, 0x6c297b75U, 0x58ced3ecU, 0x4b6c4b9bU, 0x310182deU, 0x22a31aa9U, 0x1644b230U, 0x05e62a47U
+ /* 40 */ , 0xe29f20baU, 0xf13db8cdU, 0xc5da1054U, 0xd6788823U, 0xac154166U, 0xbfb7d911U, 0x8b507188U, 0x98f2e9ffU
+ /* 48 */ , 0x404e1283U, 0x53ec8af4U, 0x670b226dU, 0x74a9ba1aU, 0x0ec4735fU, 0x1d66eb28U, 0x298143b1U, 0x3a23dbc6U
+ /* 56 */ , 0xdd5ad13bU, 0xcef8494cU, 0xfa1fe1d5U, 0xe9bd79a2U, 0x93d0b0e7U, 0x80722890U, 0xb4958009U, 0xa737187eU
+ /* 64 */ , 0xff17c604U, 0xecb55e73U, 0xd852f6eaU, 0xcbf06e9dU, 0xb19da7d8U, 0xa23f3fafU, 0x96d89736U, 0x857a0f41U
+ /* 72 */ , 0x620305bcU, 0x71a19dcbU, 0x45463552U, 0x56e4ad25U, 0x2c896460U, 0x3f2bfc17U, 0x0bcc548eU, 0x186eccf9U
+ /* 80 */ , 0xc0d23785U, 0xd370aff2U, 0xe797076bU, 0xf4359f1cU, 0x8e585659U, 0x9dface2eU, 0xa91d66b7U, 0xbabffec0U
+ /* 88 */ , 0x5dc6f43dU, 0x4e646c4aU, 0x7a83c4d3U, 0x69215ca4U, 0x134c95e1U, 0x00ee0d96U, 0x3409a50fU, 0x27ab3d78U
+ /* 96 */ , 0x809c2506U, 0x933ebd71U, 0xa7d915e8U, 0xb47b8d9fU, 0xce1644daU, 0xddb4dcadU, 0xe9537434U, 0xfaf1ec43U
+ /* 104 */ , 0x1d88e6beU, 0x0e2a7ec9U, 0x3acdd650U, 0x296f4e27U, 0x53028762U, 0x40a01f15U, 0x7447b78cU, 0x67e52ffbU
+ /* 112 */ , 0xbf59d487U, 0xacfb4cf0U, 0x981ce469U, 0x8bbe7c1eU, 0xf1d3b55bU, 0xe2712d2cU, 0xd69685b5U, 0xc5341dc2U
+ /* 120 */ , 0x224d173fU, 0x31ef8f48U, 0x050827d1U, 0x16aabfa6U, 0x6cc776e3U, 0x7f65ee94U, 0x4b82460dU, 0x5820de7aU
+ /* 128 */ , 0xfbc3faf9U, 0xe861628eU, 0xdc86ca17U, 0xcf245260U, 0xb5499b25U, 0xa6eb0352U, 0x920cabcbU, 0x81ae33bcU
+ /* 136 */ , 0x66d73941U, 0x7575a136U, 0x419209afU, 0x523091d8U, 0x285d589dU, 0x3bffc0eaU, 0x0f186873U, 0x1cbaf004U
+ /* 144 */ , 0xc4060b78U, 0xd7a4930fU, 0xe3433b96U, 0xf0e1a3e1U, 0x8a8c6aa4U, 0x992ef2d3U, 0xadc95a4aU, 0xbe6bc23dU
+ /* 152 */ , 0x5912c8c0U, 0x4ab050b7U, 0x7e57f82eU, 0x6df56059U, 0x1798a91cU, 0x043a316bU, 0x30dd99f2U, 0x237f0185U
+ /* 160 */ , 0x844819fbU, 0x97ea818cU, 0xa30d2915U, 0xb0afb162U, 0xcac27827U, 0xd960e050U, 0xed8748c9U, 0xfe25d0beU
+ /* 168 */ , 0x195cda43U, 0x0afe4234U, 0x3e19eaadU, 0x2dbb72daU, 0x57d6bb9fU, 0x447423e8U, 0x70938b71U, 0x63311306U
+ /* 176 */ , 0xbb8de87aU, 0xa82f700dU, 0x9cc8d894U, 0x8f6a40e3U, 0xf50789a6U, 0xe6a511d1U, 0xd242b948U, 0xc1e0213fU
+ /* 184 */ , 0x26992bc2U, 0x353bb3b5U, 0x01dc1b2cU, 0x127e835bU, 0x68134a1eU, 0x7bb1d269U, 0x4f567af0U, 0x5cf4e287U
+ /* 192 */ , 0x04d43cfdU, 0x1776a48aU, 0x23910c13U, 0x30339464U, 0x4a5e5d21U, 0x59fcc556U, 0x6d1b6dcfU, 0x7eb9f5b8U
+ /* 200 */ , 0x99c0ff45U, 0x8a626732U, 0xbe85cfabU, 0xad2757dcU, 0xd74a9e99U, 0xc4e806eeU, 0xf00fae77U, 0xe3ad3600U
+ /* 208 */ , 0x3b11cd7cU, 0x28b3550bU, 0x1c54fd92U, 0x0ff665e5U, 0x759baca0U, 0x663934d7U, 0x52de9c4eU, 0x417c0439U
+ /* 216 */ , 0xa6050ec4U, 0xb5a796b3U, 0x81403e2aU, 0x92e2a65dU, 0xe88f6f18U, 0xfb2df76fU, 0xcfca5ff6U, 0xdc68c781U
+ /* 224 */ , 0x7b5fdfffU, 0x68fd4788U, 0x5c1aef11U, 0x4fb87766U, 0x35d5be23U, 0x26772654U, 0x12908ecdU, 0x013216baU
+ /* 232 */ , 0xe64b1c47U, 0xf5e98430U, 0xc10e2ca9U, 0xd2acb4deU, 0xa8c17d9bU, 0xbb63e5ecU, 0x8f844d75U, 0x9c26d502U
+ /* 240 */ , 0x449a2e7eU, 0x5738b609U, 0x63df1e90U, 0x707d86e7U, 0x0a104fa2U, 0x19b2d7d5U, 0x2d557f4cU, 0x3ef7e73bU
+ /* 248 */ , 0xd98eedc6U, 0xca2c75b1U, 0xfecbdd28U, 0xed69455fU, 0x97048c1aU, 0x84a6146dU, 0xb041bcf4U, 0xa3e32483U
+ }
+ ,
+ /* CRC32C table 2 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0xa541927eU, 0x4f6f520dU, 0xea2ec073U, 0x9edea41aU, 0x3b9f3664U, 0xd1b1f617U, 0x74f06469U
+ /* 8 */ , 0x38513ec5U, 0x9d10acbbU, 0x773e6cc8U, 0xd27ffeb6U, 0xa68f9adfU, 0x03ce08a1U, 0xe9e0c8d2U, 0x4ca15aacU
+ /* 16 */ , 0x70a27d8aU, 0xd5e3eff4U, 0x3fcd2f87U, 0x9a8cbdf9U, 0xee7cd990U, 0x4b3d4beeU, 0xa1138b9dU, 0x045219e3U
+ /* 24 */ , 0x48f3434fU, 0xedb2d131U, 0x079c1142U, 0xa2dd833cU, 0xd62de755U, 0x736c752bU, 0x9942b558U, 0x3c032726U
+ /* 32 */ , 0xe144fb14U, 0x4405696aU, 0xae2ba919U, 0x0b6a3b67U, 0x7f9a5f0eU, 0xdadbcd70U, 0x30f50d03U, 0x95b49f7dU
+ /* 40 */ , 0xd915c5d1U, 0x7c5457afU, 0x967a97dcU, 0x333b05a2U, 0x47cb61cbU, 0xe28af3b5U, 0x08a433c6U, 0xade5a1b8U
+ /* 48 */ , 0x91e6869eU, 0x34a714e0U, 0xde89d493U, 0x7bc846edU, 0x0f382284U, 0xaa79b0faU, 0x40577089U, 0xe516e2f7U
+ /* 56 */ , 0xa9b7b85bU, 0x0cf62a25U, 0xe6d8ea56U, 0x43997828U, 0x37691c41U, 0x92288e3fU, 0x78064e4cU, 0xdd47dc32U
+ /* 64 */ , 0xc76580d9U, 0x622412a7U, 0x880ad2d4U, 0x2d4b40aaU, 0x59bb24c3U, 0xfcfab6bdU, 0x16d476ceU, 0xb395e4b0U
+ /* 72 */ , 0xff34be1cU, 0x5a752c62U, 0xb05bec11U, 0x151a7e6fU, 0x61ea1a06U, 0xc4ab8878U, 0x2e85480bU, 0x8bc4da75U
+ /* 80 */ , 0xb7c7fd53U, 0x12866f2dU, 0xf8a8af5eU, 0x5de93d20U, 0x29195949U, 0x8c58cb37U, 0x66760b44U, 0xc337993aU
+ /* 88 */ , 0x8f96c396U, 0x2ad751e8U, 0xc0f9919bU, 0x65b803e5U, 0x1148678cU, 0xb409f5f2U, 0x5e273581U, 0xfb66a7ffU
+ /* 96 */ , 0x26217bcdU, 0x8360e9b3U, 0x694e29c0U, 0xcc0fbbbeU, 0xb8ffdfd7U, 0x1dbe4da9U, 0xf7908ddaU, 0x52d11fa4U
+ /* 104 */ , 0x1e704508U, 0xbb31d776U, 0x511f1705U, 0xf45e857bU, 0x80aee112U, 0x25ef736cU, 0xcfc1b31fU, 0x6a802161U
+ /* 112 */ , 0x56830647U, 0xf3c29439U, 0x19ec544aU, 0xbcadc634U, 0xc85da25dU, 0x6d1c3023U, 0x8732f050U, 0x2273622eU
+ /* 120 */ , 0x6ed23882U, 0xcb93aafcU, 0x21bd6a8fU, 0x84fcf8f1U, 0xf00c9c98U, 0x554d0ee6U, 0xbf63ce95U, 0x1a225cebU
+ /* 128 */ , 0x8b277743U, 0x2e66e53dU, 0xc448254eU, 0x6109b730U, 0x15f9d359U, 0xb0b84127U, 0x5a968154U, 0xffd7132aU
+ /* 136 */ , 0xb3764986U, 0x1637dbf8U, 0xfc191b8bU, 0x595889f5U, 0x2da8ed9cU, 0x88e97fe2U, 0x62c7bf91U, 0xc7862defU
+ /* 144 */ , 0xfb850ac9U, 0x5ec498b7U, 0xb4ea58c4U, 0x11abcabaU, 0x655baed3U, 0xc01a3cadU, 0x2a34fcdeU, 0x8f756ea0U
+ /* 152 */ , 0xc3d4340cU, 0x6695a672U, 0x8cbb6601U, 0x29faf47fU, 0x5d0a9016U, 0xf84b0268U, 0x1265c21bU, 0xb7245065U
+ /* 160 */ , 0x6a638c57U, 0xcf221e29U, 0x250cde5aU, 0x804d4c24U, 0xf4bd284dU, 0x51fcba33U, 0xbbd27a40U, 0x1e93e83eU
+ /* 168 */ , 0x5232b292U, 0xf77320ecU, 0x1d5de09fU, 0xb81c72e1U, 0xccec1688U, 0x69ad84f6U, 0x83834485U, 0x26c2d6fbU
+ /* 176 */ , 0x1ac1f1ddU, 0xbf8063a3U, 0x55aea3d0U, 0xf0ef31aeU, 0x841f55c7U, 0x215ec7b9U, 0xcb7007caU, 0x6e3195b4U
+ /* 184 */ , 0x2290cf18U, 0x87d15d66U, 0x6dff9d15U, 0xc8be0f6bU, 0xbc4e6b02U, 0x190ff97cU, 0xf321390fU, 0x5660ab71U
+ /* 192 */ , 0x4c42f79aU, 0xe90365e4U, 0x032da597U, 0xa66c37e9U, 0xd29c5380U, 0x77ddc1feU, 0x9df3018dU, 0x38b293f3U
+ /* 200 */ , 0x7413c95fU, 0xd1525b21U, 0x3b7c9b52U, 0x9e3d092cU, 0xeacd6d45U, 0x4f8cff3bU, 0xa5a23f48U, 0x00e3ad36U
+ /* 208 */ , 0x3ce08a10U, 0x99a1186eU, 0x738fd81dU, 0xd6ce4a63U, 0xa23e2e0aU, 0x077fbc74U, 0xed517c07U, 0x4810ee79U
+ /* 216 */ , 0x04b1b4d5U, 0xa1f026abU, 0x4bdee6d8U, 0xee9f74a6U, 0x9a6f10cfU, 0x3f2e82b1U, 0xd50042c2U, 0x7041d0bcU
+ /* 224 */ , 0xad060c8eU, 0x08479ef0U, 0xe2695e83U, 0x4728ccfdU, 0x33d8a894U, 0x96993aeaU, 0x7cb7fa99U, 0xd9f668e7U
+ /* 232 */ , 0x9557324bU, 0x3016a035U, 0xda386046U, 0x7f79f238U, 0x0b899651U, 0xaec8042fU, 0x44e6c45cU, 0xe1a75622U
+ /* 240 */ , 0xdda47104U, 0x78e5e37aU, 0x92cb2309U, 0x378ab177U, 0x437ad51eU, 0xe63b4760U, 0x0c158713U, 0xa954156dU
+ /* 248 */ , 0xe5f54fc1U, 0x40b4ddbfU, 0xaa9a1dccU, 0x0fdb8fb2U, 0x7b2bebdbU, 0xde6a79a5U, 0x3444b9d6U, 0x91052ba8U
+ }
+ ,
+ /* CRC32C table 3 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0xdd45aab8U, 0xbf672381U, 0x62228939U, 0x7b2231f3U, 0xa6679b4bU, 0xc4451272U, 0x1900b8caU
+ /* 8 */ , 0xf64463e6U, 0x2b01c95eU, 0x49234067U, 0x9466eadfU, 0x8d665215U, 0x5023f8adU, 0x32017194U, 0xef44db2cU
+ /* 16 */ , 0xe964b13dU, 0x34211b85U, 0x560392bcU, 0x8b463804U, 0x924680ceU, 0x4f032a76U, 0x2d21a34fU, 0xf06409f7U
+ /* 24 */ , 0x1f20d2dbU, 0xc2657863U, 0xa047f15aU, 0x7d025be2U, 0x6402e328U, 0xb9474990U, 0xdb65c0a9U, 0x06206a11U
+ /* 32 */ , 0xd725148bU, 0x0a60be33U, 0x6842370aU, 0xb5079db2U, 0xac072578U, 0x71428fc0U, 0x136006f9U, 0xce25ac41U
+ /* 40 */ , 0x2161776dU, 0xfc24ddd5U, 0x9e0654ecU, 0x4343fe54U, 0x5a43469eU, 0x8706ec26U, 0xe524651fU, 0x3861cfa7U
+ /* 48 */ , 0x3e41a5b6U, 0xe3040f0eU, 0x81268637U, 0x5c632c8fU, 0x45639445U, 0x98263efdU, 0xfa04b7c4U, 0x27411d7cU
+ /* 56 */ , 0xc805c650U, 0x15406ce8U, 0x7762e5d1U, 0xaa274f69U, 0xb327f7a3U, 0x6e625d1bU, 0x0c40d422U, 0xd1057e9aU
+ /* 64 */ , 0xaba65fe7U, 0x76e3f55fU, 0x14c17c66U, 0xc984d6deU, 0xd0846e14U, 0x0dc1c4acU, 0x6fe34d95U, 0xb2a6e72dU
+ /* 72 */ , 0x5de23c01U, 0x80a796b9U, 0xe2851f80U, 0x3fc0b538U, 0x26c00df2U, 0xfb85a74aU, 0x99a72e73U, 0x44e284cbU
+ /* 80 */ , 0x42c2eedaU, 0x9f874462U, 0xfda5cd5bU, 0x20e067e3U, 0x39e0df29U, 0xe4a57591U, 0x8687fca8U, 0x5bc25610U
+ /* 88 */ , 0xb4868d3cU, 0x69c32784U, 0x0be1aebdU, 0xd6a40405U, 0xcfa4bccfU, 0x12e11677U, 0x70c39f4eU, 0xad8635f6U
+ /* 96 */ , 0x7c834b6cU, 0xa1c6e1d4U, 0xc3e468edU, 0x1ea1c255U, 0x07a17a9fU, 0xdae4d027U, 0xb8c6591eU, 0x6583f3a6U
+ /* 104 */ , 0x8ac7288aU, 0x57828232U, 0x35a00b0bU, 0xe8e5a1b3U, 0xf1e51979U, 0x2ca0b3c1U, 0x4e823af8U, 0x93c79040U
+ /* 112 */ , 0x95e7fa51U, 0x48a250e9U, 0x2a80d9d0U, 0xf7c57368U, 0xeec5cba2U, 0x3380611aU, 0x51a2e823U, 0x8ce7429bU
+ /* 120 */ , 0x63a399b7U, 0xbee6330fU, 0xdcc4ba36U, 0x0181108eU, 0x1881a844U, 0xc5c402fcU, 0xa7e68bc5U, 0x7aa3217dU
+ /* 128 */ , 0x52a0c93fU, 0x8fe56387U, 0xedc7eabeU, 0x30824006U, 0x2982f8ccU, 0xf4c75274U, 0x96e5db4dU, 0x4ba071f5U
+ /* 136 */ , 0xa4e4aad9U, 0x79a10061U, 0x1b838958U, 0xc6c623e0U, 0xdfc69b2aU, 0x02833192U, 0x60a1b8abU, 0xbde41213U
+ /* 144 */ , 0xbbc47802U, 0x6681d2baU, 0x04a35b83U, 0xd9e6f13bU, 0xc0e649f1U, 0x1da3e349U, 0x7f816a70U, 0xa2c4c0c8U
+ /* 152 */ , 0x4d801be4U, 0x90c5b15cU, 0xf2e73865U, 0x2fa292ddU, 0x36a22a17U, 0xebe780afU, 0x89c50996U, 0x5480a32eU
+ /* 160 */ , 0x8585ddb4U, 0x58c0770cU, 0x3ae2fe35U, 0xe7a7548dU, 0xfea7ec47U, 0x23e246ffU, 0x41c0cfc6U, 0x9c85657eU
+ /* 168 */ , 0x73c1be52U, 0xae8414eaU, 0xcca69dd3U, 0x11e3376bU, 0x08e38fa1U, 0xd5a62519U, 0xb784ac20U, 0x6ac10698U
+ /* 176 */ , 0x6ce16c89U, 0xb1a4c631U, 0xd3864f08U, 0x0ec3e5b0U, 0x17c35d7aU, 0xca86f7c2U, 0xa8a47efbU, 0x75e1d443U
+ /* 184 */ , 0x9aa50f6fU, 0x47e0a5d7U, 0x25c22ceeU, 0xf8878656U, 0xe1873e9cU, 0x3cc29424U, 0x5ee01d1dU, 0x83a5b7a5U
+ /* 192 */ , 0xf90696d8U, 0x24433c60U, 0x4661b559U, 0x9b241fe1U, 0x8224a72bU, 0x5f610d93U, 0x3d4384aaU, 0xe0062e12U
+ /* 200 */ , 0x0f42f53eU, 0xd2075f86U, 0xb025d6bfU, 0x6d607c07U, 0x7460c4cdU, 0xa9256e75U, 0xcb07e74cU, 0x16424df4U
+ /* 208 */ , 0x106227e5U, 0xcd278d5dU, 0xaf050464U, 0x7240aedcU, 0x6b401616U, 0xb605bcaeU, 0xd4273597U, 0x09629f2fU
+ /* 216 */ , 0xe6264403U, 0x3b63eebbU, 0x59416782U, 0x8404cd3aU, 0x9d0475f0U, 0x4041df48U, 0x22635671U, 0xff26fcc9U
+ /* 224 */ , 0x2e238253U, 0xf36628ebU, 0x9144a1d2U, 0x4c010b6aU, 0x5501b3a0U, 0x88441918U, 0xea669021U, 0x37233a99U
+ /* 232 */ , 0xd867e1b5U, 0x05224b0dU, 0x6700c234U, 0xba45688cU, 0xa345d046U, 0x7e007afeU, 0x1c22f3c7U, 0xc167597fU
+ /* 240 */ , 0xc747336eU, 0x1a0299d6U, 0x782010efU, 0xa565ba57U, 0xbc65029dU, 0x6120a825U, 0x0302211cU, 0xde478ba4U
+ /* 248 */ , 0x31035088U, 0xec46fa30U, 0x8e647309U, 0x5321d9b1U, 0x4a21617bU, 0x9764cbc3U, 0xf54642faU, 0x2803e842U
+ }
+ ,
+ /* CRC32C table 4 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x03836bf2U, 0xf7703be1U, 0xf4f35013U, 0x1f979ac7U, 0x1c14f135U, 0xe8e7a126U, 0xeb64cad4U
+ /* 8 */ , 0xcf58d98aU, 0xccdbb278U, 0x3828e26bU, 0x3bab8999U, 0xd0cf434dU, 0xd34c28bfU, 0x27bf78acU, 0x243c135eU
+ /* 16 */ , 0x6fc75e10U, 0x6c4435e2U, 0x98b765f1U, 0x9b340e03U, 0x7050c4d7U, 0x73d3af25U, 0x8720ff36U, 0x84a394c4U
+ /* 24 */ , 0xa09f879aU, 0xa31cec68U, 0x57efbc7bU, 0x546cd789U, 0xbf081d5dU, 0xbc8b76afU, 0x487826bcU, 0x4bfb4d4eU
+ /* 32 */ , 0xde8ebd20U, 0xdd0dd6d2U, 0x29fe86c1U, 0x2a7ded33U, 0xc11927e7U, 0xc29a4c15U, 0x36691c06U, 0x35ea77f4U
+ /* 40 */ , 0x11d664aaU, 0x12550f58U, 0xe6a65f4bU, 0xe52534b9U, 0x0e41fe6dU, 0x0dc2959fU, 0xf931c58cU, 0xfab2ae7eU
+ /* 48 */ , 0xb149e330U, 0xb2ca88c2U, 0x4639d8d1U, 0x45bab323U, 0xaede79f7U, 0xad5d1205U, 0x59ae4216U, 0x5a2d29e4U
+ /* 56 */ , 0x7e113abaU, 0x7d925148U, 0x8961015bU, 0x8ae26aa9U, 0x6186a07dU, 0x6205cb8fU, 0x96f69b9cU, 0x9575f06eU
+ /* 64 */ , 0xbc1d7b41U, 0xbf9e10b3U, 0x4b6d40a0U, 0x48ee2b52U, 0xa38ae186U, 0xa0098a74U, 0x54fada67U, 0x5779b195U
+ /* 72 */ , 0x7345a2cbU, 0x70c6c939U, 0x8435992aU, 0x87b6f2d8U, 0x6cd2380cU, 0x6f5153feU, 0x9ba203edU, 0x9821681fU
+ /* 80 */ , 0xd3da2551U, 0xd0594ea3U, 0x24aa1eb0U, 0x27297542U, 0xcc4dbf96U, 0xcfced464U, 0x3b3d8477U, 0x38beef85U
+ /* 88 */ , 0x1c82fcdbU, 0x1f019729U, 0xebf2c73aU, 0xe871acc8U, 0x0315661cU, 0x00960deeU, 0xf4655dfdU, 0xf7e6360fU
+ /* 96 */ , 0x6293c661U, 0x6110ad93U, 0x95e3fd80U, 0x96609672U, 0x7d045ca6U, 0x7e873754U, 0x8a746747U, 0x89f70cb5U
+ /* 104 */ , 0xadcb1febU, 0xae487419U, 0x5abb240aU, 0x59384ff8U, 0xb25c852cU, 0xb1dfeedeU, 0x452cbecdU, 0x46afd53fU
+ /* 112 */ , 0x0d549871U, 0x0ed7f383U, 0xfa24a390U, 0xf9a7c862U, 0x12c302b6U, 0x11406944U, 0xe5b33957U, 0xe63052a5U
+ /* 120 */ , 0xc20c41fbU, 0xc18f2a09U, 0x357c7a1aU, 0x36ff11e8U, 0xdd9bdb3cU, 0xde18b0ceU, 0x2aebe0ddU, 0x29688b2fU
+ /* 128 */ , 0x783bf682U, 0x7bb89d70U, 0x8f4bcd63U, 0x8cc8a691U, 0x67ac6c45U, 0x642f07b7U, 0x90dc57a4U, 0x935f3c56U
+ /* 136 */ , 0xb7632f08U, 0xb4e044faU, 0x401314e9U, 0x43907f1bU, 0xa8f4b5cfU, 0xab77de3dU, 0x5f848e2eU, 0x5c07e5dcU
+ /* 144 */ , 0x17fca892U, 0x147fc360U, 0xe08c9373U, 0xe30ff881U, 0x086b3255U, 0x0be859a7U, 0xff1b09b4U, 0xfc986246U
+ /* 152 */ , 0xd8a47118U, 0xdb271aeaU, 0x2fd44af9U, 0x2c57210bU, 0xc733ebdfU, 0xc4b0802dU, 0x3043d03eU, 0x33c0bbccU
+ /* 160 */ , 0xa6b54ba2U, 0xa5362050U, 0x51c57043U, 0x52461bb1U, 0xb922d165U, 0xbaa1ba97U, 0x4e52ea84U, 0x4dd18176U
+ /* 168 */ , 0x69ed9228U, 0x6a6ef9daU, 0x9e9da9c9U, 0x9d1ec23bU, 0x767a08efU, 0x75f9631dU, 0x810a330eU, 0x828958fcU
+ /* 176 */ , 0xc97215b2U, 0xcaf17e40U, 0x3e022e53U, 0x3d8145a1U, 0xd6e58f75U, 0xd566e487U, 0x2195b494U, 0x2216df66U
+ /* 184 */ , 0x062acc38U, 0x05a9a7caU, 0xf15af7d9U, 0xf2d99c2bU, 0x19bd56ffU, 0x1a3e3d0dU, 0xeecd6d1eU, 0xed4e06ecU
+ /* 192 */ , 0xc4268dc3U, 0xc7a5e631U, 0x3356b622U, 0x30d5ddd0U, 0xdbb11704U, 0xd8327cf6U, 0x2cc12ce5U, 0x2f424717U
+ /* 200 */ , 0x0b7e5449U, 0x08fd3fbbU, 0xfc0e6fa8U, 0xff8d045aU, 0x14e9ce8eU, 0x176aa57cU, 0xe399f56fU, 0xe01a9e9dU
+ /* 208 */ , 0xabe1d3d3U, 0xa862b821U, 0x5c91e832U, 0x5f1283c0U, 0xb4764914U, 0xb7f522e6U, 0x430672f5U, 0x40851907U
+ /* 216 */ , 0x64b90a59U, 0x673a61abU, 0x93c931b8U, 0x904a5a4aU, 0x7b2e909eU, 0x78adfb6cU, 0x8c5eab7fU, 0x8fddc08dU
+ /* 224 */ , 0x1aa830e3U, 0x192b5b11U, 0xedd80b02U, 0xee5b60f0U, 0x053faa24U, 0x06bcc1d6U, 0xf24f91c5U, 0xf1ccfa37U
+ /* 232 */ , 0xd5f0e969U, 0xd673829bU, 0x2280d288U, 0x2103b97aU, 0xca6773aeU, 0xc9e4185cU, 0x3d17484fU, 0x3e9423bdU
+ /* 240 */ , 0x756f6ef3U, 0x76ec0501U, 0x821f5512U, 0x819c3ee0U, 0x6af8f434U, 0x697b9fc6U, 0x9d88cfd5U, 0x9e0ba427U
+ /* 248 */ , 0xba37b779U, 0xb9b4dc8bU, 0x4d478c98U, 0x4ec4e76aU, 0xa5a02dbeU, 0xa623464cU, 0x52d0165fU, 0x51537dadU
+ }
+ ,
+ /* CRC32C table 5 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x7798a213U, 0xee304527U, 0x99a8e734U, 0xdc618a4eU, 0xabf9285dU, 0x3251cf69U, 0x45c96d7aU
+ /* 8 */ , 0xb8c3149dU, 0xcf5bb68eU, 0x56f351baU, 0x216bf3a9U, 0x64a29ed3U, 0x133a3cc0U, 0x8a92dbf4U, 0xfd0a79e7U
+ /* 16 */ , 0x81f1c53fU, 0xf669672cU, 0x6fc18018U, 0x1859220bU, 0x5d904f71U, 0x2a08ed62U, 0xb3a00a56U, 0xc438a845U
+ /* 24 */ , 0x3932d1a2U, 0x4eaa73b1U, 0xd7029485U, 0xa09a3696U, 0xe5535becU, 0x92cbf9ffU, 0x0b631ecbU, 0x7cfbbcd8U
+ /* 32 */ , 0x02e38b7fU, 0x757b296cU, 0xecd3ce58U, 0x9b4b6c4bU, 0xde820131U, 0xa91aa322U, 0x30b24416U, 0x472ae605U
+ /* 40 */ , 0xba209fe2U, 0xcdb83df1U, 0x5410dac5U, 0x238878d6U, 0x664115acU, 0x11d9b7bfU, 0x8871508bU, 0xffe9f298U
+ /* 48 */ , 0x83124e40U, 0xf48aec53U, 0x6d220b67U, 0x1abaa974U, 0x5f73c40eU, 0x28eb661dU, 0xb1438129U, 0xc6db233aU
+ /* 56 */ , 0x3bd15addU, 0x4c49f8ceU, 0xd5e11ffaU, 0xa279bde9U, 0xe7b0d093U, 0x90287280U, 0x098095b4U, 0x7e1837a7U
+ /* 64 */ , 0x04c617ffU, 0x735eb5ecU, 0xeaf652d8U, 0x9d6ef0cbU, 0xd8a79db1U, 0xaf3f3fa2U, 0x3697d896U, 0x410f7a85U
+ /* 72 */ , 0xbc050362U, 0xcb9da171U, 0x52354645U, 0x25ade456U, 0x6064892cU, 0x17fc2b3fU, 0x8e54cc0bU, 0xf9cc6e18U
+ /* 80 */ , 0x8537d2c0U, 0xf2af70d3U, 0x6b0797e7U, 0x1c9f35f4U, 0x5956588eU, 0x2ecefa9dU, 0xb7661da9U, 0xc0febfbaU
+ /* 88 */ , 0x3df4c65dU, 0x4a6c644eU, 0xd3c4837aU, 0xa45c2169U, 0xe1954c13U, 0x960dee00U, 0x0fa50934U, 0x783dab27U
+ /* 96 */ , 0x06259c80U, 0x71bd3e93U, 0xe815d9a7U, 0x9f8d7bb4U, 0xda4416ceU, 0xaddcb4ddU, 0x347453e9U, 0x43ecf1faU
+ /* 104 */ , 0xbee6881dU, 0xc97e2a0eU, 0x50d6cd3aU, 0x274e6f29U, 0x62870253U, 0x151fa040U, 0x8cb74774U, 0xfb2fe567U
+ /* 112 */ , 0x87d459bfU, 0xf04cfbacU, 0x69e41c98U, 0x1e7cbe8bU, 0x5bb5d3f1U, 0x2c2d71e2U, 0xb58596d6U, 0xc21d34c5U
+ /* 120 */ , 0x3f174d22U, 0x488fef31U, 0xd1270805U, 0xa6bfaa16U, 0xe376c76cU, 0x94ee657fU, 0x0d46824bU, 0x7ade2058U
+ /* 128 */ , 0xf9fac3fbU, 0x8e6261e8U, 0x17ca86dcU, 0x605224cfU, 0x259b49b5U, 0x5203eba6U, 0xcbab0c92U, 0xbc33ae81U
+ /* 136 */ , 0x4139d766U, 0x36a17575U, 0xaf099241U, 0xd8913052U, 0x9d585d28U, 0xeac0ff3bU, 0x7368180fU, 0x04f0ba1cU
+ /* 144 */ , 0x780b06c4U, 0x0f93a4d7U, 0x963b43e3U, 0xe1a3e1f0U, 0xa46a8c8aU, 0xd3f22e99U, 0x4a5ac9adU, 0x3dc26bbeU
+ /* 152 */ , 0xc0c81259U, 0xb750b04aU, 0x2ef8577eU, 0x5960f56dU, 0x1ca99817U, 0x6b313a04U, 0xf299dd30U, 0x85017f23U
+ /* 160 */ , 0xfb194884U, 0x8c81ea97U, 0x15290da3U, 0x62b1afb0U, 0x2778c2caU, 0x50e060d9U, 0xc94887edU, 0xbed025feU
+ /* 168 */ , 0x43da5c19U, 0x3442fe0aU, 0xadea193eU, 0xda72bb2dU, 0x9fbbd657U, 0xe8237444U, 0x718b9370U, 0x06133163U
+ /* 176 */ , 0x7ae88dbbU, 0x0d702fa8U, 0x94d8c89cU, 0xe3406a8fU, 0xa68907f5U, 0xd111a5e6U, 0x48b942d2U, 0x3f21e0c1U
+ /* 184 */ , 0xc22b9926U, 0xb5b33b35U, 0x2c1bdc01U, 0x5b837e12U, 0x1e4a1368U, 0x69d2b17bU, 0xf07a564fU, 0x87e2f45cU
+ /* 192 */ , 0xfd3cd404U, 0x8aa47617U, 0x130c9123U, 0x64943330U, 0x215d5e4aU, 0x56c5fc59U, 0xcf6d1b6dU, 0xb8f5b97eU
+ /* 200 */ , 0x45ffc099U, 0x3267628aU, 0xabcf85beU, 0xdc5727adU, 0x999e4ad7U, 0xee06e8c4U, 0x77ae0ff0U, 0x0036ade3U
+ /* 208 */ , 0x7ccd113bU, 0x0b55b328U, 0x92fd541cU, 0xe565f60fU, 0xa0ac9b75U, 0xd7343966U, 0x4e9cde52U, 0x39047c41U
+ /* 216 */ , 0xc40e05a6U, 0xb396a7b5U, 0x2a3e4081U, 0x5da6e292U, 0x186f8fe8U, 0x6ff72dfbU, 0xf65fcacfU, 0x81c768dcU
+ /* 224 */ , 0xffdf5f7bU, 0x8847fd68U, 0x11ef1a5cU, 0x6677b84fU, 0x23bed535U, 0x54267726U, 0xcd8e9012U, 0xba163201U
+ /* 232 */ , 0x471c4be6U, 0x3084e9f5U, 0xa92c0ec1U, 0xdeb4acd2U, 0x9b7dc1a8U, 0xece563bbU, 0x754d848fU, 0x02d5269cU
+ /* 240 */ , 0x7e2e9a44U, 0x09b63857U, 0x901edf63U, 0xe7867d70U, 0xa24f100aU, 0xd5d7b219U, 0x4c7f552dU, 0x3be7f73eU
+ /* 248 */ , 0xc6ed8ed9U, 0xb1752ccaU, 0x28ddcbfeU, 0x5f4569edU, 0x1a8c0497U, 0x6d14a684U, 0xf4bc41b0U, 0x8324e3a3U
+ }
+ ,
+ /* CRC32C table 6 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x7e9241a5U, 0x0d526f4fU, 0x73c02eeaU, 0x1aa4de9eU, 0x64369f3bU, 0x17f6b1d1U, 0x6964f074U
+ /* 8 */ , 0xc53e5138U, 0xbbac109dU, 0xc86c3e77U, 0xb6fe7fd2U, 0xdf9a8fa6U, 0xa108ce03U, 0xd2c8e0e9U, 0xac5aa14cU
+ /* 16 */ , 0x8a7da270U, 0xf4efe3d5U, 0x872fcd3fU, 0xf9bd8c9aU, 0x90d97ceeU, 0xee4b3d4bU, 0x9d8b13a1U, 0xe3195204U
+ /* 24 */ , 0x4f43f348U, 0x31d1b2edU, 0x42119c07U, 0x3c83dda2U, 0x55e72dd6U, 0x2b756c73U, 0x58b54299U, 0x2627033cU
+ /* 32 */ , 0x14fb44e1U, 0x6a690544U, 0x19a92baeU, 0x673b6a0bU, 0x0e5f9a7fU, 0x70cddbdaU, 0x030df530U, 0x7d9fb495U
+ /* 40 */ , 0xd1c515d9U, 0xaf57547cU, 0xdc977a96U, 0xa2053b33U, 0xcb61cb47U, 0xb5f38ae2U, 0xc633a408U, 0xb8a1e5adU
+ /* 48 */ , 0x9e86e691U, 0xe014a734U, 0x93d489deU, 0xed46c87bU, 0x8422380fU, 0xfab079aaU, 0x89705740U, 0xf7e216e5U
+ /* 56 */ , 0x5bb8b7a9U, 0x252af60cU, 0x56ead8e6U, 0x28789943U, 0x411c6937U, 0x3f8e2892U, 0x4c4e0678U, 0x32dc47ddU
+ /* 64 */ , 0xd98065c7U, 0xa7122462U, 0xd4d20a88U, 0xaa404b2dU, 0xc324bb59U, 0xbdb6fafcU, 0xce76d416U, 0xb0e495b3U
+ /* 72 */ , 0x1cbe34ffU, 0x622c755aU, 0x11ec5bb0U, 0x6f7e1a15U, 0x061aea61U, 0x7888abc4U, 0x0b48852eU, 0x75dac48bU
+ /* 80 */ , 0x53fdc7b7U, 0x2d6f8612U, 0x5eafa8f8U, 0x203de95dU, 0x49591929U, 0x37cb588cU, 0x440b7666U, 0x3a9937c3U
+ /* 88 */ , 0x96c3968fU, 0xe851d72aU, 0x9b91f9c0U, 0xe503b865U, 0x8c674811U, 0xf2f509b4U, 0x8135275eU, 0xffa766fbU
+ /* 96 */ , 0xcd7b2126U, 0xb3e96083U, 0xc0294e69U, 0xbebb0fccU, 0xd7dfffb8U, 0xa94dbe1dU, 0xda8d90f7U, 0xa41fd152U
+ /* 104 */ , 0x0845701eU, 0x76d731bbU, 0x05171f51U, 0x7b855ef4U, 0x12e1ae80U, 0x6c73ef25U, 0x1fb3c1cfU, 0x6121806aU
+ /* 112 */ , 0x47068356U, 0x3994c2f3U, 0x4a54ec19U, 0x34c6adbcU, 0x5da25dc8U, 0x23301c6dU, 0x50f03287U, 0x2e627322U
+ /* 120 */ , 0x8238d26eU, 0xfcaa93cbU, 0x8f6abd21U, 0xf1f8fc84U, 0x989c0cf0U, 0xe60e4d55U, 0x95ce63bfU, 0xeb5c221aU
+ /* 128 */ , 0x4377278bU, 0x3de5662eU, 0x4e2548c4U, 0x30b70961U, 0x59d3f915U, 0x2741b8b0U, 0x5481965aU, 0x2a13d7ffU
+ /* 136 */ , 0x864976b3U, 0xf8db3716U, 0x8b1b19fcU, 0xf5895859U, 0x9ceda82dU, 0xe27fe988U, 0x91bfc762U, 0xef2d86c7U
+ /* 144 */ , 0xc90a85fbU, 0xb798c45eU, 0xc458eab4U, 0xbacaab11U, 0xd3ae5b65U, 0xad3c1ac0U, 0xdefc342aU, 0xa06e758fU
+ /* 152 */ , 0x0c34d4c3U, 0x72a69566U, 0x0166bb8cU, 0x7ff4fa29U, 0x16900a5dU, 0x68024bf8U, 0x1bc26512U, 0x655024b7U
+ /* 160 */ , 0x578c636aU, 0x291e22cfU, 0x5ade0c25U, 0x244c4d80U, 0x4d28bdf4U, 0x33bafc51U, 0x407ad2bbU, 0x3ee8931eU
+ /* 168 */ , 0x92b23252U, 0xec2073f7U, 0x9fe05d1dU, 0xe1721cb8U, 0x8816ecccU, 0xf684ad69U, 0x85448383U, 0xfbd6c226U
+ /* 176 */ , 0xddf1c11aU, 0xa36380bfU, 0xd0a3ae55U, 0xae31eff0U, 0xc7551f84U, 0xb9c75e21U, 0xca0770cbU, 0xb495316eU
+ /* 184 */ , 0x18cf9022U, 0x665dd187U, 0x159dff6dU, 0x6b0fbec8U, 0x026b4ebcU, 0x7cf90f19U, 0x0f3921f3U, 0x71ab6056U
+ /* 192 */ , 0x9af7424cU, 0xe46503e9U, 0x97a52d03U, 0xe9376ca6U, 0x80539cd2U, 0xfec1dd77U, 0x8d01f39dU, 0xf393b238U
+ /* 200 */ , 0x5fc91374U, 0x215b52d1U, 0x529b7c3bU, 0x2c093d9eU, 0x456dcdeaU, 0x3bff8c4fU, 0x483fa2a5U, 0x36ade300U
+ /* 208 */ , 0x108ae03cU, 0x6e18a199U, 0x1dd88f73U, 0x634aced6U, 0x0a2e3ea2U, 0x74bc7f07U, 0x077c51edU, 0x79ee1048U
+ /* 216 */ , 0xd5b4b104U, 0xab26f0a1U, 0xd8e6de4bU, 0xa6749feeU, 0xcf106f9aU, 0xb1822e3fU, 0xc24200d5U, 0xbcd04170U
+ /* 224 */ , 0x8e0c06adU, 0xf09e4708U, 0x835e69e2U, 0xfdcc2847U, 0x94a8d833U, 0xea3a9996U, 0x99fab77cU, 0xe768f6d9U
+ /* 232 */ , 0x4b325795U, 0x35a01630U, 0x466038daU, 0x38f2797fU, 0x5196890bU, 0x2f04c8aeU, 0x5cc4e644U, 0x2256a7e1U
+ /* 240 */ , 0x0471a4ddU, 0x7ae3e578U, 0x0923cb92U, 0x77b18a37U, 0x1ed57a43U, 0x60473be6U, 0x1387150cU, 0x6d1554a9U
+ /* 248 */ , 0xc14ff5e5U, 0xbfddb440U, 0xcc1d9aaaU, 0xb28fdb0fU, 0xdbeb2b7bU, 0xa5796adeU, 0xd6b94434U, 0xa82b0591U
+ }
+ ,
+ /* CRC32C table 7 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0xb8aa45ddU, 0x812367bfU, 0x39892262U, 0xf331227bU, 0x4b9b67a6U, 0x721245c4U, 0xcab80019U
+ /* 8 */ , 0xe66344f6U, 0x5ec9012bU, 0x67402349U, 0xdfea6694U, 0x1552668dU, 0xadf82350U, 0x94710132U, 0x2cdb44efU
+ /* 16 */ , 0x3db164e9U, 0x851b2134U, 0xbc920356U, 0x0438468bU, 0xce804692U, 0x762a034fU, 0x4fa3212dU, 0xf70964f0U
+ /* 24 */ , 0xdbd2201fU, 0x637865c2U, 0x5af147a0U, 0xe25b027dU, 0x28e30264U, 0x904947b9U, 0xa9c065dbU, 0x116a2006U
+ /* 32 */ , 0x8b1425d7U, 0x33be600aU, 0x0a374268U, 0xb29d07b5U, 0x782507acU, 0xc08f4271U, 0xf9066013U, 0x41ac25ceU
+ /* 40 */ , 0x6d776121U, 0xd5dd24fcU, 0xec54069eU, 0x54fe4343U, 0x9e46435aU, 0x26ec0687U, 0x1f6524e5U, 0xa7cf6138U
+ /* 48 */ , 0xb6a5413eU, 0x0e0f04e3U, 0x37862681U, 0x8f2c635cU, 0x45946345U, 0xfd3e2698U, 0xc4b704faU, 0x7c1d4127U
+ /* 56 */ , 0x50c605c8U, 0xe86c4015U, 0xd1e56277U, 0x694f27aaU, 0xa3f727b3U, 0x1b5d626eU, 0x22d4400cU, 0x9a7e05d1U
+ /* 64 */ , 0xe75fa6abU, 0x5ff5e376U, 0x667cc114U, 0xded684c9U, 0x146e84d0U, 0xacc4c10dU, 0x954de36fU, 0x2de7a6b2U
+ /* 72 */ , 0x013ce25dU, 0xb996a780U, 0x801f85e2U, 0x38b5c03fU, 0xf20dc026U, 0x4aa785fbU, 0x732ea799U, 0xcb84e244U
+ /* 80 */ , 0xdaeec242U, 0x6244879fU, 0x5bcda5fdU, 0xe367e020U, 0x29dfe039U, 0x9175a5e4U, 0xa8fc8786U, 0x1056c25bU
+ /* 88 */ , 0x3c8d86b4U, 0x8427c369U, 0xbdaee10bU, 0x0504a4d6U, 0xcfbca4cfU, 0x7716e112U, 0x4e9fc370U, 0xf63586adU
+ /* 96 */ , 0x6c4b837cU, 0xd4e1c6a1U, 0xed68e4c3U, 0x55c2a11eU, 0x9f7aa107U, 0x27d0e4daU, 0x1e59c6b8U, 0xa6f38365U
+ /* 104 */ , 0x8a28c78aU, 0x32828257U, 0x0b0ba035U, 0xb3a1e5e8U, 0x7919e5f1U, 0xc1b3a02cU, 0xf83a824eU, 0x4090c793U
+ /* 112 */ , 0x51fae795U, 0xe950a248U, 0xd0d9802aU, 0x6873c5f7U, 0xa2cbc5eeU, 0x1a618033U, 0x23e8a251U, 0x9b42e78cU
+ /* 120 */ , 0xb799a363U, 0x0f33e6beU, 0x36bac4dcU, 0x8e108101U, 0x44a88118U, 0xfc02c4c5U, 0xc58be6a7U, 0x7d21a37aU
+ /* 128 */ , 0x3fc9a052U, 0x8763e58fU, 0xbeeac7edU, 0x06408230U, 0xccf88229U, 0x7452c7f4U, 0x4ddbe596U, 0xf571a04bU
+ /* 136 */ , 0xd9aae4a4U, 0x6100a179U, 0x5889831bU, 0xe023c6c6U, 0x2a9bc6dfU, 0x92318302U, 0xabb8a160U, 0x1312e4bdU
+ /* 144 */ , 0x0278c4bbU, 0xbad28166U, 0x835ba304U, 0x3bf1e6d9U, 0xf149e6c0U, 0x49e3a31dU, 0x706a817fU, 0xc8c0c4a2U
+ /* 152 */ , 0xe41b804dU, 0x5cb1c590U, 0x6538e7f2U, 0xdd92a22fU, 0x172aa236U, 0xaf80e7ebU, 0x9609c589U, 0x2ea38054U
+ /* 160 */ , 0xb4dd8585U, 0x0c77c058U, 0x35fee23aU, 0x8d54a7e7U, 0x47eca7feU, 0xff46e223U, 0xc6cfc041U, 0x7e65859cU
+ /* 168 */ , 0x52bec173U, 0xea1484aeU, 0xd39da6ccU, 0x6b37e311U, 0xa18fe308U, 0x1925a6d5U, 0x20ac84b7U, 0x9806c16aU
+ /* 176 */ , 0x896ce16cU, 0x31c6a4b1U, 0x084f86d3U, 0xb0e5c30eU, 0x7a5dc317U, 0xc2f786caU, 0xfb7ea4a8U, 0x43d4e175U
+ /* 184 */ , 0x6f0fa59aU, 0xd7a5e047U, 0xee2cc225U, 0x568687f8U, 0x9c3e87e1U, 0x2494c23cU, 0x1d1de05eU, 0xa5b7a583U
+ /* 192 */ , 0xd89606f9U, 0x603c4324U, 0x59b56146U, 0xe11f249bU, 0x2ba72482U, 0x930d615fU, 0xaa84433dU, 0x122e06e0U
+ /* 200 */ , 0x3ef5420fU, 0x865f07d2U, 0xbfd625b0U, 0x077c606dU, 0xcdc46074U, 0x756e25a9U, 0x4ce707cbU, 0xf44d4216U
+ /* 208 */ , 0xe5276210U, 0x5d8d27cdU, 0x640405afU, 0xdcae4072U, 0x1616406bU, 0xaebc05b6U, 0x973527d4U, 0x2f9f6209U
+ /* 216 */ , 0x034426e6U, 0xbbee633bU, 0x82674159U, 0x3acd0484U, 0xf075049dU, 0x48df4140U, 0x71566322U, 0xc9fc26ffU
+ /* 224 */ , 0x5382232eU, 0xeb2866f3U, 0xd2a14491U, 0x6a0b014cU, 0xa0b30155U, 0x18194488U, 0x219066eaU, 0x993a2337U
+ /* 232 */ , 0xb5e167d8U, 0x0d4b2205U, 0x34c20067U, 0x8c6845baU, 0x46d045a3U, 0xfe7a007eU, 0xc7f3221cU, 0x7f5967c1U
+ /* 240 */ , 0x6e3347c7U, 0xd699021aU, 0xef102078U, 0x57ba65a5U, 0x9d0265bcU, 0x25a82061U, 0x1c210203U, 0xa48b47deU
+ /* 248 */ , 0x88500331U, 0x30fa46ecU, 0x0973648eU, 0xb1d92153U, 0x7b61214aU, 0xc3cb6497U, 0xfa4246f5U, 0x42e80328U
+ }
+ #endif
+ };
juint* StubRoutines::ppc64::_constants = StubRoutines::ppc64::generate_crc_constants();
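The quad-byte tables above implement a slicing-by-four style CRC (the CRC32_BYFOUR guard above hints at this): tables 1-3 (little-endian) and 4-7 (big-endian) are what allow the kernel_crc32_1word variant referenced further down to fold a full 32-bit word per iteration, while the first table doubles as the ordinary single-byte table for the reflected polynomial named in its header (0x82f63b78 for CRC32C). The sketch below is only an illustration of how such a table is derived and consumed on the host side; it is not the PPC64 assembly the kernel_crc32_* routines emit, and the helper names are made up for this example.

    #include <cstddef>
    #include <cstdint>

    // Byte-at-a-time CRC32C over the reflected polynomial 0x82f63b78.
    // init_crc32c_table0() reproduces table 0 of _crc32c_table above,
    // e.g. crc32c_table0[1] == 0xf26b8303.
    static uint32_t crc32c_table0[256];

    static void init_crc32c_table0() {
      for (uint32_t i = 0; i < 256; i++) {
        uint32_t c = i;
        for (int k = 0; k < 8; k++) {
          c = (c & 1) ? (c >> 1) ^ 0x82f63b78u : (c >> 1);
        }
        crc32c_table0[i] = c;
      }
    }

    static uint32_t crc32c_bytes(uint32_t crc, const uint8_t* buf, size_t len) {
      crc = ~crc;  // CRC32C is pre- and post-inverted (initial value 0xffffffff).
      for (size_t i = 0; i < len; i++) {
        crc = (crc >> 8) ^ crc32c_table0[(crc ^ buf[i]) & 0xffU];
      }
      return ~crc;
    }

Note that the CRC32C interpreter entries added further below receive an exclusive "end" index rather than a length, so the byte count handed to such a kernel is end - off; the boolean argument now passed to kernel_crc32_singleByte and kernel_crc32_1word presumably controls whether that pre/post inversion happens inside the kernel.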
diff --git a/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp b/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp
index ab87c204018..beefd27a4fe 100644
--- a/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2015, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 2015, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -643,12 +643,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry;
}
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
- address entry = __ pc();
- __ unimplemented("generate_continuation_for");
- return entry;
-}
-
// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
@@ -692,6 +686,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
#endif
__ sldi(size, size, Interpreter::logStackElementSize);
__ add(R15_esp, R15_esp, size);
+
+ __ check_and_handle_popframe(R11_scratch1);
+ __ check_and_handle_earlyret(R11_scratch1);
+
__ dispatch_next(state, step);
return entry;
}
@@ -1894,7 +1892,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ lwz(crc, 2*wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
- __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);
+ __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp, true);
// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@@ -1910,7 +1908,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
return NULL;
}
-// CRC32 Intrinsics.
+
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
@@ -1986,7 +1984,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// Performance measurements show the 1word and 2word variants to be almost equivalent,
// with very light advantages for the 1word variant. We chose the 1word variant for
// code compactness.
- __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);
+ __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, true);
// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@@ -2002,8 +2000,88 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
return NULL;
}
-// Not supported
+
+/**
+ * Method entry for intrinsic-candidate (non-native) methods:
+ * int java.util.zip.CRC32C.updateBytes( int crc, byte[] b, int off, int end)
+ * int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
+ * Unlike CRC32, CRC32C does not have any methods marked as native.
+ * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
+ **/
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+ if (UseCRC32CIntrinsics) {
+ address start = __ pc(); // Remember stub start address (is rtn value).
+
+  // We don't generate a local frame and don't align the stack because
+  // we don't even call stub code (we generate the code inline)
+  // and there is no safepoint on this path.
+
+ // Load parameters.
+  // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
+ const Register argP = R15_esp;
+ const Register crc = R3_ARG1; // crc value
+ const Register data = R4_ARG2; // address of java byte array
+ const Register dataLen = R5_ARG3; // source data len
+ const Register table = R6_ARG4; // address of crc32c table
+
+ const Register t0 = R9; // scratch registers for crc calculation
+ const Register t1 = R10;
+ const Register t2 = R11;
+ const Register t3 = R12;
+
+ const Register tc0 = R2; // registers to hold pre-calculated column addresses
+ const Register tc1 = R7;
+ const Register tc2 = R8;
+ const Register tc3 = table; // table address is reconstructed at the end of kernel_crc32_* emitters
+
+ const Register tmp = t0; // Only used very locally to calculate byte buffer address.
+
+ // Arguments are reversed on java expression stack.
+ // Calculate address of start element.
+ if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateDirectByteBuffer".
+ BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
+ // crc @ (SP + 5W) (32bit)
+ // buf @ (SP + 3W) (64bit ptr to long array)
+ // off @ (SP + 2W) (32bit)
+ // dataLen @ (SP + 1W) (32bit)
+ // data = buf + off
+ __ ld( data, 3*wordSize, argP); // start of byte buffer
+ __ lwa( tmp, 2*wordSize, argP); // byte buffer offset
+ __ lwa( dataLen, 1*wordSize, argP); // #bytes to process
+ __ lwz( crc, 5*wordSize, argP); // current crc state
+ __ add( data, data, tmp); // Add byte buffer offset.
+ __ sub( dataLen, dataLen, tmp); // (end_index - offset)
+ } else { // Used for "updateBytes update".
+ BLOCK_COMMENT("CRC32C_updateBytes {");
+ // crc @ (SP + 4W) (32bit)
+ // buf @ (SP + 3W) (64bit ptr to byte array)
+ // off @ (SP + 2W) (32bit)
+ // dataLen @ (SP + 1W) (32bit)
+ // data = buf + off + base_offset
+ __ ld( data, 3*wordSize, argP); // start of byte buffer
+ __ lwa( tmp, 2*wordSize, argP); // byte buffer offset
+ __ lwa( dataLen, 1*wordSize, argP); // #bytes to process
+ __ add( data, data, tmp); // add byte buffer offset
+ __ sub( dataLen, dataLen, tmp); // (end_index - offset)
+ __ lwz( crc, 4*wordSize, argP); // current crc state
+ __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
+ }
+
+ StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
+
+ // Performance measurements show the 1word and 2word variants to be almost equivalent,
+ // with very light advantages for the 1word variant. We chose the 1word variant for
+ // code compactness.
+ __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, false);
+
+ // Restore caller sp for c2i case and return.
+ __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+ __ blr();
+
+ BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
+ return start;
+ }
+
return NULL;
}
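
To follow the argument handling in generate_CRC32C_updateBytes_entry above: CRC32C passes an end index instead of a length, so the entry computes len = end - off and advances the data pointer by off (plus the array header in the byte[] case). Below is a rough C++ sketch of that arithmetic; the header size and helper names are illustrative assumptions, not HotSpot APIs.

    #include <cstddef>
    #include <cstdint>

    // Illustrative constant standing in for arrayOopDesc::base_offset_in_bytes(T_BYTE).
    static const size_t kByteArrayBaseOffset = 16;

    struct CrcArgs {
      const uint8_t* data;  // first byte to process
      int            len;   // number of bytes to process
    };

    // updateBytes(crc, byte[] b, off, end): 'array_oop' is the start of the array object.
    static CrcArgs args_for_update_bytes(const uint8_t* array_oop, int off, int end) {
      CrcArgs a;
      a.data = array_oop + kByteArrayBaseOffset + off;  // data = buf + off + base_offset
      a.len  = end - off;                               // CRC32C passes 'end', not a length
      return a;
    }

    // updateDirectByteBuffer(crc, addr, off, end): 'addr' already points at byte 0.
    static CrcArgs args_for_direct_buffer(const uint8_t* addr, int off, int end) {
      CrcArgs a;
      a.data = addr + off;  // no array header to skip
      a.len  = end - off;
      return a;
    }

    int main() {
      uint8_t arr[32] = {};                            // stand-in for a byte[] object
      CrcArgs a = args_for_update_bytes(arr, 2, 10);   // off = 2, end = 10
      CrcArgs b = args_for_direct_buffer(arr, 2, 10);
      return (a.len == 8 && b.len == 8) ? 0 : 1;       // len = end - off = 8 in both cases
    }
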
diff --git a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp
index 517a304d4e3..43f6ad1c591 100644
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1472,13 +1472,13 @@ void TemplateTable::convert() {
case Bytecodes::_i2d:
__ extsw(R17_tos, R17_tos);
case Bytecodes::_l2d:
- __ push_l_pop_d();
+ __ move_l_to_d();
__ fcfid(F15_ftos, F15_ftos);
break;
case Bytecodes::_i2f:
__ extsw(R17_tos, R17_tos);
- __ push_l_pop_d();
+ __ move_l_to_d();
if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
// Comment: alternatively, load with sign extend could be done by lfiwax.
__ fcfids(F15_ftos, F15_ftos);
@@ -1490,7 +1490,7 @@ void TemplateTable::convert() {
case Bytecodes::_l2f:
if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
- __ push_l_pop_d();
+ __ move_l_to_d();
__ fcfids(F15_ftos, F15_ftos);
} else {
// Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
@@ -1514,7 +1514,7 @@ void TemplateTable::convert() {
__ li(R17_tos, 0); // 0 in case of NAN
__ bso(CCR0, done);
__ fctiwz(F15_ftos, F15_ftos);
- __ push_d_pop_l();
+ __ move_d_to_l();
break;
case Bytecodes::_d2l:
@@ -1523,7 +1523,7 @@ void TemplateTable::convert() {
__ li(R17_tos, 0); // 0 in case of NAN
__ bso(CCR0, done);
__ fctidz(F15_ftos, F15_ftos);
- __ push_d_pop_l();
+ __ move_d_to_l();
break;
default: ShouldNotReachHere();
@@ -3660,11 +3660,9 @@ void TemplateTable::_new() {
__ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
__ bne(CCR0, Lslow_case);
- // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
+ // Get instanceKlass
__ sldi(Roffset, Rindex, LogBytesPerWord);
- __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
- __ isync(); // Order load of instance Klass wrt. tags.
- __ ldx(RinstanceKlass, Roffset, Rscratch);
+ __ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);
// Make sure klass is fully initialized and get instance_size.
__ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
@@ -3722,7 +3720,7 @@ void TemplateTable::_new() {
__ bge(CCR0, Lslow_case);
// Increment waste limit to prevent getting stuck on this slow path.
- __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
+ __ add_const_optimized(RtlabWasteLimitValue, RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment());
__ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
}
// else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
@@ -3875,9 +3873,7 @@ void TemplateTable::checkcast() {
// Extract target class from constant pool.
__ bind(Lquicked);
__ sldi(Roffset, Roffset, LogBytesPerWord);
- __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
- __ isync(); // Order load of specified Klass wrt. tags.
- __ ldx(RspecifiedKlass, Rcpool, Roffset);
+ __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);
// Do the checkcast.
__ bind(Lresolved);
@@ -3939,9 +3935,7 @@ void TemplateTable::instanceof() {
// Extract target class from constant pool.
__ bind(Lquicked);
__ sldi(Roffset, Roffset, LogBytesPerWord);
- __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
- __ isync(); // Order load of specified Klass wrt. tags.
- __ ldx(RspecifiedKlass, Rcpool, Roffset);
+ __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);
// Do the checkcast.
__ bind(Lresolved);
diff --git a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
index 5e8be1425fa..4db0a9c20cb 100644
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,9 +28,11 @@
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
+#include "utilities/align.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"
@@ -79,7 +81,7 @@ void VM_Version::initialize() {
UINTX_FORMAT " on this machine", PowerArchitecturePPC64);
// Power 8: Configure Data Stream Control Register.
- if (has_mfdscr()) {
+ if (PowerArchitecturePPC64 >= 8 && has_mfdscr()) {
config_dscr();
}
@@ -111,7 +113,7 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
- "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_fsqrt() ? " fsqrt" : ""),
(has_isel() ? " isel" : ""),
(has_lxarxeh() ? " lxarxeh" : ""),
@@ -126,7 +128,9 @@ void VM_Version::initialize() {
(has_vpmsumb() ? " vpmsumb" : ""),
(has_tcheck() ? " tcheck" : ""),
(has_mfdscr() ? " mfdscr" : ""),
- (has_vsx() ? " vsx" : "")
+ (has_vsx() ? " vsx" : ""),
+ (has_ldbrx() ? " ldbrx" : ""),
+ (has_stdbrx() ? " stdbrx" : "")
// Make sure number of %s matches num_features!
);
_features_string = os::strdup(buf);
@@ -172,18 +176,27 @@ void VM_Version::initialize() {
assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
- // Implementation does not use any of the vector instructions
- // available with Power8. Their exploitation is still pending.
+ // If defined(VM_LITTLE_ENDIAN) and running on Power8 or newer hardware,
+ // the implementation uses the vector instructions available with Power8.
+ // In all other cases, the implementation uses only generally available instructions.
if (!UseCRC32Intrinsics) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
}
}
- if (UseCRC32CIntrinsics) {
- if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
- warning("CRC32C intrinsics are not available on this CPU");
- FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
+ // Implementation does not use any of the vector instructions available with Power8.
+ // Their exploitation is still pending (aka "work in progress").
+ if (!UseCRC32CIntrinsics) {
+ if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
+ FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
+ }
+ }
+
+ // TODO: Provide implementation.
+ if (UseAdler32Intrinsics) {
+ warning("Adler32Intrinsics not available on this CPU.");
+ FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
// The AES intrinsic stubs require AES instruction support.
@@ -245,11 +258,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
- if (UseAdler32Intrinsics) {
- warning("Adler32Intrinsics not available on this CPU.");
- FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
- }
-
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true;
}
@@ -319,18 +327,6 @@ void VM_Version::initialize() {
// high lock contention. For now we do not use it by default.
vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
}
- if (!is_power_of_2(RTMTotalCountIncrRate)) {
- warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
- FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
- }
- if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
- warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
- FLAG_SET_DEFAULT(RTMAbortRatio, 50);
- }
- if (RTMSpinLoopCount < 0) {
- warning("RTMSpinLoopCount must not be a negative value, resetting it to 0");
- FLAG_SET_DEFAULT(RTMSpinLoopCount, 0);
- }
#else
// Only C2 does RTM locking optimization.
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
@@ -659,6 +655,8 @@ void VM_Version::determine_features() {
a->tcheck(0); // code[12] -> tcheck
a->mfdscr(R0); // code[13] -> mfdscr
a->lxvd2x(VSR0, R3_ARG1); // code[14] -> vsx
+ a->ldbrx(R7, R3_ARG1, R4_ARG2); // code[15] -> ldbrx
+ a->stdbrx(R7, R3_ARG1, R4_ARG2); // code[16] -> stdbrx
a->blr();
// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@@ -688,7 +686,7 @@ void VM_Version::determine_features() {
// Execute code. Illegal instructions will be replaced by 0 in the signal handler.
VM_Version::_is_determine_features_test_running = true;
// We must align the first argument to 16 bytes because of the lqarx check.
- (*test)((address)align_size_up((intptr_t)mid_of_test_area, 16), (uint64_t)0);
+ (*test)(align_up((address)mid_of_test_area, 16), 0);
VM_Version::_is_determine_features_test_running = false;
// determine which instructions are legal.
@@ -708,6 +706,8 @@ void VM_Version::determine_features() {
if (code[feature_cntr++]) features |= tcheck_m;
if (code[feature_cntr++]) features |= mfdscr_m;
if (code[feature_cntr++]) features |= vsx_m;
+ if (code[feature_cntr++]) features |= ldbrx_m;
+ if (code[feature_cntr++]) features |= stdbrx_m;
// Print the detection code.
if (PrintAssembly) {
diff --git a/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp b/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp
index 2d1f8db990e..f7d5ea73aca 100644
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,8 @@ protected:
tcheck,
mfdscr,
vsx,
+ ldbrx,
+ stdbrx,
num_features // last entry to count features
};
enum Feature_Flag_Set {
@@ -66,6 +68,8 @@ protected:
tcheck_m = (1 << tcheck ),
mfdscr_m = (1 << mfdscr ),
vsx_m = (1 << vsx ),
+ ldbrx_m = (1 << ldbrx ),
+ stdbrx_m = (1 << stdbrx ),
all_features_m = (unsigned long)-1
};
@@ -100,6 +104,9 @@ public:
static bool has_tcheck() { return (_features & tcheck_m) != 0; }
static bool has_mfdscr() { return (_features & mfdscr_m) != 0; }
static bool has_vsx() { return (_features & vsx_m) != 0; }
+ static bool has_ldbrx() { return (_features & ldbrx_m) != 0; }
+ static bool has_stdbrx() { return (_features & stdbrx_m) != 0; }
+ static bool has_mtfprd() { return has_vpmsumb(); } // alias for P8
// Assembler testing
static void allow_all();
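
The feature plumbing above follows a simple pattern: one enumerator per probed instruction, a derived single-bit mask, and a has_x() accessor. Here is a stand-alone C++ sketch of that pattern; the names and the printed string are illustrative, not the HotSpot declarations.

    #include <cstdint>
    #include <cstdio>

    // One enumerator per detectable CPU feature; num_features must stay last.
    enum Feature { fsqrt, isel, ldbrx, stdbrx, num_features };

    // Derived single-bit masks, mirroring the Feature_Flag_Set pattern.
    enum FeatureMask : uint64_t {
      fsqrt_m  = 1ull << fsqrt,
      isel_m   = 1ull << isel,
      ldbrx_m  = 1ull << ldbrx,
      stdbrx_m = 1ull << stdbrx
    };

    static uint64_t g_features = 0;

    static bool has_ldbrx()  { return (g_features & ldbrx_m)  != 0; }
    static bool has_stdbrx() { return (g_features & stdbrx_m) != 0; }

    int main() {
      // Pretend the probe code found both byte-reversed load/store instructions.
      g_features |= ldbrx_m | stdbrx_m;
      printf("ppc64%s%s\n", has_ldbrx() ? " ldbrx" : "", has_stdbrx() ? " stdbrx" : "");
      return 0;
    }
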
diff --git a/hotspot/src/cpu/s390/vm/abstractInterpreter_s390.cpp b/hotspot/src/cpu/s390/vm/abstractInterpreter_s390.cpp
index 2140ac711a6..ab6139c1354 100644
--- a/hotspot/src/cpu/s390/vm/abstractInterpreter_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/abstractInterpreter_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -51,11 +51,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
- // No special entry points that preclude compilation.
- return true;
-}
-
// How much stack a method top interpreter activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
diff --git a/hotspot/src/cpu/s390/vm/assembler_s390.hpp b/hotspot/src/cpu/s390/vm/assembler_s390.hpp
index 0873d71b00e..8a7ae9d0756 100644
--- a/hotspot/src/cpu/s390/vm/assembler_s390.hpp
+++ b/hotspot/src/cpu/s390/vm/assembler_s390.hpp
@@ -28,8 +28,6 @@
#undef LUCY_DBG
-#define NearLabel Label
-
// Immediate is an abstraction to represent the various immediate
// operands which exist on z/Architecture. Neither this class nor
// instances hereof have an own state. It consists of methods only.
diff --git a/hotspot/src/cpu/s390/vm/bytes_s390.hpp b/hotspot/src/cpu/s390/vm/bytes_s390.hpp
index 6209624d335..f01ea07a2e2 100644
--- a/hotspot/src/cpu/s390/vm/bytes_s390.hpp
+++ b/hotspot/src/cpu/s390/vm/bytes_s390.hpp
@@ -42,12 +42,6 @@ class Bytes: AllStatic {
//
// In short, it makes no sense on z/Architecture to piecemeal get or put unaligned data.
- // Returns true if the byte ordering used by Java is different from
- // the native byte ordering of the underlying machine.
- // z/Arch is big endian, thus, a swap between native and Java ordering
- // is always a no-op.
- static inline bool is_Java_byte_ordering_different() { return false; }
-
// Only swap on little endian machines => suffix `_le'.
static inline u2 swap_u2_le(u2 x) { return x; }
static inline u4 swap_u4_le(u4 x) { return x; }
diff --git a/hotspot/src/cpu/s390/vm/c1_CodeStubs_s390.cpp b/hotspot/src/cpu/s390/vm/c1_CodeStubs_s390.cpp
index 4d429960798..ec6517894b9 100644
--- a/hotspot/src/cpu/s390/vm/c1_CodeStubs_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/c1_CodeStubs_s390.cpp
@@ -31,6 +31,7 @@
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#if INCLUDE_ALL_GCS
@@ -284,7 +285,7 @@ void PatchingStub::align_patch_site(MacroAssembler* masm) {
masm->block_comment(bc);
#endif
- masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
+ masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}
void PatchingStub::emit_code(LIR_Assembler* ce) {
diff --git a/hotspot/src/cpu/s390/vm/c1_LIRAssembler_s390.cpp b/hotspot/src/cpu/s390/vm/c1_LIRAssembler_s390.cpp
index 84fb4205099..21634f930e7 100644
--- a/hotspot/src/cpu/s390/vm/c1_LIRAssembler_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/c1_LIRAssembler_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1139,14 +1139,7 @@ void LIR_Assembler::return_op(LIR_Opr result) {
__ load_const_optimized(Z_R1_scratch, pp);
// Pop the frame before the safepoint code.
- int retPC_offset = initial_frame_size_in_bytes() + _z_abi16(return_pc);
- if (Displacement::is_validDisp(retPC_offset)) {
- __ z_lg(Z_R14, retPC_offset, Z_SP);
- __ add2reg(Z_SP, initial_frame_size_in_bytes());
- } else {
- __ add2reg(Z_SP, initial_frame_size_in_bytes());
- __ restore_return_pc();
- }
+ __ pop_frame_restore_retPC(initial_frame_size_in_bytes());
if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
__ reserved_stack_check(Z_R14);
@@ -3048,9 +3041,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
assert_different_registers(val, crc, res);
__ load_const_optimized(res, StubRoutines::crc_table_addr());
- __ not_(crc, noreg, false); // ~crc
- __ update_byte_crc32(crc, val, res);
- __ not_(res, crc, false); // ~crc
+ __ kernel_crc32_singleByteReg(crc, val, res, true);
+ __ z_lgfr(res, crc);
}
#undef __
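
The rewritten emit_updatecrc32 above folds the former ~crc / update / ~crc sequence into kernel_crc32_singleByteReg with an invertCRC flag. A short C++ sketch of that contract follows; the table-driven step shown is the standard reflected CRC-32 byte update, included only for illustration, not the s390 emitter.

    #include <cstdint>

    // Single-byte CRC-32 update. When invert_crc is true the incoming value is the
    // externally visible CRC, so it is complemented on entry and on exit; when
    // false the caller already keeps the working (inverted) form.
    uint32_t crc32_single_byte(uint32_t crc, uint8_t val,
                               const uint32_t table[256], bool invert_crc) {
      if (invert_crc) crc = ~crc;                     // ~crc on entry
      crc = table[(crc ^ val) & 0xFFu] ^ (crc >> 8);  // one table-driven step
      if (invert_crc) crc = ~crc;                     // ~crc on exit
      return crc;
    }
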
diff --git a/hotspot/src/cpu/s390/vm/c1_LIRGenerator_s390.cpp b/hotspot/src/cpu/s390/vm/c1_LIRGenerator_s390.cpp
index 0ec97da6230..8ebecadd85c 100644
--- a/hotspot/src/cpu/s390/vm/c1_LIRGenerator_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/c1_LIRGenerator_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,20 +61,6 @@ void LIRItem::load_nonconstant(int bits) {
}
}
-inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
- LIR_Opr r = li.value()->operand();
- if (r->is_constant()) {
- // Constants get loaded with sign extend on this platform.
- ll->move(li.result(), dst);
- } else {
- if (!r->is_register()) {
- li.load_item_force(dst);
- }
- LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
- ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
- }
-}
-
//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------
@@ -1224,10 +1210,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
LIR_Opr arg2 = cc->at(1);
LIR_Opr arg3 = cc->at(2);
- // CCallingConventionRequiresIntsAsLongs
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
- load_int_as_long(gen()->lir(), len, arg3);
+ len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
@@ -1240,7 +1225,70 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
- Unimplemented();
+ assert(UseCRC32CIntrinsics, "or should not be here");
+ LIR_Opr result = rlock_result(x);
+
+ switch (x->id()) {
+ case vmIntrinsics::_updateBytesCRC32C:
+ case vmIntrinsics::_updateDirectByteBufferCRC32C: {
+ bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
+
+ LIRItem crc(x->argument_at(0), this);
+ LIRItem buf(x->argument_at(1), this);
+ LIRItem off(x->argument_at(2), this);
+ LIRItem end(x->argument_at(3), this);
+ buf.load_item();
+ off.load_nonconstant();
+ end.load_nonconstant();
+
+ // len = end - off
+ LIR_Opr len = end.result();
+ LIR_Opr tmpA = new_register(T_INT);
+ LIR_Opr tmpB = new_register(T_INT);
+ __ move(end.result(), tmpA);
+ __ move(off.result(), tmpB);
+ __ sub(tmpA, tmpB, tmpA);
+ len = tmpA;
+
+ LIR_Opr index = off.result();
+ int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
+ if (off.result()->is_constant()) {
+ index = LIR_OprFact::illegalOpr;
+ offset += off.result()->as_jint();
+ }
+ LIR_Opr base_op = buf.result();
+
+ if (index->is_valid()) {
+ LIR_Opr tmp = new_register(T_LONG);
+ __ convert(Bytecodes::_i2l, index, tmp);
+ index = tmp;
+ }
+
+ LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE);
+
+ BasicTypeList signature(3);
+ signature.append(T_INT);
+ signature.append(T_ADDRESS);
+ signature.append(T_INT);
+ CallingConvention* cc = frame_map()->c_calling_convention(&signature);
+ const LIR_Opr result_reg = result_register_for (x->type());
+
+ LIR_Opr arg1 = cc->at(0);
+ LIR_Opr arg2 = cc->at(1);
+ LIR_Opr arg3 = cc->at(2);
+
+ crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
+ __ leal(LIR_OprFact::address(a), arg2);
+ __ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.
+
+ __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
+ __ move(result_reg, result);
+ break;
+ }
+ default: {
+ ShouldNotReachHere();
+ }
+ }
}
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
@@ -1271,4 +1319,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
-
diff --git a/hotspot/src/cpu/s390/vm/c1_MacroAssembler_s390.cpp b/hotspot/src/cpu/s390/vm/c1_MacroAssembler_s390.cpp
index f7f8c29466a..63bc8c0acf4 100644
--- a/hotspot/src/cpu/s390/vm/c1_MacroAssembler_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/c1_MacroAssembler_s390.cpp
@@ -70,7 +70,7 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_by
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
generate_stack_overflow_check(bang_size_in_bytes);
save_return_pc();
- push_frame(frame_size_in_bytes); // TODO: Must we add z_abi_160?
+ push_frame(frame_size_in_bytes);
}
void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
diff --git a/hotspot/src/cpu/s390/vm/frame_s390.cpp b/hotspot/src/cpu/s390/vm/frame_s390.cpp
index bf5cc5fc207..9ba85eb0669 100644
--- a/hotspot/src/cpu/s390/vm/frame_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/frame_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -496,6 +496,8 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
}
}
+
+void frame::pd_ps() {}
#endif // !PRODUCT
intptr_t *frame::initial_deoptimization_info() {
diff --git a/hotspot/src/cpu/s390/vm/frame_s390.inline.hpp b/hotspot/src/cpu/s390/vm/frame_s390.inline.hpp
index 3231daf94d5..11917ac6d53 100644
--- a/hotspot/src/cpu/s390/vm/frame_s390.inline.hpp
+++ b/hotspot/src/cpu/s390/vm/frame_s390.inline.hpp
@@ -28,6 +28,7 @@
#include "code/codeCache.hpp"
#include "code/vmreg.inline.hpp"
+#include "utilities/align.hpp"
// Inline functions for z/Architecture frames:
@@ -241,7 +242,7 @@ inline void frame::interpreter_frame_set_monitor_end(BasicObjectLock* monitors)
inline int frame::interpreter_frame_monitor_size() {
// Number of stack slots for a monitor
- return round_to(BasicObjectLock::size() /* number of stack slots */,
+ return align_up(BasicObjectLock::size() /* number of stack slots */,
WordsPerLong /* Number of stack slots for a Java long. */);
}
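
Several hunks in this change replace round_to and align_size_up with align_up. For a power-of-two alignment they all reduce to the same arithmetic; a minimal C++ sketch (not the HotSpot utilities/align.hpp implementation):

    #include <cassert>
    #include <cstdint>

    // Round 'value' up to the next multiple of 'alignment' (power of two only).
    inline uint64_t align_up_pow2(uint64_t value, uint64_t alignment) {
      assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
      return (value + alignment - 1) & ~(alignment - 1);
    }

    // Example: align_up_pow2(3, 2) == 4 and align_up_pow2(4, 2) == 4, as used above
    // to size an interpreter monitor in multiples of the stack slots of a Java long.
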
diff --git a/hotspot/src/cpu/s390/vm/interp_masm_s390.cpp b/hotspot/src/cpu/s390/vm/interp_masm_s390.cpp
index 8cec2da25ac..bdbc7031872 100644
--- a/hotspot/src/cpu/s390/vm/interp_masm_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/interp_masm_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -107,24 +107,15 @@ void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
// TODO: Maybe implement +VerifyActivationFrameSize here.
// verify_thread(); // Too slow. We will just verify on method entry & exit.
verify_oop(Z_tos, state);
-#ifdef FAST_DISPATCH
- if (table == Interpreter::dispatch_table(state)) {
- // Use IdispatchTables.
- add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
- // Add offset to correct dispatch table.
- sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // Multiply by wordSize.
- ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // Get entry addr.
- } else
-#endif
- {
- // Dispatch table to use.
- load_absolute_address(Z_tmp_1, (address) table); // Z_tmp_1 = table;
- // 0 <= Z_bytecode < 256 => Use a 32 bit shift, because it is shorter than sllg.
- // Z_bytecode must have been loaded zero-extended for this approach to be correct.
- z_sll(Z_bytecode, LogBytesPerWord, Z_R0); // Multiply by wordSize.
- z_lg(Z_tmp_1, 0, Z_bytecode, Z_tmp_1); // Get entry addr.
- }
+ // Dispatch table to use.
+ load_absolute_address(Z_tmp_1, (address) table); // Z_tmp_1 = table;
+
+ // 0 <= Z_bytecode < 256 => Use a 32 bit shift, because it is shorter than sllg.
+ // Z_bytecode must have been loaded zero-extended for this approach to be correct.
+ z_sll(Z_bytecode, LogBytesPerWord, Z_R0); // Multiply by wordSize.
+ z_lg(Z_tmp_1, 0, Z_bytecode, Z_tmp_1); // Get entry addr.
+
z_br(Z_tmp_1);
}
@@ -371,7 +362,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
Register tmp = index; // reuse
z_sllg(index, index, LogBytesPerHeapOop); // Offset into resolved references array.
// Load pointer for resolved_references[] objArray.
- z_lg(result, ConstantPool::resolved_references_offset_in_bytes(), result);
+ z_lg(result, ConstantPool::cache_offset_in_bytes(), result);
+ z_lg(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
z_lg(result, 0, result); // Load resolved references array itself.
#ifdef ASSERT
@@ -386,6 +378,16 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}
+// load cpool->resolved_klass_at(index)
+void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register cpool, Register offset, Register iklass) {
+ // int value = *(Rcpool->int_at_addr(which));
+ // int resolved_klass_index = extract_low_short_from_int(value);
+ z_llgh(offset, Address(cpool, offset, sizeof(ConstantPool) + 2)); // offset = resolved_klass_index (s390 is big-endian)
+ z_sllg(offset, offset, LogBytesPerWord); // Convert 'index' to 'offset'
+ z_lg(iklass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // iklass = cpool->_resolved_klasses
+  z_lg(iklass, Address(iklass, offset, Array<Klass*>::base_offset_in_bytes()));
+}
+
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,
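
load_resolved_klass_at_offset above performs two dependent loads: fetch the 16-bit resolved_klass_index out of the 32-bit constant-pool slot (directly from slot address + 2 on big-endian s390), then index the _resolved_klasses side table. A rough C++ sketch of the same logic, with plain memory accesses standing in for the z/Architecture loads; names are illustrative, not HotSpot's.

    #include <cstdint>

    struct Klass;  // opaque placeholder

    // Given the 32-bit constant-pool slot value and the side table of resolved
    // klasses, return the klass the slot refers to.
    Klass* resolved_klass_for_slot(int32_t slot_value, Klass* const* resolved_klasses) {
      int resolved_klass_index = slot_value & 0xFFFF;   // extract_low_short_from_int(value)
      // On big-endian s390 this halfword sits at slot address + 2, which is why the
      // emitter can load it with a single z_llgh at offset sizeof(ConstantPool) + 2.
      return resolved_klasses[resolved_klass_index];
    }
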
diff --git a/hotspot/src/cpu/s390/vm/interp_masm_s390.hpp b/hotspot/src/cpu/s390/vm/interp_masm_s390.hpp
index 5f29a606481..bebbb4a7445 100644
--- a/hotspot/src/cpu/s390/vm/interp_masm_s390.hpp
+++ b/hotspot/src/cpu/s390/vm/interp_masm_s390.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,9 +48,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool allow_relocation,
bool check_exceptions);
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
-
// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);
@@ -58,6 +55,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {}
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
+
void jump_to_entry(address entry, Register Rscratch);
virtual void load_earlyret_value(TosState state);
@@ -115,6 +115,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void load_resolved_reference_at_index(Register result, Register index);
+ // load cpool->resolved_klass_at(index)
+ void load_resolved_klass_at_offset(Register cpool, Register offset, Register iklass);
// Pop topmost element from stack. It just disappears. Useful if
// consumed previously by access via stackTop().
diff --git a/hotspot/src/cpu/s390/vm/interpreterRT_s390.hpp b/hotspot/src/cpu/s390/vm/interpreterRT_s390.hpp
index 67e3b914c34..9a938b2c537 100644
--- a/hotspot/src/cpu/s390/vm/interpreterRT_s390.hpp
+++ b/hotspot/src/cpu/s390/vm/interpreterRT_s390.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -51,7 +51,7 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
public:
// creation
- SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
_fp_arg_nr = 0;
}
diff --git a/hotspot/src/cpu/s390/vm/macroAssembler_s390.cpp b/hotspot/src/cpu/s390/vm/macroAssembler_s390.cpp
index d5776117436..c14d596223d 100644
--- a/hotspot/src/cpu/s390/vm/macroAssembler_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/macroAssembler_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1616,6 +1616,8 @@ void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& b
if (branch_target.is_bound()) {
address branch_addr = target(branch_target);
branch_optimized(cond, branch_addr);
+ } else if (branch_target.is_near()) {
+ z_brc(cond, branch_target); // Caller assures that the target will be in range for z_brc.
} else {
z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
}
@@ -1674,7 +1676,8 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
bool has_sign) {
address branch_origin = pc();
bool x2_imm8 = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
- bool is_RelAddr16 = (branch_target.is_bound() &&
+ bool is_RelAddr16 = branch_target.is_near() ||
+ (branch_target.is_bound() &&
RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
unsigned int casenum = (len64?2:0)+(has_sign?0:1);
@@ -1744,13 +1747,21 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
Label& branch_target,
bool len64,
bool has_sign) {
- unsigned int casenum = (len64?2:0)+(has_sign?0:1);
+ unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
if (branch_target.is_bound()) {
address branch_addr = target(branch_target);
compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
} else {
- {
+ if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
+ switch (casenum) {
+ case 0: z_crj( r1, r2, cond, branch_target); break;
+ case 1: z_clrj( r1, r2, cond, branch_target); break;
+ case 2: z_cgrj( r1, r2, cond, branch_target); break;
+ case 3: z_clgrj(r1, r2, cond, branch_target); break;
+ default: ShouldNotReachHere(); break;
+ }
+ } else {
switch (casenum) {
case 0: z_cr( r1, r2); break;
case 1: z_clr(r1, r2); break;
@@ -2011,17 +2022,41 @@ void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp
z_stg(fp, _z_abi(callers_sp), Z_SP);
}
-// Resize_frame with SP(new) = [addr].
-void MacroAssembler::resize_frame_absolute(Register addr, Register fp, bool load_fp) {
- assert_different_registers(addr, fp, Z_SP);
- if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
+// Resize_frame with SP(new) = [newSP] + offset.
+// This emitter is useful if we already have calculated a pointer
+// into the to-be-allocated stack space, e.g. with special alignment properties,
+// but need some additional space, e.g. for spilling.
+// newSP is the pre-calculated pointer. It must not be modified.
+// fp holds, or is filled with, the frame pointer.
+// offset is the additional increment which is added to newSP to form the new SP.
+// Note: specify a negative value to reserve more space!
+// load_fp == true only indicates that fp is not pre-filled with the frame pointer.
+// It does not guarantee that fp contains the frame pointer at the end.
+void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
+ assert_different_registers(newSP, fp, Z_SP);
- if (addr != Z_R0) {
- // Minimize stalls by not using Z_SP immediately after update.
- z_stg(fp, _z_abi(callers_sp), addr);
- z_lgr(Z_SP, addr);
+ if (load_fp) {
+ z_lg(fp, _z_abi(callers_sp), Z_SP);
+ }
+
+ add2reg(Z_SP, offset, newSP);
+ z_stg(fp, _z_abi(callers_sp), Z_SP);
+}
+
+// Resize_frame with SP(new) = [newSP].
+// load_fp == true only indicates that fp is not pre-filled with the frame pointer.
+// It does not guarantee that fp contains the frame pointer at the end.
+void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
+ assert_different_registers(newSP, fp, Z_SP);
+
+ if (load_fp) {
+ z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
+ }
+
+ z_lgr(Z_SP, newSP);
+ if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
+ z_stg(fp, _z_abi(callers_sp), newSP);
} else {
- z_lgr(Z_SP, addr);
z_stg(fp, _z_abi(callers_sp), Z_SP);
}
}
@@ -2029,17 +2064,12 @@ void MacroAssembler::resize_frame_absolute(Register addr, Register fp, bool load
// Resize_frame with SP(new) = SP(old) + offset.
void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
assert_different_registers(fp, Z_SP);
- if (load_fp) z_lg(fp, _z_abi(callers_sp), Z_SP);
- if (Displacement::is_validDisp((int)_z_abi(callers_sp) + offset.constant_or_zero())) {
- // Minimize stalls by first using, then updating Z_SP.
- // Do that only if we have a small positive offset or if ExtImm are available.
- z_stg(fp, Address(Z_SP, offset, _z_abi(callers_sp)));
- add64(Z_SP, offset);
- } else {
- add64(Z_SP, offset);
- z_stg(fp, _z_abi(callers_sp), Z_SP);
+ if (load_fp) {
+ z_lg(fp, _z_abi(callers_sp), Z_SP);
}
+ add64(Z_SP, offset);
+ z_stg(fp, _z_abi(callers_sp), Z_SP);
}
void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
@@ -2052,32 +2082,32 @@ void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, b
#endif
if (copy_sp) { z_lgr(old_sp, Z_SP); }
if (bytes_with_inverted_sign) {
- z_stg(old_sp, 0, bytes, Z_SP);
- add2reg_with_index(Z_SP, 0, bytes, Z_SP);
+ z_agr(Z_SP, bytes);
} else {
z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.
- z_stg(old_sp, 0, Z_SP);
}
+ z_stg(old_sp, _z_abi(callers_sp), Z_SP);
}
unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
long offset = Assembler::align(bytes, frame::alignment_in_bytes);
+ assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
+ assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);
- if (Displacement::is_validDisp(-offset)) {
- // Minimize stalls by first using, then updating Z_SP.
- // Do that only if we have ExtImm available.
- z_stg(Z_SP, -offset, Z_SP);
- add2reg(Z_SP, -offset);
- } else {
- if (scratch != Z_R0 && scratch != Z_R1) {
- z_stg(Z_SP, -offset, Z_SP);
- add2reg(Z_SP, -offset);
- } else { // scratch == Z_R0 || scratch == Z_R1
- z_lgr(scratch, Z_SP);
- add2reg(Z_SP, -offset);
- z_stg(scratch, 0, Z_SP);
- }
+ // We must not write outside the current stack bounds (given by Z_SP).
+ // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
+ // We rely on Z_R0 by default to be available as scratch.
+ z_lgr(scratch, Z_SP);
+ add2reg(Z_SP, -offset);
+ z_stg(scratch, _z_abi(callers_sp), Z_SP);
+#ifdef ASSERT
+ // Just make sure nobody uses the value in the default scratch register.
+ // When another register is used, the caller might rely on it containing the frame pointer.
+ if (scratch == Z_R0) {
+ z_iihf(scratch, 0xbaadbabe);
+ z_iilf(scratch, 0xdeadbeef);
}
+#endif
return offset;
}
@@ -2095,6 +2125,20 @@ void MacroAssembler::pop_frame() {
Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
}
+// Pop current C frame and restore return PC register (Z_R14).
+void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
+ BLOCK_COMMENT("pop_frame_restore_retPC:");
+ int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;
+ // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
+ if (Displacement::is_validDisp(retPC_offset)) {
+ z_lg(Z_R14, retPC_offset, Z_SP);
+ add2reg(Z_SP, frame_size_in_bytes);
+ } else {
+ add2reg(Z_SP, frame_size_in_bytes);
+ restore_return_pc();
+ }
+}
+
void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
if (allow_relocation) {
call_c(entry_point);
@@ -2741,11 +2785,11 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
BLOCK_COMMENT("lookup_interface_method {");
// Load start of itable entries into itable_entry_addr.
- z_llgf(vtable_len, Address(recv_klass, InstanceKlass::vtable_length_offset()));
+ z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
// Loop over all itable entries until desired interfaceOop(Rinterface) found.
- const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset());
+ const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
add2reg_with_index(itable_entry_addr,
vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
@@ -3474,6 +3518,17 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp
// Purpose: record the previous value if it is not null.
// All non-tmps are preserved.
//------------------------------------------------------
+// Note: Rpre_val needs special attention.
+//   The flag pre_val_needed indicates that the caller of this emitter function
+// relies on Rpre_val containing the correct value, that is:
+// either the value it contained on entry to this code segment
+// or the value that was loaded into the register from (Robj+offset).
+//
+// Independent from this requirement, the contents of Rpre_val must survive
+// the push_frame() operation. push_frame() uses Z_R0_scratch by default
+// to temporarily remember the frame pointer.
+// If Rpre_val is assigned Z_R0_scratch by the caller, code must be emitted to
+//   save its value.
void MacroAssembler::g1_write_barrier_pre(Register Robj,
RegisterOrConstant offset,
Register Rpre_val, // Ideally, this is a non-volatile register.
@@ -3487,6 +3542,16 @@ void MacroAssembler::g1_write_barrier_pre(Register Robj,
const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
const int index_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index());
assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp must be Z_R0!!
+ assert_different_registers(Robj, Z_R0_scratch); // Used for addressing. Furthermore, push_frame destroys Z_R0!!
+ assert_different_registers(Rval, Z_R0_scratch); // push_frame destroys Z_R0!!
+
+#ifdef ASSERT
+ // make sure the register is not Z_R0. Used for addressing. Furthermore, would be destroyed by push_frame.
+ if (offset.is_register() && offset.as_register()->encoding() == 0) {
+ tty->print_cr("Roffset(g1_write_barrier_pre) = %%r%d", offset.as_register()->encoding());
+ assert(false, "bad register for offset");
+ }
+#endif
BLOCK_COMMENT("g1_write_barrier_pre {");
@@ -3500,7 +3565,10 @@ void MacroAssembler::g1_write_barrier_pre(Register Robj,
}
z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
- // Do we need to load the previous value into Rpre_val?
+ assert(Rpre_val != noreg, "must have a real register");
+
+
+ // If an object is given, we need to load the previous value into Rpre_val.
if (Robj != noreg) {
// Load the previous value...
Register ixReg = offset.is_register() ? offset.register_or_noreg() : Z_R0;
@@ -3510,9 +3578,9 @@ void MacroAssembler::g1_write_barrier_pre(Register Robj,
z_lg(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
}
}
- assert(Rpre_val != noreg, "must have a real register");
// Is the previous value NULL?
+ // If so, we don't need to record it and we're done.
// Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
// Register contents is preserved across runtime call if caller requests to do so.
z_ltgr(Rpre_val, Rpre_val);
@@ -3529,6 +3597,7 @@ void MacroAssembler::g1_write_barrier_pre(Register Robj,
// only if index > 0. Otherwise, we need runtime to handle.
// (The index field is typed as size_t.)
Register Rbuffer = Rtmp1, Rindex = Rtmp2;
+ assert_different_registers(Rbuffer, Rindex, Rpre_val);
z_lg(Rbuffer, buffer_offset, Z_thread);
@@ -3547,16 +3616,8 @@ void MacroAssembler::g1_write_barrier_pre(Register Robj,
bind(callRuntime);
- // Save Rpre_val (result) over runtime call.
- // Requires Rtmp1, Rtmp2, or Rpre_val to be non-volatile.
- Register Rpre_save = Rpre_val;
- if (pre_val_needed && Rpre_val->is_volatile()) {
- guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
- Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
- }
- lgr_if_needed(Rpre_save, Rpre_val);
-
- // Preserve inputs by spilling them into the top frame.
+ // Save some registers (inputs and result) over runtime call
+ // by spilling them into the top frame.
if (Robj != noreg && Robj->is_volatile()) {
z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
}
@@ -3568,11 +3629,20 @@ void MacroAssembler::g1_write_barrier_pre(Register Robj,
z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
}
+ // Save Rpre_val (result) over runtime call.
+ Register Rpre_save = Rpre_val;
+ if ((Rpre_val == Z_R0_scratch) || (pre_val_needed && Rpre_val->is_volatile())) {
+ guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
+ Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
+ }
+ lgr_if_needed(Rpre_save, Rpre_val);
+
// Push frame to protect top frame with return pc and spilled register values.
save_return_pc();
- push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
+ push_frame_abi160(0); // Will use Z_R0 as tmp.
- call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, Z_thread);
+ // Rpre_val may be destroyed by push_frame().
+ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_save, Z_thread);
pop_frame();
restore_return_pc();
@@ -3588,9 +3658,9 @@ void MacroAssembler::g1_write_barrier_pre(Register Robj,
if (Rval != noreg && Rval->is_volatile()) {
z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
}
-
- // Restore Rpre_val (result) after runtime call.
- lgr_if_needed(Rpre_val, Rpre_save);
+ if (pre_val_needed && Rpre_val->is_volatile()) {
+ lgr_if_needed(Rpre_val, Rpre_save);
+ }
bind(filtered);
BLOCK_COMMENT("} g1_write_barrier_pre");
@@ -3643,7 +3713,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
// calculate address of card
load_const_optimized(Rbase, (address)bs->byte_map_base); // Card table base.
z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table.
- add2reg_with_index(Rcard_addr, 0, Rcard_addr, Rbase); // Explicit calculation needed for cli.
+ z_algr(Rcard_addr, Rbase); // Explicit calculation needed for cli.
Rbase = noreg; // end of lifetime
// Filter young.
@@ -3687,6 +3757,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
// TODO: do we need a frame? Introduced to be on the safe side.
bool needs_frame = true;
+ lgr_if_needed(Rcard_addr, Rcard_addr_x); // copy back asap. push_frame will destroy Z_R0_scratch!
// VM call need frame to access(write) O register.
if (needs_frame) {
@@ -3695,7 +3766,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
}
// Save the live input values.
- call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr_x, Z_thread);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, Z_thread);
if (needs_frame) {
pop_frame();
@@ -4051,7 +4122,12 @@ void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck)
void MacroAssembler::store_klass_gap(Register s, Register d) {
if (UseCompressedClassPointers) {
assert(s != d, "not enough registers");
- z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
+ // Support s = noreg.
+ if (s != noreg) {
+ z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
+ } else {
+ z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
+ }
}
}
@@ -5927,8 +6003,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
* @param len register containing number of bytes
* @param table register pointing to CRC table
*/
-void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
- Register data, bool invertCRC) {
+void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
assert_different_registers(crc, buf, len, table, data);
Label L_mainLoop, L_done;
@@ -5938,20 +6013,12 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
z_ltr(len, len);
z_brnh(L_done);
- if (invertCRC) {
- not_(crc, noreg, false); // ~c
- }
-
bind(L_mainLoop);
z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
add2reg(buf, mainLoop_stepping); // Advance buffer position.
update_byte_crc32(crc, data, table);
z_brct(len, L_mainLoop); // Iterate.
- if (invertCRC) {
- not_(crc, noreg, false); // ~c
- }
-
bind(L_done);
}
@@ -5968,6 +6035,7 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
// c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
// crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
// #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
+ // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
@@ -5986,17 +6054,12 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true); // ((c >> 16) & 0xff) << 2
rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true); // ((c >> 24) & 0xff) << 2
- // Load pre-calculated table values.
- // Use columns 4..7 for big-endian.
- z_ly(t3, Address(table, t3, (intptr_t)ix0));
+ // XOR indexed table values to calculate updated crc.
z_ly(t2, Address(table, t2, (intptr_t)ix1));
- z_ly(t1, Address(table, t1, (intptr_t)ix2));
z_ly(t0, Address(table, t0, (intptr_t)ix3));
-
- // Calculate new crc from table values.
- z_xr(t2, t3);
- z_xr(t0, t1);
- z_xr(t0, t2); // Now crc contains the final checksum value.
+ z_xy(t2, Address(table, t3, (intptr_t)ix0));
+ z_xy(t0, Address(table, t1, (intptr_t)ix2));
+ z_xr(t0, t2); // Now t0 contains the updated CRC value.
lgr_if_needed(crc, t0);
}
@@ -6009,7 +6072,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
* uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!
*/
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
- Register t0, Register t1, Register t2, Register t3) {
+ Register t0, Register t1, Register t2, Register t3,
+ bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@@ -6024,7 +6088,9 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).
- not_(crc, noreg, false); // 1s complement of crc
+ if (invertCRC) {
+ not_(crc, noreg, false); // 1s complement of crc
+ }
#if 0
{
@@ -6039,7 +6105,7 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc
z_sgfr(len, ctr); // Remaining len after alignment.
- update_byteLoop_crc32(crc, buf, ctr, table, data, false);
+ update_byteLoop_crc32(crc, buf, ctr, table, data);
}
#endif
@@ -6047,21 +6113,23 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
z_srag(ctr, len, log_stepping);
z_brnh(L_tail);
- z_lrvr(crc, crc); // Revert byte order because we are dealing with big-endian data.
+ z_lrvr(crc, crc); // Revert byte order because we are dealing with big-endian data.
rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
BIND(L_mainLoop);
update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
- z_brct(ctr, L_mainLoop); // Iterate.
+ z_brct(ctr, L_mainLoop); // Iterate.
- z_lrvr(crc, crc); // Revert byte order back to original.
+ z_lrvr(crc, crc); // Revert byte order back to original.
// Process last few (<8) bytes of buffer.
BIND(L_tail);
- update_byteLoop_crc32(crc, buf, len, table, data, false);
+ update_byteLoop_crc32(crc, buf, len, table, data);
- not_(crc, noreg, false); // 1s complement of crc
+ if (invertCRC) {
+ not_(crc, noreg, false); // 1s complement of crc
+ }
}
/**
@@ -6073,7 +6141,8 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
* uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!
*/
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
- Register t0, Register t1, Register t2, Register t3) {
+ Register t0, Register t1, Register t2, Register t3,
+ bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@@ -6087,7 +6156,9 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).
- not_(crc, noreg, false); // 1s complement of crc
+ if (invertCRC) {
+ not_(crc, noreg, false); // 1s complement of crc
+ }
// Check for short (<4 bytes) buffer.
z_srag(ctr, len, log_stepping);
@@ -6099,13 +6170,16 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
BIND(L_mainLoop);
update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
z_brct(ctr, L_mainLoop); // Iterate.
+
z_lrvr(crc, crc); // Revert byte order back to original.
// Process last few (<8) bytes of buffer.
BIND(L_tail);
- update_byteLoop_crc32(crc, buf, len, table, data, false);
+ update_byteLoop_crc32(crc, buf, len, table, data);
- not_(crc, noreg, false); // 1s complement of crc
+ if (invertCRC) {
+ not_(crc, noreg, false); // 1s complement of crc
+ }
}
/**
@@ -6115,22 +6189,51 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
* @param table register pointing to CRC table
*/
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
- Register t0, Register t1, Register t2, Register t3) {
+ Register t0, Register t1, Register t2, Register t3,
+ bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Register data = t0;
- update_byteLoop_crc32(crc, buf, len, table, data, true);
+ if (invertCRC) {
+ not_(crc, noreg, false); // 1s complement of crc
+ }
+
+ update_byteLoop_crc32(crc, buf, len, table, data);
+
+ if (invertCRC) {
+ not_(crc, noreg, false); // 1s complement of crc
+ }
}
-void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
+void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
+ bool invertCRC) {
assert_different_registers(crc, buf, len, table, tmp);
- not_(crc, noreg, false); // ~c
+ if (invertCRC) {
+ not_(crc, noreg, false); // 1s complement of crc
+ }
z_llgc(tmp, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
update_byte_crc32(crc, tmp, table);
- not_(crc, noreg, false); // ~c
+ if (invertCRC) {
+ not_(crc, noreg, false); // 1s complement of crc
+ }
+}
+
+void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
+ bool invertCRC) {
+ assert_different_registers(crc, val, table);
+
+ if (invertCRC) {
+ not_(crc, noreg, false); // 1s complement of crc
+ }
+
+ update_byte_crc32(crc, val, table);
+
+ if (invertCRC) {
+ not_(crc, noreg, false); // 1s complement of crc
+ }
}
//
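The kernel_crc32_* changes above move the optional bit-flip out of update_byteLoop_crc32 and into the kernel entry points, controlled by the new invertCRC parameter. A minimal C++ sketch of that structure (hypothetical names; a plain table-driven byte loop stands in for the emitted z/Architecture code):

    #include <cstddef>
    #include <cstdint>

    // Byte-at-a-time update, as emitted by update_byteLoop_crc32.
    // It no longer deals with inversion at all.
    static uint32_t crc_update_bytes(uint32_t crc, const uint8_t* buf, size_t len,
                                     const uint32_t table[256]) {
      while (len--) {
        crc = table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);
      }
      return crc;
    }

    // Shape of kernel_crc32_1byte after the change: the caller decides, via
    // invertCRC, whether the value is bit-flipped before and after the update.
    static uint32_t kernel_crc_1byte(uint32_t crc, const uint8_t* buf, size_t len,
                                     const uint32_t table[256], bool invertCRC) {
      if (invertCRC) crc = ~crc;   // 1s complement of crc (CRC32: external -> internal)
      crc = crc_update_bytes(crc, buf, len, table);
      if (invertCRC) crc = ~crc;   // 1s complement of crc (internal -> external)
      return crc;
    }

The CRC32 stubs pass invertCRC == true because the value they keep between calls is the externally visible one; the CRC32C stubs added further down pass false, since that value is kept in the internal, pre-inverted form.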
@@ -6583,11 +6686,12 @@ void MacroAssembler::verify_oop(Register oop, const char* msg) {
BLOCK_COMMENT("verify_oop {");
Register tmp = Z_R0;
- unsigned int nbytes_save = 6 *8;
+ unsigned int nbytes_save = 5*BytesPerWord;
address entry = StubRoutines::verify_oop_subroutine_entry_address();
+
save_return_pc();
push_frame_abi160(nbytes_save);
- z_stmg(Z_R0, Z_R5, 160, Z_SP);
+ z_stmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
z_lgr(Z_ARG2, oop);
load_const(Z_ARG1, (address) msg);
@@ -6595,10 +6699,10 @@ void MacroAssembler::verify_oop(Register oop, const char* msg) {
z_lg(Z_R1, 0, Z_R1);
call_c(Z_R1);
- z_lmg(Z_R0, Z_R5, 160, Z_SP);
+ z_lmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
pop_frame();
-
restore_return_pc();
+
BLOCK_COMMENT("} verify_oop ");
}
@@ -6620,8 +6724,8 @@ void MacroAssembler::stop(int type, const char* msg, int id) {
// Setup arguments.
load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
load_const(Z_ARG2, (void*) msg);
- get_PC(Z_R14); // Following code pushes a frame without entering a new function. Use current pc as return address.
- save_return_pc(); // Saves return pc Z_R14.
+ get_PC(Z_R14); // Following code pushes a frame without entering a new function. Use current pc as return address.
+ save_return_pc(); // Saves return pc Z_R14.
push_frame_abi160(0);
call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
// The plain disassembler does not recognize illtrap. It instead displays
diff --git a/hotspot/src/cpu/s390/vm/macroAssembler_s390.hpp b/hotspot/src/cpu/s390/vm/macroAssembler_s390.hpp
index 2b4002a3bf4..8adc7544af5 100644
--- a/hotspot/src/cpu/s390/vm/macroAssembler_s390.hpp
+++ b/hotspot/src/cpu/s390/vm/macroAssembler_s390.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -440,9 +440,21 @@ class MacroAssembler: public Assembler {
// Get current PC + offset. Offset given in bytes, must be even!
address get_PC(Register result, int64_t offset);
+ // Accessing, and in particular modifying, a stack location is only safe if
+ // the stack pointer (Z_SP) is set such that the accessed stack location is
+ // in the reserved range.
+ //
+ // From a performance point of view, it is desirable not to change the SP
+ // first and then immediately use it to access the freshly reserved space.
+ // That opens a small gap, though. If, just after storing some value (the
+ // frame pointer) into the to-be-reserved space, an interrupt is caught,
+ // the handler might use the space beyond Z_SP for its own purpose.
+ // If that happens, the stored value might get altered.
+
// Resize current frame either relatively wrt to current SP or absolute.
void resize_frame_sub(Register offset, Register fp, bool load_fp=true);
- void resize_frame_absolute(Register addr, Register fp, bool load_fp=true);
+ void resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp);
+ void resize_frame_absolute(Register addr, Register fp, bool load_fp);
void resize_frame(RegisterOrConstant offset, Register fp, bool load_fp=true);
// Push a frame of size bytes, if copy_sp is false, old_sp must already
@@ -461,6 +473,8 @@ class MacroAssembler: public Assembler {
// Pop current C frame.
void pop_frame();
+ // Pop current C frame and restore return PC register (Z_R14).
+ void pop_frame_restore_retPC(int frame_size_in_bytes);
//
// Calls
@@ -1011,22 +1025,35 @@ class MacroAssembler: public Assembler {
int before = 0, int after = 0) PRODUCT_RETURN;
// Emitters for CRC32 calculation.
+ // A note on invertCRC:
+ // Unfortunately, the internal representation of the crc value differs between CRC32 and CRC32C.
+ // CRC32 holds its current crc value in the externally visible representation.
+ // CRC32C holds its current crc value in internal format, ready for updating.
+ // Thus, the crc value must be bit-flipped before it is updated in the CRC32 case.
+ // In the CRC32C case, it must be bit-flipped only when it is handed to the outside world (getValue()).
+ // The bool invertCRC parameter indicates whether bit-flipping is required before and after an update.
private:
void fold_byte_crc32(Register crc, Register table, Register val, Register tmp);
void fold_8bit_crc32(Register crc, Register table, Register tmp);
+ void update_byte_crc32( Register crc, Register val, Register table);
void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
- Register data, bool invertCRC);
+ Register data);
void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
Register t0, Register t1, Register t2, Register t3);
public:
- void update_byte_crc32( Register crc, Register val, Register table);
- void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
+ void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
+ bool invertCRC);
+ void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
+ bool invertCRC);
void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
- Register t0, Register t1, Register t2, Register t3);
+ Register t0, Register t1, Register t2, Register t3,
+ bool invertCRC);
void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
- Register t0, Register t1, Register t2, Register t3);
+ Register t0, Register t1, Register t2, Register t3,
+ bool invertCRC);
void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
- Register t0, Register t1, Register t2, Register t3);
+ Register t0, Register t1, Register t2, Register t3,
+ bool invertCRC);
// Emitters for BigInteger.multiplyToLen intrinsic
// note: length of result array (zlen) is passed on the stack
diff --git a/hotspot/src/cpu/s390/vm/metaspaceShared_s390.cpp b/hotspot/src/cpu/s390/vm/metaspaceShared_s390.cpp
deleted file mode 100644
index 8e7feacf527..00000000000
--- a/hotspot/src/cpu/s390/vm/metaspaceShared_s390.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/codeBuffer.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "memory/metaspaceShared.hpp"
-
-// Generate the self-patching vtable method:
-//
-// This method will be called (as any other Klass virtual method) with
-// the Klass itself as the first argument. Example:
-//
-// oop obj;
-// int size = obj->klass()->klass_part()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();.
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-
-//=====================================================================
-
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no releationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-
-#undef __
-#define __ masm->
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
- void** vtable,
- char** md_top,
- char* md_end,
- char** mc_top,
- char* mc_end) {
-
- intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
- *(intptr_t *)(*md_top) = vtable_bytes;
- *md_top += sizeof(intptr_t);
- void** dummy_vtable = (void**)*md_top;
- *vtable = dummy_vtable;
- *md_top += vtable_bytes;
-
- // Get ready to generate dummy methods.
-
- CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
- MacroAssembler* masm = new MacroAssembler(&cb);
-
- __ unimplemented();
-}
diff --git a/hotspot/src/cpu/s390/vm/methodHandles_s390.cpp b/hotspot/src/cpu/s390/vm/methodHandles_s390.cpp
index e2f0d32cc8a..60577fd04a3 100644
--- a/hotspot/src/cpu/s390/vm/methodHandles_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/methodHandles_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "prims/methodHandles.hpp"
#ifdef PRODUCT
@@ -73,7 +74,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
- KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+ Klass* klass = SystemDictionary::well_known_klass(klass_id);
assert(temp_reg != Z_R0 && // Is used as base register!
temp_reg != noreg && temp2_reg != noreg, "need valid registers!");
@@ -200,10 +201,13 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
Address(method_temp,
NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
__ verify_oop(method_temp);
- // The following assumes that a method is normally compressed in the vmtarget field.
+ __ load_heap_oop(method_temp,
+ Address(method_temp,
+ NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
+ __ verify_oop(method_temp);
__ z_lg(method_temp,
Address(method_temp,
- NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
+ NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
if (VerifyMethodHandles && !for_compiler_entry) {
// Make sure recv is already on stack.
@@ -371,7 +375,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
- Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
+ Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
+ Address vmtarget_method(Z_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
@@ -424,7 +429,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
- __ z_lg(Z_method, member_vmtarget);
+ __ load_heap_oop(Z_method, member_vmtarget);
+ __ z_lg(Z_method, vmtarget_method);
method_is_live = true;
break;
@@ -432,7 +438,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
- __ z_lg(Z_method, member_vmtarget);
+ __ load_heap_oop(Z_method, member_vmtarget);
+ __ z_lg(Z_method, vmtarget_method);
method_is_live = true;
break;
@@ -602,14 +609,14 @@ void trace_method_handle_stub(const char* adaptername,
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) { return; }
+ // If arg registers are contiguous, we can use STMG/LMG.
+ assert((Z_ARG5->encoding() - Z_ARG1->encoding() + 1) == RegisterImpl::number_of_arg_registers, "Oops");
+
BLOCK_COMMENT("trace_method_handle {");
// Save argument registers (they are used in raise exception stub).
- __ z_stg(Z_ARG1, Address(Z_SP, 16));
- __ z_stg(Z_ARG2, Address(Z_SP, 24));
- __ z_stg(Z_ARG3, Address(Z_SP, 32));
- __ z_stg(Z_ARG4, Address(Z_SP, 40));
- __ z_stg(Z_ARG5, Address(Z_SP, 48));
+ // Argument registers have contiguous register numbers -> we can use stmg/lmg.
+ __ z_stmg(Z_ARG1, Z_ARG5, 16, Z_SP);
// Setup arguments.
__ z_lgr(Z_ARG2, Z_ARG4); // mh, see generate_method_handle_interpreter_entry()
@@ -622,11 +629,9 @@ void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adapt
__ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub));
__ pop_frame();
__ restore_return_pc(); // restores to Z_R14
- __ z_lg(Z_ARG1, Address(Z_SP, 16));
- __ z_lg(Z_ARG2, Address(Z_SP, 24));
- __ z_lg(Z_ARG3, Address(Z_SP, 32));
- __ z_lg(Z_ARG4, Address(Z_SP, 40));
- __ z_lg(Z_ARG5, Address(Z_SP, 45));
+
+ // Restore argument registers
+ __ z_lmg(Z_ARG1, Z_ARG5, 16, Z_SP);
__ zap_from_to(Z_SP, Z_SP, Z_R0, Z_R1, 50, -1);
__ zap_from_to(Z_SP, Z_SP, Z_R0, Z_R1, -1, 5);
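The MethodHandles changes above replace the single vmtarget load from the MemberName with an oop load of MemberName.method followed by a load of ResolvedMethodName.vmtarget. A rough model of the extra indirection, using placeholder structs rather than the real oop layouts:

    // Placeholder types only; the real objects are Java heap oops and Metaspace data.
    struct Method;                                               // what Z_method ends up pointing to
    struct ResolvedMethodName { Method* vmtarget; };
    struct MemberNameOld      { Method* vmtarget; };             // before: one plain load
    struct MemberNameNew      { ResolvedMethodName* method; };   // after: load_heap_oop + z_lg

    Method* resolve_old(MemberNameOld* mn) { return mn->vmtarget; }
    Method* resolve_new(MemberNameNew* mn) { return mn->method->vmtarget; }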
diff --git a/hotspot/src/cpu/s390/vm/s390.ad b/hotspot/src/cpu/s390/vm/s390.ad
index 7daf348c0af..b30437e0faf 100644
--- a/hotspot/src/cpu/s390/vm/s390.ad
+++ b/hotspot/src/cpu/s390/vm/s390.ad
@@ -1,6 +1,6 @@
//
-// Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2016 SAP SE. All rights reserved.
+// Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2017, SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -910,16 +910,8 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
bool need_polling = do_polling() && C->is_method_compilation();
// Pop frame, restore return_pc, and all stuff needed by interpreter.
- // Pop frame by add instead of load (a penny saved is a penny got :-).
int frame_size_in_bytes = Assembler::align((C->frame_slots() << LogBytesPerInt), frame::alignment_in_bytes);
- int retPC_offset = frame_size_in_bytes + _z_abi16(return_pc);
- if (Displacement::is_validDisp(retPC_offset)) {
- __ z_lg(Z_R14, retPC_offset, Z_SP);
- __ add2reg(Z_SP, frame_size_in_bytes);
- } else {
- __ add2reg(Z_SP, frame_size_in_bytes);
- __ restore_return_pc();
- }
+ __ pop_frame_restore_retPC(frame_size_in_bytes);
if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
__ reserved_stack_check(Z_R14);
@@ -1562,7 +1554,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg.
-const int Matcher::vector_ideal_reg(int size) {
+const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8 && size == 8, "");
return Op_RegL;
}
@@ -1577,7 +1569,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
return max_vector_size(bt); // Same as max.
}
-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported");
return Node::NotAMachineReg;
}
@@ -6768,6 +6760,7 @@ instruct sllI_reg_imm(iRegI dst, iRegI src, immI nbits) %{
format %{ "SLL $dst,$src,$nbits\t# use RISC-like SLLG also for int" %}
ins_encode %{
int Nbit = $nbits$$constant;
+ assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
__ z_sllg($dst$$Register, $src$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
%}
ins_pipe(pipe_class_dummy);
@@ -6841,6 +6834,7 @@ instruct sraI_reg_imm(iRegI dst, immI src, flagsReg cr) %{
format %{ "SRA $dst,$src" %}
ins_encode %{
int Nbit = $src$$constant;
+ assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
__ z_sra($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
%}
ins_pipe(pipe_class_dummy);
@@ -6893,6 +6887,7 @@ instruct srlI_reg_imm(iRegI dst, immI src) %{
format %{ "SRL $dst,$src" %}
ins_encode %{
int Nbit = $src$$constant;
+ assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
__ z_srl($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
%}
ins_pipe(pipe_class_dummy);
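The asserts added to the shift instructs document that the ideal graph has already reduced the shift count modulo 32, so masking with BitsPerJavaInteger - 1 cannot change it. A small sketch, assuming only Java's int-shift semantics (the count uses its low five bits):

    #include <cassert>
    #include <cstdint>

    // Java's int shift uses only the low 5 bits of the count.
    int32_t java_ishl(int32_t x, int32_t count) {
      return x << (count & 31);
    }

    int main() {
      int nbits = 33 & 31;             // a count the ideal graph has already normalized
      assert((nbits & 31) == nbits);   // the condition the new asserts check
      assert(java_ishl(1, nbits) == 2);
      return 0;
    }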
diff --git a/hotspot/src/cpu/s390/vm/sharedRuntime_s390.cpp b/hotspot/src/cpu/s390/vm/sharedRuntime_s390.cpp
index 89c3ae4032a..d8c1a63728b 100644
--- a/hotspot/src/cpu/s390/vm/sharedRuntime_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/sharedRuntime_s390.cpp
@@ -35,6 +35,7 @@
#include "registerSaver_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
#include "vmreg_s390.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@@ -311,7 +312,13 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, RegisterSet reg
__ save_return_pc(return_pc);
// Push a new frame (includes stack linkage).
- __ push_frame(frame_size_in_bytes);
+ // Use return_pc as scratch for push_frame. Z_R0_scratch (the default) and Z_R1_scratch are
+ // illegally used to pass parameters (SAPJVM extension) by RangeCheckStub::emit_code().
+ __ push_frame(frame_size_in_bytes, return_pc);
+ // We have to restore return_pc right away.
+ // Nobody else will. Furthermore, return_pc isn't necessarily the default (Z_R14).
+ // Nobody else knows which register we saved.
+ __ z_lg(return_pc, _z_abi16(return_pc) + frame_size_in_bytes, Z_SP);
// Register save area in new frame starts above z_abi_160 area.
int offset = register_save_offset;
@@ -541,7 +548,6 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
}
}
-#if INCLUDE_CDS
size_t SharedRuntime::trampoline_size() {
return MacroAssembler::load_const_size() + 2;
}
@@ -551,7 +557,6 @@ void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destinatio
__ load_const(Z_R1_scratch, destination);
__ z_br(Z_R1_scratch);
}
-#endif
// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler * masm,
@@ -744,7 +749,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
ShouldNotReachHere();
}
}
- return round_to(stk, 2);
+ return align_up(stk, 2);
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
@@ -840,7 +845,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
ShouldNotReachHere();
}
}
- return round_to(stk, 2);
+ return align_up(stk, 2);
}
////////////////////////////////////////////////////////////////////////
@@ -1734,7 +1739,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
}
}
} // for
- total_save_slots = double_slots * 2 + round_to(single_slots, 2); // Round to even.
+ total_save_slots = double_slots * 2 + align_up(single_slots, 2); // Round to even.
}
int oop_handle_slot_offset = stack_slots;
@@ -1761,7 +1766,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Now compute actual number of stack words we need.
// Round to align stack properly.
- stack_slots = round_to(stack_slots, // 7)
+ stack_slots = align_up(stack_slots, // 7)
frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
@@ -2395,7 +2400,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
// it has already been allocated.
const int abi_scratch = frame::z_top_ijava_frame_abi_size;
- int extraspace = round_to(total_args_passed, 2)*wordSize + abi_scratch;
+ int extraspace = align_up(total_args_passed, 2)*wordSize + abi_scratch;
Register sender_SP = Z_R10;
Register value = Z_R12;
@@ -2525,9 +2530,9 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// registers are below. By subtracting stack0, we either get a negative
// number (all values in registers) or the maximum stack slot accessed.
// Convert VMRegImpl (4 byte) stack slots to words.
- int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+ int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
// Round up to miminum stack alignment, in wordSize
- comp_words_on_stack = round_to(comp_words_on_stack, 2);
+ comp_words_on_stack = align_up(comp_words_on_stack, 2);
__ resize_frame(-comp_words_on_stack*wordSize, Z_R0_scratch);
}
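round_to is replaced by align_up from the new utilities/align.hpp include throughout this file. For the power-of-two alignments used here the rounding is the same; a minimal sketch of what it computes:

    #include <cassert>

    // Round x up to the next multiple of alignment (alignment must be a power of two).
    template <typename T>
    T align_up_sketch(T x, T alignment) {
      return (x + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(align_up_sketch(5, 2) == 6);   // odd slot counts get padded to even,
      assert(align_up_sketch(6, 2) == 6);   // as in the calling-convention code above
      return 0;
    }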
diff --git a/hotspot/src/cpu/s390/vm/stubGenerator_s390.cpp b/hotspot/src/cpu/s390/vm/stubGenerator_s390.cpp
index be107222636..d779e90c042 100644
--- a/hotspot/src/cpu/s390/vm/stubGenerator_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/stubGenerator_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -291,7 +291,7 @@ class StubGenerator: public StubCodeGenerator {
// Restore frame pointer.
__ z_lg(r_entryframe_fp, _z_abi(callers_sp), Z_SP);
// Pop frame. Done here to minimize stalls.
- __ z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
+ __ pop_frame();
// Reload some volatile registers which we've spilled before the call
// to frame manager / native entry.
@@ -563,6 +563,9 @@ class StubGenerator: public StubCodeGenerator {
address generate_throw_exception(const char* name, address runtime_entry,
bool restore_saved_exception_pc,
Register arg1 = noreg, Register arg2 = noreg) {
+ assert_different_registers(arg1, Z_R0_scratch); // would be destroyed by push_frame()
+ assert_different_registers(arg2, Z_R0_scratch); // would be destroyed by push_frame()
+
int insts_size = 256;
int locs_size = 0;
CodeBuffer code(name, insts_size, locs_size);
@@ -623,26 +626,6 @@ class StubGenerator: public StubCodeGenerator {
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
#endif
- //----------------------------------------------------------------------
- // The following routine generates a subroutine to throw an asynchronous
- // UnknownError when an unsafe access gets a fault that could not be
- // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
- //
- // Arguments:
- // trapping PC: ??
- //
- // Results:
- // Posts an asynchronous exception, skips the trapping instruction.
- //
- address generate_handler_for_unsafe_access() {
- StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
- {
- address start = __ pc();
- __ unimplemented("StubRoutines::handler_for_unsafe_access", 86);
- return start;
- }
- }
-
// Support for uint StubRoutine::zarch::partial_subtype_check(Klass
// sub, Klass super);
//
@@ -713,11 +696,13 @@ class StubGenerator: public StubCodeGenerator {
BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1SATBCTLogging:
- // With G1, don't generate the call if we statically know that the target in uninitialized.
+ // With G1, don't generate the call if we statically know that the target is uninitialized.
if (!dest_uninitialized) {
// Is marking active?
Label filtered;
- Register Rtmp1 = Z_R0;
+ assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
+ assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
+ Register Rtmp1 = Z_R0_scratch;
const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
SATBMarkQueue::byte_offset_of_active());
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
@@ -728,11 +713,11 @@ class StubGenerator: public StubCodeGenerator {
}
__ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
- // __ push_frame_abi160(0);
+ // __ push_frame_abi160(0); // implicitly done in save_live_registers()
(void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), addr, count);
(void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers);
- // __ pop_frame();
+ // __ pop_frame(); // implicitly done in restore_live_registers()
__ bind(filtered);
}
@@ -759,16 +744,18 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCTLogging:
{
if (branchToEnd) {
- // __ push_frame_abi160(0);
+ assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
+ assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
+ // __ push_frame_abi160(0); // implicitly done in save_live_registers()
(void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
(void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers);
- // __ pop_frame();
+ // __ pop_frame(); // implicitly done in restore_live_registers()
} else {
// Tail call: call c and return to stub caller.
address entry_point = CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
- if (Z_ARG1 != addr) __ z_lgr(Z_ARG1, addr);
- if (Z_ARG2 != count) __ z_lgr(Z_ARG2, count);
+ __ lgr_if_needed(Z_ARG1, addr);
+ __ lgr_if_needed(Z_ARG2, count);
__ load_const(Z_R1, entry_point);
__ z_br(Z_R1); // Branch without linking, callee will return to stub caller.
}
@@ -1696,8 +1683,8 @@ class StubGenerator: public StubCodeGenerator {
// src must designate an even/odd register pair, holding the address/length of the original message
// Helper function which generates code to
- // - load the function code in register fCode (== Z_R0)
- // - load the data block length (depends on cipher function) in register srclen if requested.
+ // - load the function code in register fCode (== Z_R0).
+ // - load the data block length (depends on cipher function) into register srclen if requested.
// - is_decipher switches between cipher/decipher function codes
// - set_len requests (if true) loading the data block length in register srclen
void generate_load_AES_fCode(Register keylen, Register fCode, Register srclen, bool is_decipher) {
@@ -1708,12 +1695,13 @@ class StubGenerator: public StubCodeGenerator {
bool identical_dataBlk_len = (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES192_dataBlk)
&& (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES256_dataBlk);
// Expanded key length is 44/52/60 * 4 bytes for AES-128/AES-192/AES-256.
- __ z_cghi(keylen, 52);
- __ z_lghi(fCode, VM_Version::Cipher::_AES256 + mode);
+ __ z_cghi(keylen, 52); // Check only once at the beginning. keylen and fCode may share the same register.
+
+ __ z_lghi(fCode, VM_Version::Cipher::_AES128 + mode);
if (!identical_dataBlk_len) {
- __ z_lghi(srclen, VM_Version::Cipher::_AES256_dataBlk);
+ __ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk);
}
- __ z_brh(fCode_set); // keyLen > 52: AES256
+ __ z_brl(fCode_set); // keyLen < 52: AES128
__ z_lghi(fCode, VM_Version::Cipher::_AES192 + mode);
if (!identical_dataBlk_len) {
@@ -1721,11 +1709,12 @@ class StubGenerator: public StubCodeGenerator {
}
__ z_bre(fCode_set); // keyLen == 52: AES192
- __ z_lghi(fCode, VM_Version::Cipher::_AES128 + mode);
+ __ z_lghi(fCode, VM_Version::Cipher::_AES256 + mode);
if (!identical_dataBlk_len) {
- __ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk);
+ __ z_lghi(srclen, VM_Version::Cipher::_AES256_dataBlk);
}
- // __ z_brl(fCode_set); // keyLen < 52: AES128 // fallthru
+ // __ z_brh(fCode_set); // keyLen > 52: AES256 // fallthru
+
__ bind(fCode_set);
if (identical_dataBlk_len) {
__ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk);
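The fCode selection above now tests the key strengths in ascending order. The expanded key length, read from the key array in 4-byte words, is 44, 52 or 60 for AES-128/192/256, so a single compare against 52 distinguishes the three cases. A plain C++ sketch of the same decision (hypothetical names; the real code sets fCode and srclen instead of returning an enum):

    enum AesStrength { AES128, AES192, AES256 };

    // keylen_words: expanded key length in 4-byte words (44, 52 or 60).
    AesStrength classify_aes_key(int keylen_words) {
      if (keylen_words < 52)  return AES128;   // z_brl(fCode_set)
      if (keylen_words == 52) return AES192;   // z_bre(fCode_set)
      return AES256;                           // fallthru
    }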
@@ -1735,6 +1724,54 @@ class StubGenerator: public StubCodeGenerator {
}
// Push a parameter block for the cipher/decipher instruction on the stack.
+ // Layout of the additional stack space allocated for AES_cipherBlockChaining:
+ //
+ // | |
+ // +--------+ <-- SP before expansion
+ // | |
+ // : : alignment loss, 0..(AES_parmBlk_align-8) bytes
+ // | |
+ // +--------+
+ // | |
+ // : : space for parameter block, size VM_Version::Cipher::_AES*_parmBlk_C
+ // | |
+ // +--------+ <-- parmBlk, octoword-aligned, start of parameter block
+ // | |
+ // : : additional stack space for spills etc., size AES_parmBlk_addspace, DW @ Z_SP not usable!!!
+ // | |
+ // +--------+ <-- Z_SP after expansion
+
+ void generate_push_Block(int dataBlk_len, int parmBlk_len, int crypto_fCode,
+ Register parmBlk, Register keylen, Register fCode, Register cv, Register key) {
+ const int AES_parmBlk_align = 32; // octoword alignment.
+ const int AES_parmBlk_addspace = 24; // Must be sufficiently large to hold all spilled registers
+ // (currently 2) PLUS 1 DW for the frame pointer.
+
+ const int cv_len = dataBlk_len;
+ const int key_len = parmBlk_len - cv_len;
+ // This len must be known at JIT compile time. Only then are we able to recalc the SP before resize.
+ // We buy this knowledge by wasting some (up to AES_parmBlk_align) bytes of stack space.
+ const int resize_len = cv_len + key_len + AES_parmBlk_align + AES_parmBlk_addspace;
+
+ // Use parmBlk as temp reg here to hold the frame pointer.
+ __ resize_frame(-resize_len, parmBlk, true);
+
+ // calculate parmBlk address from updated (resized) SP.
+ __ add2reg(parmBlk, resize_len - (cv_len + key_len), Z_SP);
+ __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff); // Align parameter block.
+
+ // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace+8, parmBlk).
+ __ z_stg(keylen, -8, parmBlk); // Spill keylen for later use.
+
+ // calculate (SP before resize) from updated SP.
+ __ add2reg(keylen, resize_len, Z_SP); // keylen holds prev SP for now.
+ __ z_stg(keylen, -16, parmBlk); // Spill prev SP for easy revert.
+
+ __ z_mvc(0, cv_len-1, parmBlk, 0, cv); // Copy cv.
+ __ z_mvc(cv_len, key_len-1, parmBlk, 0, key); // Copy key.
+ __ z_lghi(fCode, crypto_fCode);
+ }
+
// NOTE:
// Before returning, the stub has to copy the chaining value from
// the parmBlk, where it was updated by the crypto instruction, back
@@ -1743,17 +1780,14 @@ class StubGenerator: public StubCodeGenerator {
// the key length across the KMC instruction. We do so by spilling it to the stack,
// just preceding the parmBlk (at (parmBlk - 8)).
void generate_push_parmBlk(Register keylen, Register fCode, Register parmBlk, Register key, Register cv, bool is_decipher) {
- const int AES_parmBlk_align = 32;
- const int AES_parmBlk_addspace = AES_parmBlk_align; // Must be multiple of AES_parmblk_align.
- int cv_len, key_len;
int mode = is_decipher ? VM_Version::CipherMode::decipher : VM_Version::CipherMode::cipher;
Label parmBlk_128, parmBlk_192, parmBlk_256, parmBlk_set;
BLOCK_COMMENT("push parmBlk {");
if (VM_Version::has_Crypto_AES() ) { __ z_cghi(keylen, 52); }
- if (VM_Version::has_Crypto_AES256()) { __ z_brh(parmBlk_256); } // keyLen > 52: AES256
- if (VM_Version::has_Crypto_AES192()) { __ z_bre(parmBlk_192); } // keyLen == 52: AES192
if (VM_Version::has_Crypto_AES128()) { __ z_brl(parmBlk_128); } // keyLen < 52: AES128
+ if (VM_Version::has_Crypto_AES192()) { __ z_bre(parmBlk_192); } // keyLen == 52: AES192
+ if (VM_Version::has_Crypto_AES256()) { __ z_brh(parmBlk_256); } // keyLen > 52: AES256
// Security net: requested AES function not available on this CPU.
// NOTE:
@@ -1762,71 +1796,35 @@ class StubGenerator: public StubCodeGenerator {
// at all, we have at least AES-128.
__ stop_static("AES key strength not supported by CPU. Use -XX:-UseAES as remedy.", 0);
- if (VM_Version::has_Crypto_AES128()) {
- __ bind(parmBlk_128);
- cv_len = VM_Version::Cipher::_AES128_dataBlk;
- key_len = VM_Version::Cipher::_AES128_parmBlk_C - cv_len;
- __ z_lay(parmBlk, -(VM_Version::Cipher::_AES128_parmBlk_C+AES_parmBlk_align)+(AES_parmBlk_align-1), Z_SP);
- __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff); // align parameter block
-
- // Resize the frame to accommodate for the aligned parameter block and other stuff.
- // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace, parmBlk).
- __ z_stg(keylen, -8, parmBlk); // Spill keylen for later use.
- __ z_stg(Z_SP, -16, parmBlk); // Spill SP for easy revert.
- __ z_aghi(parmBlk, -AES_parmBlk_addspace); // Additional space for keylen, etc..
- __ resize_frame_absolute(parmBlk, keylen, true); // Resize frame with parmBlk being the new SP.
- __ z_aghi(parmBlk, AES_parmBlk_addspace); // Restore parameter block address.
-
- __ z_mvc(0, cv_len-1, parmBlk, 0, cv); // Copy cv.
- __ z_mvc(cv_len, key_len-1, parmBlk, 0, key); // Copy key.
- __ z_lghi(fCode, VM_Version::Cipher::_AES128 + mode);
- if (VM_Version::has_Crypto_AES192() || VM_Version::has_Crypto_AES256()) {
+ if (VM_Version::has_Crypto_AES256()) {
+ __ bind(parmBlk_256);
+ generate_push_Block(VM_Version::Cipher::_AES256_dataBlk,
+ VM_Version::Cipher::_AES256_parmBlk_C,
+ VM_Version::Cipher::_AES256 + mode,
+ parmBlk, keylen, fCode, cv, key);
+ if (VM_Version::has_Crypto_AES128() || VM_Version::has_Crypto_AES192()) {
__ z_bru(parmBlk_set); // Fallthru otherwise.
}
}
if (VM_Version::has_Crypto_AES192()) {
__ bind(parmBlk_192);
- cv_len = VM_Version::Cipher::_AES192_dataBlk;
- key_len = VM_Version::Cipher::_AES192_parmBlk_C - cv_len;
- __ z_lay(parmBlk, -(VM_Version::Cipher::_AES192_parmBlk_C+AES_parmBlk_align)+(AES_parmBlk_align-1), Z_SP);
- __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff); // Align parameter block.
-
- // Resize the frame to accommodate for the aligned parameter block and other stuff.
- // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace, parmBlk).
- __ z_stg(keylen, -8, parmBlk); // Spill keylen for later use.
- __ z_stg(Z_SP, -16, parmBlk); // Spill SP for easy revert.
- __ z_aghi(parmBlk, -AES_parmBlk_addspace); // Additional space for keylen, etc..
- __ resize_frame_absolute(parmBlk, keylen, true); // Resize frame with parmBlk being the new SP.
- __ z_aghi(parmBlk, AES_parmBlk_addspace); // Restore parameter block address.
-
- __ z_mvc(0, cv_len-1, parmBlk, 0, cv); // Copy cv.
- __ z_mvc(cv_len, key_len-1, parmBlk, 0, key); // Copy key.
- __ z_lghi(fCode, VM_Version::Cipher::_AES192 + mode);
- if (VM_Version::has_Crypto_AES256()) {
+ generate_push_Block(VM_Version::Cipher::_AES192_dataBlk,
+ VM_Version::Cipher::_AES192_parmBlk_C,
+ VM_Version::Cipher::_AES192 + mode,
+ parmBlk, keylen, fCode, cv, key);
+ if (VM_Version::has_Crypto_AES128()) {
__ z_bru(parmBlk_set); // Fallthru otherwise.
}
}
- if (VM_Version::has_Crypto_AES256()) {
- __ bind(parmBlk_256);
- cv_len = VM_Version::Cipher::_AES256_dataBlk;
- key_len = VM_Version::Cipher::_AES256_parmBlk_C - cv_len;
- __ z_lay(parmBlk, -(VM_Version::Cipher::_AES256_parmBlk_C+AES_parmBlk_align)+(AES_parmBlk_align-1), Z_SP);
- __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff); // Align parameter block.
-
- // Resize the frame to accommodate for the aligned parameter block and other stuff.
- // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace, parmBlk).
- __ z_stg(keylen, -8, parmBlk); // Spill keylen for later use.
- __ z_stg(Z_SP, -16, parmBlk); // Spill SP for easy revert.
- __ z_aghi(parmBlk, -AES_parmBlk_addspace); // Additional space for keylen, etc..
- __ resize_frame_absolute(parmBlk, keylen, true); // Resize frame with parmBlk being the new SP.
- __ z_aghi(parmBlk, AES_parmBlk_addspace); // Restore parameter block address.
-
- __ z_mvc(0, cv_len-1, parmBlk, 0, cv); // Copy cv.
- __ z_mvc(cv_len, key_len-1, parmBlk, 0, key); // Copy key.
- __ z_lghi(fCode, VM_Version::Cipher::_AES256 + mode);
- // __ z_bru(parmBlk_set); // fallthru
+ if (VM_Version::has_Crypto_AES128()) {
+ __ bind(parmBlk_128);
+ generate_push_Block(VM_Version::Cipher::_AES128_dataBlk,
+ VM_Version::Cipher::_AES128_parmBlk_C,
+ VM_Version::Cipher::_AES128 + mode,
+ parmBlk, keylen, fCode, cv, key);
+ // Fallthru
}
__ bind(parmBlk_set);
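generate_push_Block reserves the extra stack space in one fixed-size step and then aligns the parameter block inside it, per the layout sketched above. The address arithmetic, replayed with plain integers (arbitrary example values; constants as in the stub):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t old_sp = 0x7f0000e8;     // arbitrary 8-byte aligned stack pointer
      const int cv_len = 16, key_len = 16;    // e.g. AES-128: 16-byte cv + 16-byte key part
      const int align = 32, addspace = 24;    // AES_parmBlk_align, AES_parmBlk_addspace
      const int resize_len = cv_len + key_len + align + addspace;

      uint64_t sp      = old_sp - resize_len;                    // resize_frame(-resize_len, ...)
      uint64_t parmBlk = (sp + resize_len - (cv_len + key_len))  // add2reg(parmBlk, ...)
                         & ~(uint64_t)(align - 1);               // z_nill(parmBlk, ...)

      assert(parmBlk % align == 0);                  // parameter block is octoword aligned
      assert(parmBlk + cv_len + key_len <= old_sp);  // cv and key copies stay in the reserved area
      assert(parmBlk - 16 >= sp + 8);                // both spill slots lie above the DW at Z_SP
      return 0;
    }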
@@ -1882,41 +1880,49 @@ class StubGenerator: public StubCodeGenerator {
}
__ bind(parmBlk_set);
}
- __ z_lg(Z_SP, -16, parmBlk); // Revert resize_frame_absolute.
+ __ z_lg(Z_SP, -16, parmBlk); // Revert the frame resize. The old Z_SP was spilled at parmBlk-16 by push_parmBlk.
BLOCK_COMMENT("} pop parmBlk");
}
- // Compute AES encrypt function.
- address generate_AES_encryptBlock(const char* name) {
- __ align(CodeEntryAlignment);
- StubCodeMark mark(this, "StubRoutines", name);
- unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
-
+ // Compute AES encrypt/decrypt function.
+ void generate_AES_cipherBlock(bool is_decipher) {
+ // Incoming arguments.
Register from = Z_ARG1; // source byte array
Register to = Z_ARG2; // destination byte array
Register key = Z_ARG3; // expanded key array
const Register keylen = Z_R0; // Temporarily (until fCode is set) holds the expanded key array length.
+
+ // Register definitions as required by KM instruction.
const Register fCode = Z_R0; // crypto function code
const Register parmBlk = Z_R1; // parameter block address (points to crypto key)
- const Register src = Z_ARG1; // is Z_R2
- const Register srclen = Z_ARG2; // Overwrites destination address.
- const Register dst = Z_ARG3; // Overwrites expanded key address.
+ const Register src = Z_ARG1; // Must be even reg (KM requirement).
+ const Register srclen = Z_ARG2; // Must be odd reg and pair with src. Overwrites destination address.
+ const Register dst = Z_ARG3; // Must be even reg (KM requirement). Overwrites expanded key address.
// Read key len of expanded key (in 4-byte words).
__ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
// Copy arguments to registers as required by crypto instruction.
__ z_lgr(parmBlk, key); // crypto key (in T_INT array).
- // __ z_lgr(src, from); // Copy not needed, src/from are identical.
- __ z_lgr(dst, to); // Copy destination address to even register.
+ __ lgr_if_needed(src, from); // Copy src address. Will not emit, src/from are identical.
+ __ z_lgr(dst, to); // Copy dst address, even register required.
- // Construct function code in Z_R0, data block length in Z_ARG2.
- generate_load_AES_fCode(keylen, fCode, srclen, false);
+ // Construct function code into fCode(Z_R0), data block length into srclen(Z_ARG2).
+ generate_load_AES_fCode(keylen, fCode, srclen, is_decipher);
- __ km(dst, src); // Cipher the message.
+ __ km(dst, src); // Cipher the message.
__ z_br(Z_R14);
+ }
+
+ // Compute AES encrypt function.
+ address generate_AES_encryptBlock(const char* name) {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", name);
+ unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
+
+ generate_AES_cipherBlock(false);
return __ addr_at(start_off);
}
@@ -1925,33 +1931,9 @@ class StubGenerator: public StubCodeGenerator {
address generate_AES_decryptBlock(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
- unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
+ unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
- Register from = Z_ARG1; // source byte array
- Register to = Z_ARG2; // destination byte array
- Register key = Z_ARG3; // expanded key array, not preset at entry!!!
-
- const Register keylen = Z_R0; // Temporarily (until fCode is set) holds the expanded key array length.
- const Register fCode = Z_R0; // crypto function code
- const Register parmBlk = Z_R1; // parameter block address (points to crypto key)
- const Register src = Z_ARG1; // is Z_R2
- const Register srclen = Z_ARG2; // Overwrites destination address.
- const Register dst = Z_ARG3; // Overwrites key address.
-
- // Read key len of expanded key (in 4-byte words).
- __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
-
- // Copy arguments to registers as required by crypto instruction.
- __ z_lgr(parmBlk, key); // Copy crypto key address.
- // __ z_lgr(src, from); // Copy not needed, src/from are identical.
- __ z_lgr(dst, to); // Copy destination address to even register.
-
- // Construct function code in Z_R0, data block length in Z_ARG2.
- generate_load_AES_fCode(keylen, fCode, srclen, true);
-
- __ km(dst, src); // Cipher the message.
-
- __ z_br(Z_R14);
+ generate_AES_cipherBlock(true);
return __ addr_at(start_off);
}
@@ -1969,10 +1951,7 @@ class StubGenerator: public StubCodeGenerator {
// We align the parameter block to the next available octoword.
//
// Compute chained AES encrypt function.
- address generate_cipherBlockChaining_AES_encrypt(const char* name) {
- __ align(CodeEntryAlignment);
- StubCodeMark mark(this, "StubRoutines", name);
- unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
+ void generate_AES_cipherBlockChaining(bool is_decipher) {
Register from = Z_ARG1; // source byte array (clear text)
Register to = Z_ARG2; // destination byte array (ciphered)
@@ -1992,20 +1971,29 @@ class StubGenerator: public StubCodeGenerator {
__ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
// Construct parm block address in parmBlk (== Z_R1), copy cv and key to parm block.
- // Construct function code in Z_R0.
- generate_push_parmBlk(keylen, fCode, parmBlk, key, cv, false);
+ // Construct function code in fCode (Z_R0).
+ generate_push_parmBlk(keylen, fCode, parmBlk, key, cv, is_decipher);
// Prepare other registers for instruction.
- // __ z_lgr(src, from); // Not needed, registers are the same.
+ __ lgr_if_needed(src, from); // Copy src address. Will not emit, src/from are identical.
__ z_lgr(dst, to);
- __ z_llgfr(srclen, msglen); // We pass the offsets as ints, not as longs as required.
+ __ z_llgfr(srclen, msglen); // We pass the offsets as ints, not as longs as required.
- __ kmc(dst, src); // Cipher the message.
+ __ kmc(dst, src); // Cipher the message.
generate_pop_parmBlk(keylen, parmBlk, key, cv);
- __ z_llgfr(Z_RET, msglen); // We pass the offsets as ints, not as longs as required.
+ __ z_llgfr(Z_RET, msglen); // We pass the offsets as ints, not as longs as required.
__ z_br(Z_R14);
+ }
+
+ // Compute chained AES encrypt function.
+ address generate_cipherBlockChaining_AES_encrypt(const char* name) {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", name);
+ unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
+
+ generate_AES_cipherBlockChaining(false);
return __ addr_at(start_off);
}
@@ -2016,38 +2004,7 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
- Register from = Z_ARG1; // source byte array (ciphered)
- Register to = Z_ARG2; // destination byte array (clear text)
- Register key = Z_ARG3; // expanded key array, not preset at entry!!!
- Register cv = Z_ARG4; // chaining value
- const Register msglen = Z_ARG5; // Total length of the msg to be encrypted. Value must be returned
- // in Z_RET upon completion of this stub.
-
- const Register keylen = Z_R0; // Expanded key length, as read from key array. Temp only.
- const Register fCode = Z_R0; // crypto function code
- const Register parmBlk = Z_R1; // parameter block address (points to crypto key)
- const Register src = Z_ARG1; // is Z_R2
- const Register srclen = Z_ARG2; // Overwrites destination address.
- const Register dst = Z_ARG3; // Overwrites key address.
-
- // Read key len of expanded key (in 4-byte words).
- __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
-
- // Construct parm block address in parmBlk (== Z_R1), copy cv and key to parm block.
- // Construct function code in Z_R0.
- generate_push_parmBlk(keylen, fCode, parmBlk, key, cv, true);
-
- // Prepare other registers for instruction.
- // __ z_lgr(src, from); // Not needed, registers are the same.
- __ z_lgr(dst, to);
- __ z_llgfr(srclen, msglen); // We pass the offsets as ints, not as longs as required.
-
- __ kmc(dst, src); // Decipher the message.
-
- generate_pop_parmBlk(keylen, parmBlk, key, cv);
-
- __ z_llgfr(Z_RET, msglen); // We pass the offsets as ints, not as longs as required.
- __ z_br(Z_R14);
+ generate_AES_cipherBlockChaining(true);
return __ addr_at(start_off);
}
@@ -2330,26 +2287,25 @@ class StubGenerator: public StubCodeGenerator {
}
-
- // Arguments:
- // Z_ARG1 - int crc
- // Z_ARG2 - byte* buf
- // Z_ARG3 - int length (of buffer)
- //
- // Result:
- // Z_RET - int crc result
- //
- // Compute CRC32 function.
- address generate_CRC32_updateBytes(const char* name) {
- __ align(CodeEntryAlignment);
- StubCodeMark mark(this, "StubRoutines", name);
- unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
+ /**
+ * Arguments:
+ *
+ * Inputs:
+ * Z_ARG1 - int crc
+ * Z_ARG2 - byte* buf
+ * Z_ARG3 - int length (of buffer)
+ *
+ * Result:
+ * Z_RET - int crc result
+ **/
+ // Compute CRC function (generic, for all polynomials).
+ void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
// arguments to kernel_crc32:
Register crc = Z_ARG1; // Current checksum, preset by caller or result from previous call, int.
Register data = Z_ARG2; // source byte array
Register dataLen = Z_ARG3; // #bytes to process, int
- Register table = Z_ARG4; // crc table address
+// Register table = Z_ARG4; // crc table address. Preloaded and passed in by caller.
const Register t0 = Z_R10; // work reg for kernel* emitters
const Register t1 = Z_R11; // work reg for kernel* emitters
const Register t2 = Z_R12; // work reg for kernel* emitters
@@ -2361,16 +2317,50 @@ class StubGenerator: public StubCodeGenerator {
// Crc used as int.
__ z_llgfr(dataLen, dataLen);
- StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
-
__ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_stmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs 10..11 to make them available as work registers.
- __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3);
+ __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, invertCRC);
__ z_lmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs 10..11 back from stack.
__ resize_frame(+(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_llgfr(Z_RET, crc); // Updated crc is function result. No copying required, just zero upper 32 bits.
__ z_br(Z_R14); // Result already in Z_RET == Z_ARG1.
+ }
+
+
+ // Compute CRC32 function.
+ address generate_CRC32_updateBytes(const char* name) {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", name);
+ unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
+
+ assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", name);
+
+ BLOCK_COMMENT("CRC32_updateBytes {");
+ Register table = Z_ARG4; // crc32 table address.
+ StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
+
+ generate_CRC_updateBytes(name, table, true);
+ BLOCK_COMMENT("} CRC32_updateBytes");
+
+ return __ addr_at(start_off);
+ }
+
+
+ // Compute CRC32C function.
+ address generate_CRC32C_updateBytes(const char* name) {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", name);
+ unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
+
+ assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", name);
+
+ BLOCK_COMMENT("CRC32C_updateBytes {");
+ Register table = Z_ARG4; // crc32c table address.
+ StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);
+
+ generate_CRC_updateBytes(name, table, false);
+ BLOCK_COMMENT("} CRC32C_updateBytes");
return __ addr_at(start_off);
}
@@ -2441,9 +2431,13 @@ class StubGenerator: public StubCodeGenerator {
// Entry points that are platform specific.
if (UseCRC32Intrinsics) {
- // We have no CRC32 table on z/Architecture.
- StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
- StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
+ StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
+ StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
+ }
+
+ if (UseCRC32CIntrinsics) {
+ StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table;
+ StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
}
// Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
@@ -2461,8 +2455,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
- StubRoutines::zarch::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();
-
// Support for verify_oop (must happen after universe_init).
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();
diff --git a/hotspot/src/cpu/s390/vm/stubRoutines_s390.cpp b/hotspot/src/cpu/s390/vm/stubRoutines_s390.cpp
index 8c60ae04350..ab167d9030f 100644
--- a/hotspot/src/cpu/s390/vm/stubRoutines_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/stubRoutines_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,6 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
-address StubRoutines::zarch::_handler_for_unsafe_access_entry = NULL;
-
address StubRoutines::zarch::_partial_subtype_check = NULL;
// Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
@@ -44,14 +42,15 @@ int StubRoutines::zarch::_atomic_memory_operation_lock = StubRoutines::zarch::un
#define __ masm->
-void StubRoutines::zarch::generate_load_crc_table_addr(MacroAssembler* masm, Register table) {
+void StubRoutines::zarch::generate_load_absolute_address(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents) {
+ __ load_absolute_address(table, table_addr);
- __ load_absolute_address(table, StubRoutines::_crc_table_adr);
#ifdef ASSERT
- assert(_crc_table_adr != NULL, "CRC lookup table address must be initialized by now");
+ assert(table_addr != NULL, "CRC lookup table address must be initialized by now");
+ assert(*((uint32_t*)(table_addr+4)) == (uint32_t)table_contents, "Bad CRC lookup table: 0x%8.8x, expected 0x%8.8x", *((uint32_t*)(table_addr+4)), (uint32_t)table_contents);
{
Label L;
- __ load_const_optimized(Z_R0, StubRoutines::_crc_table_adr);
+ __ load_const_optimized(Z_R0, table_addr);
__ z_cgr(table, Z_R0); // safety net
__ z_bre(L);
__ z_illtrap();
@@ -60,7 +59,7 @@ void StubRoutines::zarch::generate_load_crc_table_addr(MacroAssembler* masm, Reg
}
{
Label L;
- __ load_const_optimized(Z_R0, 0x77073096UL);
+ __ load_const_optimized(Z_R0, table_contents); // crc_table: data @ offset(4)
__ z_cl(Z_R0, Address(table, 4)); // safety net
__ z_bre(L);
__ z_l(Z_R0, Address(table, 4)); // Load data from memory, we know the constant we compared against.
@@ -71,6 +70,17 @@ void StubRoutines::zarch::generate_load_crc_table_addr(MacroAssembler* masm, Reg
#endif
}
+void StubRoutines::zarch::generate_load_crc_table_addr(MacroAssembler* masm, Register table) {
+ const uint64_t table_contents = 0x77073096UL; // required contents of table[1]
+ generate_load_absolute_address(masm, table, StubRoutines::_crc_table_adr, table_contents);
+}
+
+void StubRoutines::zarch::generate_load_crc32c_table_addr(MacroAssembler* masm, Register table) {
+ const uint64_t table_contents = 0xf26b8303UL; // required contents of table[1]
+ generate_load_absolute_address(masm, table, StubRoutines::_crc32c_table_addr, table_contents);
+}
+
+
// Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
void StubRoutines::zarch::generate_load_trot_table_addr(MacroAssembler* masm, Register table) {
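The shared generate_load_absolute_address checks the second table entry (at offset 4) against a known constant: 0x77073096 for the CRC32 table and 0xf26b8303 for the CRC32C table. Assuming the standard reflected polynomials 0xEDB88320 (CRC32) and 0x82F63B78 (CRC32C), those are exactly the entries at index 1; a small self-check:

    #include <cassert>
    #include <cstdint>

    // Entry i of a byte-wise CRC table for a reflected polynomial.
    uint32_t crc_table_entry(uint32_t i, uint32_t reflected_poly) {
      uint32_t c = i;
      for (int k = 0; k < 8; k++) {
        c = (c & 1) ? reflected_poly ^ (c >> 1) : (c >> 1);
      }
      return c;
    }

    int main() {
      assert(crc_table_entry(1, 0xEDB88320u) == 0x77073096u);  // CRC32  safety net constant
      assert(crc_table_entry(1, 0x82F63B78u) == 0xf26b8303u);  // CRC32C safety net constant
      return 0;
    }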
@@ -130,440 +140,590 @@ jlong StubRoutines::zarch::_trot_table[TROT_COLUMN_SIZE] = {
};
-// crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.h
juint StubRoutines::zarch::_crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE] = {
- {
- 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
- 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
- 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
- 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
- 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
- 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
- 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
- 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
- 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
- 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
- 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
- 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
- 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
- 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
- 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
- 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
- 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
- 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
- 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
- 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
- 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
- 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
- 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
- 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
- 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
- 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
- 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
- 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
- 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
- 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
- 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
- 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
- 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
- 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
- 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
- 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
- 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
- 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
- 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
- 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
- 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
- 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
- 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
- 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
- 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
- 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
- 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
- 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
- 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
- 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
- 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
- 0x2d02ef8dUL
-#ifdef CRC32_BYFOUR
- },
- {
- 0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL,
- 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL,
- 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL,
- 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL,
- 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL,
- 0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL,
- 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL,
- 0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL,
- 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL,
- 0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL,
- 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL,
- 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL,
- 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL,
- 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL,
- 0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL,
- 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL,
- 0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL,
- 0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL,
- 0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL,
- 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL,
- 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL,
- 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL,
- 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL,
- 0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL,
- 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL,
- 0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL,
- 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL,
- 0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL,
- 0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL,
- 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL,
- 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL,
- 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL,
- 0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL,
- 0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL,
- 0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL,
- 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL,
- 0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL,
- 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL,
- 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL,
- 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL,
- 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL,
- 0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL,
- 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL,
- 0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL,
- 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL,
- 0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL,
- 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL,
- 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL,
- 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL,
- 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL,
- 0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL,
- 0x9324fd72UL
- },
- {
- 0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL,
- 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL,
- 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL,
- 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL,
- 0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL,
- 0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL,
- 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL,
- 0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL,
- 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL,
- 0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL,
- 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL,
- 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL,
- 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL,
- 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL,
- 0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL,
- 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL,
- 0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL,
- 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL,
- 0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL,
- 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL,
- 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL,
- 0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL,
- 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL,
- 0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL,
- 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL,
- 0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL,
- 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL,
- 0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL,
- 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL,
- 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL,
- 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL,
- 0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL,
- 0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL,
- 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL,
- 0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL,
- 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL,
- 0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL,
- 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL,
- 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL,
- 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL,
- 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL,
- 0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL,
- 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL,
- 0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL,
- 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL,
- 0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL,
- 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL,
- 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL,
- 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL,
- 0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL,
- 0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL,
- 0xbe9834edUL
- },
- {
- 0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL,
- 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL,
- 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL,
- 0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL,
- 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL,
- 0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL,
- 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL,
- 0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL,
- 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL,
- 0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL,
- 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL,
- 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL,
- 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL,
- 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL,
- 0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL,
- 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL,
- 0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL,
- 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL,
- 0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL,
- 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL,
- 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL,
- 0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL,
- 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL,
- 0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL,
- 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL,
- 0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL,
- 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL,
- 0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL,
- 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL,
- 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL,
- 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL,
- 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL,
- 0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL,
- 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL,
- 0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL,
- 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL,
- 0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL,
- 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL,
- 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL,
- 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL,
- 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL,
- 0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL,
- 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL,
- 0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL,
- 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL,
- 0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL,
- 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL,
- 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL,
- 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL,
- 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL,
- 0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL,
- 0xde0506f1UL
- },
- {
- 0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL,
- 0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL,
- 0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL,
- 0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL,
- 0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL,
- 0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL,
- 0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL,
- 0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL,
- 0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL,
- 0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL,
- 0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL,
- 0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL,
- 0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL,
- 0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL,
- 0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL,
- 0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL,
- 0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL,
- 0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL,
- 0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL,
- 0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL,
- 0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL,
- 0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL,
- 0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL,
- 0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL,
- 0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL,
- 0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL,
- 0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL,
- 0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL,
- 0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL,
- 0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL,
- 0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL,
- 0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL,
- 0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL,
- 0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL,
- 0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL,
- 0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL,
- 0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL,
- 0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL,
- 0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL,
- 0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL,
- 0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL,
- 0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL,
- 0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL,
- 0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL,
- 0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL,
- 0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL,
- 0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL,
- 0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL,
- 0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL,
- 0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL,
- 0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL,
- 0x8def022dUL
- },
- {
- 0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL,
- 0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL,
- 0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL,
- 0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL,
- 0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL,
- 0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL,
- 0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL,
- 0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL,
- 0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL,
- 0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL,
- 0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL,
- 0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL,
- 0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL,
- 0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL,
- 0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL,
- 0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL,
- 0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL,
- 0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL,
- 0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL,
- 0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL,
- 0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL,
- 0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL,
- 0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL,
- 0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL,
- 0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL,
- 0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL,
- 0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL,
- 0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL,
- 0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL,
- 0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL,
- 0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL,
- 0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL,
- 0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL,
- 0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL,
- 0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL,
- 0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL,
- 0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL,
- 0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL,
- 0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL,
- 0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL,
- 0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL,
- 0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL,
- 0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL,
- 0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL,
- 0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL,
- 0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL,
- 0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL,
- 0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL,
- 0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL,
- 0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL,
- 0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL,
- 0x72fd2493UL
- },
- {
- 0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL,
- 0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL,
- 0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL,
- 0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL,
- 0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL,
- 0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL,
- 0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL,
- 0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL,
- 0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL,
- 0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL,
- 0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL,
- 0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL,
- 0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL,
- 0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL,
- 0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL,
- 0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL,
- 0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL,
- 0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL,
- 0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL,
- 0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL,
- 0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL,
- 0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL,
- 0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL,
- 0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL,
- 0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL,
- 0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL,
- 0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL,
- 0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL,
- 0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL,
- 0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL,
- 0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL,
- 0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL,
- 0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL,
- 0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL,
- 0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL,
- 0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL,
- 0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL,
- 0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL,
- 0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL,
- 0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL,
- 0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL,
- 0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL,
- 0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL,
- 0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL,
- 0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL,
- 0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL,
- 0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL,
- 0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL,
- 0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL,
- 0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL,
- 0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL,
- 0xed3498beUL
- },
- {
- 0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL,
- 0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL,
- 0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL,
- 0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL,
- 0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL,
- 0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL,
- 0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL,
- 0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL,
- 0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL,
- 0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL,
- 0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL,
- 0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL,
- 0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL,
- 0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL,
- 0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL,
- 0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL,
- 0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL,
- 0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL,
- 0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL,
- 0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL,
- 0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL,
- 0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL,
- 0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL,
- 0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL,
- 0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL,
- 0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL,
- 0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL,
- 0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL,
- 0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL,
- 0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL,
- 0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL,
- 0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL,
- 0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL,
- 0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL,
- 0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL,
- 0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL,
- 0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL,
- 0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL,
- 0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL,
- 0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL,
- 0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL,
- 0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL,
- 0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL,
- 0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL,
- 0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL,
- 0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL,
- 0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL,
- 0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL,
- 0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL,
- 0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL,
- 0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL,
- 0xf10605deUL
-#endif
- }
-};
+ /* polyBits = 7976584769 0x00000001db710641L, shifted = 0xedb88320 */
+ /* CRC32 table for single bytes, auto-generated. DO NOT MODIFY! */
+ /* CRC32 table 0 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x77073096U, 0xee0e612cU, 0x990951baU, 0x076dc419U, 0x706af48fU, 0xe963a535U, 0x9e6495a3U
+ /* 8 */ , 0x0edb8832U, 0x79dcb8a4U, 0xe0d5e91eU, 0x97d2d988U, 0x09b64c2bU, 0x7eb17cbdU, 0xe7b82d07U, 0x90bf1d91U
+ /* 16 */ , 0x1db71064U, 0x6ab020f2U, 0xf3b97148U, 0x84be41deU, 0x1adad47dU, 0x6ddde4ebU, 0xf4d4b551U, 0x83d385c7U
+ /* 24 */ , 0x136c9856U, 0x646ba8c0U, 0xfd62f97aU, 0x8a65c9ecU, 0x14015c4fU, 0x63066cd9U, 0xfa0f3d63U, 0x8d080df5U
+ /* 32 */ , 0x3b6e20c8U, 0x4c69105eU, 0xd56041e4U, 0xa2677172U, 0x3c03e4d1U, 0x4b04d447U, 0xd20d85fdU, 0xa50ab56bU
+ /* 40 */ , 0x35b5a8faU, 0x42b2986cU, 0xdbbbc9d6U, 0xacbcf940U, 0x32d86ce3U, 0x45df5c75U, 0xdcd60dcfU, 0xabd13d59U
+ /* 48 */ , 0x26d930acU, 0x51de003aU, 0xc8d75180U, 0xbfd06116U, 0x21b4f4b5U, 0x56b3c423U, 0xcfba9599U, 0xb8bda50fU
+ /* 56 */ , 0x2802b89eU, 0x5f058808U, 0xc60cd9b2U, 0xb10be924U, 0x2f6f7c87U, 0x58684c11U, 0xc1611dabU, 0xb6662d3dU
+ /* 64 */ , 0x76dc4190U, 0x01db7106U, 0x98d220bcU, 0xefd5102aU, 0x71b18589U, 0x06b6b51fU, 0x9fbfe4a5U, 0xe8b8d433U
+ /* 72 */ , 0x7807c9a2U, 0x0f00f934U, 0x9609a88eU, 0xe10e9818U, 0x7f6a0dbbU, 0x086d3d2dU, 0x91646c97U, 0xe6635c01U
+ /* 80 */ , 0x6b6b51f4U, 0x1c6c6162U, 0x856530d8U, 0xf262004eU, 0x6c0695edU, 0x1b01a57bU, 0x8208f4c1U, 0xf50fc457U
+ /* 88 */ , 0x65b0d9c6U, 0x12b7e950U, 0x8bbeb8eaU, 0xfcb9887cU, 0x62dd1ddfU, 0x15da2d49U, 0x8cd37cf3U, 0xfbd44c65U
+ /* 96 */ , 0x4db26158U, 0x3ab551ceU, 0xa3bc0074U, 0xd4bb30e2U, 0x4adfa541U, 0x3dd895d7U, 0xa4d1c46dU, 0xd3d6f4fbU
+ /* 104 */ , 0x4369e96aU, 0x346ed9fcU, 0xad678846U, 0xda60b8d0U, 0x44042d73U, 0x33031de5U, 0xaa0a4c5fU, 0xdd0d7cc9U
+ /* 112 */ , 0x5005713cU, 0x270241aaU, 0xbe0b1010U, 0xc90c2086U, 0x5768b525U, 0x206f85b3U, 0xb966d409U, 0xce61e49fU
+ /* 120 */ , 0x5edef90eU, 0x29d9c998U, 0xb0d09822U, 0xc7d7a8b4U, 0x59b33d17U, 0x2eb40d81U, 0xb7bd5c3bU, 0xc0ba6cadU
+ /* 128 */ , 0xedb88320U, 0x9abfb3b6U, 0x03b6e20cU, 0x74b1d29aU, 0xead54739U, 0x9dd277afU, 0x04db2615U, 0x73dc1683U
+ /* 136 */ , 0xe3630b12U, 0x94643b84U, 0x0d6d6a3eU, 0x7a6a5aa8U, 0xe40ecf0bU, 0x9309ff9dU, 0x0a00ae27U, 0x7d079eb1U
+ /* 144 */ , 0xf00f9344U, 0x8708a3d2U, 0x1e01f268U, 0x6906c2feU, 0xf762575dU, 0x806567cbU, 0x196c3671U, 0x6e6b06e7U
+ /* 152 */ , 0xfed41b76U, 0x89d32be0U, 0x10da7a5aU, 0x67dd4accU, 0xf9b9df6fU, 0x8ebeeff9U, 0x17b7be43U, 0x60b08ed5U
+ /* 160 */ , 0xd6d6a3e8U, 0xa1d1937eU, 0x38d8c2c4U, 0x4fdff252U, 0xd1bb67f1U, 0xa6bc5767U, 0x3fb506ddU, 0x48b2364bU
+ /* 168 */ , 0xd80d2bdaU, 0xaf0a1b4cU, 0x36034af6U, 0x41047a60U, 0xdf60efc3U, 0xa867df55U, 0x316e8eefU, 0x4669be79U
+ /* 176 */ , 0xcb61b38cU, 0xbc66831aU, 0x256fd2a0U, 0x5268e236U, 0xcc0c7795U, 0xbb0b4703U, 0x220216b9U, 0x5505262fU
+ /* 184 */ , 0xc5ba3bbeU, 0xb2bd0b28U, 0x2bb45a92U, 0x5cb36a04U, 0xc2d7ffa7U, 0xb5d0cf31U, 0x2cd99e8bU, 0x5bdeae1dU
+ /* 192 */ , 0x9b64c2b0U, 0xec63f226U, 0x756aa39cU, 0x026d930aU, 0x9c0906a9U, 0xeb0e363fU, 0x72076785U, 0x05005713U
+ /* 200 */ , 0x95bf4a82U, 0xe2b87a14U, 0x7bb12baeU, 0x0cb61b38U, 0x92d28e9bU, 0xe5d5be0dU, 0x7cdcefb7U, 0x0bdbdf21U
+ /* 208 */ , 0x86d3d2d4U, 0xf1d4e242U, 0x68ddb3f8U, 0x1fda836eU, 0x81be16cdU, 0xf6b9265bU, 0x6fb077e1U, 0x18b74777U
+ /* 216 */ , 0x88085ae6U, 0xff0f6a70U, 0x66063bcaU, 0x11010b5cU, 0x8f659effU, 0xf862ae69U, 0x616bffd3U, 0x166ccf45U
+ /* 224 */ , 0xa00ae278U, 0xd70dd2eeU, 0x4e048354U, 0x3903b3c2U, 0xa7672661U, 0xd06016f7U, 0x4969474dU, 0x3e6e77dbU
+ /* 232 */ , 0xaed16a4aU, 0xd9d65adcU, 0x40df0b66U, 0x37d83bf0U, 0xa9bcae53U, 0xdebb9ec5U, 0x47b2cf7fU, 0x30b5ffe9U
+ /* 240 */ , 0xbdbdf21cU, 0xcabac28aU, 0x53b39330U, 0x24b4a3a6U, 0xbad03605U, 0xcdd70693U, 0x54de5729U, 0x23d967bfU
+ /* 248 */ , 0xb3667a2eU, 0xc4614ab8U, 0x5d681b02U, 0x2a6f2b94U, 0xb40bbe37U, 0xc30c8ea1U, 0x5a05df1bU, 0x2d02ef8dU
+ }
+ #ifdef CRC32_BYFOUR
+ ,
+ /* CRC32 table 1 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x191b3141U, 0x32366282U, 0x2b2d53c3U, 0x646cc504U, 0x7d77f445U, 0x565aa786U, 0x4f4196c7U
+ /* 8 */ , 0xc8d98a08U, 0xd1c2bb49U, 0xfaefe88aU, 0xe3f4d9cbU, 0xacb54f0cU, 0xb5ae7e4dU, 0x9e832d8eU, 0x87981ccfU
+ /* 16 */ , 0x4ac21251U, 0x53d92310U, 0x78f470d3U, 0x61ef4192U, 0x2eaed755U, 0x37b5e614U, 0x1c98b5d7U, 0x05838496U
+ /* 24 */ , 0x821b9859U, 0x9b00a918U, 0xb02dfadbU, 0xa936cb9aU, 0xe6775d5dU, 0xff6c6c1cU, 0xd4413fdfU, 0xcd5a0e9eU
+ /* 32 */ , 0x958424a2U, 0x8c9f15e3U, 0xa7b24620U, 0xbea97761U, 0xf1e8e1a6U, 0xe8f3d0e7U, 0xc3de8324U, 0xdac5b265U
+ /* 40 */ , 0x5d5daeaaU, 0x44469febU, 0x6f6bcc28U, 0x7670fd69U, 0x39316baeU, 0x202a5aefU, 0x0b07092cU, 0x121c386dU
+ /* 48 */ , 0xdf4636f3U, 0xc65d07b2U, 0xed705471U, 0xf46b6530U, 0xbb2af3f7U, 0xa231c2b6U, 0x891c9175U, 0x9007a034U
+ /* 56 */ , 0x179fbcfbU, 0x0e848dbaU, 0x25a9de79U, 0x3cb2ef38U, 0x73f379ffU, 0x6ae848beU, 0x41c51b7dU, 0x58de2a3cU
+ /* 64 */ , 0xf0794f05U, 0xe9627e44U, 0xc24f2d87U, 0xdb541cc6U, 0x94158a01U, 0x8d0ebb40U, 0xa623e883U, 0xbf38d9c2U
+ /* 72 */ , 0x38a0c50dU, 0x21bbf44cU, 0x0a96a78fU, 0x138d96ceU, 0x5ccc0009U, 0x45d73148U, 0x6efa628bU, 0x77e153caU
+ /* 80 */ , 0xbabb5d54U, 0xa3a06c15U, 0x888d3fd6U, 0x91960e97U, 0xded79850U, 0xc7cca911U, 0xece1fad2U, 0xf5facb93U
+ /* 88 */ , 0x7262d75cU, 0x6b79e61dU, 0x4054b5deU, 0x594f849fU, 0x160e1258U, 0x0f152319U, 0x243870daU, 0x3d23419bU
+ /* 96 */ , 0x65fd6ba7U, 0x7ce65ae6U, 0x57cb0925U, 0x4ed03864U, 0x0191aea3U, 0x188a9fe2U, 0x33a7cc21U, 0x2abcfd60U
+ /* 104 */ , 0xad24e1afU, 0xb43fd0eeU, 0x9f12832dU, 0x8609b26cU, 0xc94824abU, 0xd05315eaU, 0xfb7e4629U, 0xe2657768U
+ /* 112 */ , 0x2f3f79f6U, 0x362448b7U, 0x1d091b74U, 0x04122a35U, 0x4b53bcf2U, 0x52488db3U, 0x7965de70U, 0x607eef31U
+ /* 120 */ , 0xe7e6f3feU, 0xfefdc2bfU, 0xd5d0917cU, 0xcccba03dU, 0x838a36faU, 0x9a9107bbU, 0xb1bc5478U, 0xa8a76539U
+ /* 128 */ , 0x3b83984bU, 0x2298a90aU, 0x09b5fac9U, 0x10aecb88U, 0x5fef5d4fU, 0x46f46c0eU, 0x6dd93fcdU, 0x74c20e8cU
+ /* 136 */ , 0xf35a1243U, 0xea412302U, 0xc16c70c1U, 0xd8774180U, 0x9736d747U, 0x8e2de606U, 0xa500b5c5U, 0xbc1b8484U
+ /* 144 */ , 0x71418a1aU, 0x685abb5bU, 0x4377e898U, 0x5a6cd9d9U, 0x152d4f1eU, 0x0c367e5fU, 0x271b2d9cU, 0x3e001cddU
+ /* 152 */ , 0xb9980012U, 0xa0833153U, 0x8bae6290U, 0x92b553d1U, 0xddf4c516U, 0xc4eff457U, 0xefc2a794U, 0xf6d996d5U
+ /* 160 */ , 0xae07bce9U, 0xb71c8da8U, 0x9c31de6bU, 0x852aef2aU, 0xca6b79edU, 0xd37048acU, 0xf85d1b6fU, 0xe1462a2eU
+ /* 168 */ , 0x66de36e1U, 0x7fc507a0U, 0x54e85463U, 0x4df36522U, 0x02b2f3e5U, 0x1ba9c2a4U, 0x30849167U, 0x299fa026U
+ /* 176 */ , 0xe4c5aeb8U, 0xfdde9ff9U, 0xd6f3cc3aU, 0xcfe8fd7bU, 0x80a96bbcU, 0x99b25afdU, 0xb29f093eU, 0xab84387fU
+ /* 184 */ , 0x2c1c24b0U, 0x350715f1U, 0x1e2a4632U, 0x07317773U, 0x4870e1b4U, 0x516bd0f5U, 0x7a468336U, 0x635db277U
+ /* 192 */ , 0xcbfad74eU, 0xd2e1e60fU, 0xf9ccb5ccU, 0xe0d7848dU, 0xaf96124aU, 0xb68d230bU, 0x9da070c8U, 0x84bb4189U
+ /* 200 */ , 0x03235d46U, 0x1a386c07U, 0x31153fc4U, 0x280e0e85U, 0x674f9842U, 0x7e54a903U, 0x5579fac0U, 0x4c62cb81U
+ /* 208 */ , 0x8138c51fU, 0x9823f45eU, 0xb30ea79dU, 0xaa1596dcU, 0xe554001bU, 0xfc4f315aU, 0xd7626299U, 0xce7953d8U
+ /* 216 */ , 0x49e14f17U, 0x50fa7e56U, 0x7bd72d95U, 0x62cc1cd4U, 0x2d8d8a13U, 0x3496bb52U, 0x1fbbe891U, 0x06a0d9d0U
+ /* 224 */ , 0x5e7ef3ecU, 0x4765c2adU, 0x6c48916eU, 0x7553a02fU, 0x3a1236e8U, 0x230907a9U, 0x0824546aU, 0x113f652bU
+ /* 232 */ , 0x96a779e4U, 0x8fbc48a5U, 0xa4911b66U, 0xbd8a2a27U, 0xf2cbbce0U, 0xebd08da1U, 0xc0fdde62U, 0xd9e6ef23U
+ /* 240 */ , 0x14bce1bdU, 0x0da7d0fcU, 0x268a833fU, 0x3f91b27eU, 0x70d024b9U, 0x69cb15f8U, 0x42e6463bU, 0x5bfd777aU
+ /* 248 */ , 0xdc656bb5U, 0xc57e5af4U, 0xee530937U, 0xf7483876U, 0xb809aeb1U, 0xa1129ff0U, 0x8a3fcc33U, 0x9324fd72U
+ }
+ ,
+ /* CRC32 table 2 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x01c26a37U, 0x0384d46eU, 0x0246be59U, 0x0709a8dcU, 0x06cbc2ebU, 0x048d7cb2U, 0x054f1685U
+ /* 8 */ , 0x0e1351b8U, 0x0fd13b8fU, 0x0d9785d6U, 0x0c55efe1U, 0x091af964U, 0x08d89353U, 0x0a9e2d0aU, 0x0b5c473dU
+ /* 16 */ , 0x1c26a370U, 0x1de4c947U, 0x1fa2771eU, 0x1e601d29U, 0x1b2f0bacU, 0x1aed619bU, 0x18abdfc2U, 0x1969b5f5U
+ /* 24 */ , 0x1235f2c8U, 0x13f798ffU, 0x11b126a6U, 0x10734c91U, 0x153c5a14U, 0x14fe3023U, 0x16b88e7aU, 0x177ae44dU
+ /* 32 */ , 0x384d46e0U, 0x398f2cd7U, 0x3bc9928eU, 0x3a0bf8b9U, 0x3f44ee3cU, 0x3e86840bU, 0x3cc03a52U, 0x3d025065U
+ /* 40 */ , 0x365e1758U, 0x379c7d6fU, 0x35dac336U, 0x3418a901U, 0x3157bf84U, 0x3095d5b3U, 0x32d36beaU, 0x331101ddU
+ /* 48 */ , 0x246be590U, 0x25a98fa7U, 0x27ef31feU, 0x262d5bc9U, 0x23624d4cU, 0x22a0277bU, 0x20e69922U, 0x2124f315U
+ /* 56 */ , 0x2a78b428U, 0x2bbade1fU, 0x29fc6046U, 0x283e0a71U, 0x2d711cf4U, 0x2cb376c3U, 0x2ef5c89aU, 0x2f37a2adU
+ /* 64 */ , 0x709a8dc0U, 0x7158e7f7U, 0x731e59aeU, 0x72dc3399U, 0x7793251cU, 0x76514f2bU, 0x7417f172U, 0x75d59b45U
+ /* 72 */ , 0x7e89dc78U, 0x7f4bb64fU, 0x7d0d0816U, 0x7ccf6221U, 0x798074a4U, 0x78421e93U, 0x7a04a0caU, 0x7bc6cafdU
+ /* 80 */ , 0x6cbc2eb0U, 0x6d7e4487U, 0x6f38fadeU, 0x6efa90e9U, 0x6bb5866cU, 0x6a77ec5bU, 0x68315202U, 0x69f33835U
+ /* 88 */ , 0x62af7f08U, 0x636d153fU, 0x612bab66U, 0x60e9c151U, 0x65a6d7d4U, 0x6464bde3U, 0x662203baU, 0x67e0698dU
+ /* 96 */ , 0x48d7cb20U, 0x4915a117U, 0x4b531f4eU, 0x4a917579U, 0x4fde63fcU, 0x4e1c09cbU, 0x4c5ab792U, 0x4d98dda5U
+ /* 104 */ , 0x46c49a98U, 0x4706f0afU, 0x45404ef6U, 0x448224c1U, 0x41cd3244U, 0x400f5873U, 0x4249e62aU, 0x438b8c1dU
+ /* 112 */ , 0x54f16850U, 0x55330267U, 0x5775bc3eU, 0x56b7d609U, 0x53f8c08cU, 0x523aaabbU, 0x507c14e2U, 0x51be7ed5U
+ /* 120 */ , 0x5ae239e8U, 0x5b2053dfU, 0x5966ed86U, 0x58a487b1U, 0x5deb9134U, 0x5c29fb03U, 0x5e6f455aU, 0x5fad2f6dU
+ /* 128 */ , 0xe1351b80U, 0xe0f771b7U, 0xe2b1cfeeU, 0xe373a5d9U, 0xe63cb35cU, 0xe7fed96bU, 0xe5b86732U, 0xe47a0d05U
+ /* 136 */ , 0xef264a38U, 0xeee4200fU, 0xeca29e56U, 0xed60f461U, 0xe82fe2e4U, 0xe9ed88d3U, 0xebab368aU, 0xea695cbdU
+ /* 144 */ , 0xfd13b8f0U, 0xfcd1d2c7U, 0xfe976c9eU, 0xff5506a9U, 0xfa1a102cU, 0xfbd87a1bU, 0xf99ec442U, 0xf85cae75U
+ /* 152 */ , 0xf300e948U, 0xf2c2837fU, 0xf0843d26U, 0xf1465711U, 0xf4094194U, 0xf5cb2ba3U, 0xf78d95faU, 0xf64fffcdU
+ /* 160 */ , 0xd9785d60U, 0xd8ba3757U, 0xdafc890eU, 0xdb3ee339U, 0xde71f5bcU, 0xdfb39f8bU, 0xddf521d2U, 0xdc374be5U
+ /* 168 */ , 0xd76b0cd8U, 0xd6a966efU, 0xd4efd8b6U, 0xd52db281U, 0xd062a404U, 0xd1a0ce33U, 0xd3e6706aU, 0xd2241a5dU
+ /* 176 */ , 0xc55efe10U, 0xc49c9427U, 0xc6da2a7eU, 0xc7184049U, 0xc25756ccU, 0xc3953cfbU, 0xc1d382a2U, 0xc011e895U
+ /* 184 */ , 0xcb4dafa8U, 0xca8fc59fU, 0xc8c97bc6U, 0xc90b11f1U, 0xcc440774U, 0xcd866d43U, 0xcfc0d31aU, 0xce02b92dU
+ /* 192 */ , 0x91af9640U, 0x906dfc77U, 0x922b422eU, 0x93e92819U, 0x96a63e9cU, 0x976454abU, 0x9522eaf2U, 0x94e080c5U
+ /* 200 */ , 0x9fbcc7f8U, 0x9e7eadcfU, 0x9c381396U, 0x9dfa79a1U, 0x98b56f24U, 0x99770513U, 0x9b31bb4aU, 0x9af3d17dU
+ /* 208 */ , 0x8d893530U, 0x8c4b5f07U, 0x8e0de15eU, 0x8fcf8b69U, 0x8a809decU, 0x8b42f7dbU, 0x89044982U, 0x88c623b5U
+ /* 216 */ , 0x839a6488U, 0x82580ebfU, 0x801eb0e6U, 0x81dcdad1U, 0x8493cc54U, 0x8551a663U, 0x8717183aU, 0x86d5720dU
+ /* 224 */ , 0xa9e2d0a0U, 0xa820ba97U, 0xaa6604ceU, 0xaba46ef9U, 0xaeeb787cU, 0xaf29124bU, 0xad6fac12U, 0xacadc625U
+ /* 232 */ , 0xa7f18118U, 0xa633eb2fU, 0xa4755576U, 0xa5b73f41U, 0xa0f829c4U, 0xa13a43f3U, 0xa37cfdaaU, 0xa2be979dU
+ /* 240 */ , 0xb5c473d0U, 0xb40619e7U, 0xb640a7beU, 0xb782cd89U, 0xb2cddb0cU, 0xb30fb13bU, 0xb1490f62U, 0xb08b6555U
+ /* 248 */ , 0xbbd72268U, 0xba15485fU, 0xb853f606U, 0xb9919c31U, 0xbcde8ab4U, 0xbd1ce083U, 0xbf5a5edaU, 0xbe9834edU
+ }
+ ,
+ /* CRC32 table 3 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0xb8bc6765U, 0xaa09c88bU, 0x12b5afeeU, 0x8f629757U, 0x37def032U, 0x256b5fdcU, 0x9dd738b9U
+ /* 8 */ , 0xc5b428efU, 0x7d084f8aU, 0x6fbde064U, 0xd7018701U, 0x4ad6bfb8U, 0xf26ad8ddU, 0xe0df7733U, 0x58631056U
+ /* 16 */ , 0x5019579fU, 0xe8a530faU, 0xfa109f14U, 0x42acf871U, 0xdf7bc0c8U, 0x67c7a7adU, 0x75720843U, 0xcdce6f26U
+ /* 24 */ , 0x95ad7f70U, 0x2d111815U, 0x3fa4b7fbU, 0x8718d09eU, 0x1acfe827U, 0xa2738f42U, 0xb0c620acU, 0x087a47c9U
+ /* 32 */ , 0xa032af3eU, 0x188ec85bU, 0x0a3b67b5U, 0xb28700d0U, 0x2f503869U, 0x97ec5f0cU, 0x8559f0e2U, 0x3de59787U
+ /* 40 */ , 0x658687d1U, 0xdd3ae0b4U, 0xcf8f4f5aU, 0x7733283fU, 0xeae41086U, 0x525877e3U, 0x40edd80dU, 0xf851bf68U
+ /* 48 */ , 0xf02bf8a1U, 0x48979fc4U, 0x5a22302aU, 0xe29e574fU, 0x7f496ff6U, 0xc7f50893U, 0xd540a77dU, 0x6dfcc018U
+ /* 56 */ , 0x359fd04eU, 0x8d23b72bU, 0x9f9618c5U, 0x272a7fa0U, 0xbafd4719U, 0x0241207cU, 0x10f48f92U, 0xa848e8f7U
+ /* 64 */ , 0x9b14583dU, 0x23a83f58U, 0x311d90b6U, 0x89a1f7d3U, 0x1476cf6aU, 0xaccaa80fU, 0xbe7f07e1U, 0x06c36084U
+ /* 72 */ , 0x5ea070d2U, 0xe61c17b7U, 0xf4a9b859U, 0x4c15df3cU, 0xd1c2e785U, 0x697e80e0U, 0x7bcb2f0eU, 0xc377486bU
+ /* 80 */ , 0xcb0d0fa2U, 0x73b168c7U, 0x6104c729U, 0xd9b8a04cU, 0x446f98f5U, 0xfcd3ff90U, 0xee66507eU, 0x56da371bU
+ /* 88 */ , 0x0eb9274dU, 0xb6054028U, 0xa4b0efc6U, 0x1c0c88a3U, 0x81dbb01aU, 0x3967d77fU, 0x2bd27891U, 0x936e1ff4U
+ /* 96 */ , 0x3b26f703U, 0x839a9066U, 0x912f3f88U, 0x299358edU, 0xb4446054U, 0x0cf80731U, 0x1e4da8dfU, 0xa6f1cfbaU
+ /* 104 */ , 0xfe92dfecU, 0x462eb889U, 0x549b1767U, 0xec277002U, 0x71f048bbU, 0xc94c2fdeU, 0xdbf98030U, 0x6345e755U
+ /* 112 */ , 0x6b3fa09cU, 0xd383c7f9U, 0xc1366817U, 0x798a0f72U, 0xe45d37cbU, 0x5ce150aeU, 0x4e54ff40U, 0xf6e89825U
+ /* 120 */ , 0xae8b8873U, 0x1637ef16U, 0x048240f8U, 0xbc3e279dU, 0x21e91f24U, 0x99557841U, 0x8be0d7afU, 0x335cb0caU
+ /* 128 */ , 0xed59b63bU, 0x55e5d15eU, 0x47507eb0U, 0xffec19d5U, 0x623b216cU, 0xda874609U, 0xc832e9e7U, 0x708e8e82U
+ /* 136 */ , 0x28ed9ed4U, 0x9051f9b1U, 0x82e4565fU, 0x3a58313aU, 0xa78f0983U, 0x1f336ee6U, 0x0d86c108U, 0xb53aa66dU
+ /* 144 */ , 0xbd40e1a4U, 0x05fc86c1U, 0x1749292fU, 0xaff54e4aU, 0x322276f3U, 0x8a9e1196U, 0x982bbe78U, 0x2097d91dU
+ /* 152 */ , 0x78f4c94bU, 0xc048ae2eU, 0xd2fd01c0U, 0x6a4166a5U, 0xf7965e1cU, 0x4f2a3979U, 0x5d9f9697U, 0xe523f1f2U
+ /* 160 */ , 0x4d6b1905U, 0xf5d77e60U, 0xe762d18eU, 0x5fdeb6ebU, 0xc2098e52U, 0x7ab5e937U, 0x680046d9U, 0xd0bc21bcU
+ /* 168 */ , 0x88df31eaU, 0x3063568fU, 0x22d6f961U, 0x9a6a9e04U, 0x07bda6bdU, 0xbf01c1d8U, 0xadb46e36U, 0x15080953U
+ /* 176 */ , 0x1d724e9aU, 0xa5ce29ffU, 0xb77b8611U, 0x0fc7e174U, 0x9210d9cdU, 0x2aacbea8U, 0x38191146U, 0x80a57623U
+ /* 184 */ , 0xd8c66675U, 0x607a0110U, 0x72cfaefeU, 0xca73c99bU, 0x57a4f122U, 0xef189647U, 0xfdad39a9U, 0x45115eccU
+ /* 192 */ , 0x764dee06U, 0xcef18963U, 0xdc44268dU, 0x64f841e8U, 0xf92f7951U, 0x41931e34U, 0x5326b1daU, 0xeb9ad6bfU
+ /* 200 */ , 0xb3f9c6e9U, 0x0b45a18cU, 0x19f00e62U, 0xa14c6907U, 0x3c9b51beU, 0x842736dbU, 0x96929935U, 0x2e2efe50U
+ /* 208 */ , 0x2654b999U, 0x9ee8defcU, 0x8c5d7112U, 0x34e11677U, 0xa9362eceU, 0x118a49abU, 0x033fe645U, 0xbb838120U
+ /* 216 */ , 0xe3e09176U, 0x5b5cf613U, 0x49e959fdU, 0xf1553e98U, 0x6c820621U, 0xd43e6144U, 0xc68bceaaU, 0x7e37a9cfU
+ /* 224 */ , 0xd67f4138U, 0x6ec3265dU, 0x7c7689b3U, 0xc4caeed6U, 0x591dd66fU, 0xe1a1b10aU, 0xf3141ee4U, 0x4ba87981U
+ /* 232 */ , 0x13cb69d7U, 0xab770eb2U, 0xb9c2a15cU, 0x017ec639U, 0x9ca9fe80U, 0x241599e5U, 0x36a0360bU, 0x8e1c516eU
+ /* 240 */ , 0x866616a7U, 0x3eda71c2U, 0x2c6fde2cU, 0x94d3b949U, 0x090481f0U, 0xb1b8e695U, 0xa30d497bU, 0x1bb12e1eU
+ /* 248 */ , 0x43d23e48U, 0xfb6e592dU, 0xe9dbf6c3U, 0x516791a6U, 0xccb0a91fU, 0x740cce7aU, 0x66b96194U, 0xde0506f1U
+ }
+ ,
+ /* CRC32 table 4 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x96300777U, 0x2c610eeeU, 0xba510999U, 0x19c46d07U, 0x8ff46a70U, 0x35a563e9U, 0xa395649eU
+ /* 8 */ , 0x3288db0eU, 0xa4b8dc79U, 0x1ee9d5e0U, 0x88d9d297U, 0x2b4cb609U, 0xbd7cb17eU, 0x072db8e7U, 0x911dbf90U
+ /* 16 */ , 0x6410b71dU, 0xf220b06aU, 0x4871b9f3U, 0xde41be84U, 0x7dd4da1aU, 0xebe4dd6dU, 0x51b5d4f4U, 0xc785d383U
+ /* 24 */ , 0x56986c13U, 0xc0a86b64U, 0x7af962fdU, 0xecc9658aU, 0x4f5c0114U, 0xd96c0663U, 0x633d0ffaU, 0xf50d088dU
+ /* 32 */ , 0xc8206e3bU, 0x5e10694cU, 0xe44160d5U, 0x727167a2U, 0xd1e4033cU, 0x47d4044bU, 0xfd850dd2U, 0x6bb50aa5U
+ /* 40 */ , 0xfaa8b535U, 0x6c98b242U, 0xd6c9bbdbU, 0x40f9bcacU, 0xe36cd832U, 0x755cdf45U, 0xcf0dd6dcU, 0x593dd1abU
+ /* 48 */ , 0xac30d926U, 0x3a00de51U, 0x8051d7c8U, 0x1661d0bfU, 0xb5f4b421U, 0x23c4b356U, 0x9995bacfU, 0x0fa5bdb8U
+ /* 56 */ , 0x9eb80228U, 0x0888055fU, 0xb2d90cc6U, 0x24e90bb1U, 0x877c6f2fU, 0x114c6858U, 0xab1d61c1U, 0x3d2d66b6U
+ /* 64 */ , 0x9041dc76U, 0x0671db01U, 0xbc20d298U, 0x2a10d5efU, 0x8985b171U, 0x1fb5b606U, 0xa5e4bf9fU, 0x33d4b8e8U
+ /* 72 */ , 0xa2c90778U, 0x34f9000fU, 0x8ea80996U, 0x18980ee1U, 0xbb0d6a7fU, 0x2d3d6d08U, 0x976c6491U, 0x015c63e6U
+ /* 80 */ , 0xf4516b6bU, 0x62616c1cU, 0xd8306585U, 0x4e0062f2U, 0xed95066cU, 0x7ba5011bU, 0xc1f40882U, 0x57c40ff5U
+ /* 88 */ , 0xc6d9b065U, 0x50e9b712U, 0xeab8be8bU, 0x7c88b9fcU, 0xdf1ddd62U, 0x492dda15U, 0xf37cd38cU, 0x654cd4fbU
+ /* 96 */ , 0x5861b24dU, 0xce51b53aU, 0x7400bca3U, 0xe230bbd4U, 0x41a5df4aU, 0xd795d83dU, 0x6dc4d1a4U, 0xfbf4d6d3U
+ /* 104 */ , 0x6ae96943U, 0xfcd96e34U, 0x468867adU, 0xd0b860daU, 0x732d0444U, 0xe51d0333U, 0x5f4c0aaaU, 0xc97c0dddU
+ /* 112 */ , 0x3c710550U, 0xaa410227U, 0x10100bbeU, 0x86200cc9U, 0x25b56857U, 0xb3856f20U, 0x09d466b9U, 0x9fe461ceU
+ /* 120 */ , 0x0ef9de5eU, 0x98c9d929U, 0x2298d0b0U, 0xb4a8d7c7U, 0x173db359U, 0x810db42eU, 0x3b5cbdb7U, 0xad6cbac0U
+ /* 128 */ , 0x2083b8edU, 0xb6b3bf9aU, 0x0ce2b603U, 0x9ad2b174U, 0x3947d5eaU, 0xaf77d29dU, 0x1526db04U, 0x8316dc73U
+ /* 136 */ , 0x120b63e3U, 0x843b6494U, 0x3e6a6d0dU, 0xa85a6a7aU, 0x0bcf0ee4U, 0x9dff0993U, 0x27ae000aU, 0xb19e077dU
+ /* 144 */ , 0x44930ff0U, 0xd2a30887U, 0x68f2011eU, 0xfec20669U, 0x5d5762f7U, 0xcb676580U, 0x71366c19U, 0xe7066b6eU
+ /* 152 */ , 0x761bd4feU, 0xe02bd389U, 0x5a7ada10U, 0xcc4add67U, 0x6fdfb9f9U, 0xf9efbe8eU, 0x43beb717U, 0xd58eb060U
+ /* 160 */ , 0xe8a3d6d6U, 0x7e93d1a1U, 0xc4c2d838U, 0x52f2df4fU, 0xf167bbd1U, 0x6757bca6U, 0xdd06b53fU, 0x4b36b248U
+ /* 168 */ , 0xda2b0dd8U, 0x4c1b0aafU, 0xf64a0336U, 0x607a0441U, 0xc3ef60dfU, 0x55df67a8U, 0xef8e6e31U, 0x79be6946U
+ /* 176 */ , 0x8cb361cbU, 0x1a8366bcU, 0xa0d26f25U, 0x36e26852U, 0x95770cccU, 0x03470bbbU, 0xb9160222U, 0x2f260555U
+ /* 184 */ , 0xbe3bbac5U, 0x280bbdb2U, 0x925ab42bU, 0x046ab35cU, 0xa7ffd7c2U, 0x31cfd0b5U, 0x8b9ed92cU, 0x1daede5bU
+ /* 192 */ , 0xb0c2649bU, 0x26f263ecU, 0x9ca36a75U, 0x0a936d02U, 0xa906099cU, 0x3f360eebU, 0x85670772U, 0x13570005U
+ /* 200 */ , 0x824abf95U, 0x147ab8e2U, 0xae2bb17bU, 0x381bb60cU, 0x9b8ed292U, 0x0dbed5e5U, 0xb7efdc7cU, 0x21dfdb0bU
+ /* 208 */ , 0xd4d2d386U, 0x42e2d4f1U, 0xf8b3dd68U, 0x6e83da1fU, 0xcd16be81U, 0x5b26b9f6U, 0xe177b06fU, 0x7747b718U
+ /* 216 */ , 0xe65a0888U, 0x706a0fffU, 0xca3b0666U, 0x5c0b0111U, 0xff9e658fU, 0x69ae62f8U, 0xd3ff6b61U, 0x45cf6c16U
+ /* 224 */ , 0x78e20aa0U, 0xeed20dd7U, 0x5483044eU, 0xc2b30339U, 0x612667a7U, 0xf71660d0U, 0x4d476949U, 0xdb776e3eU
+ /* 232 */ , 0x4a6ad1aeU, 0xdc5ad6d9U, 0x660bdf40U, 0xf03bd837U, 0x53aebca9U, 0xc59ebbdeU, 0x7fcfb247U, 0xe9ffb530U
+ /* 240 */ , 0x1cf2bdbdU, 0x8ac2bacaU, 0x3093b353U, 0xa6a3b424U, 0x0536d0baU, 0x9306d7cdU, 0x2957de54U, 0xbf67d923U
+ /* 248 */ , 0x2e7a66b3U, 0xb84a61c4U, 0x021b685dU, 0x942b6f2aU, 0x37be0bb4U, 0xa18e0cc3U, 0x1bdf055aU, 0x8def022dU
+ }
+ ,
+ /* CRC32 table 5 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x41311b19U, 0x82623632U, 0xc3532d2bU, 0x04c56c64U, 0x45f4777dU, 0x86a75a56U, 0xc796414fU
+ /* 8 */ , 0x088ad9c8U, 0x49bbc2d1U, 0x8ae8effaU, 0xcbd9f4e3U, 0x0c4fb5acU, 0x4d7eaeb5U, 0x8e2d839eU, 0xcf1c9887U
+ /* 16 */ , 0x5112c24aU, 0x1023d953U, 0xd370f478U, 0x9241ef61U, 0x55d7ae2eU, 0x14e6b537U, 0xd7b5981cU, 0x96848305U
+ /* 24 */ , 0x59981b82U, 0x18a9009bU, 0xdbfa2db0U, 0x9acb36a9U, 0x5d5d77e6U, 0x1c6c6cffU, 0xdf3f41d4U, 0x9e0e5acdU
+ /* 32 */ , 0xa2248495U, 0xe3159f8cU, 0x2046b2a7U, 0x6177a9beU, 0xa6e1e8f1U, 0xe7d0f3e8U, 0x2483dec3U, 0x65b2c5daU
+ /* 40 */ , 0xaaae5d5dU, 0xeb9f4644U, 0x28cc6b6fU, 0x69fd7076U, 0xae6b3139U, 0xef5a2a20U, 0x2c09070bU, 0x6d381c12U
+ /* 48 */ , 0xf33646dfU, 0xb2075dc6U, 0x715470edU, 0x30656bf4U, 0xf7f32abbU, 0xb6c231a2U, 0x75911c89U, 0x34a00790U
+ /* 56 */ , 0xfbbc9f17U, 0xba8d840eU, 0x79dea925U, 0x38efb23cU, 0xff79f373U, 0xbe48e86aU, 0x7d1bc541U, 0x3c2ade58U
+ /* 64 */ , 0x054f79f0U, 0x447e62e9U, 0x872d4fc2U, 0xc61c54dbU, 0x018a1594U, 0x40bb0e8dU, 0x83e823a6U, 0xc2d938bfU
+ /* 72 */ , 0x0dc5a038U, 0x4cf4bb21U, 0x8fa7960aU, 0xce968d13U, 0x0900cc5cU, 0x4831d745U, 0x8b62fa6eU, 0xca53e177U
+ /* 80 */ , 0x545dbbbaU, 0x156ca0a3U, 0xd63f8d88U, 0x970e9691U, 0x5098d7deU, 0x11a9ccc7U, 0xd2fae1ecU, 0x93cbfaf5U
+ /* 88 */ , 0x5cd76272U, 0x1de6796bU, 0xdeb55440U, 0x9f844f59U, 0x58120e16U, 0x1923150fU, 0xda703824U, 0x9b41233dU
+ /* 96 */ , 0xa76bfd65U, 0xe65ae67cU, 0x2509cb57U, 0x6438d04eU, 0xa3ae9101U, 0xe29f8a18U, 0x21cca733U, 0x60fdbc2aU
+ /* 104 */ , 0xafe124adU, 0xeed03fb4U, 0x2d83129fU, 0x6cb20986U, 0xab2448c9U, 0xea1553d0U, 0x29467efbU, 0x687765e2U
+ /* 112 */ , 0xf6793f2fU, 0xb7482436U, 0x741b091dU, 0x352a1204U, 0xf2bc534bU, 0xb38d4852U, 0x70de6579U, 0x31ef7e60U
+ /* 120 */ , 0xfef3e6e7U, 0xbfc2fdfeU, 0x7c91d0d5U, 0x3da0cbccU, 0xfa368a83U, 0xbb07919aU, 0x7854bcb1U, 0x3965a7a8U
+ /* 128 */ , 0x4b98833bU, 0x0aa99822U, 0xc9fab509U, 0x88cbae10U, 0x4f5def5fU, 0x0e6cf446U, 0xcd3fd96dU, 0x8c0ec274U
+ /* 136 */ , 0x43125af3U, 0x022341eaU, 0xc1706cc1U, 0x804177d8U, 0x47d73697U, 0x06e62d8eU, 0xc5b500a5U, 0x84841bbcU
+ /* 144 */ , 0x1a8a4171U, 0x5bbb5a68U, 0x98e87743U, 0xd9d96c5aU, 0x1e4f2d15U, 0x5f7e360cU, 0x9c2d1b27U, 0xdd1c003eU
+ /* 152 */ , 0x120098b9U, 0x533183a0U, 0x9062ae8bU, 0xd153b592U, 0x16c5f4ddU, 0x57f4efc4U, 0x94a7c2efU, 0xd596d9f6U
+ /* 160 */ , 0xe9bc07aeU, 0xa88d1cb7U, 0x6bde319cU, 0x2aef2a85U, 0xed796bcaU, 0xac4870d3U, 0x6f1b5df8U, 0x2e2a46e1U
+ /* 168 */ , 0xe136de66U, 0xa007c57fU, 0x6354e854U, 0x2265f34dU, 0xe5f3b202U, 0xa4c2a91bU, 0x67918430U, 0x26a09f29U
+ /* 176 */ , 0xb8aec5e4U, 0xf99fdefdU, 0x3accf3d6U, 0x7bfde8cfU, 0xbc6ba980U, 0xfd5ab299U, 0x3e099fb2U, 0x7f3884abU
+ /* 184 */ , 0xb0241c2cU, 0xf1150735U, 0x32462a1eU, 0x73773107U, 0xb4e17048U, 0xf5d06b51U, 0x3683467aU, 0x77b25d63U
+ /* 192 */ , 0x4ed7facbU, 0x0fe6e1d2U, 0xccb5ccf9U, 0x8d84d7e0U, 0x4a1296afU, 0x0b238db6U, 0xc870a09dU, 0x8941bb84U
+ /* 200 */ , 0x465d2303U, 0x076c381aU, 0xc43f1531U, 0x850e0e28U, 0x42984f67U, 0x03a9547eU, 0xc0fa7955U, 0x81cb624cU
+ /* 208 */ , 0x1fc53881U, 0x5ef42398U, 0x9da70eb3U, 0xdc9615aaU, 0x1b0054e5U, 0x5a314ffcU, 0x996262d7U, 0xd85379ceU
+ /* 216 */ , 0x174fe149U, 0x567efa50U, 0x952dd77bU, 0xd41ccc62U, 0x138a8d2dU, 0x52bb9634U, 0x91e8bb1fU, 0xd0d9a006U
+ /* 224 */ , 0xecf37e5eU, 0xadc26547U, 0x6e91486cU, 0x2fa05375U, 0xe836123aU, 0xa9070923U, 0x6a542408U, 0x2b653f11U
+ /* 232 */ , 0xe479a796U, 0xa548bc8fU, 0x661b91a4U, 0x272a8abdU, 0xe0bccbf2U, 0xa18dd0ebU, 0x62defdc0U, 0x23efe6d9U
+ /* 240 */ , 0xbde1bc14U, 0xfcd0a70dU, 0x3f838a26U, 0x7eb2913fU, 0xb924d070U, 0xf815cb69U, 0x3b46e642U, 0x7a77fd5bU
+ /* 248 */ , 0xb56b65dcU, 0xf45a7ec5U, 0x370953eeU, 0x763848f7U, 0xb1ae09b8U, 0xf09f12a1U, 0x33cc3f8aU, 0x72fd2493U
+ }
+ ,
+ /* CRC32 table 6 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x376ac201U, 0x6ed48403U, 0x59be4602U, 0xdca80907U, 0xebc2cb06U, 0xb27c8d04U, 0x85164f05U
+ /* 8 */ , 0xb851130eU, 0x8f3bd10fU, 0xd685970dU, 0xe1ef550cU, 0x64f91a09U, 0x5393d808U, 0x0a2d9e0aU, 0x3d475c0bU
+ /* 16 */ , 0x70a3261cU, 0x47c9e41dU, 0x1e77a21fU, 0x291d601eU, 0xac0b2f1bU, 0x9b61ed1aU, 0xc2dfab18U, 0xf5b56919U
+ /* 24 */ , 0xc8f23512U, 0xff98f713U, 0xa626b111U, 0x914c7310U, 0x145a3c15U, 0x2330fe14U, 0x7a8eb816U, 0x4de47a17U
+ /* 32 */ , 0xe0464d38U, 0xd72c8f39U, 0x8e92c93bU, 0xb9f80b3aU, 0x3cee443fU, 0x0b84863eU, 0x523ac03cU, 0x6550023dU
+ /* 40 */ , 0x58175e36U, 0x6f7d9c37U, 0x36c3da35U, 0x01a91834U, 0x84bf5731U, 0xb3d59530U, 0xea6bd332U, 0xdd011133U
+ /* 48 */ , 0x90e56b24U, 0xa78fa925U, 0xfe31ef27U, 0xc95b2d26U, 0x4c4d6223U, 0x7b27a022U, 0x2299e620U, 0x15f32421U
+ /* 56 */ , 0x28b4782aU, 0x1fdeba2bU, 0x4660fc29U, 0x710a3e28U, 0xf41c712dU, 0xc376b32cU, 0x9ac8f52eU, 0xada2372fU
+ /* 64 */ , 0xc08d9a70U, 0xf7e75871U, 0xae591e73U, 0x9933dc72U, 0x1c259377U, 0x2b4f5176U, 0x72f11774U, 0x459bd575U
+ /* 72 */ , 0x78dc897eU, 0x4fb64b7fU, 0x16080d7dU, 0x2162cf7cU, 0xa4748079U, 0x931e4278U, 0xcaa0047aU, 0xfdcac67bU
+ /* 80 */ , 0xb02ebc6cU, 0x87447e6dU, 0xdefa386fU, 0xe990fa6eU, 0x6c86b56bU, 0x5bec776aU, 0x02523168U, 0x3538f369U
+ /* 88 */ , 0x087faf62U, 0x3f156d63U, 0x66ab2b61U, 0x51c1e960U, 0xd4d7a665U, 0xe3bd6464U, 0xba032266U, 0x8d69e067U
+ /* 96 */ , 0x20cbd748U, 0x17a11549U, 0x4e1f534bU, 0x7975914aU, 0xfc63de4fU, 0xcb091c4eU, 0x92b75a4cU, 0xa5dd984dU
+ /* 104 */ , 0x989ac446U, 0xaff00647U, 0xf64e4045U, 0xc1248244U, 0x4432cd41U, 0x73580f40U, 0x2ae64942U, 0x1d8c8b43U
+ /* 112 */ , 0x5068f154U, 0x67023355U, 0x3ebc7557U, 0x09d6b756U, 0x8cc0f853U, 0xbbaa3a52U, 0xe2147c50U, 0xd57ebe51U
+ /* 120 */ , 0xe839e25aU, 0xdf53205bU, 0x86ed6659U, 0xb187a458U, 0x3491eb5dU, 0x03fb295cU, 0x5a456f5eU, 0x6d2fad5fU
+ /* 128 */ , 0x801b35e1U, 0xb771f7e0U, 0xeecfb1e2U, 0xd9a573e3U, 0x5cb33ce6U, 0x6bd9fee7U, 0x3267b8e5U, 0x050d7ae4U
+ /* 136 */ , 0x384a26efU, 0x0f20e4eeU, 0x569ea2ecU, 0x61f460edU, 0xe4e22fe8U, 0xd388ede9U, 0x8a36abebU, 0xbd5c69eaU
+ /* 144 */ , 0xf0b813fdU, 0xc7d2d1fcU, 0x9e6c97feU, 0xa90655ffU, 0x2c101afaU, 0x1b7ad8fbU, 0x42c49ef9U, 0x75ae5cf8U
+ /* 152 */ , 0x48e900f3U, 0x7f83c2f2U, 0x263d84f0U, 0x115746f1U, 0x944109f4U, 0xa32bcbf5U, 0xfa958df7U, 0xcdff4ff6U
+ /* 160 */ , 0x605d78d9U, 0x5737bad8U, 0x0e89fcdaU, 0x39e33edbU, 0xbcf571deU, 0x8b9fb3dfU, 0xd221f5ddU, 0xe54b37dcU
+ /* 168 */ , 0xd80c6bd7U, 0xef66a9d6U, 0xb6d8efd4U, 0x81b22dd5U, 0x04a462d0U, 0x33cea0d1U, 0x6a70e6d3U, 0x5d1a24d2U
+ /* 176 */ , 0x10fe5ec5U, 0x27949cc4U, 0x7e2adac6U, 0x494018c7U, 0xcc5657c2U, 0xfb3c95c3U, 0xa282d3c1U, 0x95e811c0U
+ /* 184 */ , 0xa8af4dcbU, 0x9fc58fcaU, 0xc67bc9c8U, 0xf1110bc9U, 0x740744ccU, 0x436d86cdU, 0x1ad3c0cfU, 0x2db902ceU
+ /* 192 */ , 0x4096af91U, 0x77fc6d90U, 0x2e422b92U, 0x1928e993U, 0x9c3ea696U, 0xab546497U, 0xf2ea2295U, 0xc580e094U
+ /* 200 */ , 0xf8c7bc9fU, 0xcfad7e9eU, 0x9613389cU, 0xa179fa9dU, 0x246fb598U, 0x13057799U, 0x4abb319bU, 0x7dd1f39aU
+ /* 208 */ , 0x3035898dU, 0x075f4b8cU, 0x5ee10d8eU, 0x698bcf8fU, 0xec9d808aU, 0xdbf7428bU, 0x82490489U, 0xb523c688U
+ /* 216 */ , 0x88649a83U, 0xbf0e5882U, 0xe6b01e80U, 0xd1dadc81U, 0x54cc9384U, 0x63a65185U, 0x3a181787U, 0x0d72d586U
+ /* 224 */ , 0xa0d0e2a9U, 0x97ba20a8U, 0xce0466aaU, 0xf96ea4abU, 0x7c78ebaeU, 0x4b1229afU, 0x12ac6fadU, 0x25c6adacU
+ /* 232 */ , 0x1881f1a7U, 0x2feb33a6U, 0x765575a4U, 0x413fb7a5U, 0xc429f8a0U, 0xf3433aa1U, 0xaafd7ca3U, 0x9d97bea2U
+ /* 240 */ , 0xd073c4b5U, 0xe71906b4U, 0xbea740b6U, 0x89cd82b7U, 0x0cdbcdb2U, 0x3bb10fb3U, 0x620f49b1U, 0x55658bb0U
+ /* 248 */ , 0x6822d7bbU, 0x5f4815baU, 0x06f653b8U, 0x319c91b9U, 0xb48adebcU, 0x83e01cbdU, 0xda5e5abfU, 0xed3498beU
+ }
+ ,
+ /* CRC32 table 7 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x6567bcb8U, 0x8bc809aaU, 0xeeafb512U, 0x5797628fU, 0x32f0de37U, 0xdc5f6b25U, 0xb938d79dU
+ /* 8 */ , 0xef28b4c5U, 0x8a4f087dU, 0x64e0bd6fU, 0x018701d7U, 0xb8bfd64aU, 0xddd86af2U, 0x3377dfe0U, 0x56106358U
+ /* 16 */ , 0x9f571950U, 0xfa30a5e8U, 0x149f10faU, 0x71f8ac42U, 0xc8c07bdfU, 0xada7c767U, 0x43087275U, 0x266fcecdU
+ /* 24 */ , 0x707fad95U, 0x1518112dU, 0xfbb7a43fU, 0x9ed01887U, 0x27e8cf1aU, 0x428f73a2U, 0xac20c6b0U, 0xc9477a08U
+ /* 32 */ , 0x3eaf32a0U, 0x5bc88e18U, 0xb5673b0aU, 0xd00087b2U, 0x6938502fU, 0x0c5fec97U, 0xe2f05985U, 0x8797e53dU
+ /* 40 */ , 0xd1878665U, 0xb4e03addU, 0x5a4f8fcfU, 0x3f283377U, 0x8610e4eaU, 0xe3775852U, 0x0dd8ed40U, 0x68bf51f8U
+ /* 48 */ , 0xa1f82bf0U, 0xc49f9748U, 0x2a30225aU, 0x4f579ee2U, 0xf66f497fU, 0x9308f5c7U, 0x7da740d5U, 0x18c0fc6dU
+ /* 56 */ , 0x4ed09f35U, 0x2bb7238dU, 0xc518969fU, 0xa07f2a27U, 0x1947fdbaU, 0x7c204102U, 0x928ff410U, 0xf7e848a8U
+ /* 64 */ , 0x3d58149bU, 0x583fa823U, 0xb6901d31U, 0xd3f7a189U, 0x6acf7614U, 0x0fa8caacU, 0xe1077fbeU, 0x8460c306U
+ /* 72 */ , 0xd270a05eU, 0xb7171ce6U, 0x59b8a9f4U, 0x3cdf154cU, 0x85e7c2d1U, 0xe0807e69U, 0x0e2fcb7bU, 0x6b4877c3U
+ /* 80 */ , 0xa20f0dcbU, 0xc768b173U, 0x29c70461U, 0x4ca0b8d9U, 0xf5986f44U, 0x90ffd3fcU, 0x7e5066eeU, 0x1b37da56U
+ /* 88 */ , 0x4d27b90eU, 0x284005b6U, 0xc6efb0a4U, 0xa3880c1cU, 0x1ab0db81U, 0x7fd76739U, 0x9178d22bU, 0xf41f6e93U
+ /* 96 */ , 0x03f7263bU, 0x66909a83U, 0x883f2f91U, 0xed589329U, 0x546044b4U, 0x3107f80cU, 0xdfa84d1eU, 0xbacff1a6U
+ /* 104 */ , 0xecdf92feU, 0x89b82e46U, 0x67179b54U, 0x027027ecU, 0xbb48f071U, 0xde2f4cc9U, 0x3080f9dbU, 0x55e74563U
+ /* 112 */ , 0x9ca03f6bU, 0xf9c783d3U, 0x176836c1U, 0x720f8a79U, 0xcb375de4U, 0xae50e15cU, 0x40ff544eU, 0x2598e8f6U
+ /* 120 */ , 0x73888baeU, 0x16ef3716U, 0xf8408204U, 0x9d273ebcU, 0x241fe921U, 0x41785599U, 0xafd7e08bU, 0xcab05c33U
+ /* 128 */ , 0x3bb659edU, 0x5ed1e555U, 0xb07e5047U, 0xd519ecffU, 0x6c213b62U, 0x094687daU, 0xe7e932c8U, 0x828e8e70U
+ /* 136 */ , 0xd49eed28U, 0xb1f95190U, 0x5f56e482U, 0x3a31583aU, 0x83098fa7U, 0xe66e331fU, 0x08c1860dU, 0x6da63ab5U
+ /* 144 */ , 0xa4e140bdU, 0xc186fc05U, 0x2f294917U, 0x4a4ef5afU, 0xf3762232U, 0x96119e8aU, 0x78be2b98U, 0x1dd99720U
+ /* 152 */ , 0x4bc9f478U, 0x2eae48c0U, 0xc001fdd2U, 0xa566416aU, 0x1c5e96f7U, 0x79392a4fU, 0x97969f5dU, 0xf2f123e5U
+ /* 160 */ , 0x05196b4dU, 0x607ed7f5U, 0x8ed162e7U, 0xebb6de5fU, 0x528e09c2U, 0x37e9b57aU, 0xd9460068U, 0xbc21bcd0U
+ /* 168 */ , 0xea31df88U, 0x8f566330U, 0x61f9d622U, 0x049e6a9aU, 0xbda6bd07U, 0xd8c101bfU, 0x366eb4adU, 0x53090815U
+ /* 176 */ , 0x9a4e721dU, 0xff29cea5U, 0x11867bb7U, 0x74e1c70fU, 0xcdd91092U, 0xa8beac2aU, 0x46111938U, 0x2376a580U
+ /* 184 */ , 0x7566c6d8U, 0x10017a60U, 0xfeaecf72U, 0x9bc973caU, 0x22f1a457U, 0x479618efU, 0xa939adfdU, 0xcc5e1145U
+ /* 192 */ , 0x06ee4d76U, 0x6389f1ceU, 0x8d2644dcU, 0xe841f864U, 0x51792ff9U, 0x341e9341U, 0xdab12653U, 0xbfd69aebU
+ /* 200 */ , 0xe9c6f9b3U, 0x8ca1450bU, 0x620ef019U, 0x07694ca1U, 0xbe519b3cU, 0xdb362784U, 0x35999296U, 0x50fe2e2eU
+ /* 208 */ , 0x99b95426U, 0xfcdee89eU, 0x12715d8cU, 0x7716e134U, 0xce2e36a9U, 0xab498a11U, 0x45e63f03U, 0x208183bbU
+ /* 216 */ , 0x7691e0e3U, 0x13f65c5bU, 0xfd59e949U, 0x983e55f1U, 0x2106826cU, 0x44613ed4U, 0xaace8bc6U, 0xcfa9377eU
+ /* 224 */ , 0x38417fd6U, 0x5d26c36eU, 0xb389767cU, 0xd6eecac4U, 0x6fd61d59U, 0x0ab1a1e1U, 0xe41e14f3U, 0x8179a84bU
+ /* 232 */ , 0xd769cb13U, 0xb20e77abU, 0x5ca1c2b9U, 0x39c67e01U, 0x80fea99cU, 0xe5991524U, 0x0b36a036U, 0x6e511c8eU
+ /* 240 */ , 0xa7166686U, 0xc271da3eU, 0x2cde6f2cU, 0x49b9d394U, 0xf0810409U, 0x95e6b8b1U, 0x7b490da3U, 0x1e2eb11bU
+ /* 248 */ , 0x483ed243U, 0x2d596efbU, 0xc3f6dbe9U, 0xa6916751U, 0x1fa9b0ccU, 0x7ace0c74U, 0x9461b966U, 0xf10605deU
+ }
+ #endif
+ };
+
+juint StubRoutines::zarch::_crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE] = {
+ /* polyBits = 4394350321 0x0000000105ec76f1L, shifted = 0x82f63b78 */
+ /* CRC32C table for single bytes, auto-generated. DO NOT MODIFY! */
+ /* CRC32C table 0 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0xf26b8303U, 0xe13b70f7U, 0x1350f3f4U, 0xc79a971fU, 0x35f1141cU, 0x26a1e7e8U, 0xd4ca64ebU
+ /* 8 */ , 0x8ad958cfU, 0x78b2dbccU, 0x6be22838U, 0x9989ab3bU, 0x4d43cfd0U, 0xbf284cd3U, 0xac78bf27U, 0x5e133c24U
+ /* 16 */ , 0x105ec76fU, 0xe235446cU, 0xf165b798U, 0x030e349bU, 0xd7c45070U, 0x25afd373U, 0x36ff2087U, 0xc494a384U
+ /* 24 */ , 0x9a879fa0U, 0x68ec1ca3U, 0x7bbcef57U, 0x89d76c54U, 0x5d1d08bfU, 0xaf768bbcU, 0xbc267848U, 0x4e4dfb4bU
+ /* 32 */ , 0x20bd8edeU, 0xd2d60dddU, 0xc186fe29U, 0x33ed7d2aU, 0xe72719c1U, 0x154c9ac2U, 0x061c6936U, 0xf477ea35U
+ /* 40 */ , 0xaa64d611U, 0x580f5512U, 0x4b5fa6e6U, 0xb93425e5U, 0x6dfe410eU, 0x9f95c20dU, 0x8cc531f9U, 0x7eaeb2faU
+ /* 48 */ , 0x30e349b1U, 0xc288cab2U, 0xd1d83946U, 0x23b3ba45U, 0xf779deaeU, 0x05125dadU, 0x1642ae59U, 0xe4292d5aU
+ /* 56 */ , 0xba3a117eU, 0x4851927dU, 0x5b016189U, 0xa96ae28aU, 0x7da08661U, 0x8fcb0562U, 0x9c9bf696U, 0x6ef07595U
+ /* 64 */ , 0x417b1dbcU, 0xb3109ebfU, 0xa0406d4bU, 0x522bee48U, 0x86e18aa3U, 0x748a09a0U, 0x67dafa54U, 0x95b17957U
+ /* 72 */ , 0xcba24573U, 0x39c9c670U, 0x2a993584U, 0xd8f2b687U, 0x0c38d26cU, 0xfe53516fU, 0xed03a29bU, 0x1f682198U
+ /* 80 */ , 0x5125dad3U, 0xa34e59d0U, 0xb01eaa24U, 0x42752927U, 0x96bf4dccU, 0x64d4cecfU, 0x77843d3bU, 0x85efbe38U
+ /* 88 */ , 0xdbfc821cU, 0x2997011fU, 0x3ac7f2ebU, 0xc8ac71e8U, 0x1c661503U, 0xee0d9600U, 0xfd5d65f4U, 0x0f36e6f7U
+ /* 96 */ , 0x61c69362U, 0x93ad1061U, 0x80fde395U, 0x72966096U, 0xa65c047dU, 0x5437877eU, 0x4767748aU, 0xb50cf789U
+ /* 104 */ , 0xeb1fcbadU, 0x197448aeU, 0x0a24bb5aU, 0xf84f3859U, 0x2c855cb2U, 0xdeeedfb1U, 0xcdbe2c45U, 0x3fd5af46U
+ /* 112 */ , 0x7198540dU, 0x83f3d70eU, 0x90a324faU, 0x62c8a7f9U, 0xb602c312U, 0x44694011U, 0x5739b3e5U, 0xa55230e6U
+ /* 120 */ , 0xfb410cc2U, 0x092a8fc1U, 0x1a7a7c35U, 0xe811ff36U, 0x3cdb9bddU, 0xceb018deU, 0xdde0eb2aU, 0x2f8b6829U
+ /* 128 */ , 0x82f63b78U, 0x709db87bU, 0x63cd4b8fU, 0x91a6c88cU, 0x456cac67U, 0xb7072f64U, 0xa457dc90U, 0x563c5f93U
+ /* 136 */ , 0x082f63b7U, 0xfa44e0b4U, 0xe9141340U, 0x1b7f9043U, 0xcfb5f4a8U, 0x3dde77abU, 0x2e8e845fU, 0xdce5075cU
+ /* 144 */ , 0x92a8fc17U, 0x60c37f14U, 0x73938ce0U, 0x81f80fe3U, 0x55326b08U, 0xa759e80bU, 0xb4091bffU, 0x466298fcU
+ /* 152 */ , 0x1871a4d8U, 0xea1a27dbU, 0xf94ad42fU, 0x0b21572cU, 0xdfeb33c7U, 0x2d80b0c4U, 0x3ed04330U, 0xccbbc033U
+ /* 160 */ , 0xa24bb5a6U, 0x502036a5U, 0x4370c551U, 0xb11b4652U, 0x65d122b9U, 0x97baa1baU, 0x84ea524eU, 0x7681d14dU
+ /* 168 */ , 0x2892ed69U, 0xdaf96e6aU, 0xc9a99d9eU, 0x3bc21e9dU, 0xef087a76U, 0x1d63f975U, 0x0e330a81U, 0xfc588982U
+ /* 176 */ , 0xb21572c9U, 0x407ef1caU, 0x532e023eU, 0xa145813dU, 0x758fe5d6U, 0x87e466d5U, 0x94b49521U, 0x66df1622U
+ /* 184 */ , 0x38cc2a06U, 0xcaa7a905U, 0xd9f75af1U, 0x2b9cd9f2U, 0xff56bd19U, 0x0d3d3e1aU, 0x1e6dcdeeU, 0xec064eedU
+ /* 192 */ , 0xc38d26c4U, 0x31e6a5c7U, 0x22b65633U, 0xd0ddd530U, 0x0417b1dbU, 0xf67c32d8U, 0xe52cc12cU, 0x1747422fU
+ /* 200 */ , 0x49547e0bU, 0xbb3ffd08U, 0xa86f0efcU, 0x5a048dffU, 0x8ecee914U, 0x7ca56a17U, 0x6ff599e3U, 0x9d9e1ae0U
+ /* 208 */ , 0xd3d3e1abU, 0x21b862a8U, 0x32e8915cU, 0xc083125fU, 0x144976b4U, 0xe622f5b7U, 0xf5720643U, 0x07198540U
+ /* 216 */ , 0x590ab964U, 0xab613a67U, 0xb831c993U, 0x4a5a4a90U, 0x9e902e7bU, 0x6cfbad78U, 0x7fab5e8cU, 0x8dc0dd8fU
+ /* 224 */ , 0xe330a81aU, 0x115b2b19U, 0x020bd8edU, 0xf0605beeU, 0x24aa3f05U, 0xd6c1bc06U, 0xc5914ff2U, 0x37faccf1U
+ /* 232 */ , 0x69e9f0d5U, 0x9b8273d6U, 0x88d28022U, 0x7ab90321U, 0xae7367caU, 0x5c18e4c9U, 0x4f48173dU, 0xbd23943eU
+ /* 240 */ , 0xf36e6f75U, 0x0105ec76U, 0x12551f82U, 0xe03e9c81U, 0x34f4f86aU, 0xc69f7b69U, 0xd5cf889dU, 0x27a40b9eU
+ /* 248 */ , 0x79b737baU, 0x8bdcb4b9U, 0x988c474dU, 0x6ae7c44eU, 0xbe2da0a5U, 0x4c4623a6U, 0x5f16d052U, 0xad7d5351U
+ }
+ #ifdef CRC32_BYFOUR
+ ,
+ /* CRC32C table 1 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x13a29877U, 0x274530eeU, 0x34e7a899U, 0x4e8a61dcU, 0x5d28f9abU, 0x69cf5132U, 0x7a6dc945U
+ /* 8 */ , 0x9d14c3b8U, 0x8eb65bcfU, 0xba51f356U, 0xa9f36b21U, 0xd39ea264U, 0xc03c3a13U, 0xf4db928aU, 0xe7790afdU
+ /* 16 */ , 0x3fc5f181U, 0x2c6769f6U, 0x1880c16fU, 0x0b225918U, 0x714f905dU, 0x62ed082aU, 0x560aa0b3U, 0x45a838c4U
+ /* 24 */ , 0xa2d13239U, 0xb173aa4eU, 0x859402d7U, 0x96369aa0U, 0xec5b53e5U, 0xfff9cb92U, 0xcb1e630bU, 0xd8bcfb7cU
+ /* 32 */ , 0x7f8be302U, 0x6c297b75U, 0x58ced3ecU, 0x4b6c4b9bU, 0x310182deU, 0x22a31aa9U, 0x1644b230U, 0x05e62a47U
+ /* 40 */ , 0xe29f20baU, 0xf13db8cdU, 0xc5da1054U, 0xd6788823U, 0xac154166U, 0xbfb7d911U, 0x8b507188U, 0x98f2e9ffU
+ /* 48 */ , 0x404e1283U, 0x53ec8af4U, 0x670b226dU, 0x74a9ba1aU, 0x0ec4735fU, 0x1d66eb28U, 0x298143b1U, 0x3a23dbc6U
+ /* 56 */ , 0xdd5ad13bU, 0xcef8494cU, 0xfa1fe1d5U, 0xe9bd79a2U, 0x93d0b0e7U, 0x80722890U, 0xb4958009U, 0xa737187eU
+ /* 64 */ , 0xff17c604U, 0xecb55e73U, 0xd852f6eaU, 0xcbf06e9dU, 0xb19da7d8U, 0xa23f3fafU, 0x96d89736U, 0x857a0f41U
+ /* 72 */ , 0x620305bcU, 0x71a19dcbU, 0x45463552U, 0x56e4ad25U, 0x2c896460U, 0x3f2bfc17U, 0x0bcc548eU, 0x186eccf9U
+ /* 80 */ , 0xc0d23785U, 0xd370aff2U, 0xe797076bU, 0xf4359f1cU, 0x8e585659U, 0x9dface2eU, 0xa91d66b7U, 0xbabffec0U
+ /* 88 */ , 0x5dc6f43dU, 0x4e646c4aU, 0x7a83c4d3U, 0x69215ca4U, 0x134c95e1U, 0x00ee0d96U, 0x3409a50fU, 0x27ab3d78U
+ /* 96 */ , 0x809c2506U, 0x933ebd71U, 0xa7d915e8U, 0xb47b8d9fU, 0xce1644daU, 0xddb4dcadU, 0xe9537434U, 0xfaf1ec43U
+ /* 104 */ , 0x1d88e6beU, 0x0e2a7ec9U, 0x3acdd650U, 0x296f4e27U, 0x53028762U, 0x40a01f15U, 0x7447b78cU, 0x67e52ffbU
+ /* 112 */ , 0xbf59d487U, 0xacfb4cf0U, 0x981ce469U, 0x8bbe7c1eU, 0xf1d3b55bU, 0xe2712d2cU, 0xd69685b5U, 0xc5341dc2U
+ /* 120 */ , 0x224d173fU, 0x31ef8f48U, 0x050827d1U, 0x16aabfa6U, 0x6cc776e3U, 0x7f65ee94U, 0x4b82460dU, 0x5820de7aU
+ /* 128 */ , 0xfbc3faf9U, 0xe861628eU, 0xdc86ca17U, 0xcf245260U, 0xb5499b25U, 0xa6eb0352U, 0x920cabcbU, 0x81ae33bcU
+ /* 136 */ , 0x66d73941U, 0x7575a136U, 0x419209afU, 0x523091d8U, 0x285d589dU, 0x3bffc0eaU, 0x0f186873U, 0x1cbaf004U
+ /* 144 */ , 0xc4060b78U, 0xd7a4930fU, 0xe3433b96U, 0xf0e1a3e1U, 0x8a8c6aa4U, 0x992ef2d3U, 0xadc95a4aU, 0xbe6bc23dU
+ /* 152 */ , 0x5912c8c0U, 0x4ab050b7U, 0x7e57f82eU, 0x6df56059U, 0x1798a91cU, 0x043a316bU, 0x30dd99f2U, 0x237f0185U
+ /* 160 */ , 0x844819fbU, 0x97ea818cU, 0xa30d2915U, 0xb0afb162U, 0xcac27827U, 0xd960e050U, 0xed8748c9U, 0xfe25d0beU
+ /* 168 */ , 0x195cda43U, 0x0afe4234U, 0x3e19eaadU, 0x2dbb72daU, 0x57d6bb9fU, 0x447423e8U, 0x70938b71U, 0x63311306U
+ /* 176 */ , 0xbb8de87aU, 0xa82f700dU, 0x9cc8d894U, 0x8f6a40e3U, 0xf50789a6U, 0xe6a511d1U, 0xd242b948U, 0xc1e0213fU
+ /* 184 */ , 0x26992bc2U, 0x353bb3b5U, 0x01dc1b2cU, 0x127e835bU, 0x68134a1eU, 0x7bb1d269U, 0x4f567af0U, 0x5cf4e287U
+ /* 192 */ , 0x04d43cfdU, 0x1776a48aU, 0x23910c13U, 0x30339464U, 0x4a5e5d21U, 0x59fcc556U, 0x6d1b6dcfU, 0x7eb9f5b8U
+ /* 200 */ , 0x99c0ff45U, 0x8a626732U, 0xbe85cfabU, 0xad2757dcU, 0xd74a9e99U, 0xc4e806eeU, 0xf00fae77U, 0xe3ad3600U
+ /* 208 */ , 0x3b11cd7cU, 0x28b3550bU, 0x1c54fd92U, 0x0ff665e5U, 0x759baca0U, 0x663934d7U, 0x52de9c4eU, 0x417c0439U
+ /* 216 */ , 0xa6050ec4U, 0xb5a796b3U, 0x81403e2aU, 0x92e2a65dU, 0xe88f6f18U, 0xfb2df76fU, 0xcfca5ff6U, 0xdc68c781U
+ /* 224 */ , 0x7b5fdfffU, 0x68fd4788U, 0x5c1aef11U, 0x4fb87766U, 0x35d5be23U, 0x26772654U, 0x12908ecdU, 0x013216baU
+ /* 232 */ , 0xe64b1c47U, 0xf5e98430U, 0xc10e2ca9U, 0xd2acb4deU, 0xa8c17d9bU, 0xbb63e5ecU, 0x8f844d75U, 0x9c26d502U
+ /* 240 */ , 0x449a2e7eU, 0x5738b609U, 0x63df1e90U, 0x707d86e7U, 0x0a104fa2U, 0x19b2d7d5U, 0x2d557f4cU, 0x3ef7e73bU
+ /* 248 */ , 0xd98eedc6U, 0xca2c75b1U, 0xfecbdd28U, 0xed69455fU, 0x97048c1aU, 0x84a6146dU, 0xb041bcf4U, 0xa3e32483U
+ }
+ ,
+ /* CRC32C table 2 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0xa541927eU, 0x4f6f520dU, 0xea2ec073U, 0x9edea41aU, 0x3b9f3664U, 0xd1b1f617U, 0x74f06469U
+ /* 8 */ , 0x38513ec5U, 0x9d10acbbU, 0x773e6cc8U, 0xd27ffeb6U, 0xa68f9adfU, 0x03ce08a1U, 0xe9e0c8d2U, 0x4ca15aacU
+ /* 16 */ , 0x70a27d8aU, 0xd5e3eff4U, 0x3fcd2f87U, 0x9a8cbdf9U, 0xee7cd990U, 0x4b3d4beeU, 0xa1138b9dU, 0x045219e3U
+ /* 24 */ , 0x48f3434fU, 0xedb2d131U, 0x079c1142U, 0xa2dd833cU, 0xd62de755U, 0x736c752bU, 0x9942b558U, 0x3c032726U
+ /* 32 */ , 0xe144fb14U, 0x4405696aU, 0xae2ba919U, 0x0b6a3b67U, 0x7f9a5f0eU, 0xdadbcd70U, 0x30f50d03U, 0x95b49f7dU
+ /* 40 */ , 0xd915c5d1U, 0x7c5457afU, 0x967a97dcU, 0x333b05a2U, 0x47cb61cbU, 0xe28af3b5U, 0x08a433c6U, 0xade5a1b8U
+ /* 48 */ , 0x91e6869eU, 0x34a714e0U, 0xde89d493U, 0x7bc846edU, 0x0f382284U, 0xaa79b0faU, 0x40577089U, 0xe516e2f7U
+ /* 56 */ , 0xa9b7b85bU, 0x0cf62a25U, 0xe6d8ea56U, 0x43997828U, 0x37691c41U, 0x92288e3fU, 0x78064e4cU, 0xdd47dc32U
+ /* 64 */ , 0xc76580d9U, 0x622412a7U, 0x880ad2d4U, 0x2d4b40aaU, 0x59bb24c3U, 0xfcfab6bdU, 0x16d476ceU, 0xb395e4b0U
+ /* 72 */ , 0xff34be1cU, 0x5a752c62U, 0xb05bec11U, 0x151a7e6fU, 0x61ea1a06U, 0xc4ab8878U, 0x2e85480bU, 0x8bc4da75U
+ /* 80 */ , 0xb7c7fd53U, 0x12866f2dU, 0xf8a8af5eU, 0x5de93d20U, 0x29195949U, 0x8c58cb37U, 0x66760b44U, 0xc337993aU
+ /* 88 */ , 0x8f96c396U, 0x2ad751e8U, 0xc0f9919bU, 0x65b803e5U, 0x1148678cU, 0xb409f5f2U, 0x5e273581U, 0xfb66a7ffU
+ /* 96 */ , 0x26217bcdU, 0x8360e9b3U, 0x694e29c0U, 0xcc0fbbbeU, 0xb8ffdfd7U, 0x1dbe4da9U, 0xf7908ddaU, 0x52d11fa4U
+ /* 104 */ , 0x1e704508U, 0xbb31d776U, 0x511f1705U, 0xf45e857bU, 0x80aee112U, 0x25ef736cU, 0xcfc1b31fU, 0x6a802161U
+ /* 112 */ , 0x56830647U, 0xf3c29439U, 0x19ec544aU, 0xbcadc634U, 0xc85da25dU, 0x6d1c3023U, 0x8732f050U, 0x2273622eU
+ /* 120 */ , 0x6ed23882U, 0xcb93aafcU, 0x21bd6a8fU, 0x84fcf8f1U, 0xf00c9c98U, 0x554d0ee6U, 0xbf63ce95U, 0x1a225cebU
+ /* 128 */ , 0x8b277743U, 0x2e66e53dU, 0xc448254eU, 0x6109b730U, 0x15f9d359U, 0xb0b84127U, 0x5a968154U, 0xffd7132aU
+ /* 136 */ , 0xb3764986U, 0x1637dbf8U, 0xfc191b8bU, 0x595889f5U, 0x2da8ed9cU, 0x88e97fe2U, 0x62c7bf91U, 0xc7862defU
+ /* 144 */ , 0xfb850ac9U, 0x5ec498b7U, 0xb4ea58c4U, 0x11abcabaU, 0x655baed3U, 0xc01a3cadU, 0x2a34fcdeU, 0x8f756ea0U
+ /* 152 */ , 0xc3d4340cU, 0x6695a672U, 0x8cbb6601U, 0x29faf47fU, 0x5d0a9016U, 0xf84b0268U, 0x1265c21bU, 0xb7245065U
+ /* 160 */ , 0x6a638c57U, 0xcf221e29U, 0x250cde5aU, 0x804d4c24U, 0xf4bd284dU, 0x51fcba33U, 0xbbd27a40U, 0x1e93e83eU
+ /* 168 */ , 0x5232b292U, 0xf77320ecU, 0x1d5de09fU, 0xb81c72e1U, 0xccec1688U, 0x69ad84f6U, 0x83834485U, 0x26c2d6fbU
+ /* 176 */ , 0x1ac1f1ddU, 0xbf8063a3U, 0x55aea3d0U, 0xf0ef31aeU, 0x841f55c7U, 0x215ec7b9U, 0xcb7007caU, 0x6e3195b4U
+ /* 184 */ , 0x2290cf18U, 0x87d15d66U, 0x6dff9d15U, 0xc8be0f6bU, 0xbc4e6b02U, 0x190ff97cU, 0xf321390fU, 0x5660ab71U
+ /* 192 */ , 0x4c42f79aU, 0xe90365e4U, 0x032da597U, 0xa66c37e9U, 0xd29c5380U, 0x77ddc1feU, 0x9df3018dU, 0x38b293f3U
+ /* 200 */ , 0x7413c95fU, 0xd1525b21U, 0x3b7c9b52U, 0x9e3d092cU, 0xeacd6d45U, 0x4f8cff3bU, 0xa5a23f48U, 0x00e3ad36U
+ /* 208 */ , 0x3ce08a10U, 0x99a1186eU, 0x738fd81dU, 0xd6ce4a63U, 0xa23e2e0aU, 0x077fbc74U, 0xed517c07U, 0x4810ee79U
+ /* 216 */ , 0x04b1b4d5U, 0xa1f026abU, 0x4bdee6d8U, 0xee9f74a6U, 0x9a6f10cfU, 0x3f2e82b1U, 0xd50042c2U, 0x7041d0bcU
+ /* 224 */ , 0xad060c8eU, 0x08479ef0U, 0xe2695e83U, 0x4728ccfdU, 0x33d8a894U, 0x96993aeaU, 0x7cb7fa99U, 0xd9f668e7U
+ /* 232 */ , 0x9557324bU, 0x3016a035U, 0xda386046U, 0x7f79f238U, 0x0b899651U, 0xaec8042fU, 0x44e6c45cU, 0xe1a75622U
+ /* 240 */ , 0xdda47104U, 0x78e5e37aU, 0x92cb2309U, 0x378ab177U, 0x437ad51eU, 0xe63b4760U, 0x0c158713U, 0xa954156dU
+ /* 248 */ , 0xe5f54fc1U, 0x40b4ddbfU, 0xaa9a1dccU, 0x0fdb8fb2U, 0x7b2bebdbU, 0xde6a79a5U, 0x3444b9d6U, 0x91052ba8U
+ }
+ ,
+ /* CRC32C table 3 for quad-bytes (little-endian), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0xdd45aab8U, 0xbf672381U, 0x62228939U, 0x7b2231f3U, 0xa6679b4bU, 0xc4451272U, 0x1900b8caU
+ /* 8 */ , 0xf64463e6U, 0x2b01c95eU, 0x49234067U, 0x9466eadfU, 0x8d665215U, 0x5023f8adU, 0x32017194U, 0xef44db2cU
+ /* 16 */ , 0xe964b13dU, 0x34211b85U, 0x560392bcU, 0x8b463804U, 0x924680ceU, 0x4f032a76U, 0x2d21a34fU, 0xf06409f7U
+ /* 24 */ , 0x1f20d2dbU, 0xc2657863U, 0xa047f15aU, 0x7d025be2U, 0x6402e328U, 0xb9474990U, 0xdb65c0a9U, 0x06206a11U
+ /* 32 */ , 0xd725148bU, 0x0a60be33U, 0x6842370aU, 0xb5079db2U, 0xac072578U, 0x71428fc0U, 0x136006f9U, 0xce25ac41U
+ /* 40 */ , 0x2161776dU, 0xfc24ddd5U, 0x9e0654ecU, 0x4343fe54U, 0x5a43469eU, 0x8706ec26U, 0xe524651fU, 0x3861cfa7U
+ /* 48 */ , 0x3e41a5b6U, 0xe3040f0eU, 0x81268637U, 0x5c632c8fU, 0x45639445U, 0x98263efdU, 0xfa04b7c4U, 0x27411d7cU
+ /* 56 */ , 0xc805c650U, 0x15406ce8U, 0x7762e5d1U, 0xaa274f69U, 0xb327f7a3U, 0x6e625d1bU, 0x0c40d422U, 0xd1057e9aU
+ /* 64 */ , 0xaba65fe7U, 0x76e3f55fU, 0x14c17c66U, 0xc984d6deU, 0xd0846e14U, 0x0dc1c4acU, 0x6fe34d95U, 0xb2a6e72dU
+ /* 72 */ , 0x5de23c01U, 0x80a796b9U, 0xe2851f80U, 0x3fc0b538U, 0x26c00df2U, 0xfb85a74aU, 0x99a72e73U, 0x44e284cbU
+ /* 80 */ , 0x42c2eedaU, 0x9f874462U, 0xfda5cd5bU, 0x20e067e3U, 0x39e0df29U, 0xe4a57591U, 0x8687fca8U, 0x5bc25610U
+ /* 88 */ , 0xb4868d3cU, 0x69c32784U, 0x0be1aebdU, 0xd6a40405U, 0xcfa4bccfU, 0x12e11677U, 0x70c39f4eU, 0xad8635f6U
+ /* 96 */ , 0x7c834b6cU, 0xa1c6e1d4U, 0xc3e468edU, 0x1ea1c255U, 0x07a17a9fU, 0xdae4d027U, 0xb8c6591eU, 0x6583f3a6U
+ /* 104 */ , 0x8ac7288aU, 0x57828232U, 0x35a00b0bU, 0xe8e5a1b3U, 0xf1e51979U, 0x2ca0b3c1U, 0x4e823af8U, 0x93c79040U
+ /* 112 */ , 0x95e7fa51U, 0x48a250e9U, 0x2a80d9d0U, 0xf7c57368U, 0xeec5cba2U, 0x3380611aU, 0x51a2e823U, 0x8ce7429bU
+ /* 120 */ , 0x63a399b7U, 0xbee6330fU, 0xdcc4ba36U, 0x0181108eU, 0x1881a844U, 0xc5c402fcU, 0xa7e68bc5U, 0x7aa3217dU
+ /* 128 */ , 0x52a0c93fU, 0x8fe56387U, 0xedc7eabeU, 0x30824006U, 0x2982f8ccU, 0xf4c75274U, 0x96e5db4dU, 0x4ba071f5U
+ /* 136 */ , 0xa4e4aad9U, 0x79a10061U, 0x1b838958U, 0xc6c623e0U, 0xdfc69b2aU, 0x02833192U, 0x60a1b8abU, 0xbde41213U
+ /* 144 */ , 0xbbc47802U, 0x6681d2baU, 0x04a35b83U, 0xd9e6f13bU, 0xc0e649f1U, 0x1da3e349U, 0x7f816a70U, 0xa2c4c0c8U
+ /* 152 */ , 0x4d801be4U, 0x90c5b15cU, 0xf2e73865U, 0x2fa292ddU, 0x36a22a17U, 0xebe780afU, 0x89c50996U, 0x5480a32eU
+ /* 160 */ , 0x8585ddb4U, 0x58c0770cU, 0x3ae2fe35U, 0xe7a7548dU, 0xfea7ec47U, 0x23e246ffU, 0x41c0cfc6U, 0x9c85657eU
+ /* 168 */ , 0x73c1be52U, 0xae8414eaU, 0xcca69dd3U, 0x11e3376bU, 0x08e38fa1U, 0xd5a62519U, 0xb784ac20U, 0x6ac10698U
+ /* 176 */ , 0x6ce16c89U, 0xb1a4c631U, 0xd3864f08U, 0x0ec3e5b0U, 0x17c35d7aU, 0xca86f7c2U, 0xa8a47efbU, 0x75e1d443U
+ /* 184 */ , 0x9aa50f6fU, 0x47e0a5d7U, 0x25c22ceeU, 0xf8878656U, 0xe1873e9cU, 0x3cc29424U, 0x5ee01d1dU, 0x83a5b7a5U
+ /* 192 */ , 0xf90696d8U, 0x24433c60U, 0x4661b559U, 0x9b241fe1U, 0x8224a72bU, 0x5f610d93U, 0x3d4384aaU, 0xe0062e12U
+ /* 200 */ , 0x0f42f53eU, 0xd2075f86U, 0xb025d6bfU, 0x6d607c07U, 0x7460c4cdU, 0xa9256e75U, 0xcb07e74cU, 0x16424df4U
+ /* 208 */ , 0x106227e5U, 0xcd278d5dU, 0xaf050464U, 0x7240aedcU, 0x6b401616U, 0xb605bcaeU, 0xd4273597U, 0x09629f2fU
+ /* 216 */ , 0xe6264403U, 0x3b63eebbU, 0x59416782U, 0x8404cd3aU, 0x9d0475f0U, 0x4041df48U, 0x22635671U, 0xff26fcc9U
+ /* 224 */ , 0x2e238253U, 0xf36628ebU, 0x9144a1d2U, 0x4c010b6aU, 0x5501b3a0U, 0x88441918U, 0xea669021U, 0x37233a99U
+ /* 232 */ , 0xd867e1b5U, 0x05224b0dU, 0x6700c234U, 0xba45688cU, 0xa345d046U, 0x7e007afeU, 0x1c22f3c7U, 0xc167597fU
+ /* 240 */ , 0xc747336eU, 0x1a0299d6U, 0x782010efU, 0xa565ba57U, 0xbc65029dU, 0x6120a825U, 0x0302211cU, 0xde478ba4U
+ /* 248 */ , 0x31035088U, 0xec46fa30U, 0x8e647309U, 0x5321d9b1U, 0x4a21617bU, 0x9764cbc3U, 0xf54642faU, 0x2803e842U
+ }
+ ,
+ /* CRC32C table 4 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x03836bf2U, 0xf7703be1U, 0xf4f35013U, 0x1f979ac7U, 0x1c14f135U, 0xe8e7a126U, 0xeb64cad4U
+ /* 8 */ , 0xcf58d98aU, 0xccdbb278U, 0x3828e26bU, 0x3bab8999U, 0xd0cf434dU, 0xd34c28bfU, 0x27bf78acU, 0x243c135eU
+ /* 16 */ , 0x6fc75e10U, 0x6c4435e2U, 0x98b765f1U, 0x9b340e03U, 0x7050c4d7U, 0x73d3af25U, 0x8720ff36U, 0x84a394c4U
+ /* 24 */ , 0xa09f879aU, 0xa31cec68U, 0x57efbc7bU, 0x546cd789U, 0xbf081d5dU, 0xbc8b76afU, 0x487826bcU, 0x4bfb4d4eU
+ /* 32 */ , 0xde8ebd20U, 0xdd0dd6d2U, 0x29fe86c1U, 0x2a7ded33U, 0xc11927e7U, 0xc29a4c15U, 0x36691c06U, 0x35ea77f4U
+ /* 40 */ , 0x11d664aaU, 0x12550f58U, 0xe6a65f4bU, 0xe52534b9U, 0x0e41fe6dU, 0x0dc2959fU, 0xf931c58cU, 0xfab2ae7eU
+ /* 48 */ , 0xb149e330U, 0xb2ca88c2U, 0x4639d8d1U, 0x45bab323U, 0xaede79f7U, 0xad5d1205U, 0x59ae4216U, 0x5a2d29e4U
+ /* 56 */ , 0x7e113abaU, 0x7d925148U, 0x8961015bU, 0x8ae26aa9U, 0x6186a07dU, 0x6205cb8fU, 0x96f69b9cU, 0x9575f06eU
+ /* 64 */ , 0xbc1d7b41U, 0xbf9e10b3U, 0x4b6d40a0U, 0x48ee2b52U, 0xa38ae186U, 0xa0098a74U, 0x54fada67U, 0x5779b195U
+ /* 72 */ , 0x7345a2cbU, 0x70c6c939U, 0x8435992aU, 0x87b6f2d8U, 0x6cd2380cU, 0x6f5153feU, 0x9ba203edU, 0x9821681fU
+ /* 80 */ , 0xd3da2551U, 0xd0594ea3U, 0x24aa1eb0U, 0x27297542U, 0xcc4dbf96U, 0xcfced464U, 0x3b3d8477U, 0x38beef85U
+ /* 88 */ , 0x1c82fcdbU, 0x1f019729U, 0xebf2c73aU, 0xe871acc8U, 0x0315661cU, 0x00960deeU, 0xf4655dfdU, 0xf7e6360fU
+ /* 96 */ , 0x6293c661U, 0x6110ad93U, 0x95e3fd80U, 0x96609672U, 0x7d045ca6U, 0x7e873754U, 0x8a746747U, 0x89f70cb5U
+ /* 104 */ , 0xadcb1febU, 0xae487419U, 0x5abb240aU, 0x59384ff8U, 0xb25c852cU, 0xb1dfeedeU, 0x452cbecdU, 0x46afd53fU
+ /* 112 */ , 0x0d549871U, 0x0ed7f383U, 0xfa24a390U, 0xf9a7c862U, 0x12c302b6U, 0x11406944U, 0xe5b33957U, 0xe63052a5U
+ /* 120 */ , 0xc20c41fbU, 0xc18f2a09U, 0x357c7a1aU, 0x36ff11e8U, 0xdd9bdb3cU, 0xde18b0ceU, 0x2aebe0ddU, 0x29688b2fU
+ /* 128 */ , 0x783bf682U, 0x7bb89d70U, 0x8f4bcd63U, 0x8cc8a691U, 0x67ac6c45U, 0x642f07b7U, 0x90dc57a4U, 0x935f3c56U
+ /* 136 */ , 0xb7632f08U, 0xb4e044faU, 0x401314e9U, 0x43907f1bU, 0xa8f4b5cfU, 0xab77de3dU, 0x5f848e2eU, 0x5c07e5dcU
+ /* 144 */ , 0x17fca892U, 0x147fc360U, 0xe08c9373U, 0xe30ff881U, 0x086b3255U, 0x0be859a7U, 0xff1b09b4U, 0xfc986246U
+ /* 152 */ , 0xd8a47118U, 0xdb271aeaU, 0x2fd44af9U, 0x2c57210bU, 0xc733ebdfU, 0xc4b0802dU, 0x3043d03eU, 0x33c0bbccU
+ /* 160 */ , 0xa6b54ba2U, 0xa5362050U, 0x51c57043U, 0x52461bb1U, 0xb922d165U, 0xbaa1ba97U, 0x4e52ea84U, 0x4dd18176U
+ /* 168 */ , 0x69ed9228U, 0x6a6ef9daU, 0x9e9da9c9U, 0x9d1ec23bU, 0x767a08efU, 0x75f9631dU, 0x810a330eU, 0x828958fcU
+ /* 176 */ , 0xc97215b2U, 0xcaf17e40U, 0x3e022e53U, 0x3d8145a1U, 0xd6e58f75U, 0xd566e487U, 0x2195b494U, 0x2216df66U
+ /* 184 */ , 0x062acc38U, 0x05a9a7caU, 0xf15af7d9U, 0xf2d99c2bU, 0x19bd56ffU, 0x1a3e3d0dU, 0xeecd6d1eU, 0xed4e06ecU
+ /* 192 */ , 0xc4268dc3U, 0xc7a5e631U, 0x3356b622U, 0x30d5ddd0U, 0xdbb11704U, 0xd8327cf6U, 0x2cc12ce5U, 0x2f424717U
+ /* 200 */ , 0x0b7e5449U, 0x08fd3fbbU, 0xfc0e6fa8U, 0xff8d045aU, 0x14e9ce8eU, 0x176aa57cU, 0xe399f56fU, 0xe01a9e9dU
+ /* 208 */ , 0xabe1d3d3U, 0xa862b821U, 0x5c91e832U, 0x5f1283c0U, 0xb4764914U, 0xb7f522e6U, 0x430672f5U, 0x40851907U
+ /* 216 */ , 0x64b90a59U, 0x673a61abU, 0x93c931b8U, 0x904a5a4aU, 0x7b2e909eU, 0x78adfb6cU, 0x8c5eab7fU, 0x8fddc08dU
+ /* 224 */ , 0x1aa830e3U, 0x192b5b11U, 0xedd80b02U, 0xee5b60f0U, 0x053faa24U, 0x06bcc1d6U, 0xf24f91c5U, 0xf1ccfa37U
+ /* 232 */ , 0xd5f0e969U, 0xd673829bU, 0x2280d288U, 0x2103b97aU, 0xca6773aeU, 0xc9e4185cU, 0x3d17484fU, 0x3e9423bdU
+ /* 240 */ , 0x756f6ef3U, 0x76ec0501U, 0x821f5512U, 0x819c3ee0U, 0x6af8f434U, 0x697b9fc6U, 0x9d88cfd5U, 0x9e0ba427U
+ /* 248 */ , 0xba37b779U, 0xb9b4dc8bU, 0x4d478c98U, 0x4ec4e76aU, 0xa5a02dbeU, 0xa623464cU, 0x52d0165fU, 0x51537dadU
+ }
+ ,
+ /* CRC32C table 5 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x7798a213U, 0xee304527U, 0x99a8e734U, 0xdc618a4eU, 0xabf9285dU, 0x3251cf69U, 0x45c96d7aU
+ /* 8 */ , 0xb8c3149dU, 0xcf5bb68eU, 0x56f351baU, 0x216bf3a9U, 0x64a29ed3U, 0x133a3cc0U, 0x8a92dbf4U, 0xfd0a79e7U
+ /* 16 */ , 0x81f1c53fU, 0xf669672cU, 0x6fc18018U, 0x1859220bU, 0x5d904f71U, 0x2a08ed62U, 0xb3a00a56U, 0xc438a845U
+ /* 24 */ , 0x3932d1a2U, 0x4eaa73b1U, 0xd7029485U, 0xa09a3696U, 0xe5535becU, 0x92cbf9ffU, 0x0b631ecbU, 0x7cfbbcd8U
+ /* 32 */ , 0x02e38b7fU, 0x757b296cU, 0xecd3ce58U, 0x9b4b6c4bU, 0xde820131U, 0xa91aa322U, 0x30b24416U, 0x472ae605U
+ /* 40 */ , 0xba209fe2U, 0xcdb83df1U, 0x5410dac5U, 0x238878d6U, 0x664115acU, 0x11d9b7bfU, 0x8871508bU, 0xffe9f298U
+ /* 48 */ , 0x83124e40U, 0xf48aec53U, 0x6d220b67U, 0x1abaa974U, 0x5f73c40eU, 0x28eb661dU, 0xb1438129U, 0xc6db233aU
+ /* 56 */ , 0x3bd15addU, 0x4c49f8ceU, 0xd5e11ffaU, 0xa279bde9U, 0xe7b0d093U, 0x90287280U, 0x098095b4U, 0x7e1837a7U
+ /* 64 */ , 0x04c617ffU, 0x735eb5ecU, 0xeaf652d8U, 0x9d6ef0cbU, 0xd8a79db1U, 0xaf3f3fa2U, 0x3697d896U, 0x410f7a85U
+ /* 72 */ , 0xbc050362U, 0xcb9da171U, 0x52354645U, 0x25ade456U, 0x6064892cU, 0x17fc2b3fU, 0x8e54cc0bU, 0xf9cc6e18U
+ /* 80 */ , 0x8537d2c0U, 0xf2af70d3U, 0x6b0797e7U, 0x1c9f35f4U, 0x5956588eU, 0x2ecefa9dU, 0xb7661da9U, 0xc0febfbaU
+ /* 88 */ , 0x3df4c65dU, 0x4a6c644eU, 0xd3c4837aU, 0xa45c2169U, 0xe1954c13U, 0x960dee00U, 0x0fa50934U, 0x783dab27U
+ /* 96 */ , 0x06259c80U, 0x71bd3e93U, 0xe815d9a7U, 0x9f8d7bb4U, 0xda4416ceU, 0xaddcb4ddU, 0x347453e9U, 0x43ecf1faU
+ /* 104 */ , 0xbee6881dU, 0xc97e2a0eU, 0x50d6cd3aU, 0x274e6f29U, 0x62870253U, 0x151fa040U, 0x8cb74774U, 0xfb2fe567U
+ /* 112 */ , 0x87d459bfU, 0xf04cfbacU, 0x69e41c98U, 0x1e7cbe8bU, 0x5bb5d3f1U, 0x2c2d71e2U, 0xb58596d6U, 0xc21d34c5U
+ /* 120 */ , 0x3f174d22U, 0x488fef31U, 0xd1270805U, 0xa6bfaa16U, 0xe376c76cU, 0x94ee657fU, 0x0d46824bU, 0x7ade2058U
+ /* 128 */ , 0xf9fac3fbU, 0x8e6261e8U, 0x17ca86dcU, 0x605224cfU, 0x259b49b5U, 0x5203eba6U, 0xcbab0c92U, 0xbc33ae81U
+ /* 136 */ , 0x4139d766U, 0x36a17575U, 0xaf099241U, 0xd8913052U, 0x9d585d28U, 0xeac0ff3bU, 0x7368180fU, 0x04f0ba1cU
+ /* 144 */ , 0x780b06c4U, 0x0f93a4d7U, 0x963b43e3U, 0xe1a3e1f0U, 0xa46a8c8aU, 0xd3f22e99U, 0x4a5ac9adU, 0x3dc26bbeU
+ /* 152 */ , 0xc0c81259U, 0xb750b04aU, 0x2ef8577eU, 0x5960f56dU, 0x1ca99817U, 0x6b313a04U, 0xf299dd30U, 0x85017f23U
+ /* 160 */ , 0xfb194884U, 0x8c81ea97U, 0x15290da3U, 0x62b1afb0U, 0x2778c2caU, 0x50e060d9U, 0xc94887edU, 0xbed025feU
+ /* 168 */ , 0x43da5c19U, 0x3442fe0aU, 0xadea193eU, 0xda72bb2dU, 0x9fbbd657U, 0xe8237444U, 0x718b9370U, 0x06133163U
+ /* 176 */ , 0x7ae88dbbU, 0x0d702fa8U, 0x94d8c89cU, 0xe3406a8fU, 0xa68907f5U, 0xd111a5e6U, 0x48b942d2U, 0x3f21e0c1U
+ /* 184 */ , 0xc22b9926U, 0xb5b33b35U, 0x2c1bdc01U, 0x5b837e12U, 0x1e4a1368U, 0x69d2b17bU, 0xf07a564fU, 0x87e2f45cU
+ /* 192 */ , 0xfd3cd404U, 0x8aa47617U, 0x130c9123U, 0x64943330U, 0x215d5e4aU, 0x56c5fc59U, 0xcf6d1b6dU, 0xb8f5b97eU
+ /* 200 */ , 0x45ffc099U, 0x3267628aU, 0xabcf85beU, 0xdc5727adU, 0x999e4ad7U, 0xee06e8c4U, 0x77ae0ff0U, 0x0036ade3U
+ /* 208 */ , 0x7ccd113bU, 0x0b55b328U, 0x92fd541cU, 0xe565f60fU, 0xa0ac9b75U, 0xd7343966U, 0x4e9cde52U, 0x39047c41U
+ /* 216 */ , 0xc40e05a6U, 0xb396a7b5U, 0x2a3e4081U, 0x5da6e292U, 0x186f8fe8U, 0x6ff72dfbU, 0xf65fcacfU, 0x81c768dcU
+ /* 224 */ , 0xffdf5f7bU, 0x8847fd68U, 0x11ef1a5cU, 0x6677b84fU, 0x23bed535U, 0x54267726U, 0xcd8e9012U, 0xba163201U
+ /* 232 */ , 0x471c4be6U, 0x3084e9f5U, 0xa92c0ec1U, 0xdeb4acd2U, 0x9b7dc1a8U, 0xece563bbU, 0x754d848fU, 0x02d5269cU
+ /* 240 */ , 0x7e2e9a44U, 0x09b63857U, 0x901edf63U, 0xe7867d70U, 0xa24f100aU, 0xd5d7b219U, 0x4c7f552dU, 0x3be7f73eU
+ /* 248 */ , 0xc6ed8ed9U, 0xb1752ccaU, 0x28ddcbfeU, 0x5f4569edU, 0x1a8c0497U, 0x6d14a684U, 0xf4bc41b0U, 0x8324e3a3U
+ }
+ ,
+ /* CRC32C table 6 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0x7e9241a5U, 0x0d526f4fU, 0x73c02eeaU, 0x1aa4de9eU, 0x64369f3bU, 0x17f6b1d1U, 0x6964f074U
+ /* 8 */ , 0xc53e5138U, 0xbbac109dU, 0xc86c3e77U, 0xb6fe7fd2U, 0xdf9a8fa6U, 0xa108ce03U, 0xd2c8e0e9U, 0xac5aa14cU
+ /* 16 */ , 0x8a7da270U, 0xf4efe3d5U, 0x872fcd3fU, 0xf9bd8c9aU, 0x90d97ceeU, 0xee4b3d4bU, 0x9d8b13a1U, 0xe3195204U
+ /* 24 */ , 0x4f43f348U, 0x31d1b2edU, 0x42119c07U, 0x3c83dda2U, 0x55e72dd6U, 0x2b756c73U, 0x58b54299U, 0x2627033cU
+ /* 32 */ , 0x14fb44e1U, 0x6a690544U, 0x19a92baeU, 0x673b6a0bU, 0x0e5f9a7fU, 0x70cddbdaU, 0x030df530U, 0x7d9fb495U
+ /* 40 */ , 0xd1c515d9U, 0xaf57547cU, 0xdc977a96U, 0xa2053b33U, 0xcb61cb47U, 0xb5f38ae2U, 0xc633a408U, 0xb8a1e5adU
+ /* 48 */ , 0x9e86e691U, 0xe014a734U, 0x93d489deU, 0xed46c87bU, 0x8422380fU, 0xfab079aaU, 0x89705740U, 0xf7e216e5U
+ /* 56 */ , 0x5bb8b7a9U, 0x252af60cU, 0x56ead8e6U, 0x28789943U, 0x411c6937U, 0x3f8e2892U, 0x4c4e0678U, 0x32dc47ddU
+ /* 64 */ , 0xd98065c7U, 0xa7122462U, 0xd4d20a88U, 0xaa404b2dU, 0xc324bb59U, 0xbdb6fafcU, 0xce76d416U, 0xb0e495b3U
+ /* 72 */ , 0x1cbe34ffU, 0x622c755aU, 0x11ec5bb0U, 0x6f7e1a15U, 0x061aea61U, 0x7888abc4U, 0x0b48852eU, 0x75dac48bU
+ /* 80 */ , 0x53fdc7b7U, 0x2d6f8612U, 0x5eafa8f8U, 0x203de95dU, 0x49591929U, 0x37cb588cU, 0x440b7666U, 0x3a9937c3U
+ /* 88 */ , 0x96c3968fU, 0xe851d72aU, 0x9b91f9c0U, 0xe503b865U, 0x8c674811U, 0xf2f509b4U, 0x8135275eU, 0xffa766fbU
+ /* 96 */ , 0xcd7b2126U, 0xb3e96083U, 0xc0294e69U, 0xbebb0fccU, 0xd7dfffb8U, 0xa94dbe1dU, 0xda8d90f7U, 0xa41fd152U
+ /* 104 */ , 0x0845701eU, 0x76d731bbU, 0x05171f51U, 0x7b855ef4U, 0x12e1ae80U, 0x6c73ef25U, 0x1fb3c1cfU, 0x6121806aU
+ /* 112 */ , 0x47068356U, 0x3994c2f3U, 0x4a54ec19U, 0x34c6adbcU, 0x5da25dc8U, 0x23301c6dU, 0x50f03287U, 0x2e627322U
+ /* 120 */ , 0x8238d26eU, 0xfcaa93cbU, 0x8f6abd21U, 0xf1f8fc84U, 0x989c0cf0U, 0xe60e4d55U, 0x95ce63bfU, 0xeb5c221aU
+ /* 128 */ , 0x4377278bU, 0x3de5662eU, 0x4e2548c4U, 0x30b70961U, 0x59d3f915U, 0x2741b8b0U, 0x5481965aU, 0x2a13d7ffU
+ /* 136 */ , 0x864976b3U, 0xf8db3716U, 0x8b1b19fcU, 0xf5895859U, 0x9ceda82dU, 0xe27fe988U, 0x91bfc762U, 0xef2d86c7U
+ /* 144 */ , 0xc90a85fbU, 0xb798c45eU, 0xc458eab4U, 0xbacaab11U, 0xd3ae5b65U, 0xad3c1ac0U, 0xdefc342aU, 0xa06e758fU
+ /* 152 */ , 0x0c34d4c3U, 0x72a69566U, 0x0166bb8cU, 0x7ff4fa29U, 0x16900a5dU, 0x68024bf8U, 0x1bc26512U, 0x655024b7U
+ /* 160 */ , 0x578c636aU, 0x291e22cfU, 0x5ade0c25U, 0x244c4d80U, 0x4d28bdf4U, 0x33bafc51U, 0x407ad2bbU, 0x3ee8931eU
+ /* 168 */ , 0x92b23252U, 0xec2073f7U, 0x9fe05d1dU, 0xe1721cb8U, 0x8816ecccU, 0xf684ad69U, 0x85448383U, 0xfbd6c226U
+ /* 176 */ , 0xddf1c11aU, 0xa36380bfU, 0xd0a3ae55U, 0xae31eff0U, 0xc7551f84U, 0xb9c75e21U, 0xca0770cbU, 0xb495316eU
+ /* 184 */ , 0x18cf9022U, 0x665dd187U, 0x159dff6dU, 0x6b0fbec8U, 0x026b4ebcU, 0x7cf90f19U, 0x0f3921f3U, 0x71ab6056U
+ /* 192 */ , 0x9af7424cU, 0xe46503e9U, 0x97a52d03U, 0xe9376ca6U, 0x80539cd2U, 0xfec1dd77U, 0x8d01f39dU, 0xf393b238U
+ /* 200 */ , 0x5fc91374U, 0x215b52d1U, 0x529b7c3bU, 0x2c093d9eU, 0x456dcdeaU, 0x3bff8c4fU, 0x483fa2a5U, 0x36ade300U
+ /* 208 */ , 0x108ae03cU, 0x6e18a199U, 0x1dd88f73U, 0x634aced6U, 0x0a2e3ea2U, 0x74bc7f07U, 0x077c51edU, 0x79ee1048U
+ /* 216 */ , 0xd5b4b104U, 0xab26f0a1U, 0xd8e6de4bU, 0xa6749feeU, 0xcf106f9aU, 0xb1822e3fU, 0xc24200d5U, 0xbcd04170U
+ /* 224 */ , 0x8e0c06adU, 0xf09e4708U, 0x835e69e2U, 0xfdcc2847U, 0x94a8d833U, 0xea3a9996U, 0x99fab77cU, 0xe768f6d9U
+ /* 232 */ , 0x4b325795U, 0x35a01630U, 0x466038daU, 0x38f2797fU, 0x5196890bU, 0x2f04c8aeU, 0x5cc4e644U, 0x2256a7e1U
+ /* 240 */ , 0x0471a4ddU, 0x7ae3e578U, 0x0923cb92U, 0x77b18a37U, 0x1ed57a43U, 0x60473be6U, 0x1387150cU, 0x6d1554a9U
+ /* 248 */ , 0xc14ff5e5U, 0xbfddb440U, 0xcc1d9aaaU, 0xb28fdb0fU, 0xdbeb2b7bU, 0xa5796adeU, 0xd6b94434U, 0xa82b0591U
+ }
+ ,
+ /* CRC32C table 7 for quad-bytes ( big-endian ), auto-generated. DO NOT MODIFY! */
+ {
+ /* 0 */ 0x00000000U, 0xb8aa45ddU, 0x812367bfU, 0x39892262U, 0xf331227bU, 0x4b9b67a6U, 0x721245c4U, 0xcab80019U
+ /* 8 */ , 0xe66344f6U, 0x5ec9012bU, 0x67402349U, 0xdfea6694U, 0x1552668dU, 0xadf82350U, 0x94710132U, 0x2cdb44efU
+ /* 16 */ , 0x3db164e9U, 0x851b2134U, 0xbc920356U, 0x0438468bU, 0xce804692U, 0x762a034fU, 0x4fa3212dU, 0xf70964f0U
+ /* 24 */ , 0xdbd2201fU, 0x637865c2U, 0x5af147a0U, 0xe25b027dU, 0x28e30264U, 0x904947b9U, 0xa9c065dbU, 0x116a2006U
+ /* 32 */ , 0x8b1425d7U, 0x33be600aU, 0x0a374268U, 0xb29d07b5U, 0x782507acU, 0xc08f4271U, 0xf9066013U, 0x41ac25ceU
+ /* 40 */ , 0x6d776121U, 0xd5dd24fcU, 0xec54069eU, 0x54fe4343U, 0x9e46435aU, 0x26ec0687U, 0x1f6524e5U, 0xa7cf6138U
+ /* 48 */ , 0xb6a5413eU, 0x0e0f04e3U, 0x37862681U, 0x8f2c635cU, 0x45946345U, 0xfd3e2698U, 0xc4b704faU, 0x7c1d4127U
+ /* 56 */ , 0x50c605c8U, 0xe86c4015U, 0xd1e56277U, 0x694f27aaU, 0xa3f727b3U, 0x1b5d626eU, 0x22d4400cU, 0x9a7e05d1U
+ /* 64 */ , 0xe75fa6abU, 0x5ff5e376U, 0x667cc114U, 0xded684c9U, 0x146e84d0U, 0xacc4c10dU, 0x954de36fU, 0x2de7a6b2U
+ /* 72 */ , 0x013ce25dU, 0xb996a780U, 0x801f85e2U, 0x38b5c03fU, 0xf20dc026U, 0x4aa785fbU, 0x732ea799U, 0xcb84e244U
+ /* 80 */ , 0xdaeec242U, 0x6244879fU, 0x5bcda5fdU, 0xe367e020U, 0x29dfe039U, 0x9175a5e4U, 0xa8fc8786U, 0x1056c25bU
+ /* 88 */ , 0x3c8d86b4U, 0x8427c369U, 0xbdaee10bU, 0x0504a4d6U, 0xcfbca4cfU, 0x7716e112U, 0x4e9fc370U, 0xf63586adU
+ /* 96 */ , 0x6c4b837cU, 0xd4e1c6a1U, 0xed68e4c3U, 0x55c2a11eU, 0x9f7aa107U, 0x27d0e4daU, 0x1e59c6b8U, 0xa6f38365U
+ /* 104 */ , 0x8a28c78aU, 0x32828257U, 0x0b0ba035U, 0xb3a1e5e8U, 0x7919e5f1U, 0xc1b3a02cU, 0xf83a824eU, 0x4090c793U
+ /* 112 */ , 0x51fae795U, 0xe950a248U, 0xd0d9802aU, 0x6873c5f7U, 0xa2cbc5eeU, 0x1a618033U, 0x23e8a251U, 0x9b42e78cU
+ /* 120 */ , 0xb799a363U, 0x0f33e6beU, 0x36bac4dcU, 0x8e108101U, 0x44a88118U, 0xfc02c4c5U, 0xc58be6a7U, 0x7d21a37aU
+ /* 128 */ , 0x3fc9a052U, 0x8763e58fU, 0xbeeac7edU, 0x06408230U, 0xccf88229U, 0x7452c7f4U, 0x4ddbe596U, 0xf571a04bU
+ /* 136 */ , 0xd9aae4a4U, 0x6100a179U, 0x5889831bU, 0xe023c6c6U, 0x2a9bc6dfU, 0x92318302U, 0xabb8a160U, 0x1312e4bdU
+ /* 144 */ , 0x0278c4bbU, 0xbad28166U, 0x835ba304U, 0x3bf1e6d9U, 0xf149e6c0U, 0x49e3a31dU, 0x706a817fU, 0xc8c0c4a2U
+ /* 152 */ , 0xe41b804dU, 0x5cb1c590U, 0x6538e7f2U, 0xdd92a22fU, 0x172aa236U, 0xaf80e7ebU, 0x9609c589U, 0x2ea38054U
+ /* 160 */ , 0xb4dd8585U, 0x0c77c058U, 0x35fee23aU, 0x8d54a7e7U, 0x47eca7feU, 0xff46e223U, 0xc6cfc041U, 0x7e65859cU
+ /* 168 */ , 0x52bec173U, 0xea1484aeU, 0xd39da6ccU, 0x6b37e311U, 0xa18fe308U, 0x1925a6d5U, 0x20ac84b7U, 0x9806c16aU
+ /* 176 */ , 0x896ce16cU, 0x31c6a4b1U, 0x084f86d3U, 0xb0e5c30eU, 0x7a5dc317U, 0xc2f786caU, 0xfb7ea4a8U, 0x43d4e175U
+ /* 184 */ , 0x6f0fa59aU, 0xd7a5e047U, 0xee2cc225U, 0x568687f8U, 0x9c3e87e1U, 0x2494c23cU, 0x1d1de05eU, 0xa5b7a583U
+ /* 192 */ , 0xd89606f9U, 0x603c4324U, 0x59b56146U, 0xe11f249bU, 0x2ba72482U, 0x930d615fU, 0xaa84433dU, 0x122e06e0U
+ /* 200 */ , 0x3ef5420fU, 0x865f07d2U, 0xbfd625b0U, 0x077c606dU, 0xcdc46074U, 0x756e25a9U, 0x4ce707cbU, 0xf44d4216U
+ /* 208 */ , 0xe5276210U, 0x5d8d27cdU, 0x640405afU, 0xdcae4072U, 0x1616406bU, 0xaebc05b6U, 0x973527d4U, 0x2f9f6209U
+ /* 216 */ , 0x034426e6U, 0xbbee633bU, 0x82674159U, 0x3acd0484U, 0xf075049dU, 0x48df4140U, 0x71566322U, 0xc9fc26ffU
+ /* 224 */ , 0x5382232eU, 0xeb2866f3U, 0xd2a14491U, 0x6a0b014cU, 0xa0b30155U, 0x18194488U, 0x219066eaU, 0x993a2337U
+ /* 232 */ , 0xb5e167d8U, 0x0d4b2205U, 0x34c20067U, 0x8c6845baU, 0x46d045a3U, 0xfe7a007eU, 0xc7f3221cU, 0x7f5967c1U
+ /* 240 */ , 0x6e3347c7U, 0xd699021aU, 0xef102078U, 0x57ba65a5U, 0x9d0265bcU, 0x25a82061U, 0x1c210203U, 0xa48b47deU
+ /* 248 */ , 0x88500331U, 0x30fa46ecU, 0x0973648eU, 0xb1d92153U, 0x7b61214aU, 0xc3cb6497U, 0xfa4246f5U, 0x42e80328U
+ }
+ #endif
+ };
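The tables above are emitted by a generator and, as their headers say, must not be edited by hand. For a sanity check, table 0 of the CRC32C set (little-endian) should coincide with the classic byte-at-a-time table for the reflected Castagnoli polynomial noted in the header comment (`shifted = 0x82f63b78`). A minimal, illustrative Java sketch (not the actual generator) that reproduces the first entries:

```java
public class Crc32cTableSketch {
    // Reflected CRC32C (Castagnoli) polynomial, per the header comment above.
    private static final int POLY = 0x82f63b78;

    public static void main(String[] args) {
        int[] table = new int[256];
        for (int n = 0; n < 256; n++) {
            int c = n;
            for (int k = 0; k < 8; k++) {
                c = ((c & 1) != 0) ? (c >>> 1) ^ POLY : (c >>> 1);
            }
            table[n] = c;
        }
        // Expect 0x00000000U, 0xf26b8303U, 0xe13b70f7U, 0x1350f3f4U, ...
        // matching CRC32C table 0 above.
        for (int i = 0; i < 4; i++) {
            System.out.printf("0x%08xU%n", table[i]);
        }
    }
}
```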
diff --git a/hotspot/src/cpu/s390/vm/stubRoutines_s390.hpp b/hotspot/src/cpu/s390/vm/stubRoutines_s390.hpp
index 5bb64303b6b..b2509b23094 100644
--- a/hotspot/src/cpu/s390/vm/stubRoutines_s390.hpp
+++ b/hotspot/src/cpu/s390/vm/stubRoutines_s390.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,11 @@ class zarch {
};
private:
- static address _handler_for_unsafe_access_entry;
-
static int _atomic_memory_operation_lock;
static address _partial_subtype_check;
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
+ static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
 // Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
static address _trot_table_addr;
@@ -91,11 +90,11 @@ class zarch {
static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
- static address handler_for_unsafe_access_entry() { return _handler_for_unsafe_access_entry; }
-
static address partial_subtype_check() { return _partial_subtype_check; }
+ static void generate_load_absolute_address(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
+ static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
 // Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
static void generate_load_trot_table_addr(MacroAssembler* masm, Register table);
diff --git a/hotspot/src/cpu/s390/vm/templateInterpreterGenerator_s390.cpp b/hotspot/src/cpu/s390/vm/templateInterpreterGenerator_s390.cpp
index 20a9a3e9571..5d94689b383 100644
--- a/hotspot/src/cpu/s390/vm/templateInterpreterGenerator_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/templateInterpreterGenerator_s390.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -121,9 +121,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
// We use target_sp for storing arguments in the C frame.
__ save_return_pc();
-
- __ z_stmg(Z_R10,Z_R13,-32,Z_SP);
- __ push_frame_abi160(32);
+ __ push_frame_abi160(4*BytesPerWord); // Reserve space to save the tmp_[1..4] registers.
+ __ z_stmg(Z_R10, Z_R13, frame::z_abi_160_size, Z_SP); // Save registers only after frame is pushed.
__ z_lgr(arg_java, Z_ARG1);
@@ -341,9 +340,9 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
 // Method exit, all arguments processed.
__ bind(loop_end);
+ __ z_lmg(Z_R10, Z_R13, frame::z_abi_160_size, Z_SP); // restore registers before frame is popped.
__ pop_frame();
__ restore_return_pc();
- __ z_lmg(Z_R10,Z_R13,-32,Z_SP);
__ z_br(Z_R14);
// Copy int arguments.
@@ -642,13 +641,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry;
}
-// Unused, should never pass by.
-address TemplateInterpreterGenerator::generate_continuation_for (TosState state) {
- address entry = __ pc();
- __ should_not_reach_here();
- return entry;
-}
-
address TemplateInterpreterGenerator::generate_return_entry_for (TosState state, int step, size_t index_size) {
address entry = __ pc();
@@ -683,6 +675,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for (TosState state,
__ z_llgc(size, Address(cache, offset, flags_offset+(sizeof(size_t)-1)));
__ z_sllg(size, size, Interpreter::logStackElementSize); // Each argument size in bytes.
__ z_agr(Z_esp, size); // Pop arguments.
+
+ __ check_and_handle_popframe(Z_thread);
+ __ check_and_handle_earlyret(Z_thread);
+
__ dispatch_next(state, step);
BLOCK_COMMENT("} return_entry");
@@ -1186,11 +1182,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// native_call: assert that mdo == NULL
const bool check_for_mdo = !native_call DEBUG_ONLY(|| native_call);
if (ProfileInterpreter && check_for_mdo) {
-#ifdef FAST_DISPATCH
- // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
- // they both use I2.
- assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
-#endif // FAST_DISPATCH
Label get_continue;
__ load_and_test_long(Rmdp, method_(method_data));
@@ -1240,13 +1231,9 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// Advance local_addr to point behind locals (creates positive incr. in loop).
__ z_lg(Z_R1_scratch, Address(Z_method, Method::const_offset()));
- __ z_llgh(Z_R0_scratch,
- Address(Z_R1_scratch, ConstMethod::size_of_locals_offset()));
- if (Z_R0_scratch == Z_R0) {
- __ z_aghi(Z_R0_scratch, -1);
- } else {
- __ add2reg(Z_R0_scratch, -1);
- }
+ __ z_llgh(Z_R0_scratch, Address(Z_R1_scratch, ConstMethod::size_of_locals_offset()));
+ __ add2reg(Z_R0_scratch, -1);
+
__ z_lgr(local_addr/*locals*/, Z_locals);
__ z_sllg(Z_R0_scratch, Z_R0_scratch, LogBytesPerWord);
__ z_sllg(local_count, local_count, LogBytesPerWord); // Local_count are non param locals.
@@ -1933,8 +1920,11 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}
-// Method entry for static native methods:
-// int java.util.zip.CRC32.update(int crc, int b)
+
+/**
+ * Method entry for static native methods:
+ * int java.util.zip.CRC32.update(int crc, int b)
+ */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
if (UseCRC32Intrinsics) {
@@ -1964,7 +1954,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ z_llgf(crc, 2 * wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.
StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
- __ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1);
+ __ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1, true);
// Restore caller sp for c2i case.
__ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.
@@ -1983,9 +1973,11 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
}
-// Method entry for static native methods:
-// int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
-// int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+/**
+ * Method entry for static native methods:
+ * int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
+ * int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
+ */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32Intrinsics) {
@@ -2020,10 +2012,10 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// data = buf + off
BLOCK_COMMENT("CRC32_updateByteBuffer {");
__ z_llgf(crc, 5*wordSize, argP); // current crc state
- __ z_lg(data, 3*wordSize, argP); // start of byte buffer
+ __ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process
- } else { // Used for "updateBytes update".
+ } else { // Used for "updateBytes update".
// crc @ (SP + 4W) (32bit)
// buf @ (SP + 3W) (64bit ptr to byte array)
// off @ (SP + 2W) (32bit)
@@ -2031,7 +2023,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// data = buf + off + base_offset
BLOCK_COMMENT("CRC32_updateBytes {");
__ z_llgf(crc, 4*wordSize, argP); // current crc state
- __ z_lg(data, 3*wordSize, argP); // start of byte buffer
+ __ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process
__ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
@@ -2041,7 +2033,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
__ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_stmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers.
- __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3);
+ __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, true);
__ z_lmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 back from stack.
// Restore caller sp for c2i case.
@@ -2060,8 +2052,79 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
return NULL;
}
-// Not supported
+
+/**
+ * Method entry for intrinsic-candidate (non-native) methods:
+ * int java.util.zip.CRC32C.updateBytes( int crc, byte[] b, int off, int end)
+ * int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
+ * Unlike CRC32, CRC32C does not have any methods marked as native
+ * CRC32C also uses an "end" variable instead of the length variable CRC32 uses
+ */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+
+ if (UseCRC32CIntrinsics) {
+ uint64_t entry_off = __ offset();
+
+ // We don't generate local frame and don't align stack because
+ // we call stub code and there is no safepoint on this path.
+
+ // Load parameters.
+ // Z_esp is callers operand stack pointer, i.e. it points to the parameters.
+ const Register argP = Z_esp;
+ const Register crc = Z_ARG1; // crc value
+ const Register data = Z_ARG2; // address of java byte array
+ const Register dataLen = Z_ARG3; // source data len
+ const Register table = Z_ARG4; // address of crc32 table
+ const Register t0 = Z_R10; // work reg for kernel* emitters
+ const Register t1 = Z_R11; // work reg for kernel* emitters
+ const Register t2 = Z_R12; // work reg for kernel* emitters
+ const Register t3 = Z_R13; // work reg for kernel* emitters
+
+ // Arguments are reversed on java expression stack.
+ // Calculate address of start element.
+ if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateByteBuffer direct".
+ // crc @ (SP + 5W) (32bit)
+ // buf @ (SP + 3W) (64bit ptr to long array)
+ // off @ (SP + 2W) (32bit)
+ // dataLen @ (SP + 1W) (32bit)
+ // data = buf + off
+ BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
+ __ z_llgf(crc, 5*wordSize, argP); // current crc state
+ __ z_lg(data, 3*wordSize, argP); // start of byte buffer
+ __ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
+ __ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process, calculated as
+ __ z_sgf(dataLen, Address(argP, 2*wordSize)); // (end_index - offset)
+ } else { // Used for "updateBytes update".
+ // crc @ (SP + 4W) (32bit)
+ // buf @ (SP + 3W) (64bit ptr to byte array)
+ // off @ (SP + 2W) (32bit)
+ // dataLen @ (SP + 1W) (32bit)
+ // data = buf + off + base_offset
+ BLOCK_COMMENT("CRC32C_updateBytes {");
+ __ z_llgf(crc, 4*wordSize, argP); // current crc state
+ __ z_lg(data, 3*wordSize, argP); // start of byte buffer
+ __ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
+ __ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process, calculated as
+ __ z_sgf(dataLen, Address(argP, 2*wordSize)); // (end_index - offset)
+ __ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
+ }
+
+ StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);
+
+ __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
+ __ z_stmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers.
+ __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, false);
+ __ z_lmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 back from stack.
+
+ // Restore caller sp for c2i case.
+ __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.
+
+ __ z_br(Z_R14);
+
+ BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
+ return __ addr_at(entry_off);
+ }
+
return NULL;
}
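The new `generate_CRC32C_updateBytes_entry` backs the intrinsic candidates named in its header comment, `java.util.zip.CRC32C.updateBytes` and `updateDirectByteBuffer`, which (unlike their CRC32 counterparts) are plain Java methods and take an end index instead of a length, hence the extra `z_sgf` that computes `end_index - offset`. None of this is visible to application code; the public `Checksum` API still takes a length. A small usage sketch:

```java
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32C;

public class Crc32cDemo {
    public static void main(String[] args) {
        byte[] data = "hello, world".getBytes(StandardCharsets.UTF_8);
        CRC32C crc = new CRC32C();
        // The public API takes (off, len); the private updateBytes(crc, b, off, end)
        // intrinsic candidate above is reached from here on a supporting VM.
        crc.update(data, 0, data.length);
        System.out.printf("CRC32C = 0x%08x%n", crc.getValue());
    }
}
```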
diff --git a/hotspot/src/cpu/s390/vm/templateTable_s390.cpp b/hotspot/src/cpu/s390/vm/templateTable_s390.cpp
index 83d707dbd60..becbb0e48b5 100644
--- a/hotspot/src/cpu/s390/vm/templateTable_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/templateTable_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -3466,7 +3466,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes()));
__ mem2reg_opt(method,
Address(Z_tmp_2, index,
- InstanceKlass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
+ Klass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
__ profile_arguments_type(Z_ARG4, method, Z_ARG5, true);
__ jump_from_interpreted(method, Z_ARG4);
BLOCK_COMMENT("} invokevirtual_helper");
@@ -3708,7 +3708,7 @@ void TemplateTable::_new() {
 __ z_sllg(offset, offset, LogBytesPerWord); // Convert to offset.
// Get InstanceKlass.
Register iklass = cpool;
- __ z_lg(iklass, Address(cpool, offset, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_offset(cpool, offset, iklass);
// Make sure klass is initialized & doesn't have finalizer.
// Make sure klass is fully initialized.
@@ -3895,7 +3895,7 @@ void TemplateTable::checkcast() {
__ z_lgr(Z_ARG4, Z_tos); // Save receiver.
__ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
- __ mem2reg_opt(klass, Address(cpool, index, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_offset(cpool, index, klass);
__ bind(resolved);
@@ -3969,8 +3969,7 @@ void TemplateTable::instanceof() {
__ load_klass(subklass, Z_tos);
__ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
- __ mem2reg_opt(klass,
- Address(cpool, index, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_offset(cpool, index, klass);
__ bind(resolved);
diff --git a/hotspot/src/cpu/s390/vm/vm_version_s390.cpp b/hotspot/src/cpu/s390/vm/vm_version_s390.cpp
index 55dbd30aec8..709a9fdf6ed 100644
--- a/hotspot/src/cpu/s390/vm/vm_version_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/vm_version_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "compiler/disassembler.hpp"
#include "code/compiledIC.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_s390.hpp"
@@ -111,13 +112,23 @@ void VM_Version::initialize() {
ContendedPaddingWidth = cache_line_size;
}
- // On z/Architecture, the CRC32 intrinsics had to be implemented "by hand".
- // They cannot be based on the CHECKSUM instruction which has been there
- // since the very beginning (of z/Architecture). It computes "some kind of" a checksum
- // which has nothing to do with the CRC32 algorithm.
+ // On z/Architecture, the CRC32/CRC32C intrinsics are implemented "by hand".
+ // TODO: Provide implementation based on the vector instructions available from z13.
+ // Note: The CHECKSUM instruction, which has been there since the very beginning
+ // (of z/Architecture), computes "some kind of" a checksum.
+ // It has nothing to do with the CRC32 algorithm.
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
}
+ if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
+ FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
+ }
+
+ // TODO: Provide implementation.
+ if (UseAdler32Intrinsics) {
+ warning("Adler32Intrinsics not available on this CPU.");
+ FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
+ }
// On z/Architecture, we take UseAES as the general switch to enable/disable the AES intrinsics.
// The specific, and yet to be defined, switches UseAESxxxIntrinsics will then be set
@@ -142,6 +153,10 @@ void VM_Version::initialize() {
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
+ if (UseAESIntrinsics && !UseAES) {
+ warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
+ FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+ }
// TODO: implement AES/CTR intrinsics
if (UseAESCTRIntrinsics) {
@@ -195,11 +210,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
- if (UseAdler32Intrinsics) {
- warning("Adler32Intrinsics not available on this CPU.");
- FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
- }
-
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
}
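With the hunk above, `UseCRC32CIntrinsics` now defaults to true on s390, and the `UseAdler32Intrinsics` warning moves next to the other checksum flags. Whether a given VM actually enables these flags can be checked at runtime through the HotSpot diagnostic MXBean; a small sketch (the flag names are the ones used in this patch, queried via the standard `com.sun.management` API):

```java
import com.sun.management.HotSpotDiagnosticMXBean;
import java.lang.management.ManagementFactory;

public class IntrinsicFlagCheck {
    public static void main(String[] args) {
        HotSpotDiagnosticMXBean bean =
            ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
        // getVMOption throws IllegalArgumentException for flags the VM does not define.
        for (String flag : new String[] {
                "UseCRC32Intrinsics", "UseCRC32CIntrinsics", "UseAdler32Intrinsics"}) {
            System.out.println(flag + " = " + bean.getVMOption(flag).getValue());
        }
    }
}
```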
diff --git a/hotspot/src/cpu/s390/vm/vtableStubs_s390.cpp b/hotspot/src/cpu/s390/vm/vtableStubs_s390.cpp
index c2c3ab246b8..de0a351ea04 100644
--- a/hotspot/src/cpu/s390/vm/vtableStubs_s390.cpp
+++ b/hotspot/src/cpu/s390/vm/vtableStubs_s390.cpp
@@ -83,7 +83,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ load_klass(rcvr_klass, Z_ARG1);
// Set method (in case of interpreted method), and destination address.
- int entry_offset = in_bytes(InstanceKlass::vtable_start_offset()) +
+ int entry_offset = in_bytes(Klass::vtable_start_offset()) +
vtable_index * vtableEntry::size_in_bytes();
#ifndef PRODUCT
@@ -96,8 +96,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// worst case actual size
padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true);
- assert(Immediate::is_uimm12(in_bytes(InstanceKlass::vtable_length_offset())), "disp to large");
- __ z_cl(vtable_idx, in_bytes(InstanceKlass::vtable_length_offset()), rcvr_klass);
+ assert(Immediate::is_uimm12(in_bytes(Klass::vtable_length_offset())), "disp to large");
+ __ z_cl(vtable_idx, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
__ z_brl(L);
__ z_lghi(Z_ARG3, vtable_index); // Debug code, don't optimize.
__ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false);
@@ -187,11 +187,11 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ load_klass(rcvr_klass, Z_ARG1);
// Load start of itable entries into itable_entry.
- __ z_llgf(vtable_len, Address(rcvr_klass, InstanceKlass::vtable_length_offset()));
+ __ z_llgf(vtable_len, Address(rcvr_klass, Klass::vtable_length_offset()));
__ z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
// Loop over all itable entries until desired interfaceOop(Rinterface) found.
- const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset());
+ const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
// Count unused bytes.
start_pc = __ pc();
__ add2reg_with_index(itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), rcvr_klass, vtable_len);
diff --git a/hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp
index f9ba7983fd2..c96f1fe978d 100644
--- a/hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/synchronizer.hpp"
+#include "utilities/align.hpp"
#include "utilities/macros.hpp"
@@ -52,11 +53,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
return i;
}
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
- // No special entry points that preclude compilation
- return true;
-}
-
static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
// Figure out the size of an interpreter frame (in words) given that we have a fully allocated
@@ -71,12 +67,12 @@ static int size_activation_helper(int callee_extra_locals, int max_stack, int mo
// the caller so we must ensure that it is properly aligned for our callee.
//
const int rounded_vm_local_words =
- round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
+ align_up((int)frame::interpreter_frame_vm_local_words,WordsPerLong);
// callee_locals and max_stack are counts, not the size in frame.
const int locals_size =
- round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
+ align_up(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
const int max_stack_words = max_stack * Interpreter::stackElementWords;
- return (round_to((max_stack_words
+ return (align_up((max_stack_words
+ rounded_vm_local_words
+ frame::memory_parameter_word_sp_offset), WordsPerLong)
// already rounded
@@ -87,7 +83,7 @@ static int size_activation_helper(int callee_extra_locals, int max_stack, int mo
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
// See call_stub code
- int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset,
+ int call_stub_size = align_up(7 + frame::memory_parameter_word_sp_offset,
WordsPerLong); // 7 + register save area
// Save space for one monitor to get into the interpreted method in case
@@ -110,7 +106,7 @@ int AbstractInterpreter::size_activation(int max_stack,
int monitor_size = monitors * frame::interpreter_frame_monitor_size();
- assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
+ assert(is_aligned(monitor_size, WordsPerLong), "must align");
//
// Note: if you look closely this appears to be doing something much different
@@ -136,8 +132,8 @@ int AbstractInterpreter::size_activation(int max_stack,
// there is no sense in messing working code.
//
- int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
- assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
+ int rounded_cls = align_up((callee_locals - callee_params), WordsPerLong);
+ assert(is_aligned(rounded_cls, WordsPerLong), "must align");
int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);
@@ -171,9 +167,9 @@ void AbstractInterpreter::layout_activation(Method* method,
// even if not fully filled out.
assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
- int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
+ int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words,WordsPerLong);
int monitor_size = moncount * frame::interpreter_frame_monitor_size();
- assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
+ assert(is_aligned(monitor_size, WordsPerLong), "must align");
intptr_t* fp = interpreter_frame->fp();
@@ -203,7 +199,7 @@ void AbstractInterpreter::layout_activation(Method* method,
int parm_words = caller_actual_parameters * Interpreter::stackElementWords;
locals = Lesp_ptr + parm_words;
int delta = local_words - parm_words;
- int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
+ int computed_sp_adjustment = (delta > 0) ? align_up(delta, WordsPerLong) : 0;
*interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
if (!is_bottom_frame) {
// Llast_SP is set below for the current frame to SP (with the
@@ -270,9 +266,7 @@ void AbstractInterpreter::layout_activation(Method* method,
assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
}
-#ifdef _LP64
assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
-#endif
*interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
*interpreter_frame->register_addr(Llocals) = (intptr_t) locals;
@@ -283,9 +277,6 @@ void AbstractInterpreter::layout_activation(Method* method,
*interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
// save the mirror in the interpreter frame
*interpreter_frame->interpreter_frame_mirror_addr() = method->method_holder()->java_mirror();
-#ifdef FAST_DISPATCH
- *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
-#endif
#ifdef ASSERT
BasicObjectLock* mp = (BasicObjectLock*)monitors;
diff --git a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp
index 2cc945ad2a1..61c789bf83c 100644
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp
@@ -26,6 +26,36 @@
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
+#include "assembler_sparc.hpp"
+
int AbstractAssembler::code_fill_byte() {
return 0x00; // illegal instruction 0x00000000
}
+
+#ifdef VALIDATE_PIPELINE
+/* Walk over the current code section and verify that there are no obvious
+ * pipeline hazards exposed in the code generated.
+ */
+void Assembler::validate_no_pipeline_hazards() {
+ const CodeSection* csect = code_section();
+
+ address addr0 = csect->start();
+ address addrN = csect->end();
+ uint32_t prev = 0;
+
+ assert((addrN - addr0) % BytesPerInstWord == 0, "must be");
+
+ for (address pc = addr0; pc != addrN; pc += BytesPerInstWord) {
+    uint32_t insn = *reinterpret_cast<uint32_t*>(pc);
+
+ // 1. General case: No CTI immediately after other CTI
+ assert(!(is_cti(prev) && is_cti(insn)), "CTI-CTI not allowed.");
+
+ // 2. Special case: No CTI immediately after/before RDPC
+ assert(!(is_cti(prev) && is_rdpc(insn)), "CTI-RDPC not allowed.");
+ assert(!(is_rdpc(prev) && is_cti(insn)), "RDPC-CTI not allowed.");
+
+ prev = insn;
+ }
+}
+#endif
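The new `validate_no_pipeline_hazards()` above walks the emitted code section one instruction word at a time and asserts that no control-transfer instruction (CTI) directly follows another CTI, and that no CTI is adjacent to an RDPC. Below is a minimal standalone sketch of that adjacency scan, not HotSpot code: the `is_cti`/`is_rdpc` predicates are passed in as callables so the sketch does not have to hard-code SPARC opcode constants (the real assembler uses `Assembler::is_cti()` and `Assembler::is_rdpc()` for that).

```cpp
// Standalone sketch of the adjacency scan performed by
// validate_no_pipeline_hazards(); predicates are supplied by the caller.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <functional>
#include <vector>

using InsnPredicate = std::function<bool(uint32_t)>;

void check_no_pipeline_hazards(const std::vector<uint8_t>& code,
                               const InsnPredicate& is_cti,
                               const InsnPredicate& is_rdpc) {
  constexpr size_t kBytesPerInstWord = 4;
  assert(code.size() % kBytesPerInstWord == 0 && "must be whole instruction words");

  uint32_t prev = 0;
  for (size_t off = 0; off < code.size(); off += kBytesPerInstWord) {
    uint32_t insn;
    std::memcpy(&insn, code.data() + off, sizeof(insn));  // safe unaligned read

    // 1. General case: no CTI immediately after another CTI.
    assert(!(is_cti(prev) && is_cti(insn)) && "CTI-CTI not allowed");

    // 2. Special case: no CTI immediately after/before RDPC.
    assert(!(is_cti(prev) && is_rdpc(insn)) && "CTI-RDPC not allowed");
    assert(!(is_rdpc(prev) && is_cti(insn)) && "RDPC-CTI not allowed");

    prev = insn;
  }
}
```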
diff --git a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp
index ec1a2423aa0..69822951928 100644
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,10 +28,10 @@
#include "asm/register.hpp"
// The SPARC Assembler: Pure assembler doing NO optimizations on the instruction
-// level; i.e., what you write
-// is what you get. The Assembler is generating code into a CodeBuffer.
+// level; i.e., what you write is what you get. The Assembler is generating code
+// into a CodeBuffer.
-class Assembler : public AbstractAssembler {
+class Assembler : public AbstractAssembler {
friend class AbstractAssembler;
friend class AddressLiteral;
@@ -244,18 +244,18 @@ class Assembler : public AbstractAssembler {
};
enum op5s {
- aes_eround01_op5 = 0x00,
- aes_eround23_op5 = 0x01,
- aes_dround01_op5 = 0x02,
- aes_dround23_op5 = 0x03,
- aes_eround01_l_op5 = 0x04,
- aes_eround23_l_op5 = 0x05,
- aes_dround01_l_op5 = 0x06,
- aes_dround23_l_op5 = 0x07,
- aes_kexpand1_op5 = 0x08
+ aes_eround01_op5 = 0x00,
+ aes_eround23_op5 = 0x01,
+ aes_dround01_op5 = 0x02,
+ aes_dround23_op5 = 0x03,
+ aes_eround01_l_op5 = 0x04,
+ aes_eround23_l_op5 = 0x05,
+ aes_dround01_l_op5 = 0x06,
+ aes_dround23_l_op5 = 0x07,
+ aes_kexpand1_op5 = 0x08
};
- enum RCondition { rc_z = 1, rc_lez = 2, rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7, rc_last = rc_gez };
+ enum RCondition { rc_z = 1, rc_lez = 2, rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7, rc_last = rc_gez };
enum Condition {
// for FBfcc & FBPfcc instruction
@@ -278,59 +278,38 @@ class Assembler : public AbstractAssembler {
f_unorderedOrLessOrEqual = 14,
f_ordered = 15,
- // V8 coproc, pp 123 v8 manual
-
- cp_always = 8,
- cp_never = 0,
- cp_3 = 7,
- cp_2 = 6,
- cp_2or3 = 5,
- cp_1 = 4,
- cp_1or3 = 3,
- cp_1or2 = 2,
- cp_1or2or3 = 1,
- cp_0 = 9,
- cp_0or3 = 10,
- cp_0or2 = 11,
- cp_0or2or3 = 12,
- cp_0or1 = 13,
- cp_0or1or3 = 14,
- cp_0or1or2 = 15,
-
-
// for integers
- never = 0,
- equal = 1,
- zero = 1,
- lessEqual = 2,
- less = 3,
- lessEqualUnsigned = 4,
- lessUnsigned = 5,
- carrySet = 5,
- negative = 6,
- overflowSet = 7,
- always = 8,
- notEqual = 9,
- notZero = 9,
- greater = 10,
- greaterEqual = 11,
- greaterUnsigned = 12,
- greaterEqualUnsigned = 13,
- carryClear = 13,
- positive = 14,
- overflowClear = 15
+ never = 0,
+ equal = 1,
+ zero = 1,
+ lessEqual = 2,
+ less = 3,
+ lessEqualUnsigned = 4,
+ lessUnsigned = 5,
+ carrySet = 5,
+ negative = 6,
+ overflowSet = 7,
+ always = 8,
+ notEqual = 9,
+ notZero = 9,
+ greater = 10,
+ greaterEqual = 11,
+ greaterUnsigned = 12,
+ greaterEqualUnsigned = 13,
+ carryClear = 13,
+ positive = 14,
+ overflowClear = 15
};
enum CC {
- icc = 0, xcc = 2,
// ptr_cc is the correct condition code for a pointer or intptr_t:
- ptr_cc = NOT_LP64(icc) LP64_ONLY(xcc),
- fcc0 = 0, fcc1 = 1, fcc2 = 2, fcc3 = 3
+ icc = 0, xcc = 2, ptr_cc = xcc,
+ fcc0 = 0, fcc1 = 1, fcc2 = 2, fcc3 = 3
};
enum PrefetchFcn {
- severalReads = 0, oneRead = 1, severalWritesAndPossiblyReads = 2, oneWrite = 3, page = 4
+ severalReads = 0, oneRead = 1, severalWritesAndPossiblyReads = 2, oneWrite = 3, page = 4
};
public:
@@ -354,7 +333,7 @@ class Assembler : public AbstractAssembler {
return is_simm(d, nbits + 2);
}
- address target_distance(Label& L) {
+ address target_distance(Label &L) {
// Assembler::target(L) should be called only when
// a branch instruction is emitted since non-bound
// labels record current pc() as a branch address.
@@ -364,7 +343,7 @@ class Assembler : public AbstractAssembler {
}
// test if label is in simm16 range in words (wdisp16).
- bool is_in_wdisp16_range(Label& L) {
+ bool is_in_wdisp16_range(Label &L) {
return is_in_wdisp_range(target_distance(L), pc(), 16);
}
// test if the distance between two addresses fits in simm30 range in words
@@ -392,41 +371,39 @@ class Assembler : public AbstractAssembler {
// and be sign-extended. Check the range.
static void assert_signed_range(intptr_t x, int nbits) {
- assert(nbits == 32 || (-(1 << nbits-1) <= x && x < ( 1 << nbits-1)),
+ assert(nbits == 32 || (-(1 << nbits-1) <= x && x < (1 << nbits-1)),
"value out of range: x=" INTPTR_FORMAT ", nbits=%d", x, nbits);
}
static void assert_signed_word_disp_range(intptr_t x, int nbits) {
- assert( (x & 3) == 0, "not word aligned");
+ assert((x & 3) == 0, "not word aligned");
assert_signed_range(x, nbits + 2);
}
static void assert_unsigned_const(int x, int nbits) {
- assert( juint(x) < juint(1 << nbits), "unsigned constant out of range");
+ assert(juint(x) < juint(1 << nbits), "unsigned constant out of range");
}
- // fields: note bits numbered from LSB = 0,
- // fields known by inclusive bit range
+ // fields: note bits numbered from LSB = 0, fields known by inclusive bit range
static int fmask(juint hi_bit, juint lo_bit) {
- assert( hi_bit >= lo_bit && 0 <= lo_bit && hi_bit < 32, "bad bits");
- return (1 << ( hi_bit-lo_bit + 1 )) - 1;
+ assert(hi_bit >= lo_bit && 0 <= lo_bit && hi_bit < 32, "bad bits");
+ return (1 << (hi_bit-lo_bit + 1)) - 1;
}
// inverse of u_field
static int inv_u_field(int x, int hi_bit, int lo_bit) {
juint r = juint(x) >> lo_bit;
- r &= fmask( hi_bit, lo_bit);
+ r &= fmask(hi_bit, lo_bit);
return int(r);
}
-
// signed version: extract from field and sign-extend
static int inv_s_field(int x, int hi_bit, int lo_bit) {
int sign_shift = 31 - hi_bit;
- return inv_u_field( ((x << sign_shift) >> sign_shift), hi_bit, lo_bit);
+ return inv_u_field(((x << sign_shift) >> sign_shift), hi_bit, lo_bit);
}
// given a field that ranges from hi_bit to lo_bit (inclusive,
@@ -435,72 +412,102 @@ class Assembler : public AbstractAssembler {
#ifdef ASSERT
static int u_field(int x, int hi_bit, int lo_bit) {
- assert( ( x & ~fmask(hi_bit, lo_bit)) == 0,
+ assert((x & ~fmask(hi_bit, lo_bit)) == 0,
"value out of range");
int r = x << lo_bit;
- assert( inv_u_field(r, hi_bit, lo_bit) == x, "just checking");
+ assert(inv_u_field(r, hi_bit, lo_bit) == x, "just checking");
return r;
}
#else
// make sure this is inlined as it will reduce code size significantly
- #define u_field(x, hi_bit, lo_bit) ((x) << (lo_bit))
+ #define u_field(x, hi_bit, lo_bit) ((x) << (lo_bit))
#endif
- static int inv_op( int x ) { return inv_u_field(x, 31, 30); }
- static int inv_op2( int x ) { return inv_u_field(x, 24, 22); }
- static int inv_op3( int x ) { return inv_u_field(x, 24, 19); }
- static int inv_cond( int x ){ return inv_u_field(x, 28, 25); }
+ static int inv_op(int x) { return inv_u_field(x, 31, 30); }
+ static int inv_op2(int x) { return inv_u_field(x, 24, 22); }
+ static int inv_op3(int x) { return inv_u_field(x, 24, 19); }
+ static int inv_cond(int x) { return inv_u_field(x, 28, 25); }
- static bool inv_immed( int x ) { return (x & Assembler::immed(true)) != 0; }
+ static bool inv_immed(int x) { return (x & Assembler::immed(true)) != 0; }
- static Register inv_rd( int x ) { return as_Register(inv_u_field(x, 29, 25)); }
- static Register inv_rs1( int x ) { return as_Register(inv_u_field(x, 18, 14)); }
- static Register inv_rs2( int x ) { return as_Register(inv_u_field(x, 4, 0)); }
+ static Register inv_rd(int x) { return as_Register(inv_u_field(x, 29, 25)); }
+ static Register inv_rs1(int x) { return as_Register(inv_u_field(x, 18, 14)); }
+ static Register inv_rs2(int x) { return as_Register(inv_u_field(x, 4, 0)); }
- static int op( int x) { return u_field(x, 31, 30); }
- static int rd( Register r) { return u_field(r->encoding(), 29, 25); }
- static int fcn( int x) { return u_field(x, 29, 25); }
- static int op3( int x) { return u_field(x, 24, 19); }
- static int rs1( Register r) { return u_field(r->encoding(), 18, 14); }
- static int rs2( Register r) { return u_field(r->encoding(), 4, 0); }
- static int annul( bool a) { return u_field(a ? 1 : 0, 29, 29); }
- static int cond( int x) { return u_field(x, 28, 25); }
- static int cond_mov( int x) { return u_field(x, 17, 14); }
- static int rcond( RCondition x) { return u_field(x, 12, 10); }
- static int op2( int x) { return u_field(x, 24, 22); }
- static int predict( bool p) { return u_field(p ? 1 : 0, 19, 19); }
- static int branchcc( CC fcca) { return u_field(fcca, 21, 20); }
- static int cmpcc( CC fcca) { return u_field(fcca, 26, 25); }
- static int imm_asi( int x) { return u_field(x, 12, 5); }
- static int immed( bool i) { return u_field(i ? 1 : 0, 13, 13); }
- static int opf_low6( int w) { return u_field(w, 10, 5); }
- static int opf_low5( int w) { return u_field(w, 9, 5); }
- static int op5( int x) { return u_field(x, 8, 5); }
- static int trapcc( CC cc) { return u_field(cc, 12, 11); }
- static int sx( int i) { return u_field(i, 12, 12); } // shift x=1 means 64-bit
- static int opf( int x) { return u_field(x, 13, 5); }
+ static int op(int x) { return u_field(x, 31, 30); }
+ static int rd(Register r) { return u_field(r->encoding(), 29, 25); }
+ static int fcn(int x) { return u_field(x, 29, 25); }
+ static int op3(int x) { return u_field(x, 24, 19); }
+ static int rs1(Register r) { return u_field(r->encoding(), 18, 14); }
+ static int rs2(Register r) { return u_field(r->encoding(), 4, 0); }
+ static int annul(bool a) { return u_field(a ? 1 : 0, 29, 29); }
+ static int cond(int x) { return u_field(x, 28, 25); }
+ static int cond_mov(int x) { return u_field(x, 17, 14); }
+ static int rcond(RCondition x) { return u_field(x, 12, 10); }
+ static int op2(int x) { return u_field(x, 24, 22); }
+ static int predict(bool p) { return u_field(p ? 1 : 0, 19, 19); }
+ static int branchcc(CC fcca) { return u_field(fcca, 21, 20); }
+ static int cmpcc(CC fcca) { return u_field(fcca, 26, 25); }
+ static int imm_asi(int x) { return u_field(x, 12, 5); }
+ static int immed(bool i) { return u_field(i ? 1 : 0, 13, 13); }
+ static int opf_low6(int w) { return u_field(w, 10, 5); }
+ static int opf_low5(int w) { return u_field(w, 9, 5); }
+ static int op5(int x) { return u_field(x, 8, 5); }
+ static int trapcc(CC cc) { return u_field(cc, 12, 11); }
+ static int sx(int i) { return u_field(i, 12, 12); } // shift x=1 means 64-bit
+ static int opf(int x) { return u_field(x, 13, 5); }
- static bool is_cbcond( int x ) {
+ static bool is_cbcond(int x) {
return (VM_Version::has_cbcond() && (inv_cond(x) > rc_last) &&
inv_op(x) == branch_op && inv_op2(x) == bpr_op2);
}
- static bool is_cxb( int x ) {
+ static bool is_cxb(int x) {
assert(is_cbcond(x), "wrong instruction");
- return (x & (1<<21)) != 0;
+ return (x & (1 << 21)) != 0;
}
- static int cond_cbcond( int x) { return u_field((((x & 8)<<1) + 8 + (x & 7)), 29, 25); }
- static int inv_cond_cbcond(int x) {
- assert(is_cbcond(x), "wrong instruction");
- return inv_u_field(x, 27, 25) | (inv_u_field(x, 29, 29)<<3);
+ static bool is_branch(int x) {
+ if (inv_op(x) != Assembler::branch_op) return false;
+
+ bool is_bpr = inv_op2(x) == Assembler::bpr_op2;
+ bool is_bp = inv_op2(x) == Assembler::bp_op2;
+ bool is_br = inv_op2(x) == Assembler::br_op2;
+ bool is_fp = inv_op2(x) == Assembler::fb_op2;
+ bool is_fbp = inv_op2(x) == Assembler::fbp_op2;
+
+ return is_bpr || is_bp || is_br || is_fp || is_fbp;
+ }
+ static bool is_call(int x) {
+ return inv_op(x) == Assembler::call_op;
+ }
+ static bool is_jump(int x) {
+ if (inv_op(x) != Assembler::arith_op) return false;
+
+ bool is_jmpl = inv_op3(x) == Assembler::jmpl_op3;
+ bool is_rett = inv_op3(x) == Assembler::rett_op3;
+
+ return is_jmpl || is_rett;
+ }
+ static bool is_rdpc(int x) {
+ return (inv_op(x) == Assembler::arith_op && inv_op3(x) == Assembler::rdreg_op3 &&
+ inv_u_field(x, 18, 14) == 5);
+ }
+ static bool is_cti(int x) {
+ return is_branch(x) || is_call(x) || is_jump(x); // Ignoring done/retry
}
- static int opf_cc( CC c, bool useFloat ) { return u_field((useFloat ? 0 : 4) + c, 13, 11); }
- static int mov_cc( CC c, bool useFloat ) { return u_field(useFloat ? 0 : 1, 18, 18) | u_field(c, 12, 11); }
+ static int cond_cbcond(int x) { return u_field((((x & 8) << 1) + 8 + (x & 7)), 29, 25); }
+ static int inv_cond_cbcond(int x) {
+ assert(is_cbcond(x), "wrong instruction");
+ return inv_u_field(x, 27, 25) | (inv_u_field(x, 29, 29) << 3);
+ }
- static int fd( FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); };
- static int fs1(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); };
- static int fs2(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 4, 0); };
- static int fs3(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 13, 9); };
+ static int opf_cc(CC c, bool useFloat) { return u_field((useFloat ? 0 : 4) + c, 13, 11); }
+ static int mov_cc(CC c, bool useFloat) { return u_field(useFloat ? 0 : 1, 18, 18) | u_field(c, 12, 11); }
+
+ static int fd(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); };
+ static int fs1(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); };
+ static int fs2(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 4, 0); };
+ static int fs3(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 13, 9); };
// some float instructions use this encoding on the op3 field
static int alt_op3(int op, FloatRegisterImpl::Width w) {
@@ -514,23 +521,22 @@ class Assembler : public AbstractAssembler {
return op3(r);
}
-
// compute inverse of simm
static int inv_simm(int x, int nbits) {
return (int)(x << (32 - nbits)) >> (32 - nbits);
}
- static int inv_simm13( int x ) { return inv_simm(x, 13); }
+ static int inv_simm13(int x) { return inv_simm(x, 13); }
// signed immediate, in low bits, nbits long
static int simm(int x, int nbits) {
assert_signed_range(x, nbits);
- return x & (( 1 << nbits ) - 1);
+ return x & ((1 << nbits) - 1);
}
// compute inverse of wdisp16
static intptr_t inv_wdisp16(int x, intptr_t pos) {
- int lo = x & (( 1 << 14 ) - 1);
+ int lo = x & ((1 << 14) - 1);
int hi = (x >> 20) & 3;
if (hi >= 2) hi |= ~1;
return (((hi << 14) | lo) << 2) + pos;
@@ -540,9 +546,8 @@ class Assembler : public AbstractAssembler {
static int wdisp16(intptr_t x, intptr_t off) {
intptr_t xx = x - off;
assert_signed_word_disp_range(xx, 16);
- int r = (xx >> 2) & ((1 << 14) - 1)
- | ( ( (xx>>(2+14)) & 3 ) << 20 );
- assert( inv_wdisp16(r, off) == x, "inverse is not inverse");
+ int r = (xx >> 2) & ((1 << 14) - 1) | (((xx >> (2+14)) & 3) << 20);
+ assert(inv_wdisp16(r, off) == x, "inverse is not inverse");
return r;
}
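The encoding helpers touched in the hunks above (`fmask`, `u_field`, `inv_u_field`, and the `wdisp*` encoders) all follow the same pack/extract pattern for fields given as an inclusive `[hi_bit, lo_bit]` range of a 32-bit instruction word. The following is a small self-contained sketch of that pattern with a couple of spot checks; the concrete field positions and values are illustrative only, not a restatement of the SPARC manual.

```cpp
// Sketch of the fmask/u_field/inv_u_field pattern; not HotSpot code.
#include <cassert>
#include <cstdint>

// Mask of (hi_bit - lo_bit + 1) ones, right-justified.
// (A full 32-bit field would overflow the shift; not needed for this sketch.)
static uint32_t fmask(unsigned hi_bit, unsigned lo_bit) {
  assert(hi_bit >= lo_bit && hi_bit < 32 && "bad bits");
  return (1u << (hi_bit - lo_bit + 1)) - 1;
}

// Place a value into its field (the value must fit the field).
static uint32_t u_field(uint32_t x, unsigned hi_bit, unsigned lo_bit) {
  assert((x & ~fmask(hi_bit, lo_bit)) == 0 && "value out of range");
  return x << lo_bit;
}

// Extract a field back out of an instruction word.
static uint32_t inv_u_field(uint32_t insn, unsigned hi_bit, unsigned lo_bit) {
  return (insn >> lo_bit) & fmask(hi_bit, lo_bit);
}

int main() {
  // A field spanning bits 24..22 is three bits wide.
  assert(fmask(24, 22) == 0x7u);

  // Packing and extracting round-trips (illustrative value 0x5).
  uint32_t word = u_field(0x5, 24, 22);
  assert(inv_u_field(word, 24, 22) == 0x5u);
  return 0;
}
```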
@@ -560,260 +565,292 @@ class Assembler : public AbstractAssembler {
assert(VM_Version::has_cbcond(), "This CPU does not have CBCOND instruction");
intptr_t xx = x - off;
assert_signed_word_disp_range(xx, 10);
- int r = ( ( (xx >> 2 ) & ((1 << 8) - 1) ) << 5 )
- | ( ( (xx >> (2+8)) & 3 ) << 19 );
+ int r = (((xx >> 2) & ((1 << 8) - 1)) << 5) | (((xx >> (2+8)) & 3) << 19);
// Have to fake cbcond instruction to pass assert in inv_wdisp10()
- assert(inv_wdisp10((r | op(branch_op) | cond_cbcond(rc_last+1) | op2(bpr_op2)), off) == x, "inverse is not inverse");
+ assert(inv_wdisp10((r | op(branch_op) | cond_cbcond(rc_last+1) | op2(bpr_op2)), off) == x, "inverse is not inverse");
return r;
}
// word displacement in low-order nbits bits
- static intptr_t inv_wdisp( int x, intptr_t pos, int nbits ) {
- int pre_sign_extend = x & (( 1 << nbits ) - 1);
- int r = pre_sign_extend >= ( 1 << (nbits-1) )
- ? pre_sign_extend | ~(( 1 << nbits ) - 1)
- : pre_sign_extend;
+ static intptr_t inv_wdisp(int x, intptr_t pos, int nbits) {
+ int pre_sign_extend = x & ((1 << nbits) - 1);
+ int r = (pre_sign_extend >= (1 << (nbits - 1)) ?
+ pre_sign_extend | ~((1 << nbits) - 1) : pre_sign_extend);
return (r << 2) + pos;
}
- static int wdisp( intptr_t x, intptr_t off, int nbits ) {
+ static int wdisp(intptr_t x, intptr_t off, int nbits) {
intptr_t xx = x - off;
assert_signed_word_disp_range(xx, nbits);
- int r = (xx >> 2) & (( 1 << nbits ) - 1);
- assert( inv_wdisp( r, off, nbits ) == x, "inverse not inverse");
+ int r = (xx >> 2) & ((1 << nbits) - 1);
+ assert(inv_wdisp(r, off, nbits) == x, "inverse not inverse");
return r;
}
// Extract the top 32 bits in a 64 bit word
- static int32_t hi32( int64_t x ) {
- int32_t r = int32_t( (uint64_t)x >> 32 );
+ static int32_t hi32(int64_t x) {
+ int32_t r = int32_t((uint64_t)x >> 32);
return r;
}
// given a sethi instruction, extract the constant, left-justified
- static int inv_hi22( int x ) {
+ static int inv_hi22(int x) {
return x << 10;
}
// create an imm22 field, given a 32-bit left-justified constant
- static int hi22( int x ) {
- int r = int( juint(x) >> 10 );
- assert( (r & ~((1 << 22) - 1)) == 0, "just checkin'");
+ static int hi22(int x) {
+ int r = int(juint(x) >> 10);
+ assert((r & ~((1 << 22) - 1)) == 0, "just checkin'");
return r;
}
// create a low10 __value__ (not a field) for a given a 32-bit constant
- static int low10( int x ) {
+ static int low10(int x) {
return x & ((1 << 10) - 1);
}
// create a low12 __value__ (not a field) for a given a 32-bit constant
- static int low12( int x ) {
+ static int low12(int x) {
return x & ((1 << 12) - 1);
}
// AES crypto instructions supported only on certain processors
- static void aes_only() { assert( VM_Version::has_aes(), "This instruction only works on SPARC with AES instructions support"); }
+ static void aes_only() { assert(VM_Version::has_aes(), "This instruction only works on SPARC with AES instructions support"); }
// SHA crypto instructions supported only on certain processors
- static void sha1_only() { assert( VM_Version::has_sha1(), "This instruction only works on SPARC with SHA1"); }
- static void sha256_only() { assert( VM_Version::has_sha256(), "This instruction only works on SPARC with SHA256"); }
- static void sha512_only() { assert( VM_Version::has_sha512(), "This instruction only works on SPARC with SHA512"); }
+ static void sha1_only() { assert(VM_Version::has_sha1(), "This instruction only works on SPARC with SHA1"); }
+ static void sha256_only() { assert(VM_Version::has_sha256(), "This instruction only works on SPARC with SHA256"); }
+ static void sha512_only() { assert(VM_Version::has_sha512(), "This instruction only works on SPARC with SHA512"); }
// CRC32C instruction supported only on certain processors
- static void crc32c_only() { assert( VM_Version::has_crc32c(), "This instruction only works on SPARC with CRC32C"); }
+ static void crc32c_only() { assert(VM_Version::has_crc32c(), "This instruction only works on SPARC with CRC32C"); }
+
+ // FMAf instructions supported only on certain processors
+ static void fmaf_only() { assert(VM_Version::has_fmaf(), "This instruction only works on SPARC with FMAf"); }
// instruction only in VIS1
- static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); }
+ static void vis1_only() { assert(VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); }
// instruction only in VIS2
- static void vis2_only() { assert( VM_Version::has_vis2(), "This instruction only works on SPARC with VIS2"); }
+ static void vis2_only() { assert(VM_Version::has_vis2(), "This instruction only works on SPARC with VIS2"); }
// instruction only in VIS3
- static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
-
- // instruction only in v9
- static void v9_only() { } // do nothing
+ static void vis3_only() { assert(VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
// instruction deprecated in v9
- static void v9_dep() { } // do nothing for now
-
- // v8 has no CC field
- static void v8_no_cc(CC cc) { if (cc) v9_only(); }
+ static void v9_dep() { } // do nothing for now
protected:
- // Simple delay-slot scheme:
- // In order to check the programmer, the assembler keeps track of deley slots.
- // It forbids CTIs in delay slots (conservative, but should be OK).
- // Also, when putting an instruction into a delay slot, you must say
- // asm->delayed()->add(...), in order to check that you don't omit
- // delay-slot instructions.
- // To implement this, we use a simple FSA
-
#ifdef ASSERT
- #define CHECK_DELAY
+#define VALIDATE_PIPELINE
#endif
-#ifdef CHECK_DELAY
- enum Delay_state { no_delay, at_delay_slot, filling_delay_slot } delay_state;
+
+#ifdef VALIDATE_PIPELINE
+ // A simple delay-slot scheme:
+ // In order to check the programmer, the assembler keeps track of delay-slots.
+ // It forbids CTIs in delay-slots (conservative, but should be OK). Also, when
+ // emitting an instruction into a delay-slot, you must do so using delayed(),
+ // e.g. asm->delayed()->add(...), in order to check that you do not omit the
+ // delay-slot instruction. To implement this, we use a simple FSA.
+ enum { NoDelay, AtDelay, FillDelay } _delay_state;
+
+ // A simple hazard scheme:
+ // In order to avoid pipeline stalls, due to single cycle pipeline hazards, we
+ // adopt a simplistic state tracking mechanism that will enforce an additional
+ // 'nop' instruction to be inserted prior to emitting an instruction that can
+ // expose a given hazard (currently, PC-related hazards only).
+ enum { NoHazard, PcHazard } _hazard_state;
#endif
public:
- // Tells assembler next instruction must NOT be in delay slot.
- // Use at start of multinstruction macros.
+ // Tell the assembler that the next instruction must NOT be in delay-slot.
+ // Use at start of multi-instruction macros.
void assert_not_delayed() {
- // This is a separate overloading to avoid creation of string constants
- // in non-asserted code--with some compilers this pollutes the object code.
-#ifdef CHECK_DELAY
- assert_not_delayed("next instruction should not be a delay slot");
-#endif
- }
- void assert_not_delayed(const char* msg) {
-#ifdef CHECK_DELAY
- assert(delay_state == no_delay, msg);
+ // This is a separate entry to avoid the creation of string constants in
+ // non-asserted code, with some compilers this pollutes the object code.
+#ifdef VALIDATE_PIPELINE
+ assert_no_delay("Next instruction should not be in a delay-slot.");
#endif
}
protected:
- // Insert a nop if the previous is cbcond
- inline void insert_nop_after_cbcond();
+ void assert_no_delay(const char* msg) {
+#ifdef VALIDATE_PIPELINE
+ assert(_delay_state == NoDelay, msg);
+#endif
+ }
- // Delay slot helpers
- // cti is called when emitting control-transfer instruction,
- // BEFORE doing the emitting.
- // Only effective when assertion-checking is enabled.
+ void assert_no_hazard() {
+#ifdef VALIDATE_PIPELINE
+ assert(_hazard_state == NoHazard, "Unsolicited pipeline hazard.");
+#endif
+ }
+
+ private:
+ inline int32_t prev_insn() {
+ assert(offset() > 0, "Interface violation.");
+ int32_t* addr = (int32_t*)pc() - 1;
+ return *addr;
+ }
+
+#ifdef VALIDATE_PIPELINE
+ void validate_no_pipeline_hazards();
+#endif
+
+ protected:
+ // Avoid possible pipeline stall by inserting an additional 'nop' instruction,
+ // if the previous instruction is a 'cbcond' or a 'rdpc'.
+ inline void avoid_pipeline_stall();
+
+ // A call to cti() is made before emitting a control-transfer instruction (CTI)
+ // in order to assert a CTI is not emitted right after a 'cbcond', nor in the
+ // delay-slot of another CTI. Only effective when assertions are enabled.
void cti() {
- // A cbcond instruction immediately followed by a CTI
- // instruction introduces pipeline stalls, we need to avoid that.
- no_cbcond_before();
-#ifdef CHECK_DELAY
- assert_not_delayed("cti should not be in delay slot");
+ // A 'cbcond' or 'rdpc' instruction immediately followed by a CTI introduces
+ // a pipeline stall, which we make sure to prohibit.
+ assert_no_cbcond_before();
+ assert_no_rdpc_before();
+#ifdef VALIDATE_PIPELINE
+ assert_no_hazard();
+ assert_no_delay("CTI in delay-slot.");
#endif
}
- // called when emitting cti with a delay slot, AFTER emitting
- void has_delay_slot() {
-#ifdef CHECK_DELAY
- assert_not_delayed("just checking");
- delay_state = at_delay_slot;
+ // Called when emitting CTI with a delay-slot, AFTER emitting.
+ inline void induce_delay_slot() {
+#ifdef VALIDATE_PIPELINE
+ assert_no_delay("Already in delay-slot.");
+ _delay_state = AtDelay;
#endif
}
- // cbcond instruction should not be generated one after an other
- bool cbcond_before() {
- if (offset() == 0) return false; // it is first instruction
- int x = *(int*)(intptr_t(pc()) - 4); // previous instruction
- return is_cbcond(x);
+ inline void induce_pc_hazard() {
+#ifdef VALIDATE_PIPELINE
+ assert_no_hazard();
+ _hazard_state = PcHazard;
+#endif
}
- void no_cbcond_before() {
- assert(offset() == 0 || !cbcond_before(), "cbcond should not follow an other cbcond");
- }
-public:
+ bool is_cbcond_before() { return offset() > 0 ? is_cbcond(prev_insn()) : false; }
- bool use_cbcond(Label& L) {
- if (!UseCBCond || cbcond_before()) return false;
+ bool is_rdpc_before() { return offset() > 0 ? is_rdpc(prev_insn()) : false; }
+
+ void assert_no_cbcond_before() {
+ assert(offset() == 0 || !is_cbcond_before(), "CBCOND should not be followed by CTI.");
+ }
+
+ void assert_no_rdpc_before() {
+ assert(offset() == 0 || !is_rdpc_before(), "RDPC should not be followed by CTI.");
+ }
+
+ public:
+
+ bool use_cbcond(Label &L) {
+ if (!UseCBCond || is_cbcond_before()) return false;
intptr_t x = intptr_t(target_distance(L)) - intptr_t(pc());
- assert( (x & 3) == 0, "not word aligned");
+ assert((x & 3) == 0, "not word aligned");
return is_simm12(x);
}
// Tells assembler you know that next instruction is delayed
Assembler* delayed() {
-#ifdef CHECK_DELAY
- assert ( delay_state == at_delay_slot, "delayed instruction is not in delay slot");
- delay_state = filling_delay_slot;
+#ifdef VALIDATE_PIPELINE
+ assert(_delay_state == AtDelay, "Delayed instruction not in delay-slot.");
+ _delay_state = FillDelay;
#endif
return this;
}
void flush() {
-#ifdef CHECK_DELAY
- assert ( delay_state == no_delay, "ending code with a delay slot");
+#ifdef VALIDATE_PIPELINE
+ assert(_delay_state == NoDelay, "Ending code with a delay-slot.");
+ validate_no_pipeline_hazards();
#endif
AbstractAssembler::flush();
}
inline void emit_int32(int); // shadows AbstractAssembler::emit_int32
- inline void emit_data(int x);
- inline void emit_data(int, RelocationHolder const&);
+ inline void emit_data(int);
+ inline void emit_data(int, RelocationHolder const &rspec);
inline void emit_data(int, relocInfo::relocType rtype);
- // helper for above fcns
+ // helper for above functions
inline void check_delay();
public:
// instructions, refer to page numbers in the SPARC Architecture Manual, V9
- // pp 135 (addc was addx in v8)
+ // pp 135
- inline void add(Register s1, Register s2, Register d );
- inline void add(Register s1, int simm13a, Register d );
+ inline void add(Register s1, Register s2, Register d);
+ inline void add(Register s1, int simm13a, Register d);
- inline void addcc( Register s1, Register s2, Register d );
- inline void addcc( Register s1, int simm13a, Register d );
- inline void addc( Register s1, Register s2, Register d );
- inline void addc( Register s1, int simm13a, Register d );
- inline void addccc( Register s1, Register s2, Register d );
- inline void addccc( Register s1, int simm13a, Register d );
+ inline void addcc(Register s1, Register s2, Register d);
+ inline void addcc(Register s1, int simm13a, Register d);
+ inline void addc(Register s1, Register s2, Register d);
+ inline void addc(Register s1, int simm13a, Register d);
+ inline void addccc(Register s1, Register s2, Register d);
+ inline void addccc(Register s1, int simm13a, Register d);
// 4-operand AES instructions
- inline void aes_eround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
- inline void aes_eround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
- inline void aes_dround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
- inline void aes_dround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
- inline void aes_eround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
- inline void aes_eround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
- inline void aes_dround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
- inline void aes_dround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
- inline void aes_kexpand1( FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d );
+ inline void aes_eround01(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+ inline void aes_eround23(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+ inline void aes_dround01(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+ inline void aes_dround23(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+ inline void aes_eround01_l(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+ inline void aes_eround23_l(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+ inline void aes_dround01_l(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+ inline void aes_dround23_l(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
+ inline void aes_kexpand1(FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d);
// 3-operand AES instructions
- inline void aes_kexpand0( FloatRegister s1, FloatRegister s2, FloatRegister d );
- inline void aes_kexpand2( FloatRegister s1, FloatRegister s2, FloatRegister d );
+ inline void aes_kexpand0(FloatRegister s1, FloatRegister s2, FloatRegister d);
+ inline void aes_kexpand2(FloatRegister s1, FloatRegister s2, FloatRegister d);
// pp 136
inline void bpr(RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none);
- inline void bpr(RCondition c, bool a, Predict p, Register s1, Label& L);
+ inline void bpr(RCondition c, bool a, Predict p, Register s1, Label &L);
// compare and branch
- inline void cbcond(Condition c, CC cc, Register s1, Register s2, Label& L);
- inline void cbcond(Condition c, CC cc, Register s1, int simm5, Label& L);
+ inline void cbcond(Condition c, CC cc, Register s1, Register s2, Label &L);
+ inline void cbcond(Condition c, CC cc, Register s1, int simm5, Label &L);
protected: // use MacroAssembler::br instead
// pp 138
- inline void fb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
- inline void fb( Condition c, bool a, Label& L );
+ inline void fb(Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none);
+ inline void fb(Condition c, bool a, Label &L);
// pp 141
- inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
- inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
+ inline void fbp(Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none);
+ inline void fbp(Condition c, bool a, CC cc, Predict p, Label &L);
// pp 144
- inline void br( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
- inline void br( Condition c, bool a, Label& L );
+ inline void br(Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none);
+ inline void br(Condition c, bool a, Label &L);
// pp 146
- inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
- inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
+ inline void bp(Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none);
+ inline void bp(Condition c, bool a, CC cc, Predict p, Label &L);
// pp 149
- inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
- inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
+ inline void call(address d, relocInfo::relocType rt = relocInfo::runtime_call_type);
+ inline void call(Label &L, relocInfo::relocType rt = relocInfo::runtime_call_type);
- inline void call( address d, RelocationHolder const& rspec );
+ inline void call(address d, RelocationHolder const &rspec);
public:
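The reworked delay-slot tracking in the hunk above is a three-state FSA (`NoDelay`, `AtDelay`, `FillDelay`) alongside a separate PC-hazard flag. Below is a minimal sketch of just the delay-slot FSA, under the assumption (consistent with the comments above, but not verified against the .inline.hpp body) that `check_delay()` runs before each ordinary instruction is emitted and resets the state. The class and method names here are illustrative stand-ins, not the real `Assembler` interface.

```cpp
// Sketch of the NoDelay -> AtDelay -> FillDelay -> NoDelay state machine.
#include <cassert>

class DelaySlotTracker {
 public:
  enum State { NoDelay, AtDelay, FillDelay };

  // Called after a CTI with a delay-slot has been emitted.
  void induce_delay_slot() {
    assert(_state == NoDelay && "already in a delay-slot");
    _state = AtDelay;
  }

  // The programmer announces that the next instruction fills the delay-slot.
  DelaySlotTracker* delayed() {
    assert(_state == AtDelay && "delayed instruction not in a delay-slot");
    _state = FillDelay;
    return this;
  }

  // Assumed to run before every ordinary instruction is emitted.
  void check_delay() {
    assert(_state != AtDelay && "must say delayed() when filling a delay-slot");
    _state = NoDelay;
  }

  // Must hold when code generation ends.
  void flush() { assert(_state == NoDelay && "ending code with a delay-slot"); }

 private:
  State _state = NoDelay;
};
```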
@@ -824,19 +861,19 @@ public:
// at address s1 is swapped with the data in d. If the values are not equal,
// the the contents of memory at s1 is loaded into d, without the swap.
- inline void casa( Register s1, Register s2, Register d, int ia = -1 );
- inline void casxa( Register s1, Register s2, Register d, int ia = -1 );
+ inline void casa(Register s1, Register s2, Register d, int ia = -1);
+ inline void casxa(Register s1, Register s2, Register d, int ia = -1);
// pp 152
- inline void udiv( Register s1, Register s2, Register d );
- inline void udiv( Register s1, int simm13a, Register d );
- inline void sdiv( Register s1, Register s2, Register d );
- inline void sdiv( Register s1, int simm13a, Register d );
- inline void udivcc( Register s1, Register s2, Register d );
- inline void udivcc( Register s1, int simm13a, Register d );
- inline void sdivcc( Register s1, Register s2, Register d );
- inline void sdivcc( Register s1, int simm13a, Register d );
+ inline void udiv(Register s1, Register s2, Register d);
+ inline void udiv(Register s1, int simm13a, Register d);
+ inline void sdiv(Register s1, Register s2, Register d);
+ inline void sdiv(Register s1, int simm13a, Register d);
+ inline void udivcc(Register s1, Register s2, Register d);
+ inline void udivcc(Register s1, int simm13a, Register d);
+ inline void sdivcc(Register s1, Register s2, Register d);
+ inline void sdivcc(Register s1, int simm13a, Register d);
// pp 155
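The comment above describes standard compare-and-swap semantics: `d` always ends up with the old memory value, and the store only happens when memory matched `s2`. For readers more at home in C++, here is a small illustration of the same semantics using `std::atomic`; it is explanatory only and is not how HotSpot emits or uses `casa`/`casxa`.

```cpp
// Illustration of casa/casxa-style semantics; not SPARC or HotSpot code.
#include <atomic>
#include <cassert>
#include <cstdint>

// Returns the previous memory value, like the value left in register d.
static uint64_t cas_like(std::atomic<uint64_t>& mem,
                         uint64_t compare_value,  // plays the role of s2
                         uint64_t new_value) {    // plays the role of d (input)
  uint64_t observed = compare_value;
  // On success, memory now holds new_value; on failure, 'observed' is
  // updated with the value actually found in memory.
  mem.compare_exchange_strong(observed, new_value);
  return observed;
}

int main() {
  std::atomic<uint64_t> word{42};

  // Mismatch: memory is untouched, the old value is returned.
  assert(cas_like(word, 7, 100) == 42 && word.load() == 42);

  // Match: memory is updated, the old value is returned.
  assert(cas_like(word, 42, 100) == 42 && word.load() == 100);
  return 0;
}
```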
@@ -845,54 +882,58 @@ public:
// pp 156
- inline void fadd( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d );
- inline void fsub( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d );
+ inline void fadd(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d);
+ inline void fsub(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d);
// pp 157
- inline void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2);
- inline void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2);
+ inline void fcmp(FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2);
+ inline void fcmpe(FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2);
// pp 159
- inline void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
- inline void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
+ inline void ftox(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
+ inline void ftoi(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
// pp 160
- inline void ftof( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d );
+ inline void ftof(FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d);
// pp 161
- inline void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
- inline void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
+ inline void fxtof(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
+ inline void fitof(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
// pp 162
- inline void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
+ inline void fmov(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
- inline void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
+ inline void fneg(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
- inline void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
+ inline void fabs(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
// pp 163
- inline void fmul( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d );
- inline void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d );
- inline void fdiv( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d );
+ inline void fmul(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d);
+ inline void fmul(FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d);
+ inline void fdiv(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d);
// FXORs/FXORd instructions
- inline void fxor( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d );
+ inline void fxor(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d);
// pp 164
- inline void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
+ inline void fsqrt(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
+
+ // fmaf instructions.
+
+ inline void fmadd(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d);
// pp 165
- inline void flush( Register s1, Register s2 );
- inline void flush( Register s1, int simm13a);
+ inline void flush(Register s1, Register s2);
+ inline void flush(Register s1, int simm13a);
// pp 167
@@ -900,139 +941,140 @@ public:
// pp 168
- void illtrap( int const22a);
- // v8 unimp == illtrap(0)
+ void illtrap(int const22a);
// pp 169
- void impdep1( int id1, int const19a );
- void impdep2( int id1, int const19a );
+ void impdep1(int id1, int const19a);
+ void impdep2(int id1, int const19a);
// pp 170
- void jmpl( Register s1, Register s2, Register d );
- void jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec = RelocationHolder() );
+ void jmpl(Register s1, Register s2, Register d);
+ void jmpl(Register s1, int simm13a, Register d,
+ RelocationHolder const &rspec = RelocationHolder());
// 171
inline void ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d);
- inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec = RelocationHolder());
+ inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d,
+ RelocationHolder const &rspec = RelocationHolder());
- inline void ldfsr( Register s1, Register s2 );
- inline void ldfsr( Register s1, int simm13a);
- inline void ldxfsr( Register s1, Register s2 );
- inline void ldxfsr( Register s1, int simm13a);
+ inline void ldfsr(Register s1, Register s2);
+ inline void ldfsr(Register s1, int simm13a);
+ inline void ldxfsr(Register s1, Register s2);
+ inline void ldxfsr(Register s1, int simm13a);
// 173
- inline void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d );
- inline void ldfa( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d );
+ inline void ldfa(FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d);
+ inline void ldfa(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d);
- // pp 175, lduw is ld on v8
+ // pp 175
- inline void ldsb( Register s1, Register s2, Register d );
- inline void ldsb( Register s1, int simm13a, Register d);
- inline void ldsh( Register s1, Register s2, Register d );
- inline void ldsh( Register s1, int simm13a, Register d);
- inline void ldsw( Register s1, Register s2, Register d );
- inline void ldsw( Register s1, int simm13a, Register d);
- inline void ldub( Register s1, Register s2, Register d );
- inline void ldub( Register s1, int simm13a, Register d);
- inline void lduh( Register s1, Register s2, Register d );
- inline void lduh( Register s1, int simm13a, Register d);
- inline void lduw( Register s1, Register s2, Register d );
- inline void lduw( Register s1, int simm13a, Register d);
- inline void ldx( Register s1, Register s2, Register d );
- inline void ldx( Register s1, int simm13a, Register d);
- inline void ldd( Register s1, Register s2, Register d );
- inline void ldd( Register s1, int simm13a, Register d);
+ inline void ldsb(Register s1, Register s2, Register d);
+ inline void ldsb(Register s1, int simm13a, Register d);
+ inline void ldsh(Register s1, Register s2, Register d);
+ inline void ldsh(Register s1, int simm13a, Register d);
+ inline void ldsw(Register s1, Register s2, Register d);
+ inline void ldsw(Register s1, int simm13a, Register d);
+ inline void ldub(Register s1, Register s2, Register d);
+ inline void ldub(Register s1, int simm13a, Register d);
+ inline void lduh(Register s1, Register s2, Register d);
+ inline void lduh(Register s1, int simm13a, Register d);
+ inline void lduw(Register s1, Register s2, Register d);
+ inline void lduw(Register s1, int simm13a, Register d);
+ inline void ldx(Register s1, Register s2, Register d);
+ inline void ldx(Register s1, int simm13a, Register d);
+ inline void ldd(Register s1, Register s2, Register d);
+ inline void ldd(Register s1, int simm13a, Register d);
// pp 177
- inline void ldsba( Register s1, Register s2, int ia, Register d );
- inline void ldsba( Register s1, int simm13a, Register d );
- inline void ldsha( Register s1, Register s2, int ia, Register d );
- inline void ldsha( Register s1, int simm13a, Register d );
- inline void ldswa( Register s1, Register s2, int ia, Register d );
- inline void ldswa( Register s1, int simm13a, Register d );
- inline void lduba( Register s1, Register s2, int ia, Register d );
- inline void lduba( Register s1, int simm13a, Register d );
- inline void lduha( Register s1, Register s2, int ia, Register d );
- inline void lduha( Register s1, int simm13a, Register d );
- inline void lduwa( Register s1, Register s2, int ia, Register d );
- inline void lduwa( Register s1, int simm13a, Register d );
- inline void ldxa( Register s1, Register s2, int ia, Register d );
- inline void ldxa( Register s1, int simm13a, Register d );
+ inline void ldsba(Register s1, Register s2, int ia, Register d);
+ inline void ldsba(Register s1, int simm13a, Register d);
+ inline void ldsha(Register s1, Register s2, int ia, Register d);
+ inline void ldsha(Register s1, int simm13a, Register d);
+ inline void ldswa(Register s1, Register s2, int ia, Register d);
+ inline void ldswa(Register s1, int simm13a, Register d);
+ inline void lduba(Register s1, Register s2, int ia, Register d);
+ inline void lduba(Register s1, int simm13a, Register d);
+ inline void lduha(Register s1, Register s2, int ia, Register d);
+ inline void lduha(Register s1, int simm13a, Register d);
+ inline void lduwa(Register s1, Register s2, int ia, Register d);
+ inline void lduwa(Register s1, int simm13a, Register d);
+ inline void ldxa(Register s1, Register s2, int ia, Register d);
+ inline void ldxa(Register s1, int simm13a, Register d);
// pp 181
- inline void and3( Register s1, Register s2, Register d );
- inline void and3( Register s1, int simm13a, Register d );
- inline void andcc( Register s1, Register s2, Register d );
- inline void andcc( Register s1, int simm13a, Register d );
- inline void andn( Register s1, Register s2, Register d );
- inline void andn( Register s1, int simm13a, Register d );
- inline void andncc( Register s1, Register s2, Register d );
- inline void andncc( Register s1, int simm13a, Register d );
- inline void or3( Register s1, Register s2, Register d );
- inline void or3( Register s1, int simm13a, Register d );
- inline void orcc( Register s1, Register s2, Register d );
- inline void orcc( Register s1, int simm13a, Register d );
- inline void orn( Register s1, Register s2, Register d );
- inline void orn( Register s1, int simm13a, Register d );
- inline void orncc( Register s1, Register s2, Register d );
- inline void orncc( Register s1, int simm13a, Register d );
- inline void xor3( Register s1, Register s2, Register d );
- inline void xor3( Register s1, int simm13a, Register d );
- inline void xorcc( Register s1, Register s2, Register d );
- inline void xorcc( Register s1, int simm13a, Register d );
- inline void xnor( Register s1, Register s2, Register d );
- inline void xnor( Register s1, int simm13a, Register d );
- inline void xnorcc( Register s1, Register s2, Register d );
- inline void xnorcc( Register s1, int simm13a, Register d );
+ inline void and3(Register s1, Register s2, Register d);
+ inline void and3(Register s1, int simm13a, Register d);
+ inline void andcc(Register s1, Register s2, Register d);
+ inline void andcc(Register s1, int simm13a, Register d);
+ inline void andn(Register s1, Register s2, Register d);
+ inline void andn(Register s1, int simm13a, Register d);
+ inline void andncc(Register s1, Register s2, Register d);
+ inline void andncc(Register s1, int simm13a, Register d);
+ inline void or3(Register s1, Register s2, Register d);
+ inline void or3(Register s1, int simm13a, Register d);
+ inline void orcc(Register s1, Register s2, Register d);
+ inline void orcc(Register s1, int simm13a, Register d);
+ inline void orn(Register s1, Register s2, Register d);
+ inline void orn(Register s1, int simm13a, Register d);
+ inline void orncc(Register s1, Register s2, Register d);
+ inline void orncc(Register s1, int simm13a, Register d);
+ inline void xor3(Register s1, Register s2, Register d);
+ inline void xor3(Register s1, int simm13a, Register d);
+ inline void xorcc(Register s1, Register s2, Register d);
+ inline void xorcc(Register s1, int simm13a, Register d);
+ inline void xnor(Register s1, Register s2, Register d);
+ inline void xnor(Register s1, int simm13a, Register d);
+ inline void xnorcc(Register s1, Register s2, Register d);
+ inline void xnorcc(Register s1, int simm13a, Register d);
// pp 183
- inline void membar( Membar_mask_bits const7a );
+ inline void membar(Membar_mask_bits const7a);
// pp 185
- inline void fmov( FloatRegisterImpl::Width w, Condition c, bool floatCC, CC cca, FloatRegister s2, FloatRegister d );
+ inline void fmov(FloatRegisterImpl::Width w, Condition c, bool floatCC, CC cca, FloatRegister s2, FloatRegister d);
// pp 189
- inline void fmov( FloatRegisterImpl::Width w, RCondition c, Register s1, FloatRegister s2, FloatRegister d );
+ inline void fmov(FloatRegisterImpl::Width w, RCondition c, Register s1, FloatRegister s2, FloatRegister d);
// pp 191
- inline void movcc( Condition c, bool floatCC, CC cca, Register s2, Register d );
- inline void movcc( Condition c, bool floatCC, CC cca, int simm11a, Register d );
+ inline void movcc(Condition c, bool floatCC, CC cca, Register s2, Register d);
+ inline void movcc(Condition c, bool floatCC, CC cca, int simm11a, Register d);
// pp 195
- inline void movr( RCondition c, Register s1, Register s2, Register d );
- inline void movr( RCondition c, Register s1, int simm10a, Register d );
+ inline void movr(RCondition c, Register s1, Register s2, Register d);
+ inline void movr(RCondition c, Register s1, int simm10a, Register d);
// pp 196
- inline void mulx( Register s1, Register s2, Register d );
- inline void mulx( Register s1, int simm13a, Register d );
- inline void sdivx( Register s1, Register s2, Register d );
- inline void sdivx( Register s1, int simm13a, Register d );
- inline void udivx( Register s1, Register s2, Register d );
- inline void udivx( Register s1, int simm13a, Register d );
+ inline void mulx(Register s1, Register s2, Register d);
+ inline void mulx(Register s1, int simm13a, Register d);
+ inline void sdivx(Register s1, Register s2, Register d);
+ inline void sdivx(Register s1, int simm13a, Register d);
+ inline void udivx(Register s1, Register s2, Register d);
+ inline void udivx(Register s1, int simm13a, Register d);
// pp 197
- inline void umul( Register s1, Register s2, Register d );
- inline void umul( Register s1, int simm13a, Register d );
- inline void smul( Register s1, Register s2, Register d );
- inline void smul( Register s1, int simm13a, Register d );
- inline void umulcc( Register s1, Register s2, Register d );
- inline void umulcc( Register s1, int simm13a, Register d );
- inline void smulcc( Register s1, Register s2, Register d );
- inline void smulcc( Register s1, int simm13a, Register d );
+ inline void umul(Register s1, Register s2, Register d);
+ inline void umul(Register s1, int simm13a, Register d);
+ inline void smul(Register s1, Register s2, Register d);
+ inline void smul(Register s1, int simm13a, Register d);
+ inline void umulcc(Register s1, Register s2, Register d);
+ inline void umulcc(Register s1, int simm13a, Register d);
+ inline void smulcc(Register s1, Register s2, Register d);
+ inline void smulcc(Register s1, int simm13a, Register d);
// pp 201
@@ -1042,40 +1084,40 @@ public:
// pp 202
- inline void popc( Register s, Register d);
- inline void popc( int simm13a, Register d);
+ inline void popc(Register s, Register d);
+ inline void popc(int simm13a, Register d);
// pp 203
- inline void prefetch( Register s1, Register s2, PrefetchFcn f);
- inline void prefetch( Register s1, int simm13a, PrefetchFcn f);
+ inline void prefetch(Register s1, Register s2, PrefetchFcn f);
+ inline void prefetch(Register s1, int simm13a, PrefetchFcn f);
- inline void prefetcha( Register s1, Register s2, int ia, PrefetchFcn f );
- inline void prefetcha( Register s1, int simm13a, PrefetchFcn f );
+ inline void prefetcha(Register s1, Register s2, int ia, PrefetchFcn f);
+ inline void prefetcha(Register s1, int simm13a, PrefetchFcn f);
// pp 208
// not implementing read privileged register
- inline void rdy( Register d);
- inline void rdccr( Register d);
- inline void rdasi( Register d);
- inline void rdtick( Register d);
- inline void rdpc( Register d);
- inline void rdfprs( Register d);
+ inline void rdy(Register d);
+ inline void rdccr(Register d);
+ inline void rdasi(Register d);
+ inline void rdtick(Register d);
+ inline void rdpc(Register d);
+ inline void rdfprs(Register d);
// pp 213
- inline void rett( Register s1, Register s2);
- inline void rett( Register s1, int simm13a, relocInfo::relocType rt = relocInfo::none);
+ inline void rett(Register s1, Register s2);
+ inline void rett(Register s1, int simm13a, relocInfo::relocType rt = relocInfo::none);
// pp 214
- inline void save( Register s1, Register s2, Register d );
- inline void save( Register s1, int simm13a, Register d );
+ inline void save(Register s1, Register s2, Register d);
+ inline void save(Register s1, int simm13a, Register d);
- inline void restore( Register s1 = G0, Register s2 = G0, Register d = G0 );
- inline void restore( Register s1, int simm13a, Register d );
+ inline void restore(Register s1 = G0, Register s2 = G0, Register d = G0);
+ inline void restore(Register s1, int simm13a, Register d);
// pp 216
@@ -1084,26 +1126,27 @@ public:
// pp 217
- inline void sethi( int imm22a, Register d, RelocationHolder const& rspec = RelocationHolder() );
+ inline void sethi(int imm22a, Register d, RelocationHolder const &rspec = RelocationHolder());
+
// pp 218
- inline void sll( Register s1, Register s2, Register d );
- inline void sll( Register s1, int imm5a, Register d );
- inline void srl( Register s1, Register s2, Register d );
- inline void srl( Register s1, int imm5a, Register d );
- inline void sra( Register s1, Register s2, Register d );
- inline void sra( Register s1, int imm5a, Register d );
+ inline void sll(Register s1, Register s2, Register d);
+ inline void sll(Register s1, int imm5a, Register d);
+ inline void srl(Register s1, Register s2, Register d);
+ inline void srl(Register s1, int imm5a, Register d);
+ inline void sra(Register s1, Register s2, Register d);
+ inline void sra(Register s1, int imm5a, Register d);
- inline void sllx( Register s1, Register s2, Register d );
- inline void sllx( Register s1, int imm6a, Register d );
- inline void srlx( Register s1, Register s2, Register d );
- inline void srlx( Register s1, int imm6a, Register d );
- inline void srax( Register s1, Register s2, Register d );
- inline void srax( Register s1, int imm6a, Register d );
+ inline void sllx(Register s1, Register s2, Register d);
+ inline void sllx(Register s1, int imm6a, Register d);
+ inline void srlx(Register s1, Register s2, Register d);
+ inline void srlx(Register s1, int imm6a, Register d);
+ inline void srax(Register s1, Register s2, Register d);
+ inline void srax(Register s1, int imm6a, Register d);
// pp 220
- inline void sir( int simm13a );
+ inline void sir(int simm13a);
// pp 221
@@ -1111,125 +1154,125 @@ public:
// pp 222
- inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2);
- inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a);
+ inline void stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2);
+ inline void stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a);
- inline void stfsr( Register s1, Register s2 );
- inline void stfsr( Register s1, int simm13a);
- inline void stxfsr( Register s1, Register s2 );
- inline void stxfsr( Register s1, int simm13a);
+ inline void stfsr(Register s1, Register s2);
+ inline void stfsr(Register s1, int simm13a);
+ inline void stxfsr(Register s1, Register s2);
+ inline void stxfsr(Register s1, int simm13a);
- // pp 224
+ // pp 224
- inline void stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia );
- inline void stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a );
+ inline void stfa(FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia);
+ inline void stfa(FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a);
- // p 226
+ // pp 226
- inline void stb( Register d, Register s1, Register s2 );
- inline void stb( Register d, Register s1, int simm13a);
- inline void sth( Register d, Register s1, Register s2 );
- inline void sth( Register d, Register s1, int simm13a);
- inline void stw( Register d, Register s1, Register s2 );
- inline void stw( Register d, Register s1, int simm13a);
- inline void stx( Register d, Register s1, Register s2 );
- inline void stx( Register d, Register s1, int simm13a);
- inline void std( Register d, Register s1, Register s2 );
- inline void std( Register d, Register s1, int simm13a);
+ inline void stb(Register d, Register s1, Register s2);
+ inline void stb(Register d, Register s1, int simm13a);
+ inline void sth(Register d, Register s1, Register s2);
+ inline void sth(Register d, Register s1, int simm13a);
+ inline void stw(Register d, Register s1, Register s2);
+ inline void stw(Register d, Register s1, int simm13a);
+ inline void stx(Register d, Register s1, Register s2);
+ inline void stx(Register d, Register s1, int simm13a);
+ inline void std(Register d, Register s1, Register s2);
+ inline void std(Register d, Register s1, int simm13a);
// pp 177
- inline void stba( Register d, Register s1, Register s2, int ia );
- inline void stba( Register d, Register s1, int simm13a );
- inline void stha( Register d, Register s1, Register s2, int ia );
- inline void stha( Register d, Register s1, int simm13a );
- inline void stwa( Register d, Register s1, Register s2, int ia );
- inline void stwa( Register d, Register s1, int simm13a );
- inline void stxa( Register d, Register s1, Register s2, int ia );
- inline void stxa( Register d, Register s1, int simm13a );
- inline void stda( Register d, Register s1, Register s2, int ia );
- inline void stda( Register d, Register s1, int simm13a );
+ inline void stba(Register d, Register s1, Register s2, int ia);
+ inline void stba(Register d, Register s1, int simm13a);
+ inline void stha(Register d, Register s1, Register s2, int ia);
+ inline void stha(Register d, Register s1, int simm13a);
+ inline void stwa(Register d, Register s1, Register s2, int ia);
+ inline void stwa(Register d, Register s1, int simm13a);
+ inline void stxa(Register d, Register s1, Register s2, int ia);
+ inline void stxa(Register d, Register s1, int simm13a);
+ inline void stda(Register d, Register s1, Register s2, int ia);
+ inline void stda(Register d, Register s1, int simm13a);
// pp 230
- inline void sub( Register s1, Register s2, Register d );
- inline void sub( Register s1, int simm13a, Register d );
+ inline void sub(Register s1, Register s2, Register d);
+ inline void sub(Register s1, int simm13a, Register d);
- inline void subcc( Register s1, Register s2, Register d );
- inline void subcc( Register s1, int simm13a, Register d );
- inline void subc( Register s1, Register s2, Register d );
- inline void subc( Register s1, int simm13a, Register d );
- inline void subccc( Register s1, Register s2, Register d );
- inline void subccc( Register s1, int simm13a, Register d );
+ inline void subcc(Register s1, Register s2, Register d);
+ inline void subcc(Register s1, int simm13a, Register d);
+ inline void subc(Register s1, Register s2, Register d);
+ inline void subc(Register s1, int simm13a, Register d);
+ inline void subccc(Register s1, Register s2, Register d);
+ inline void subccc(Register s1, int simm13a, Register d);
// pp 231
- inline void swap( Register s1, Register s2, Register d );
- inline void swap( Register s1, int simm13a, Register d);
+ inline void swap(Register s1, Register s2, Register d);
+ inline void swap(Register s1, int simm13a, Register d);
// pp 232
- inline void swapa( Register s1, Register s2, int ia, Register d );
- inline void swapa( Register s1, int simm13a, Register d );
+ inline void swapa(Register s1, Register s2, int ia, Register d);
+ inline void swapa(Register s1, int simm13a, Register d);
// pp 234, note op in book is wrong, see pp 268
- inline void taddcc( Register s1, Register s2, Register d );
- inline void taddcc( Register s1, int simm13a, Register d );
+ inline void taddcc(Register s1, Register s2, Register d);
+ inline void taddcc(Register s1, int simm13a, Register d);
// pp 235
- inline void tsubcc( Register s1, Register s2, Register d );
- inline void tsubcc( Register s1, int simm13a, Register d );
+ inline void tsubcc(Register s1, Register s2, Register d);
+ inline void tsubcc(Register s1, int simm13a, Register d);
// pp 237
- inline void trap( Condition c, CC cc, Register s1, Register s2 );
- inline void trap( Condition c, CC cc, Register s1, int trapa );
+ inline void trap(Condition c, CC cc, Register s1, Register s2);
+ inline void trap(Condition c, CC cc, Register s1, int trapa);
// simple uncond. trap
- inline void trap( int trapa );
+ inline void trap(int trapa);
// pp 239 omit write priv register for now
- inline void wry( Register d);
+ inline void wry(Register d);
inline void wrccr(Register s);
inline void wrccr(Register s, int simm13a);
inline void wrasi(Register d);
// wrasi(d, imm) stores (d xor imm) to asi
inline void wrasi(Register d, int simm13a);
- inline void wrfprs( Register d);
+ inline void wrfprs(Register d);
- // VIS1 instructions
+ // VIS1 instructions
- inline void alignaddr( Register s1, Register s2, Register d );
+ inline void alignaddr(Register s1, Register s2, Register d);
- inline void faligndata( FloatRegister s1, FloatRegister s2, FloatRegister d );
+ inline void faligndata(FloatRegister s1, FloatRegister s2, FloatRegister d);
- inline void fzero( FloatRegisterImpl::Width w, FloatRegister d );
+ inline void fzero(FloatRegisterImpl::Width w, FloatRegister d);
- inline void fsrc2( FloatRegisterImpl::Width w, FloatRegister s2, FloatRegister d );
+ inline void fsrc2(FloatRegisterImpl::Width w, FloatRegister s2, FloatRegister d);
- inline void fnot1( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister d );
+ inline void fnot1(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister d);
- inline void fpmerge( FloatRegister s1, FloatRegister s2, FloatRegister d );
+ inline void fpmerge(FloatRegister s1, FloatRegister s2, FloatRegister d);
- inline void stpartialf( Register s1, Register s2, FloatRegister d, int ia = -1 );
+ inline void stpartialf(Register s1, Register s2, FloatRegister d, int ia = -1);
- // VIS2 instructions
+ // VIS2 instructions
- inline void edge8n( Register s1, Register s2, Register d );
+ inline void edge8n(Register s1, Register s2, Register d);
- inline void bmask( Register s1, Register s2, Register d );
- inline void bshuffle( FloatRegister s1, FloatRegister s2, FloatRegister d );
+ inline void bmask(Register s1, Register s2, Register d);
+ inline void bshuffle(FloatRegister s1, FloatRegister s2, FloatRegister d);
// VIS3 instructions
- inline void movstosw( FloatRegister s, Register d );
- inline void movstouw( FloatRegister s, Register d );
- inline void movdtox( FloatRegister s, Register d );
+ inline void movstosw(FloatRegister s, Register d);
+ inline void movstouw(FloatRegister s, Register d);
+ inline void movdtox(FloatRegister s, Register d);
- inline void movwtos( Register s, FloatRegister d );
- inline void movxtod( Register s, FloatRegister d );
+ inline void movwtos(Register s, FloatRegister d);
+ inline void movxtod(Register s, FloatRegister d);
inline void xmulx(Register s1, Register s2, Register d);
inline void xmulxhi(Register s1, Register s2, Register d);
@@ -1242,12 +1285,13 @@ public:
// CRC32C instruction
- inline void crc32c( FloatRegister s1, FloatRegister s2, FloatRegister d );
+ inline void crc32c(FloatRegister s1, FloatRegister s2, FloatRegister d);
// Creation
Assembler(CodeBuffer* code) : AbstractAssembler(code) {
-#ifdef CHECK_DELAY
- delay_state = no_delay;
+#ifdef VALIDATE_PIPELINE
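+    // Pipeline-validation bookkeeping starts with no pending delay-slot and no pending hazard.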
+ _delay_state = NoDelay;
+ _hazard_state = NoHazard;
#endif
}
};
diff --git a/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp b/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp
index c18b07ec019..070a1f80db3 100644
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,21 +28,42 @@
#include "asm/assembler.hpp"
-inline void Assembler::insert_nop_after_cbcond() {
- if (UseCBCond && cbcond_before()) {
+inline void Assembler::avoid_pipeline_stall() {
+#ifdef VALIDATE_PIPELINE
+ if (_hazard_state == PcHazard) {
+    assert(is_cbcond_before() || is_rdpc_before(), "PC-hazard not preceded by CBCOND or RDPC.");
+ assert_no_delay("Must not have PC-hazard state in delay-slot.");
nop();
+ _hazard_state = NoHazard;
+ }
+#endif
+
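+  // Unconditionally pad with a NOP whenever the previously emitted instruction
+  // was a CBCOND or RDPC, so that the next instruction is not issued directly
+  // after it.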
+ bool post_cond = is_cbcond_before();
+ bool post_rdpc = is_rdpc_before();
+
+ if (post_cond || post_rdpc) {
+ nop();
+#ifdef VALIDATE_PIPELINE
+ if (_hazard_state != PcHazard) {
+ assert(post_cond, "CBCOND before when no hazard @0x%p\n", pc());
+ assert(post_rdpc, "RDPC before when no hazard @0x%p\n", pc());
+ }
+#endif
}
}
inline void Assembler::check_delay() {
-# ifdef CHECK_DELAY
- guarantee( delay_state != at_delay_slot, "must say delayed() when filling delay slot");
- delay_state = no_delay;
-# endif
+#ifdef VALIDATE_PIPELINE
+ guarantee(_delay_state != AtDelay, "Use delayed() when filling delay-slot");
+ _delay_state = NoDelay;
+#endif
}
inline void Assembler::emit_int32(int x) {
check_delay();
+#ifdef VALIDATE_PIPELINE
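+  // Emitting an instruction resolves any pending PC-hazard.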
+ _hazard_state = NoHazard;
+#endif
AbstractAssembler::emit_int32(x);
}
@@ -55,394 +76,1024 @@ inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
emit_int32(x);
}
-inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
+inline void Assembler::emit_data(int x, RelocationHolder const &rspec) {
relocate(rspec);
emit_int32(x);
}
-inline void Assembler::add(Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::add(Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::add(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::add(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
-inline void Assembler::addcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::addcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::addc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::addc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::addccc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::addccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-inline void Assembler::aes_eround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
-inline void Assembler::aes_eround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
-inline void Assembler::aes_dround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
-inline void Assembler::aes_dround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
-inline void Assembler::aes_eround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
-inline void Assembler::aes_eround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
-inline void Assembler::aes_dround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
-inline void Assembler::aes_dround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
-inline void Assembler::aes_kexpand1( FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | u_field(imm5a, 13, 9) | op5(aes_kexpand1_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+inline void Assembler::addcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::addcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::addc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(addc_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::addc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(addc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::addccc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::addccc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::aes_eround01(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_op5) | fs2(s2, FloatRegisterImpl::D));
+}
+inline void Assembler::aes_eround23(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_op5) | fs2(s2, FloatRegisterImpl::D));
+}
+inline void Assembler::aes_dround01(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_op5) | fs2(s2, FloatRegisterImpl::D));
+}
+inline void Assembler::aes_dround23(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_op5) | fs2(s2, FloatRegisterImpl::D));
+}
+inline void Assembler::aes_eround01_l(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_l_op5) | fs2(s2, FloatRegisterImpl::D));
+}
+inline void Assembler::aes_eround23_l(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_l_op5) | fs2(s2, FloatRegisterImpl::D));
+}
+inline void Assembler::aes_dround01_l(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_l_op5) | fs2(s2, FloatRegisterImpl::D));
+}
+inline void Assembler::aes_dround23_l(FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_l_op5) | fs2(s2, FloatRegisterImpl::D));
+}
+inline void Assembler::aes_kexpand1(FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | u_field(imm5a, 13, 9) | op5(aes_kexpand1_op5) | fs2(s2, FloatRegisterImpl::D));
+}
// 3-operand AES instructions
-inline void Assembler::aes_kexpand0( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand0_opf) | fs2(s2, FloatRegisterImpl::D) ); }
-inline void Assembler::aes_kexpand2( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand2_opf) | fs2(s2, FloatRegisterImpl::D) ); }
-
-inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
-inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { insert_nop_after_cbcond(); bpr( c, a, p, s1, target(L)); }
-
-inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
-inline void Assembler::fb( Condition c, bool a, Label& L ) { insert_nop_after_cbcond(); fb(c, a, target(L)); }
-
-inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
-inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { insert_nop_after_cbcond(); fbp(c, a, cc, p, target(L)); }
-
-inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
-inline void Assembler::br( Condition c, bool a, Label& L ) { insert_nop_after_cbcond(); br(c, a, target(L)); }
-
-inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
-inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { insert_nop_after_cbcond(); bp(c, a, cc, p, target(L)); }
-
-// compare and branch
-inline void Assembler::cbcond(Condition c, CC cc, Register s1, Register s2, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | rs2(s2)); }
-inline void Assembler::cbcond(Condition c, CC cc, Register s1, int simm5, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | immed(true) | simm(simm5, 5)); }
-
-inline void Assembler::call( address d, relocInfo::relocType rt ) { insert_nop_after_cbcond(); cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
-inline void Assembler::call( Label& L, relocInfo::relocType rt ) { insert_nop_after_cbcond(); call( target(L), rt); }
-
-inline void Assembler::call( address d, RelocationHolder const& rspec ) { insert_nop_after_cbcond(); cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rspec); has_delay_slot(); assert(rspec.type() != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
-
-inline void Assembler::casa( Register s1, Register s2, Register d, int ia ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(casa_op3 ) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2)); }
-inline void Assembler::casxa( Register s1, Register s2, Register d, int ia ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(casxa_op3) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2)); }
-
-inline void Assembler::udiv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 ) | rs1(s1) | rs2(s2)); }
-inline void Assembler::udiv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::sdiv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 ) | rs1(s1) | rs2(s2)); }
-inline void Assembler::sdiv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::udivcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
-inline void Assembler::udivcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::sdivcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
-inline void Assembler::sdivcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-inline void Assembler::done() { v9_only(); cti(); emit_int32( op(arith_op) | fcn(0) | op3(done_op3) ); }
-inline void Assembler::retry() { v9_only(); cti(); emit_int32( op(arith_op) | fcn(1) | op3(retry_op3) ); }
-
-inline void Assembler::fadd( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x40 + w) | fs2(s2, w)); }
-inline void Assembler::fsub( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x44 + w) | fs2(s2, w)); }
-
-inline void Assembler::fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
-inline void Assembler::fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
-
-inline void Assembler::ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
-inline void Assembler::ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }
-
-inline void Assembler::ftof( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | opf(0xc0 + sw + dw*4) | fs2(s, sw)); }
-
-inline void Assembler::fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, FloatRegisterImpl::D)); }
-inline void Assembler::fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, FloatRegisterImpl::S)); }
-
-inline void Assembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
-inline void Assembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
-inline void Assembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
-inline void Assembler::fmul( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x48 + w) | fs2(s2, w)); }
-inline void Assembler::fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); }
-inline void Assembler::fdiv( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x4c + w) | fs2(s2, w)); }
-
-inline void Assembler::fxor( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(flog3_op3) | fs1(s1, w) | opf(0x6E - w) | fs2(s2, w)); }
-
-inline void Assembler::fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); }
-
-inline void Assembler::flush( Register s1, Register s2) { emit_int32( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
-inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
-inline void Assembler::flushw() { v9_only(); emit_int32( op(arith_op) | op3(flushw_op3) ); }
-
-inline void Assembler::illtrap( int const22a) { if (const22a != 0) v9_only(); emit_int32( op(branch_op) | u_field(const22a, 21, 0) ); }
-
-inline void Assembler::impdep1( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
-inline void Assembler::impdep2( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }
-
-inline void Assembler::jmpl( Register s1, Register s2, Register d ) { insert_nop_after_cbcond(); cti(); emit_int32( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
-inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { insert_nop_after_cbcond(); cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
-
-inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
-
-inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
-inline void Assembler::ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::ldfa( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
-inline void Assembler::ldsh( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldsh( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldsw( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldsw( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldub( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::lduh( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::lduh( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::lduw( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::lduw( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
-inline void Assembler::ldx( Register s1, Register s2, Register d) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
-inline void Assembler::ldsba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::ldsba( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::ldsha( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::ldsha( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::ldswa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::ldswa( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::lduba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::lduba( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::lduha( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::lduha( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::lduwa( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::lduwa( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::ldxa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::ldxa( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-inline void Assembler::and3( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::and3( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::andcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::andcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::andn( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::andn( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::andncc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::andncc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::or3( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::or3( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::orcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::orcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::orn( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::orn( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::orncc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::orncc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::xor3( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::xor3( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::xorcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::xorcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::xnor( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::xnor( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::xnorcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::xnorcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-inline void Assembler::membar( Membar_mask_bits const7a ) { v9_only(); emit_int32( op(arith_op) | op3(membar_op3) | rs1(O7) | immed(true) | u_field( int(const7a), 6, 0)); }
-
-inline void Assembler::fmov( FloatRegisterImpl::Width w, Condition c, bool floatCC, CC cca, FloatRegister s2, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fpop2_op3) | cond_mov(c) | opf_cc(cca, floatCC) | opf_low6(w) | fs2(s2, w)); }
-
-inline void Assembler::fmov( FloatRegisterImpl::Width w, RCondition c, Register s1, FloatRegister s2, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fpop2_op3) | rs1(s1) | rcond(c) | opf_low5(4 + w) | fs2(s2, w)); }
-
-inline void Assembler::movcc( Condition c, bool floatCC, CC cca, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | rs2(s2) ); }
-inline void Assembler::movcc( Condition c, bool floatCC, CC cca, int simm11a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | immed(true) | simm(simm11a, 11) ); }
-
-inline void Assembler::movr( RCondition c, Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | rs2(s2) ); }
-inline void Assembler::movr( RCondition c, Register s1, int simm10a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | immed(true) | simm(simm10a, 10) ); }
-
-inline void Assembler::mulx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::mulx( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::sdivx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::sdivx( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::udivx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::udivx( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-inline void Assembler::umul( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::umul( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::smul( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::smul( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::umulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::umulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-inline void Assembler::nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
-
-inline void Assembler::sw_count() { emit_int32( op(branch_op) | op2(sethi_op2) | 0x3f0 ); }
-
-inline void Assembler::popc( Register s, Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(popc_op3) | rs2(s)); }
-inline void Assembler::popc( int simm13a, Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(popc_op3) | immed(true) | simm(simm13a, 13)); }
-
-inline void Assembler::prefetch( Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_int32( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::prefetch( Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
-inline void Assembler::prefetcha( Register s1, Register s2, int ia, PrefetchFcn f ) { v9_only(); emit_int32( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::prefetcha( Register s1, int simm13a, PrefetchFcn f ) { v9_only(); emit_int32( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-inline void Assembler::rdy( Register d) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(0, 18, 14)); }
-inline void Assembler::rdccr( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(2, 18, 14)); }
-inline void Assembler::rdasi( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(3, 18, 14)); }
-inline void Assembler::rdtick( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(4, 18, 14)); } // Spoon!
-inline void Assembler::rdpc( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(5, 18, 14)); }
-inline void Assembler::rdfprs( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(6, 18, 14)); }
-
-inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
-inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }
-
-inline void Assembler::save( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::save( Register s1, int simm13a, Register d ) {
- // make sure frame is at least large enough for the register save area
- assert(-simm13a >= 16 * wordSize, "frame too small");
- emit_int32( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) );
+inline void Assembler::aes_kexpand0(FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand0_opf) | fs2(s2, FloatRegisterImpl::D));
+}
+inline void Assembler::aes_kexpand2(FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ aes_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand2_opf) | fs2(s2, FloatRegisterImpl::D));
}
-inline void Assembler::restore( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::restore( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::bpr(RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt) {
+ avoid_pipeline_stall();
+ cti();
+ emit_data(op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt);
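+  // The branch has a delay-slot; record that fact for pipeline validation.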
+ induce_delay_slot();
+}
+inline void Assembler::bpr(RCondition c, bool a, Predict p, Register s1, Label &L) {
+ // Note[+]: All assembly emit routines using the 'target()' branch back-patch
+ // resolver must call 'avoid_pipeline_stall()' prior to calling 'target()'
+ // (we must do so even though the call will be made, as here, in the above
+ // implementation of 'bpr()', invoked below). The reason is the assumption
+ // made in 'target()', where using the current PC as the address for back-
+  // patching prevents any additional code from being emitted _after_ the address
+ // has been set (implicitly) in order to refer to the correct instruction.
+ avoid_pipeline_stall();
+ bpr(c, a, p, s1, target(L));
+}
+
+inline void Assembler::fb(Condition c, bool a, address d, relocInfo::relocType rt) {
+ v9_dep();
+ avoid_pipeline_stall();
+ cti();
+ emit_data(op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);
+ induce_delay_slot();
+}
+inline void Assembler::fb(Condition c, bool a, Label &L) {
+ avoid_pipeline_stall();
+ fb(c, a, target(L));
+}
+
+inline void Assembler::fbp(Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt) {
+ avoid_pipeline_stall();
+ cti();
+ emit_data(op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);
+ induce_delay_slot();
+}
+inline void Assembler::fbp(Condition c, bool a, CC cc, Predict p, Label &L) {
+ avoid_pipeline_stall();
+ fbp(c, a, cc, p, target(L));
+}
+
+inline void Assembler::br(Condition c, bool a, address d, relocInfo::relocType rt) {
+ v9_dep();
+ avoid_pipeline_stall();
+ cti();
+ emit_data(op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);
+ induce_delay_slot();
+}
+inline void Assembler::br(Condition c, bool a, Label &L) {
+ avoid_pipeline_stall();
+ br(c, a, target(L));
+}
+
+inline void Assembler::bp(Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt) {
+ avoid_pipeline_stall();
+ cti();
+ emit_data(op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);
+ induce_delay_slot();
+}
+inline void Assembler::bp(Condition c, bool a, CC cc, Predict p, Label &L) {
+ avoid_pipeline_stall();
+ bp(c, a, cc, p, target(L));
+}
+
+// compare and branch
+inline void Assembler::cbcond(Condition c, CC cc, Register s1, Register s2, Label &L) {
+ avoid_pipeline_stall();
+ cti();
+ emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | rs2(s2));
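+  // CBCOND has no delay-slot; instead, record a PC-hazard so that the next
+  // branch or call emitted will pad with a NOP (see avoid_pipeline_stall()).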
+ induce_pc_hazard();
+}
+inline void Assembler::cbcond(Condition c, CC cc, Register s1, int simm5, Label &L) {
+ avoid_pipeline_stall();
+ cti();
+ emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | immed(true) | simm(simm5, 5));
+ induce_pc_hazard();
+}
+
+inline void Assembler::call(address d, relocInfo::relocType rt) {
+ avoid_pipeline_stall();
+ cti();
+ emit_data(op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt);
+ induce_delay_slot();
+ assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec");
+}
+inline void Assembler::call(Label &L, relocInfo::relocType rt) {
+ avoid_pipeline_stall();
+ call(target(L), rt);
+}
+
+inline void Assembler::call(address d, RelocationHolder const &rspec) {
+ avoid_pipeline_stall();
+ cti();
+ emit_data(op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rspec);
+ induce_delay_slot();
+ assert(rspec.type() != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec");
+}
+
+inline void Assembler::casa(Register s1, Register s2, Register d, int ia) {
+ emit_int32(op(ldst_op) | rd(d) | op3(casa_op3) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2));
+}
+inline void Assembler::casxa(Register s1, Register s2, Register d, int ia) {
+ emit_int32(op(ldst_op) | rd(d) | op3(casxa_op3) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2));
+}
+
+inline void Assembler::udiv(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(udiv_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::udiv(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(udiv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::sdiv(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sdiv_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::sdiv(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sdiv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::udivcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::udivcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::sdivcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::sdivcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::done() {
+ cti();
+ emit_int32(op(arith_op) | fcn(0) | op3(done_op3));
+}
+inline void Assembler::retry() {
+ cti();
+ emit_int32(op(arith_op) | fcn(1) | op3(retry_op3));
+}
+
+inline void Assembler::fadd(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x40 + w) | fs2(s2, w));
+}
+inline void Assembler::fsub(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x44 + w) | fs2(s2, w));
+}
+
+inline void Assembler::fcmp(FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) {
+ emit_int32(op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w));
+}
+inline void Assembler::fcmpe(FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) {
+ emit_int32(op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w));
+}
+
+inline void Assembler::ftox(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w));
+}
+inline void Assembler::ftoi(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w));
+}
+
+inline void Assembler::ftof(FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, dw) | op3(fpop1_op3) | opf(0xc0 + sw + dw*4) | fs2(s, sw));
+}
+
+inline void Assembler::fxtof(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, FloatRegisterImpl::D));
+}
+inline void Assembler::fitof(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, FloatRegisterImpl::S));
+}
+
+inline void Assembler::fmov(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w));
+}
+inline void Assembler::fneg(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w));
+}
+inline void Assembler::fabs(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w));
+}
+inline void Assembler::fmul(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x48 + w) | fs2(s2, w));
+}
+inline void Assembler::fmul(FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw));
+}
+inline void Assembler::fdiv(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x4c + w) | fs2(s2, w));
+}
+
+inline void Assembler::fxor(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ vis1_only();
+ emit_int32(op(arith_op) | fd(d, w) | op3(flog3_op3) | fs1(s1, w) | opf(0x6E - w) | fs2(s2, w));
+}
+
+inline void Assembler::fsqrt(FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w));
+}
+
+inline void Assembler::fmadd(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d) {
+ fmaf_only();
+ emit_int32(op(arith_op) | fd(d, w) | op3(stpartialf_op3) | fs1(s1, w) | fs3(s3, w) | op5(w) | fs2(s2, w));
+}
+
+inline void Assembler::flush(Register s1, Register s2) {
+ emit_int32(op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::flush(Register s1, int simm13a) {
+ emit_data(op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::flushw() {
+ emit_int32(op(arith_op) | op3(flushw_op3));
+}
+
+inline void Assembler::illtrap(int const22a) {
+ emit_int32(op(branch_op) | u_field(const22a, 21, 0));
+}
+
+inline void Assembler::impdep1(int id1, int const19a) {
+ emit_int32(op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0));
+}
+inline void Assembler::impdep2(int id1, int const19a) {
+ emit_int32(op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0));
+}
+
+inline void Assembler::jmpl(Register s1, Register s2, Register d) {
+ avoid_pipeline_stall();
+ cti();
+ emit_int32(op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2));
+ induce_delay_slot();
+}
+inline void Assembler::jmpl(Register s1, int simm13a, Register d, RelocationHolder const &rspec) {
+ avoid_pipeline_stall();
+ cti();
+ emit_data(op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec);
+ induce_delay_slot();
+}
+
+inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) {
+ emit_int32(op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const &rspec) {
+ emit_data(op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec);
+}
+
+inline void Assembler::ldxfsr(Register s1, Register s2) {
+ emit_int32(op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::ldxfsr(Register s1, int simm13a) {
+ emit_data(op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::ldfa(FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d) {
+ emit_int32(op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::ldfa(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d) {
+ emit_int32(op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::ldsb(Register s1, Register s2, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::ldsb(Register s1, int simm13a, Register d) {
+ emit_data(op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::ldsh(Register s1, Register s2, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::ldsh(Register s1, int simm13a, Register d) {
+ emit_data(op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::ldsw(Register s1, Register s2, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::ldsw(Register s1, int simm13a, Register d) {
+ emit_data(op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::ldub(Register s1, Register s2, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::ldub(Register s1, int simm13a, Register d) {
+ emit_data(op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::lduh(Register s1, Register s2, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::lduh(Register s1, int simm13a, Register d) {
+ emit_data(op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::lduw(Register s1, Register s2, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::lduw(Register s1, int simm13a, Register d) {
+ emit_data(op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::ldx(Register s1, Register s2, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::ldx(Register s1, int simm13a, Register d) {
+ emit_data(op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::ldd(Register s1, Register s2, Register d) {
+ v9_dep();
+ assert(d->is_even(), "not even");
+ emit_int32(op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::ldd(Register s1, int simm13a, Register d) {
+ v9_dep();
+ assert(d->is_even(), "not even");
+ emit_data(op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::ldsba(Register s1, Register s2, int ia, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::ldsba(Register s1, int simm13a, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::ldsha(Register s1, Register s2, int ia, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::ldsha(Register s1, int simm13a, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::ldswa(Register s1, Register s2, int ia, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::ldswa(Register s1, int simm13a, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::lduba(Register s1, Register s2, int ia, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::lduba(Register s1, int simm13a, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::lduha(Register s1, Register s2, int ia, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::lduha(Register s1, int simm13a, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::lduwa(Register s1, Register s2, int ia, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::lduwa(Register s1, int simm13a, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::ldxa(Register s1, Register s2, int ia, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::ldxa(Register s1, int simm13a, Register d) {
+ emit_int32(op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::and3(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(and_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::and3(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(and_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::andcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::andcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::andn(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(andn_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::andn(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(andn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::andncc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::andncc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::or3(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(or_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::or3(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(or_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::orcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::orcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::orn(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::orn(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::orncc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::orncc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::xor3(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(xor_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::xor3(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(xor_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::xorcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::xorcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::xnor(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(xnor_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::xnor(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(xnor_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::xnorcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::xnorcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::membar(Membar_mask_bits const7a) {
+ emit_int32(op(arith_op) | op3(membar_op3) | rs1(O7) | immed(true) | u_field(int(const7a), 6, 0));
+}
+
+inline void Assembler::fmov(FloatRegisterImpl::Width w, Condition c, bool floatCC, CC cca, FloatRegister s2, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop2_op3) | cond_mov(c) | opf_cc(cca, floatCC) | opf_low6(w) | fs2(s2, w));
+}
+
+inline void Assembler::fmov(FloatRegisterImpl::Width w, RCondition c, Register s1, FloatRegister s2, FloatRegister d) {
+ emit_int32(op(arith_op) | fd(d, w) | op3(fpop2_op3) | rs1(s1) | rcond(c) | opf_low5(4 + w) | fs2(s2, w));
+}
+
+inline void Assembler::movcc(Condition c, bool floatCC, CC cca, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | rs2(s2));
+}
+inline void Assembler::movcc(Condition c, bool floatCC, CC cca, int simm11a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | immed(true) | simm(simm11a, 11));
+}
+
+inline void Assembler::movr(RCondition c, Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | rs2(s2));
+}
+inline void Assembler::movr(RCondition c, Register s1, int simm10a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | immed(true) | simm(simm10a, 10));
+}
+
+inline void Assembler::mulx(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(mulx_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::mulx(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(mulx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::sdivx(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::sdivx(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::udivx(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::udivx(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::umul(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(umul_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::umul(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(umul_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::smul(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(smul_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::smul(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(smul_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::umulcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::umulcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::smulcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::smulcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::nop() {
+ emit_int32(op(branch_op) | op2(sethi_op2));
+}
+
+inline void Assembler::sw_count() {
+ emit_int32(op(branch_op) | op2(sethi_op2) | 0x3f0);
+}
+
+inline void Assembler::popc(Register s, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(popc_op3) | rs2(s));
+}
+inline void Assembler::popc(int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(popc_op3) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::prefetch(Register s1, Register s2, PrefetchFcn f) {
+ emit_int32(op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::prefetch(Register s1, int simm13a, PrefetchFcn f) {
+ emit_data(op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::prefetcha(Register s1, Register s2, int ia, PrefetchFcn f) {
+ emit_int32(op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::prefetcha(Register s1, int simm13a, PrefetchFcn f) {
+ emit_int32(op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::rdy(Register d) {
+ v9_dep();
+ emit_int32(op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(0, 18, 14));
+}
+inline void Assembler::rdccr(Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(2, 18, 14));
+}
+inline void Assembler::rdasi(Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(3, 18, 14));
+}
+inline void Assembler::rdtick(Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(4, 18, 14));
+}
+inline void Assembler::rdpc(Register d) {
+ avoid_pipeline_stall();
+ cti();
+ emit_int32(op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(5, 18, 14));
+ induce_pc_hazard();
+}
+inline void Assembler::rdfprs(Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(6, 18, 14));
+}
+
+inline void Assembler::rett(Register s1, Register s2) {
+ cti();
+ emit_int32(op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2));
+ induce_delay_slot();
+}
+inline void Assembler::rett(Register s1, int simm13a, relocInfo::relocType rt) {
+ cti();
+ emit_data(op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt);
+ induce_delay_slot();
+}
+
+inline void Assembler::save(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::save(Register s1, int simm13a, Register d) {
+ // make sure frame is at least large enough for the register save area
+ assert(-simm13a >= 16 * wordSize, "frame too small");
+ emit_int32(op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+
+inline void Assembler::restore(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::restore(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
// pp 216
-inline void Assembler::saved() { v9_only(); emit_int32( op(arith_op) | fcn(0) | op3(saved_op3)); }
-inline void Assembler::restored() { v9_only(); emit_int32( op(arith_op) | fcn(1) | op3(saved_op3)); }
+inline void Assembler::saved() {
+ emit_int32(op(arith_op) | fcn(0) | op3(saved_op3));
+}
+inline void Assembler::restored() {
+ emit_int32(op(arith_op) | fcn(1) | op3(saved_op3));
+}
-inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rspec ) { emit_data( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec); }
+inline void Assembler::sethi(int imm22a, Register d, RelocationHolder const &rspec) {
+ emit_data(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec);
+}
-inline void Assembler::sll( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
-inline void Assembler::sll( Register s1, int imm5a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
-inline void Assembler::srl( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
-inline void Assembler::srl( Register s1, int imm5a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
-inline void Assembler::sra( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
-inline void Assembler::sra( Register s1, int imm5a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
+inline void Assembler::sll(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | rs2(s2));
+}
+inline void Assembler::sll(Register s1, int imm5a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0));
+}
+inline void Assembler::srl(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | rs2(s2));
+}
+inline void Assembler::srl(Register s1, int imm5a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0));
+}
+inline void Assembler::sra(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | rs2(s2));
+}
+inline void Assembler::sra(Register s1, int imm5a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0));
+}
-inline void Assembler::sllx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
-inline void Assembler::sllx( Register s1, int imm6a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
-inline void Assembler::srlx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
-inline void Assembler::srlx( Register s1, int imm6a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
-inline void Assembler::srax( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
-inline void Assembler::srax( Register s1, int imm6a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
+inline void Assembler::sllx(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | rs2(s2));
+}
+inline void Assembler::sllx(Register s1, int imm6a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0));
+}
+inline void Assembler::srlx(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | rs2(s2));
+}
+inline void Assembler::srlx(Register s1, int imm6a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0));
+}
+inline void Assembler::srax(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | rs2(s2));
+}
+inline void Assembler::srax(Register s1, int imm6a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0));
+}
-inline void Assembler::sir( int simm13a ) { emit_int32( op(arith_op) | fcn(15) | op3(sir_op3) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::sir(int simm13a) {
+ emit_int32(op(arith_op) | fcn(15) | op3(sir_op3) | immed(true) | simm(simm13a, 13));
+}
- // pp 221
+// pp 221
-inline void Assembler::stbar() { emit_int32( op(arith_op) | op3(membar_op3) | u_field(15, 18, 14)); }
+inline void Assembler::stbar() {
+ emit_int32(op(arith_op) | op3(membar_op3) | u_field(15, 18, 14));
+}
- // pp 222
+// pp 222
-inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) {
+ emit_int32(op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) {
+ emit_data(op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
-inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::stxfsr(Register s1, Register s2) {
+ emit_int32(op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::stxfsr(Register s1, int simm13a) {
+ emit_data(op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
-inline void Assembler::stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::stfa(FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia) {
+ emit_int32(op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::stfa(FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) {
+ emit_int32(op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
- // p 226
+// p 226
-inline void Assembler::stb( Register d, Register s1, Register s2) { emit_int32( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stb( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::sth( Register d, Register s1, Register s2) { emit_int32( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::sth( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stw( Register d, Register s1, Register s2) { emit_int32( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stw( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::stb(Register d, Register s1, Register s2) {
+ emit_int32(op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::stb(Register d, Register s1, int simm13a) {
+ emit_data(op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::sth(Register d, Register s1, Register s2) {
+ emit_int32(op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::sth(Register d, Register s1, int simm13a) {
+ emit_data(op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::stw(Register d, Register s1, Register s2) {
+ emit_int32(op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::stw(Register d, Register s1, int simm13a) {
+ emit_data(op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
-inline void Assembler::stx( Register d, Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::stx(Register d, Register s1, Register s2) {
+ emit_int32(op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::stx(Register d, Register s1, int simm13a) {
+ emit_data(op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::std(Register d, Register s1, Register s2) {
+ v9_dep();
+ assert(d->is_even(), "not even");
+ emit_int32(op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::std(Register d, Register s1, int simm13a) {
+ v9_dep();
+ assert(d->is_even(), "not even");
+ emit_data(op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
-inline void Assembler::stba( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::stba( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::stha( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::stha( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::stwa( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::stwa( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::stxa( Register d, Register s1, Register s2, int ia ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::stxa( Register d, Register s1, int simm13a ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::stda( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::stda( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::stba(Register d, Register s1, Register s2, int ia) {
+ emit_int32(op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::stba(Register d, Register s1, int simm13a) {
+ emit_int32(op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::stha(Register d, Register s1, Register s2, int ia) {
+ emit_int32(op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::stha(Register d, Register s1, int simm13a) {
+ emit_int32(op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::stwa(Register d, Register s1, Register s2, int ia) {
+ emit_int32(op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::stwa(Register d, Register s1, int simm13a) {
+ emit_int32(op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::stxa(Register d, Register s1, Register s2, int ia) {
+ emit_int32(op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::stxa(Register d, Register s1, int simm13a) {
+ emit_int32(op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::stda(Register d, Register s1, Register s2, int ia) {
+ emit_int32(op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::stda(Register d, Register s1, int simm13a) {
+ emit_int32(op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
// pp 230
-inline void Assembler::sub( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::sub( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::sub(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sub_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::sub(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
-inline void Assembler::subcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::subcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::subc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::subc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::subccc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::subccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::subcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::subcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::subc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(subc_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::subc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(subc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::subccc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::subccc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
// pp 231
-inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::swap(Register s1, Register s2, Register d) {
+ v9_dep();
+ emit_int32(op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::swap(Register s1, int simm13a, Register d) {
+ v9_dep();
+ emit_data(op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
-inline void Assembler::swapa( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-inline void Assembler::swapa( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::swapa(Register s1, Register s2, int ia, Register d) {
+ v9_dep();
+ emit_int32(op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
+inline void Assembler::swapa(Register s1, int simm13a, Register d) {
+ v9_dep();
+ emit_int32(op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
// pp 234, note op in book is wrong, see pp 268
-inline void Assembler::taddcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::taddcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::taddcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(taddcc_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::taddcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(taddcc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
// pp 235
-inline void Assembler::tsubcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::tsubcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::tsubcc(Register s1, Register s2, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(tsubcc_op3) | rs1(s1) | rs2(s2));
+}
+inline void Assembler::tsubcc(Register s1, int simm13a, Register d) {
+ emit_int32(op(arith_op) | rd(d) | op3(tsubcc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13));
+}
// pp 237
-inline void Assembler::trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
-inline void Assembler::trap( Condition c, CC cc, Register s1, int trapa ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
+inline void Assembler::trap(Condition c, CC cc, Register s1, Register s2) {
+ emit_int32(op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2));
+}
+inline void Assembler::trap(Condition c, CC cc, Register s1, int trapa) {
+ emit_int32(op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0));
+}
// simple uncond. trap
-inline void Assembler::trap( int trapa ) { trap( always, icc, G0, trapa ); }
+inline void Assembler::trap(int trapa) {
+ trap(always, icc, G0, trapa);
+}
-inline void Assembler::wry(Register d) { v9_dep(); emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(0, 29, 25)); }
-inline void Assembler::wrccr(Register s) { v9_only(); emit_int32(op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25)); }
-inline void Assembler::wrccr(Register s, int simm13a) { v9_only(); emit_int32(op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::wrasi(Register d) { v9_only(); emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
+inline void Assembler::wry(Register d) {
+ v9_dep();
+ emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(0, 29, 25));
+}
+inline void Assembler::wrccr(Register s) {
+ emit_int32(op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25));
+}
+inline void Assembler::wrccr(Register s, int simm13a) {
+ emit_int32(op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::wrasi(Register d) {
+ emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25));
+}
// wrasi(d, imm) stores (d xor imm) to asi
-inline void Assembler::wrasi(Register d, int simm13a) { v9_only(); emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::wrfprs(Register d) { v9_only(); emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
+inline void Assembler::wrasi(Register d, int simm13a) {
+ emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25) | immed(true) | simm(simm13a, 13));
+}
+inline void Assembler::wrfprs(Register d) {
+ emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25));
+}
-inline void Assembler::alignaddr( Register s1, Register s2, Register d ) { vis1_only(); emit_int32( op(arith_op) | rd(d) | op3(alignaddr_op3) | rs1(s1) | opf(alignaddr_opf) | rs2(s2)); }
+inline void Assembler::alignaddr(Register s1, Register s2, Register d) {
+ vis1_only();
+ emit_int32(op(arith_op) | rd(d) | op3(alignaddr_op3) | rs1(s1) | opf(alignaddr_opf) | rs2(s2));
+}
-inline void Assembler::faligndata( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(faligndata_op3) | fs1(s1, FloatRegisterImpl::D) | opf(faligndata_opf) | fs2(s2, FloatRegisterImpl::D)); }
+inline void Assembler::faligndata(FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ vis1_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(faligndata_op3) | fs1(s1, FloatRegisterImpl::D) | opf(faligndata_opf) | fs2(s2, FloatRegisterImpl::D));
+}
-inline void Assembler::fzero( FloatRegisterImpl::Width w, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fzero_op3) | opf(0x62 - w)); }
+inline void Assembler::fzero(FloatRegisterImpl::Width w, FloatRegister d) {
+ vis1_only();
+ emit_int32(op(arith_op) | fd(d, w) | op3(fzero_op3) | opf(0x62 - w));
+}
-inline void Assembler::fsrc2( FloatRegisterImpl::Width w, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fsrc_op3) | opf(0x7A - w) | fs2(s2, w)); }
+inline void Assembler::fsrc2(FloatRegisterImpl::Width w, FloatRegister s2, FloatRegister d) {
+ vis1_only();
+ emit_int32(op(arith_op) | fd(d, w) | op3(fsrc_op3) | opf(0x7A - w) | fs2(s2, w));
+}
-inline void Assembler::fnot1( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fnot_op3) | fs1(s1, w) | opf(0x6C - w)); }
+inline void Assembler::fnot1(FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister d) {
+ vis1_only();
+ emit_int32(op(arith_op) | fd(d, w) | op3(fnot_op3) | fs1(s1, w) | opf(0x6C - w));
+}
-inline void Assembler::fpmerge( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(0x36) | fs1(s1, FloatRegisterImpl::S) | opf(0x4b) | fs2(s2, FloatRegisterImpl::S)); }
+inline void Assembler::fpmerge(FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ vis1_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(0x36) | fs1(s1, FloatRegisterImpl::S) | opf(0x4b) | fs2(s2, FloatRegisterImpl::S));
+}
-inline void Assembler::stpartialf( Register s1, Register s2, FloatRegister d, int ia ) { vis1_only(); emit_int32( op(ldst_op) | fd(d, FloatRegisterImpl::D) | op3(stpartialf_op3) | rs1(s1) | imm_asi(ia) | rs2(s2)); }
+inline void Assembler::stpartialf(Register s1, Register s2, FloatRegister d, int ia) {
+ vis1_only();
+ emit_int32(op(ldst_op) | fd(d, FloatRegisterImpl::D) | op3(stpartialf_op3) | rs1(s1) | imm_asi(ia) | rs2(s2));
+}
-// VIS2 instructions
+// VIS2 instructions
-inline void Assembler::edge8n( Register s1, Register s2, Register d ) { vis2_only(); emit_int32( op(arith_op) | rd(d) | op3(edge_op3) | rs1(s1) | opf(edge8n_opf) | rs2(s2)); }
+inline void Assembler::edge8n(Register s1, Register s2, Register d) {
+ vis2_only();
+ emit_int32(op(arith_op) | rd(d) | op3(edge_op3) | rs1(s1) | opf(edge8n_opf) | rs2(s2));
+}
-inline void Assembler::bmask( Register s1, Register s2, Register d ) { vis2_only(); emit_int32( op(arith_op) | rd(d) | op3(bmask_op3) | rs1(s1) | opf(bmask_opf) | rs2(s2)); }
-inline void Assembler::bshuffle( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis2_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(bshuffle_op3) | fs1(s1, FloatRegisterImpl::D) | opf(bshuffle_opf) | fs2(s2, FloatRegisterImpl::D)); }
+inline void Assembler::bmask(Register s1, Register s2, Register d) {
+ vis2_only();
+ emit_int32(op(arith_op) | rd(d) | op3(bmask_op3) | rs1(s1) | opf(bmask_opf) | rs2(s2));
+}
+inline void Assembler::bshuffle(FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ vis2_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(bshuffle_op3) | fs1(s1, FloatRegisterImpl::D) | opf(bshuffle_opf) | fs2(s2, FloatRegisterImpl::D));
+}
// VIS3 instructions
-inline void Assembler::movstosw( FloatRegister s, Register d ) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstosw_opf) | fs2(s, FloatRegisterImpl::S)); }
-inline void Assembler::movstouw( FloatRegister s, Register d ) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstouw_opf) | fs2(s, FloatRegisterImpl::S)); }
-inline void Assembler::movdtox( FloatRegister s, Register d ) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mdtox_opf) | fs2(s, FloatRegisterImpl::D)); }
+inline void Assembler::movstosw(FloatRegister s, Register d) {
+ vis3_only();
+ emit_int32(op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstosw_opf) | fs2(s, FloatRegisterImpl::S));
+}
+inline void Assembler::movstouw(FloatRegister s, Register d) {
+ vis3_only();
+ emit_int32(op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstouw_opf) | fs2(s, FloatRegisterImpl::S));
+}
+inline void Assembler::movdtox(FloatRegister s, Register d) {
+ vis3_only();
+ emit_int32(op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mdtox_opf) | fs2(s, FloatRegisterImpl::D));
+}
-inline void Assembler::movwtos( Register s, FloatRegister d ) { vis3_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); }
-inline void Assembler::movxtod( Register s, FloatRegister d ) { vis3_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); }
+inline void Assembler::movwtos(Register s, FloatRegister d) {
+ vis3_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s));
+}
+inline void Assembler::movxtod(Register s, FloatRegister d) {
+ vis3_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s));
+}
-inline void Assembler::xmulx(Register s1, Register s2, Register d) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulx_opf) | rs2(s2)); }
-inline void Assembler::xmulxhi(Register s1, Register s2, Register d) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulxhi_opf) | rs2(s2)); }
+inline void Assembler::xmulx(Register s1, Register s2, Register d) {
+ vis3_only();
+ emit_int32(op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulx_opf) | rs2(s2));
+}
+inline void Assembler::xmulxhi(Register s1, Register s2, Register d) {
+ vis3_only();
+ emit_int32(op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulxhi_opf) | rs2(s2));
+}
// Crypto SHA instructions
-inline void Assembler::sha1() { sha1_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha1_opf)); }
-inline void Assembler::sha256() { sha256_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha256_opf)); }
-inline void Assembler::sha512() { sha512_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha512_opf)); }
+inline void Assembler::sha1() {
+ sha1_only();
+ emit_int32(op(arith_op) | op3(sha_op3) | opf(sha1_opf));
+}
+inline void Assembler::sha256() {
+ sha256_only();
+ emit_int32(op(arith_op) | op3(sha_op3) | opf(sha256_opf));
+}
+inline void Assembler::sha512() {
+ sha512_only();
+ emit_int32(op(arith_op) | op3(sha_op3) | opf(sha512_opf));
+}
// CRC32C instruction
-inline void Assembler::crc32c( FloatRegister s1, FloatRegister s2, FloatRegister d ) { crc32c_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(crc32c_op3) | fs1(s1, FloatRegisterImpl::D) | opf(crc32c_opf) | fs2(s2, FloatRegisterImpl::D)); }
+inline void Assembler::crc32c(FloatRegister s1, FloatRegister s2, FloatRegister d) {
+ crc32c_only();
+ emit_int32(op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(crc32c_op3) | fs1(s1, FloatRegisterImpl::D) | opf(crc32c_opf) | fs2(s2, FloatRegisterImpl::D));
+}
#endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
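A note on the encoding pattern above: every emitter builds a single 32-bit instruction word by OR-ing fixed-position bit fields, the opcode class, the destination and source register numbers, and either a second source register or a 13-bit signed immediate selected by the i bit. The standalone sketch below mirrors that scheme with field positions from the SPARC V9 format-3 layout; the helper names are illustrative, not HotSpot's own.

    #include <cstdint>
    #include <cassert>

    // Illustrative bit-field helpers in the spirit of op()/rd()/op3()/rs1()/rs2().
    // SPARC V9 format 3: op[31:30], rd[29:25], op3[24:19], rs1[18:14],
    // i[13], simm13[12:0], rs2[4:0]. All fields used here are narrower than 32 bits.
    static inline uint32_t field(uint32_t v, int hi, int lo) {
      assert(v < (1u << (hi - lo + 1)) && "value does not fit in the field");
      return v << lo;
    }

    static inline uint32_t encode_add(int rd, int rs1, int rs2) {
      const uint32_t arith_op = 2;  // op = 10: arithmetic/logical/shift group
      const uint32_t add_op3  = 0;  // op3 selector for ADD
      return field(arith_op, 31, 30) | field(uint32_t(rd), 29, 25) |
             field(add_op3, 24, 19) | field(uint32_t(rs1), 18, 14) |
             /* i = 0, register form */ field(uint32_t(rs2), 4, 0);
    }

For example, encode_add(8, 9, 10) yields the word for add %o1, %o2, %o0, since %o0..%o7 are integer registers 8..15 and the instruction form is add rs1, rs2, rd.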
diff --git a/hotspot/src/cpu/sparc/vm/bytes_sparc.hpp b/hotspot/src/cpu/sparc/vm/bytes_sparc.hpp
index 67d4307ff74..0ce1f5fac48 100644
--- a/hotspot/src/cpu/sparc/vm/bytes_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/bytes_sparc.hpp
@@ -34,10 +34,6 @@ class Bytes: AllStatic {
// can I count on address always being a pointer to an unsigned char? Yes
- // Returns true, if the byte ordering used by Java is different from the nativ byte ordering
- // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
- static inline bool is_Java_byte_ordering_different() { return false; }
-
// Thus, a swap between native and Java ordering is always a no-op:
static inline u2 swap_u2(u2 x) { return x; }
static inline u4 swap_u4(u4 x) { return x; }
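The comment kept above explains why these swap helpers are no-ops: SPARC is big-endian, matching the byte order Java uses in class files and data streams. For contrast, a little-endian port has to reverse the bytes; a minimal sketch of that variant, shown only for comparison and not part of the SPARC code:

    #include <cstdint>

    // On a little-endian CPU the Java<->native helpers must reverse byte
    // order; on SPARC the corresponding functions simply return x.
    static inline uint16_t swap_u2_le(uint16_t x) {
      return (uint16_t)((x >> 8) | (x << 8));
    }

    static inline uint32_t swap_u4_le(uint32_t x) {
      return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
             ((x << 8) & 0x00FF0000u) | (x << 24);
    }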
diff --git a/hotspot/src/cpu/sparc/vm/c1_FpuStackSim_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_FpuStackSim_sparc.cpp
index 8c3a19a9e78..abaf500ebd7 100644
--- a/hotspot/src/cpu/sparc/vm/c1_FpuStackSim_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_FpuStackSim_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,10 +22,4 @@
*
*/
-#include "precompiled.hpp"
-#include "c1/c1_FpuStackSim.hpp"
-#include "c1/c1_FrameMap.hpp"
-#include "utilities/array.hpp"
-#include "utilities/ostream.hpp"
-
// No FPU stack on SPARC
diff --git a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp
index ae4262bb73b..71e6276f09c 100644
--- a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -159,21 +159,12 @@
public:
-#ifdef _LP64
static LIR_Opr as_long_opr(Register r) {
return as_long_single_opr(r);
}
static LIR_Opr as_pointer_opr(Register r) {
return as_long_single_opr(r);
}
-#else
- static LIR_Opr as_long_opr(Register r) {
- return as_long_pair_opr(r);
- }
- static LIR_Opr as_pointer_opr(Register r) {
- return as_opr(r);
- }
-#endif
static LIR_Opr as_float_opr(FloatRegister r) {
return LIR_OprFact::single_fpu(r->encoding());
}
diff --git a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
index ae2942f19b5..5269c9fd8be 100644
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
@@ -440,6 +440,31 @@ void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
}
void LIR_Assembler::emit_op3(LIR_Op3* op) {
+ switch (op->code()) {
+ case lir_idiv:
+ case lir_irem: // Both idiv & irem are handled after the switch (below).
+ break;
+ case lir_fmaf:
+ __ fmadd(FloatRegisterImpl::S,
+ op->in_opr1()->as_float_reg(),
+ op->in_opr2()->as_float_reg(),
+ op->in_opr3()->as_float_reg(),
+ op->result_opr()->as_float_reg());
+ return;
+ case lir_fmad:
+ __ fmadd(FloatRegisterImpl::D,
+ op->in_opr1()->as_double_reg(),
+ op->in_opr2()->as_double_reg(),
+ op->in_opr3()->as_double_reg(),
+ op->result_opr()->as_double_reg());
+ return;
+ default:
+ ShouldNotReachHere();
+ break;
+ }
+
+ // Handle idiv & irem:
+
Register Rdividend = op->in_opr1()->as_register();
Register Rdivisor = noreg;
Register Rscratch = op->in_opr3()->as_register();
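The new lir_fmaf and lir_fmad cases hand C1's fused multiply-add ops (presumably generated for the Math.fma intrinsic) to the fmadd encoder defined earlier; the instruction computes d = s1 * s2 + s3 with a single rounding. A small illustration of that single-rounding contract using std::fma, which has the same semantics:

    #include <cmath>
    #include <cstdio>

    // A fused multiply-add rounds once, so it can recover the rounding
    // error that a separate multiply discards.
    int main() {
      double x   = 1.0 / 3.0;
      double p   = x * x;                // rounded product
      double err = std::fma(x, x, -p);   // exact product minus rounded product
      std::printf("p=%.17g err=%.17g\n", p, err);   // err is non-zero here
      return 0;
    }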
@@ -556,11 +581,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
// guarantee that 32-bit loads always sign extended but that isn't
// true and since sign extension isn't free, it would impose a
// slight cost.
-#ifdef _LP64
if (op->type() == T_INT) {
__ br(acond, false, Assembler::pn, *(op->label()));
} else
-#endif
__ brx(acond, false, Assembler::pn, *(op->label()));
}
// The peephole pass fills the delay slot
@@ -576,12 +599,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
Register rlo = dst->as_register_lo();
Register rhi = dst->as_register_hi();
Register rval = op->in_opr()->as_register();
-#ifdef _LP64
__ sra(rval, 0, rlo);
-#else
- __ mov(rval, rlo);
- __ sra(rval, BitsPerInt-1, rhi);
-#endif
break;
}
case Bytecodes::_i2d:
@@ -614,11 +632,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
Register rlo = op->in_opr()->as_register_lo();
Register rhi = op->in_opr()->as_register_hi();
Register rdst = dst->as_register();
-#ifdef _LP64
__ sra(rlo, 0, rdst);
-#else
- __ mov(rlo, rdst);
-#endif
break;
}
case Bytecodes::_d2f:
@@ -711,7 +725,6 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
case T_INT : __ stw(from_reg->as_register(), base, offset); break;
case T_LONG :
-#ifdef _LP64
if (unaligned || PatchALot) {
// Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
assert(G3_scratch != base, "can't handle this");
@@ -722,11 +735,6 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
} else {
__ stx(from_reg->as_register_lo(), base, offset);
}
-#else
- assert(Assembler::is_simm13(offset + 4), "must be");
- __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
- __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
-#endif
break;
case T_ADDRESS:
case T_METADATA:
@@ -778,12 +786,7 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
case T_INT : __ stw(from_reg->as_register(), base, disp); break;
case T_LONG :
-#ifdef _LP64
__ stx(from_reg->as_register_lo(), base, disp);
-#else
- assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
- __ std(from_reg->as_register_hi(), base, disp);
-#endif
break;
case T_ADDRESS:
__ st_ptr(from_reg->as_register(), base, disp);
@@ -826,40 +829,22 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
case T_INT : __ ld(base, offset, to_reg->as_register()); break;
case T_LONG :
if (!unaligned && !PatchALot) {
-#ifdef _LP64
__ ldx(base, offset, to_reg->as_register_lo());
-#else
- assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
- "must be sequential");
- __ ldd(base, offset, to_reg->as_register_hi());
-#endif
} else {
-#ifdef _LP64
assert(base != to_reg->as_register_lo(), "can't handle this");
assert(O7 != to_reg->as_register_lo(), "can't handle this");
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
__ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
__ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
__ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
-#else
- if (base == to_reg->as_register_lo()) {
- __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
- __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
- } else {
- __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
- __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
- }
-#endif
}
break;
case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
case T_ADDRESS:
-#ifdef _LP64
if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
__ lduw(base, offset, to_reg->as_register());
__ decode_klass_not_null(to_reg->as_register());
} else
-#endif
{
__ ld_ptr(base, offset, to_reg->as_register());
}
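In the unaligned T_LONG path above, the 64-bit value is assembled from two 32-bit loads: the high word goes into the destination register, the low word is loaded zero-extended into O7, the high word is shifted left by 32 with sllx, and the halves are combined with or3. The same composition in plain C++, as a sketch:

    #include <cstdint>

    // Mirrors ld (high word) + lduw (low word) + sllx 32 + or3:
    static inline int64_t compose_long(uint32_t hi_word, uint32_t lo_word) {
      return (int64_t)(((uint64_t)hi_word << 32) | (uint64_t)lo_word);
    }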
@@ -921,13 +906,7 @@ int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType
case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
case T_LONG :
-#ifdef _LP64
__ ldx(base, disp, to_reg->as_register_lo());
-#else
- assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
- "must be sequential");
- __ ldd(base, disp, to_reg->as_register_hi());
-#endif
break;
default : ShouldNotReachHere();
}
@@ -1107,16 +1086,9 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
jlong con = c->as_jlong();
if (to_reg->is_double_cpu()) {
-#ifdef _LP64
__ set(con, to_reg->as_register_lo());
-#else
- __ set(low(con), to_reg->as_register_lo());
- __ set(high(con), to_reg->as_register_hi());
-#endif
-#ifdef _LP64
} else if (to_reg->is_single_cpu()) {
__ set(con, to_reg->as_register());
-#endif
} else {
ShouldNotReachHere();
assert(to_reg->is_double_fpu(), "wrong register kind");
@@ -1190,12 +1162,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
__ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
} else {
assert(to_reg->is_double_cpu(), "Must be a long register.");
-#ifdef _LP64
__ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
-#else
- __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
- __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
-#endif
}
}
@@ -1366,22 +1333,10 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
}
} else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
if (from_reg->is_double_cpu()) {
-#ifdef _LP64
__ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
-#else
- assert(to_reg->is_double_cpu() &&
- from_reg->as_register_hi() != to_reg->as_register_lo() &&
- from_reg->as_register_lo() != to_reg->as_register_hi(),
- "should both be long and not overlap");
- // long to long moves
- __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
- __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
-#endif
-#ifdef _LP64
} else if (to_reg->is_double_cpu()) {
// int to int moves
__ mov(from_reg->as_register(), to_reg->as_register_lo());
-#endif
} else {
// int to int moves
__ mov(from_reg->as_register(), to_reg->as_register());
@@ -1460,21 +1415,6 @@ void LIR_Assembler::return_op(LIR_Opr result) {
if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
__ reserved_stack_check();
}
- // the poll may need a register so just pick one that isn't the return register
-#if defined(TIERED) && !defined(_LP64)
- if (result->type_field() == LIR_OprDesc::long_type) {
- // Must move the result to G1
- // Must leave proper result in O0,O1 and G1 (TIERED only)
- __ sllx(I0, 32, G1); // Shift bits into high G1
- __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
- __ or3 (I1, G1, G1); // OR 64 bits into G1
-#ifdef ASSERT
- // mangle it so any problems will show up
- __ set(0xdeadbeef, I0);
- __ set(0xdeadbeef, I1);
-#endif
- }
-#endif // TIERED
__ set((intptr_t)os::get_polling_page(), L0);
__ relocate(relocInfo::poll_return_type);
__ ld_ptr(L0, 0, G0);
@@ -1568,23 +1508,11 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
Register xhi = opr1->as_register_hi();
if (opr2->is_constant() && opr2->as_jlong() == 0) {
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
-#ifdef _LP64
__ orcc(xhi, G0, G0);
-#else
- __ orcc(xhi, xlo, G0);
-#endif
} else if (opr2->is_register()) {
Register ylo = opr2->as_register_lo();
Register yhi = opr2->as_register_hi();
-#ifdef _LP64
__ cmp(xlo, ylo);
-#else
- __ subcc(xlo, ylo, xlo);
- __ subccc(xhi, yhi, xhi);
- if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
- __ orcc(xhi, xlo, G0);
- }
-#endif
} else {
ShouldNotReachHere();
}
@@ -1612,13 +1540,7 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
ShouldNotReachHere();
}
} else if (code == lir_cmp_l2i) {
-#ifdef _LP64
__ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
-#else
- __ lcmp(left->as_register_hi(), left->as_register_lo(),
- right->as_register_hi(), right->as_register_lo(),
- dst->as_register());
-#endif
} else {
ShouldNotReachHere();
}
@@ -1656,12 +1578,11 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
ShouldNotReachHere();
}
Label skip;
-#ifdef _LP64
if (type == T_INT) {
__ br(acond, false, Assembler::pt, skip);
- } else
-#endif
+ } else {
__ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
+ }
if (opr1->is_constant() && opr1->type() == T_INT) {
Register dest = result->as_register();
if (Assembler::is_simm13(opr1->as_jint())) {
@@ -1720,7 +1641,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
}
} else if (dest->is_double_cpu()) {
-#ifdef _LP64
Register dst_lo = dest->as_register_lo();
Register op1_lo = left->as_pointer_register();
Register op2_lo = right->as_pointer_register();
@@ -1736,28 +1656,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
default: ShouldNotReachHere();
}
-#else
- Register op1_lo = left->as_register_lo();
- Register op1_hi = left->as_register_hi();
- Register op2_lo = right->as_register_lo();
- Register op2_hi = right->as_register_hi();
- Register dst_lo = dest->as_register_lo();
- Register dst_hi = dest->as_register_hi();
-
- switch (code) {
- case lir_add:
- __ addcc(op1_lo, op2_lo, dst_lo);
- __ addc (op1_hi, op2_hi, dst_hi);
- break;
-
- case lir_sub:
- __ subcc(op1_lo, op2_lo, dst_lo);
- __ subc (op1_hi, op2_hi, dst_hi);
- break;
-
- default: ShouldNotReachHere();
- }
-#endif
} else {
assert (right->is_single_cpu(), "Just Checking");
@@ -1852,23 +1750,14 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
int simm13 = (int)c;
switch (code) {
case lir_logic_and:
-#ifndef _LP64
- __ and3 (left->as_register_hi(), 0, dest->as_register_hi());
-#endif
__ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
break;
case lir_logic_or:
-#ifndef _LP64
- __ or3 (left->as_register_hi(), 0, dest->as_register_hi());
-#endif
__ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
break;
case lir_logic_xor:
-#ifndef _LP64
- __ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
-#endif
__ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
break;
@@ -1886,7 +1775,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
default: ShouldNotReachHere();
}
} else {
-#ifdef _LP64
Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
left->as_register_lo();
Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
@@ -1898,26 +1786,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
default: ShouldNotReachHere();
}
-#else
- switch (code) {
- case lir_logic_and:
- __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
- __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
- break;
-
- case lir_logic_or:
- __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
- __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
- break;
-
- case lir_logic_xor:
- __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
- __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
- break;
-
- default: ShouldNotReachHere();
- }
-#endif
}
}
}
@@ -1975,12 +1843,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (basic_type == T_ARRAY) basic_type = T_OBJECT;
-#ifdef _LP64
// higher 32bits must be null
__ sra(dst_pos, 0, dst_pos);
__ sra(src_pos, 0, src_pos);
__ sra(length, 0, length);
-#endif
// set up the arraycopy stub information
ArrayCopyStub* stub = op->stub();
@@ -2316,7 +2182,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
if (dest->is_single_cpu()) {
-#ifdef _LP64
if (left->type() == T_OBJECT) {
switch (code) {
case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
@@ -2325,7 +2190,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
default: ShouldNotReachHere();
}
} else
-#endif
switch (code) {
case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
@@ -2333,27 +2197,17 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
default: ShouldNotReachHere();
}
} else {
-#ifdef _LP64
switch (code) {
case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
default: ShouldNotReachHere();
}
-#else
- switch (code) {
- case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
- case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
- case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
- default: ShouldNotReachHere();
- }
-#endif
}
}
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
-#ifdef _LP64
if (left->type() == T_OBJECT) {
count = count & 63; // shouldn't shift by more than sizeof(intptr_t)
Register l = left->as_register();
@@ -2366,7 +2220,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr de
}
return;
}
-#endif
if (dest->is_single_cpu()) {
count = count & 0x1F; // Java spec
@@ -2425,7 +2278,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
op->tmp4()->as_register() == O1 &&
op->klass()->as_register() == G5, "must be");
- LP64_ONLY( __ signx(op->len()->as_register()); )
+ __ signx(op->len()->as_register());
if (UseSlowPath ||
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
@@ -2748,7 +2601,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
Register new_value_hi = op->new_value()->as_register_hi();
Register t1 = op->tmp1()->as_register();
Register t2 = op->tmp2()->as_register();
-#ifdef _LP64
__ mov(cmp_value_lo, t1);
__ mov(new_value_lo, t2);
// perform the compare and swap operation
@@ -2756,23 +2608,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
// generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1.
__ cmp(t1, t2);
-#else
- // move high and low halves of long values into single registers
- __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
- __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
- __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
- __ sllx(new_value_hi, 32, t2);
- __ srl(new_value_lo, 0, new_value_lo);
- __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
- // perform the compare and swap operation
- __ casx(addr, t1, t2);
- // generate condition code - if the swap succeeded, t2 ("new value" reg) was
- // overwritten with the original value in "addr" and will be equal to t1.
- // Produce icc flag for 32bit.
- __ sub(t1, t2, t2);
- __ srlx(t2, 32, t1);
- __ orcc(t2, t1, G0);
-#endif
} else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
Register addr = op->addr()->as_pointer_register();
Register cmp_value = op->cmp_value()->as_register();
@@ -2914,13 +2749,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
assert(data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register();
-#ifdef _LP64
assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
Register tmp1 = op->tmp1()->as_register_lo();
-#else
- assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
- Register tmp1 = op->tmp1()->as_register();
-#endif
metadata2reg(md->constant_encoding(), mdo);
int mdo_offset_bias = 0;
if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
@@ -3200,12 +3030,7 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
assert (left->is_double_cpu(), "Must be a long");
Register Rlow = left->as_register_lo();
Register Rhi = left->as_register_hi();
-#ifdef _LP64
__ sub(G0, Rlow, dest->as_register_lo());
-#else
- __ subcc(G0, Rlow, dest->as_register_lo());
- __ subc (G0, Rhi, dest->as_register_hi());
-#endif
}
}
@@ -3245,9 +3070,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
-#ifdef _LP64
ShouldNotReachHere();
-#endif
NEEDS_CLEANUP;
if (type == T_LONG) {
@@ -3366,16 +3189,29 @@ void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
__ srl (rs, 0, rd->successor());
}
-
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
- LIR_Address* addr = addr_opr->as_address_ptr();
- assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet");
+ const LIR_Address* addr = addr_opr->as_address_ptr();
+ assert(addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet");
+ const Register dest_reg = dest->as_pointer_register();
+ const Register base_reg = addr->base()->as_pointer_register();
if (Assembler::is_simm13(addr->disp())) {
- __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
+ if (addr->index()->is_valid()) {
+ const Register index_reg = addr->index()->as_pointer_register();
+ assert(index_reg != G3_scratch, "invariant");
+ __ add(base_reg, addr->disp(), G3_scratch);
+ __ add(index_reg, G3_scratch, dest_reg);
+ } else {
+ __ add(base_reg, addr->disp(), dest_reg);
+ }
} else {
__ set(addr->disp(), G3_scratch);
- __ add(addr->base()->as_pointer_register(), G3_scratch, dest->as_pointer_register());
+ if (addr->index()->is_valid()) {
+ const Register index_reg = addr->index()->as_pointer_register();
+ assert(index_reg != G3_scratch, "invariant");
+ __ add(index_reg, G3_scratch, G3_scratch);
+ }
+ __ add(base_reg, G3_scratch, dest_reg);
}
}
@@ -3491,31 +3327,6 @@ void LIR_Assembler::peephole(LIR_List* lir) {
inst->insert_before(i + 1, delay_op);
i++;
}
-
-#if defined(TIERED) && !defined(_LP64)
- // fixup the return value from G1 to O0/O1 for long returns.
- // It's done here instead of in LIRGenerator because there's
- // such a mismatch between the single reg and double reg
- // calling convention.
- LIR_OpJavaCall* callop = op->as_OpJavaCall();
- if (callop->result_opr() == FrameMap::out_long_opr) {
- LIR_OpJavaCall* call;
- LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
- for (int a = 0; a < arguments->length(); a++) {
- arguments[a] = callop->arguments()[a];
- }
- if (op->code() == lir_virtual_call) {
- call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
- callop->vtable_offset(), arguments, callop->info());
- } else {
- call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
- callop->addr(), arguments, callop->info());
- }
- inst->at_put(i - 1, call);
- inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
- T_LONG, lir_patch_none, NULL));
- }
-#endif
break;
}
}
@@ -3533,14 +3344,10 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
} else if (data->is_oop()) {
Register obj = data->as_register();
Register narrow = tmp->as_register();
-#ifdef _LP64
assert(UseCompressedOops, "swap is 32bit only");
__ encode_heap_oop(obj, narrow);
__ swap(as_Address(addr), narrow);
__ decode_heap_oop(narrow, obj);
-#else
- __ swap(as_Address(addr), obj);
-#endif
} else {
ShouldNotReachHere();
}
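
The hunks above for `c1_LIRAssembler_sparc.cpp` retire the 32-bit (`!_LP64`) code paths, including the `LP64_ONLY(...)` wrapper that used to guard the `signx` call in `emit_alloc_array`. As a minimal sketch (assuming the usual HotSpot convention from `globalDefinitions.hpp`, paraphrased rather than quoted), the word-size macros expand roughly as shown below, which is why the guarded call and the now-unconditional call are equivalent on a 64-bit-only port:

    // Illustrative sketch of the HotSpot word-size macros (assumed, simplified).
    #ifdef _LP64
      #define LP64_ONLY(code) code      // emitted only on 64-bit builds
      #define NOT_LP64(code)            // dropped on 64-bit builds
    #else
      #define LP64_ONLY(code)
      #define NOT_LP64(code) code
    #endif
    // With the 32-bit SPARC port gone, _LP64 is always defined, so
    // LP64_ONLY( __ signx(op->len()->as_register()); ) reduces to the plain call.
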
diff --git a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp
index c46d0002af3..37a3b38cfb3 100644
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,11 +61,7 @@
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);
enum {
-#ifdef _LP64
_call_stub_size = 68,
-#else
- _call_stub_size = 20,
-#endif // _LP64
_call_aot_stub_size = 0,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
_deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
diff --git a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
index 069b9a2f46e..28f04a9c96e 100644
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@ LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::Oexcepti
LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::Oissuing_pc_opr; }
LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr() { return new_register(T_OBJECT); }
-LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); }
+LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(T_LONG); }
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
LIR_Opr opr;
@@ -215,13 +215,11 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
}
}
} else {
-#ifdef _LP64
if (index_opr->type() == T_INT) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index_opr, tmp);
index_opr = tmp;
}
-#endif
base_opr = new_pointer_register();
assert (index_opr->is_register(), "Must be register");
@@ -955,7 +953,29 @@ void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
}
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
- fatal("FMA intrinsic is not implemented on this platform");
+ assert(x->number_of_arguments() == 3, "wrong type");
+ assert(UseFMA, "Needs FMA instructions support.");
+
+ LIRItem a(x->argument_at(0), this);
+ LIRItem b(x->argument_at(1), this);
+ LIRItem c(x->argument_at(2), this);
+
+ a.load_item();
+ b.load_item();
+ c.load_item();
+
+ LIR_Opr ina = a.result();
+ LIR_Opr inb = b.result();
+ LIR_Opr inc = c.result();
+ LIR_Opr res = rlock_result(x);
+
+ switch (x->id()) {
+ case vmIntrinsics::_fmaF: __ fmaf(ina, inb, inc, res); break;
+ case vmIntrinsics::_fmaD: __ fmad(ina, inb, inc, res); break;
+ default:
+ ShouldNotReachHere();
+ break;
+ }
}
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
@@ -1317,20 +1337,12 @@ void LIRGenerator::trace_block_entry(BlockBegin* block) {
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
CodeEmitInfo* info) {
-#ifdef _LP64
__ store(value, address, info);
-#else
- __ volatile_store_mem_reg(value, address, info);
-#endif
}
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) {
-#ifdef _LP64
__ load(address, result, info);
-#else
- __ volatile_load_mem_reg(address, result, info);
-#endif
}
@@ -1340,11 +1352,6 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
LIR_Opr index_op = offset;
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-#ifndef _LP64
- if (is_volatile && type == T_LONG) {
- __ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
- } else
-#endif
{
if (type == T_BOOLEAN) {
type = T_BYTE;
@@ -1374,11 +1381,6 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) {
-#ifndef _LP64
- if (is_volatile && type == T_LONG) {
- __ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
- } else
-#endif
{
LIR_Address* addr = new LIR_Address(src, offset, type);
__ load(addr, dst);
@@ -1403,17 +1405,13 @@ void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
// Because we want a 2-arg form of xchg
__ move(data, dst);
- assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type");
+ assert (!x->is_add() && (type == T_INT || (is_obj && UseCompressedOops)), "unexpected type");
LIR_Address* addr;
if (offset->is_constant()) {
-#ifdef _LP64
jlong l = offset->as_jlong();
assert((jlong)((jint)l) == l, "offset too large for constant");
jint c = (jint)l;
-#else
- jint c = offset->as_jint();
-#endif
addr = new LIR_Address(src.result(), c, type);
} else {
addr = new LIR_Address(src.result(), offset, type);
diff --git a/hotspot/src/cpu/sparc/vm/c1_LIR_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_LIR_sparc.cpp
index e9467760679..c21d2c1d9ad 100644
--- a/hotspot/src/cpu/sparc/vm/c1_LIR_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_LIR_sparc.cpp
@@ -48,16 +48,9 @@ LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
void LIR_Address::verify() const {
assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used");
assert(disp() == 0 || index()->is_illegal(), "can't have both");
-#ifdef _LP64
assert(base()->is_cpu_register(), "wrong base operand");
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
"wrong type for addresses");
-#else
- assert(base()->is_single_cpu(), "wrong base operand");
- assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
- assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
- "wrong type for addresses");
-#endif
}
#endif // PRODUCT
diff --git a/hotspot/src/cpu/sparc/vm/c1_LinearScan_sparc.hpp b/hotspot/src/cpu/sparc/vm/c1_LinearScan_sparc.hpp
index 164c000de21..6656056bb6f 100644
--- a/hotspot/src/cpu/sparc/vm/c1_LinearScan_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/c1_LinearScan_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,7 @@ inline bool LinearScan::is_processed_reg_num(int reg_num) {
inline int LinearScan::num_physical_regs(BasicType type) {
// Sparc requires two cpu registers for long
// and two cpu registers for double
-#ifdef _LP64
if (type == T_DOUBLE) {
-#else
- if (type == T_DOUBLE || type == T_LONG) {
-#endif
return 2;
}
return 1;
@@ -44,11 +40,7 @@ inline int LinearScan::num_physical_regs(BasicType type) {
inline bool LinearScan::requires_adjacent_regs(BasicType type) {
-#ifdef _LP64
return type == T_DOUBLE;
-#else
- return type == T_DOUBLE || type == T_LONG;
-#endif
}
inline bool LinearScan::is_caller_save(int assigned_reg) {
diff --git a/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp
index fbd0e1ce6b4..fa0cde6e6ed 100644
--- a/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -273,13 +273,6 @@ void C1_MacroAssembler::initialize_object(
add(obj, hdr_size_in_bytes, t1); // compute address of first element
sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
initialize_body(t1, t2);
-#ifndef _LP64
- } else if (con_size_in_bytes < threshold * 2) {
- // on v9 we can do double word stores to fill twice as much space.
- assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
- assert(con_size_in_bytes % 8 == 0, "double word aligned");
- for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += 2 * HeapWordSize) stx(G0, obj, i);
-#endif
} else if (con_size_in_bytes <= threshold) {
// use explicit NULL stores
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += HeapWordSize) st_ptr(G0, obj, i);
diff --git a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
index 40e30551967..baadab70bac 100644
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
@@ -35,6 +35,7 @@
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
+#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
@@ -251,7 +252,7 @@ void Runtime1::initialize_pd() {
// SP -> ---------------
//
int i;
- int sp_offset = round_to(frame::register_save_words, 2); // start doubleword aligned
+ int sp_offset = align_up((int)frame::register_save_words, 2); // start doubleword aligned
// only G int registers are saved explicitly; others are found in register windows
for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
@@ -272,7 +273,7 @@ void Runtime1::initialize_pd() {
// this should match assembler::total_frame_size_in_bytes, which
// isn't callable from this context. It's checked by an assert when
// it's used though.
- frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);
+ frame_size_in_bytes = align_up(sp_offset * wordSize, 8);
}
@@ -930,11 +931,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Label not_already_dirty, restart, refill, young_card;
-#ifdef _LP64
__ srlx(addr, CardTableModRefBS::card_shift, addr);
-#else
- __ srl(addr, CardTableModRefBS::card_shift, addr);
-#endif
AddressLiteral rs(byte_map_base);
__ set(rs, cardtable); // cardtable :=
diff --git a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp
index 7e7cddc54da..27a0009c1a8 100644
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
define_pd_global(bool, BackgroundCompilation, true);
define_pd_global(bool, CICompileOSR, true);
-define_pd_global(bool, InlineIntrinsics, false);
+define_pd_global(bool, InlineIntrinsics, true);
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, true);
define_pd_global(bool, UseOnStackReplacement, true);
@@ -65,7 +65,6 @@ define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
define_pd_global(bool, IdealizeClearArrayNode, true);
-#ifdef _LP64
// We need to make sure that all generated code is within
// 2 gigs of the libjvm.so runtime routines so we can use
// the faster "call" instruction rather than the expensive
@@ -81,17 +80,6 @@ define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
-#else
-// InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize, 32*M);
-define_pd_global(intx, NonProfiledCodeHeapSize, 13*M);
-define_pd_global(intx, ProfiledCodeHeapSize, 14*M);
-define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
-define_pd_global(intx, CodeCacheExpansionSize, 32*K);
-// Ergonomics related flags
-define_pd_global(uint64_t, MaxRAM, 4ULL*G);
-#endif
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
diff --git a/hotspot/src/cpu/sparc/vm/copy_sparc.hpp b/hotspot/src/cpu/sparc/vm/copy_sparc.hpp
index a9107d8dfdd..ae1dd0b5dcf 100644
--- a/hotspot/src/cpu/sparc/vm/copy_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/copy_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -114,14 +114,8 @@ static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
-#ifdef _LP64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
-#else
- // Guarantee use of ldd/std via some asm code, because compiler won't.
- // See solaris_sparc.il.
- _Copy_conjoint_jlongs_atomic(from, to, count);
-#endif
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
@@ -162,7 +156,6 @@ static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count)
}
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
-#ifdef _LP64
guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0,
"unaligned fill words");
julong* to = (julong*)tohw;
@@ -170,12 +163,6 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
while (count-- > 0) {
*to++ = v;
}
-#else // _LP64
- juint* to = (juint*)tohw;
- while (count-- > 0) {
- *to++ = value;
- }
-#endif // _LP64
}
typedef void (*_zero_Fn)(HeapWord* to, size_t count);
diff --git a/hotspot/src/cpu/sparc/vm/debug_sparc.cpp b/hotspot/src/cpu/sparc/vm/debug_sparc.cpp
deleted file mode 100644
index 9f3f40ada7c..00000000000
--- a/hotspot/src/cpu/sparc/vm/debug_sparc.cpp
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "code/codeCache.hpp"
-#include "code/nmethod.hpp"
-#include "runtime/frame.hpp"
-#include "runtime/init.hpp"
-#include "runtime/os.hpp"
-#include "utilities/debug.hpp"
-
-#ifndef PRODUCT
-
-extern "C" void findpc(int x);
-
-
-void pd_ps(frame f) {
- intptr_t* sp = f.sp();
- intptr_t* prev_sp = sp - 1;
- intptr_t *pc = NULL;
- intptr_t *next_pc = NULL;
- int count = 0;
- tty->print_cr("register window backtrace from " INTPTR_FORMAT ":", p2i(sp));
- while (sp != NULL && ((intptr_t)sp & 7) == 0 && sp > prev_sp && sp < prev_sp+1000) {
- pc = next_pc;
- next_pc = (intptr_t*) sp[I7->sp_offset_in_saved_window()];
- tty->print("[%d] sp=" INTPTR_FORMAT " pc=", count, p2i(sp));
- findpc((intptr_t)pc);
- if (WizardMode && Verbose) {
- // print register window contents also
- tty->print_cr(" L0..L7: {"
- INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " "
- INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " ",
- sp[0+0], sp[0+1], sp[0+2], sp[0+3],
- sp[0+4], sp[0+5], sp[0+6], sp[0+7]);
- tty->print_cr(" I0..I7: {"
- INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " "
- INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " ",
- sp[8+0], sp[8+1], sp[8+2], sp[8+3],
- sp[8+4], sp[8+5], sp[8+6], sp[8+7]);
- // (and print stack frame contents too??)
-
- CodeBlob *b = CodeCache::find_blob((address) pc);
- if (b != NULL) {
- if (b->is_nmethod()) {
- Method* m = ((nmethod*)b)->method();
- int nlocals = m->max_locals();
- int nparams = m->size_of_parameters();
- tty->print_cr("compiled java method (locals = %d, params = %d)", nlocals, nparams);
- }
- }
- }
- prev_sp = sp;
- sp = (intptr_t *)sp[FP->sp_offset_in_saved_window()];
- sp = (intptr_t *)((intptr_t)sp + STACK_BIAS);
- count += 1;
- }
- if (sp != NULL)
- tty->print("[%d] sp=" INTPTR_FORMAT " [bogus sp!]", count, p2i(sp));
-}
-
-#endif // PRODUCT
diff --git a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp
index c666f1eca47..4fa7e6a973b 100644
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -114,11 +114,7 @@ address RegisterMap::pd_location(VMReg regname) const {
// register locations. When that is fixed we'd will return NULL
// (or assert here).
reg = regname->prev()->as_Register();
-#ifdef _LP64
second_word = sizeof(jint);
-#else
- return NULL;
-#endif // _LP64
} else {
reg = regname->as_Register();
}
@@ -332,9 +328,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// Construct an unpatchable, deficient frame
void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
-#ifdef _LP64
assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
-#endif
_sp = sp;
_younger_sp = NULL;
_pc = pc;
@@ -410,7 +404,55 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpret
frame::frame(void* sp, void* fp, void* pc) {
init((intptr_t*)sp, (address)pc, NULL);
}
-#endif
+
+extern "C" void findpc(intptr_t x);
+
+void frame::pd_ps() {
+ intptr_t* curr_sp = sp();
+ intptr_t* prev_sp = curr_sp - 1;
+ intptr_t *pc = NULL;
+ intptr_t *next_pc = NULL;
+ int count = 0;
+ tty->print_cr("register window backtrace from " INTPTR_FORMAT ":", p2i(curr_sp));
+ while (curr_sp != NULL && ((intptr_t)curr_sp & 7) == 0 && curr_sp > prev_sp && curr_sp < prev_sp+1000) {
+ pc = next_pc;
+ next_pc = (intptr_t*) curr_sp[I7->sp_offset_in_saved_window()];
+ tty->print("[%d] curr_sp=" INTPTR_FORMAT " pc=", count, p2i(curr_sp));
+ findpc((intptr_t)pc);
+ if (WizardMode && Verbose) {
+ // print register window contents also
+ tty->print_cr(" L0..L7: {"
+ INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " "
+ INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " ",
+ curr_sp[0+0], curr_sp[0+1], curr_sp[0+2], curr_sp[0+3],
+ curr_sp[0+4], curr_sp[0+5], curr_sp[0+6], curr_sp[0+7]);
+ tty->print_cr(" I0..I7: {"
+ INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " "
+ INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " ",
+ curr_sp[8+0], curr_sp[8+1], curr_sp[8+2], curr_sp[8+3],
+ curr_sp[8+4], curr_sp[8+5], curr_sp[8+6], curr_sp[8+7]);
+ // (and print stack frame contents too??)
+
+ CodeBlob *b = CodeCache::find_blob((address) pc);
+ if (b != NULL) {
+ if (b->is_nmethod()) {
+ Method* m = ((nmethod*)b)->method();
+ int nlocals = m->max_locals();
+ int nparams = m->size_of_parameters();
+ tty->print_cr("compiled java method (locals = %d, params = %d)", nlocals, nparams);
+ }
+ }
+ }
+ prev_sp = curr_sp;
+ curr_sp = (intptr_t *)curr_sp[FP->sp_offset_in_saved_window()];
+ curr_sp = (intptr_t *)((intptr_t)curr_sp + STACK_BIAS);
+ count += 1;
+ }
+ if (curr_sp != NULL)
+ tty->print("[%d] curr_sp=" INTPTR_FORMAT " [bogus sp!]", count, p2i(curr_sp));
+}
+
+#endif // PRODUCT
bool frame::is_interpreted_frame() const {
return Interpreter::contains(pc());
@@ -693,11 +735,9 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
address l_addr = (address)l_scratch;
-#ifdef _LP64
// On 64-bit the result for 1/8/16/32-bit result types is in the other
// word half
l_addr += wordSize/2;
-#endif
switch (type) {
case T_OBJECT:
diff --git a/hotspot/src/cpu/sparc/vm/frame_sparc.hpp b/hotspot/src/cpu/sparc/vm/frame_sparc.hpp
index 48cf45bd714..b3f16e4e377 100644
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -100,11 +100,7 @@
// size of each block, in order of increasing address:
register_save_words = 16,
-#ifdef _LP64
callee_aggregate_return_pointer_words = 0,
-#else
- callee_aggregate_return_pointer_words = 1,
-#endif
callee_register_argument_save_area_words = 6,
// memory_parameter_words = ,
diff --git a/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp b/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp
index e10261f5033..efc92775e37 100644
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp
@@ -27,6 +27,7 @@
#include "asm/macroAssembler.hpp"
#include "code/vmreg.inline.hpp"
+#include "utilities/align.hpp"
// Inline functions for SPARC frames:
@@ -134,7 +135,7 @@ inline void frame::interpreter_frame_set_tos_address( intptr_t* x ) {
// Also begin is one past last monitor.
inline BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
- int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
+ int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words, WordsPerLong);
return (BasicObjectLock *)fp_addr_at(-rounded_vm_local_words);
}
@@ -148,7 +149,7 @@ inline void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
}
inline int frame::interpreter_frame_monitor_size() {
- return round_to(BasicObjectLock::size(), WordsPerLong);
+ return align_up(BasicObjectLock::size(), WordsPerLong);
}
inline Method** frame::interpreter_frame_method_addr() const {
diff --git a/hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp b/hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp
index ba6d99cd8ea..465a6a014e3 100644
--- a/hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,24 +38,14 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
// The expected size in bytes of a cache line, used to pad data structures.
#if defined(TIERED)
- #ifdef _LP64
- // tiered, 64-bit, large machine
- #define DEFAULT_CACHE_LINE_SIZE 128
- #else
- // tiered, 32-bit, medium machine
- #define DEFAULT_CACHE_LINE_SIZE 64
- #endif
+ // tiered, 64-bit, large machine
+ #define DEFAULT_CACHE_LINE_SIZE 128
#elif defined(COMPILER1)
// pure C1, 32-bit, small machine
#define DEFAULT_CACHE_LINE_SIZE 16
#elif defined(COMPILER2) || defined(SHARK)
- #ifdef _LP64
- // pure C2, 64-bit, large machine
- #define DEFAULT_CACHE_LINE_SIZE 128
- #else
- // pure C2, 32-bit, medium machine
- #define DEFAULT_CACHE_LINE_SIZE 64
- #endif
+ // pure C2, 64-bit, large machine
+ #define DEFAULT_CACHE_LINE_SIZE 128
#endif
#if defined(SOLARIS)
diff --git a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp
index ae9ba7e467f..89361fcddbd 100644
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,18 +56,10 @@ define_pd_global(intx, InlineSmallCode, 1500);
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0))
-#ifdef _LP64
-// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
#define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
-#else
-define_pd_global(intx, CompilerThreadStackSize, 512);
-define_pd_global(intx, ThreadStackSize, 512);
-define_pd_global(intx, VMThreadStackSize, 512);
-#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
-#endif // _LP64
#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
@@ -125,9 +117,6 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
"Minimum size in bytes when block copy will be used") \
range(1, max_jint) \
\
- develop(bool, UseV8InstrsOnly, false, \
- "Use SPARC-V8 Compliant instruction subset") \
- \
product(bool, UseNiagaraInstrs, false, \
"Use Niagara-efficient instruction subset") \
\
diff --git a/hotspot/src/cpu/sparc/vm/icBuffer_sparc.cpp b/hotspot/src/cpu/sparc/vm/icBuffer_sparc.cpp
index d6942f6a7ca..5b384514489 100644
--- a/hotspot/src/cpu/sparc/vm/icBuffer_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/icBuffer_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,13 +32,9 @@
#include "oops/oop.inline.hpp"
int InlineCacheBuffer::ic_stub_code_size() {
-#ifdef _LP64
return (NativeMovConstReg::instruction_size + // sethi;add
NativeJump::instruction_size + // sethi; jmp; delay slot
(1*BytesPerInstWord) + 1); // flush + 1 extra byte
-#else
- return (2+2+ 1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
-#endif
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
diff --git a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
index ef2362897f7..16f20fb99d1 100644
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,11 +38,7 @@
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
-
-#ifndef FAST_DISPATCH
-#define FAST_DISPATCH 1
-#endif
-#undef FAST_DISPATCH
+#include "utilities/align.hpp"
// Implementation of InterpreterMacroAssembler
@@ -70,7 +66,7 @@ void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args
br(Assembler::negative, true, Assembler::pt, skip_move);
delayed()->mov(G0, delta);
bind(skip_move);
- round_to(delta, WordsPerLong); // make multiple of 2 (SP must be 2-word aligned)
+ align_up(delta, WordsPerLong); // make multiple of 2 (SP must be 2-word aligned)
sll(delta, LogBytesPerWord, delta); // extra space for locals in bytes
}
@@ -78,23 +74,12 @@ void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args
// own dispatch. The dispatch address is computed and placed in IdispatchAddress
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
assert_not_delayed();
-#ifdef FAST_DISPATCH
- // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
- // they both use I2.
- assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
- ldub(Lbcp, bcp_incr, Lbyte_code); // load next bytecode
- add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
- // add offset to correct dispatch table
- sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
- ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
-#else
ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
// dispatch table to use
AddressLiteral tbl(Interpreter::dispatch_table(state));
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
set(tbl, G3_scratch); // compute addr of table
ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr
-#endif
}
@@ -281,23 +266,11 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* tab
// %%%%% maybe implement +VerifyActivationFrameSize here
//verify_thread(); //too slow; we will just verify on method entry & exit
if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
-#ifdef FAST_DISPATCH
- if (table == Interpreter::dispatch_table(state)) {
- // use IdispatchTables
- add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
- // add offset to correct dispatch table
- sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
- ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // get entry addr
- } else {
-#endif
- // dispatch table to use
- AddressLiteral tbl(table);
- sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
- set(tbl, G3_scratch); // compute addr of table
- ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
-#ifdef FAST_DISPATCH
- }
-#endif
+ // dispatch table to use
+ AddressLiteral tbl(table);
+ sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
+ set(tbl, G3_scratch); // compute addr of table
+ ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
jmp( G3_scratch, 0 );
if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
else delayed()->nop();
@@ -318,52 +291,32 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* tab
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
assert_not_delayed();
-#ifdef _LP64
ldf(FloatRegisterImpl::D, r1, offset, d);
-#else
- ldf(FloatRegisterImpl::S, r1, offset, d);
- ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
-#endif
}
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
assert_not_delayed();
-#ifdef _LP64
stf(FloatRegisterImpl::D, d, r1, offset);
// store something more useful here
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
-#else
- stf(FloatRegisterImpl::S, d, r1, offset);
- stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
-#endif
}
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
assert_not_delayed();
-#ifdef _LP64
ldx(r1, offset, rd);
-#else
- ld(r1, offset, rd);
- ld(r1, offset + Interpreter::stackElementSize, rd->successor());
-#endif
}
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
assert_not_delayed();
-#ifdef _LP64
stx(l, r1, offset);
// store something more useful here
stx(G0, r1, offset+Interpreter::stackElementSize);
-#else
- st(l, r1, offset);
- st(l->successor(), r1, offset + Interpreter::stackElementSize);
-#endif
}
void InterpreterMacroAssembler::pop_i(Register r) {
@@ -527,9 +480,7 @@ void InterpreterMacroAssembler::empty_expression_stack() {
sub( Lesp, Gframe_size, Gframe_size );
and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
debug_only(verify_sp(Gframe_size, G4_scratch));
-#ifdef _LP64
sub(Gframe_size, STACK_BIAS, Gframe_size );
-#endif
mov(Gframe_size, SP);
bind(done);
@@ -541,28 +492,20 @@ void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
Label Bad, OK;
// Saved SP must be aligned.
-#ifdef _LP64
btst(2*BytesPerWord-1, Rsp);
-#else
- btst(LongAlignmentMask, Rsp);
-#endif
br(Assembler::notZero, false, Assembler::pn, Bad);
delayed()->nop();
// Saved SP, plus register window size, must not be above FP.
add(Rsp, frame::register_save_words * wordSize, Rtemp);
-#ifdef _LP64
sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP
-#endif
cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);
// Saved SP must not be ridiculously below current SP.
size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
set(maxstack, Rtemp);
sub(SP, Rtemp, Rtemp);
-#ifdef _LP64
add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp
-#endif
cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);
ba_short(OK);
@@ -584,9 +527,7 @@ void InterpreterMacroAssembler::verify_esp(Register Resp) {
delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
stop("too many pops: Lesp points into monitor area");
bind(OK1);
-#ifdef _LP64
sub(Resp, STACK_BIAS, Resp);
-#endif
cmp(Resp, SP);
brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
@@ -696,21 +637,12 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
}
br(Assembler::zero, true, Assembler::pn, aligned);
-#ifdef _LP64
delayed()->ldsw(Rtmp, 0, Rdst);
-#else
- delayed()->ld(Rtmp, 0, Rdst);
-#endif
ldub(Lbcp, bcp_offset + 3, Rdst);
ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst);
ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst);
-#ifdef _LP64
ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
-#else
- // Unsigned load is faster than signed on some implementations
- ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
-#endif
or3(Rtmp, Rdst, Rdst );
bind(aligned);
@@ -796,7 +728,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
sll(index, LogBytesPerHeapOop, tmp);
get_constant_pool(result);
// load pointer for resolved_references[] objArray
- ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
+ ld_ptr(result, ConstantPool::cache_offset_in_bytes(), result);
+ ld_ptr(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
ld_ptr(result, 0, result);
// Add in the index
@@ -805,6 +738,24 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
}
+// load cpool->resolved_klass_at(index)
+void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool,
+ Register Roffset, Register Rklass) {
+ // int value = *this_cp->int_at_addr(which);
+ // int resolved_klass_index = extract_low_short_from_int(value);
+ //
+ // Because SPARC is big-endian, the low_short is at (cpool->int_at_addr(which) + 2 bytes)
+ add(Roffset, Rcpool, Roffset);
+ lduh(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
+
+ Register Rresolved_klasses = Rklass;
+ ld_ptr(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes(), Rresolved_klasses);
+ sll(Roffset, LogBytesPerWord, Roffset);
+ add(Roffset, Array<Klass*>::base_offset_in_bytes(), Roffset);
+ ld_ptr(Rresolved_klasses, Roffset, Rklass);
+}
+
+
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
@@ -910,10 +861,8 @@ void InterpreterMacroAssembler::index_check_without_pop(Register array, Register
assert_not_delayed();
verify_oop(array);
-#ifdef _LP64
// sign extend since tos (index) can be a 32bit value
sra(index, G0, index);
-#endif // _LP64
// check array
Label ptr_ok;
@@ -1191,11 +1140,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
// return tos
assert(Otos_l1 == Otos_i, "adjust code below");
switch (state) {
-#ifdef _LP64
case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
-#else
- case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I1
-#endif
case btos: // fall through
case ztos: // fall through
case ctos:
@@ -1207,20 +1152,6 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
-
-#if defined(COMPILER2) && !defined(_LP64)
- if (state == ltos) {
- // C2 expects long results in G1 we can't tell if we're returning to interpreted
- // or compiled so just be safe use G1 and O0/O1
-
- // Shift bits into high (msb) of G1
- sllx(Otos_l1->after_save(), 32, G1);
- // Zero extend low bits
- srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
- or3 (Otos_l2->after_save(), G1, G1);
- }
-#endif /* COMPILER2 */
-
}
// Lock object
@@ -1270,9 +1201,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
// Check if owner is self by comparing the value in the markOop of object
// with the stack pointer
sub(temp_reg, SP, temp_reg);
-#ifdef _LP64
sub(temp_reg, STACK_BIAS, temp_reg);
-#endif
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
// Composite "andcc" test:
@@ -2381,7 +2310,7 @@ void InterpreterMacroAssembler::store_local_double( Register index, FloatRegiste
int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
- int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
+ int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words, WordsPerLong);
return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;
}
@@ -2711,11 +2640,7 @@ void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
if (is_native_call) {
stf(FloatRegisterImpl::D, F0, d_tmp);
-#ifdef _LP64
stx(O0, l_tmp);
-#else
- std(O0, l_tmp);
-#endif
} else {
push(state);
}
@@ -2724,11 +2649,7 @@ void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native
void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
if (is_native_call) {
ldf(FloatRegisterImpl::D, d_tmp, F0);
-#ifdef _LP64
ldx(l_tmp, O0);
-#else
- ldd(l_tmp, O0);
-#endif
} else {
pop(state);
}
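
The new `load_resolved_klass_at_offset` helper added above encodes, in SPARC assembly, the constant-pool lookup that its comments quote: read the 32-bit CP slot, take its low 16 bits (which sit at byte offset +2 on big-endian SPARC) as `resolved_klass_index`, and index the pool's `resolved_klasses` array with it. A hedged pseudo-C++ sketch of that logic, using only names that appear in the comments (the accessors are assumptions for illustration, not the VM's exact API):

    // Sketch only: mirrors the assembly sequence in load_resolved_klass_at_offset.
    Klass* resolved_klass_at(ConstantPool* cp, int which) {
      int value = *cp->int_at_addr(which);                  // {name_index, resolved_klass_index}
      int resolved_klass_index = extract_low_short_from_int(value);
      return cp->resolved_klasses()->at(resolved_klass_index);
    }
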
diff --git a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp
index 1baceb1233c..23e85374904 100644
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,9 +70,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool check_exception=true
);
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
-
// base routine for all dispatches
void dispatch_base(TosState state, address* table);
@@ -80,6 +77,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {}
+ virtual void check_and_handle_popframe(Register scratch_reg);
+ virtual void check_and_handle_earlyret(Register scratch_reg);
+
void jump_to_entry(address entry);
virtual void load_earlyret_value(TosState state);
@@ -196,6 +196,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
+ // load cpool->resolved_klass_at(index)
+ void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);
+
// common code
void field_offset_at(int n, Register tmp, Register dest, Register base);
diff --git a/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp b/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp
index 0e1d6c992cd..0bd430b2217 100644
--- a/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,47 +53,24 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
Argument jni_arg(jni_offset(), false);
Register Rtmp = O0;
-#ifdef _LP64
__ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_long_argument(Rtmp, jni_arg);
-#else
- __ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
- __ store_argument(Rtmp, jni_arg);
- __ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 0), Rtmp);
- Argument successor(jni_arg.successor());
- __ store_argument(Rtmp, successor);
-#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
Argument jni_arg(jni_offset(), false);
-#ifdef _LP64
FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
__ store_float_argument(Rtmp, jni_arg);
-#else
- Register Rtmp = O0;
- __ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
- __ store_argument(Rtmp, jni_arg);
-#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
Argument jni_arg(jni_offset(), false);
-#ifdef _LP64
FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::D, Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_double_argument(Rtmp, jni_arg);
-#else
- Register Rtmp = O0;
- __ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
- __ store_argument(Rtmp, jni_arg);
- __ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
- Argument successor(jni_arg.successor());
- __ store_argument(Rtmp, successor);
-#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
@@ -171,7 +148,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
add_signature( non_float );
}
-#ifdef _LP64
virtual void pass_float() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
_from -= Interpreter::stackElementSize;
@@ -190,23 +166,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
_from -= 2*Interpreter::stackElementSize;
add_signature( long_sig );
}
-#else
- // pass_double() is pass_long() and pass_float() only _LP64
- virtual void pass_long() {
- _to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
- _to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
- _to += 2;
- _from -= 2*Interpreter::stackElementSize;
- add_signature( non_float );
- }
-
- virtual void pass_float() {
- *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
- _from -= Interpreter::stackElementSize;
- add_signature( non_float );
- }
-
-#endif // _LP64
virtual void add_signature( intptr_t sig_type ) {
if ( _argcount < (sizeof (intptr_t))*4 ) {
@@ -217,7 +176,7 @@ class SlowSignatureHandler: public NativeSignatureIterator {
public:
- SlowSignatureHandler(methodHandle method, address from, intptr_t* to, intptr_t *RegArgSig) : NativeSignatureIterator(method) {
+ SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to, intptr_t *RegArgSig) : NativeSignatureIterator(method) {
_from = from;
_to = to;
_RegArgSignature = RegArgSig;
diff --git a/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.hpp b/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.hpp
index 6d90042c013..72ca060f979 100644
--- a/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
public:
// Creation
- SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
}
diff --git a/hotspot/src/cpu/sparc/vm/javaFrameAnchor_sparc.hpp b/hotspot/src/cpu/sparc/vm/javaFrameAnchor_sparc.hpp
index 1b4b3bf2ffc..c51545c09cc 100644
--- a/hotspot/src/cpu/sparc/vm/javaFrameAnchor_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/javaFrameAnchor_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -88,9 +88,7 @@ private:
// _last_Java_sp will always be a an unbiased stack pointer
// if is is biased then some setter screwed up. This is
// deadly.
-#ifdef _LP64
assert(((intptr_t)_last_Java_sp & 0xF) == 0, "Biased last_Java_sp");
-#endif
return _last_Java_sp;
}
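
Editor's note: the alignment assert above now runs unconditionally. On 64-bit SPARC the saved stack pointer is kept biased (the V9 stack bias of 2047 bytes is assumed here for illustration), so a value that still carries the bias trips the low-bit check immediately. A small sketch of that invariant:

    #include <cassert>
    #include <cstdint>

    const intptr_t STACK_BIAS = 2047;  // SPARC V9 64-bit stack bias (assumed value)

    bool looks_unbiased(intptr_t sp) {
      return (sp & 0xF) == 0;          // an unbiased SP is 16-byte aligned
    }

    int main() {
      intptr_t real_sp   = 0x7fff0000;            // 16-byte aligned frame address
      intptr_t biased_sp = real_sp - STACK_BIAS;  // what the %sp register actually holds
      assert(looks_unbiased(real_sp));
      assert(!looks_unbiased(biased_sp));         // odd low bits betray a still-biased value
      return 0;
    }
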
diff --git a/hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp b/hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp
index ff9fcd69472..178b9c74870 100644
--- a/hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp
@@ -152,39 +152,19 @@ address JNI_FastGetField::generate_fast_get_long_field() {
__ ld_ptr (O1, 0, O5);
__ add (O5, O4, O5);
-#ifndef _LP64
- assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small");
- speculative_load_pclist[count++] = __ pc();
- __ ld (O5, 0, G2);
-
- speculative_load_pclist[count] = __ pc();
- __ ld (O5, 4, O3);
-#else
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
__ ldx (O5, 0, O3);
-#endif
__ ld (cnt_addr, G1);
__ cmp (G1, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1);
-#ifndef _LP64
- __ mov (G2, O0);
- __ retl ();
- __ delayed()->mov (O3, O1);
-#else
__ retl ();
__ delayed()->mov (O3, O0);
-#endif
-#ifndef _LP64
- slowcase_entry_pclist[count-1] = __ pc();
- slowcase_entry_pclist[count++] = __ pc() ;
-#else
slowcase_entry_pclist[count++] = __ pc();
-#endif
__ bind (label1);
__ mov (O7, G1);
diff --git a/hotspot/src/cpu/sparc/vm/jniTypes_sparc.hpp b/hotspot/src/cpu/sparc/vm/jniTypes_sparc.hpp
index 9c3049141fd..50b51fff2c3 100644
--- a/hotspot/src/cpu/sparc/vm/jniTypes_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/jniTypes_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,18 +55,10 @@ public:
static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; }
static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }
-#ifdef _LP64
// Longs are stored in native format in one JavaCallArgument slot at *(to+1).
static inline void put_long(jlong from, intptr_t *to) { *(jlong *)(to + 1 + 0) = from; }
static inline void put_long(jlong from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = from; pos += 2; }
static inline void put_long(jlong *from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = *from; pos += 2; }
-#else
- // Longs are stored in reversed native word format in two JavaCallArgument slots at *to.
- // The high half is in *(to+1) and the low half in *to.
- static inline void put_long(jlong from, intptr_t *to) { put_int2r((jint *)&from, (jint *)to); }
- static inline void put_long(jlong from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
- static inline void put_long(jlong *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
-#endif
// Oops are stored in native format in one JavaCallArgument slot at *to.
static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; }
@@ -78,39 +70,21 @@ public:
static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; }
static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }
-#ifdef _LP64
// Doubles are stored in native word format in one JavaCallArgument slot at *(to+1).
static inline void put_double(jdouble from, intptr_t *to) { *(jdouble *)(to + 1 + 0) = from; }
static inline void put_double(jdouble from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = from; pos += 2; }
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = *from; pos += 2; }
-#else
- // Doubles are stored in reversed native word format in two JavaCallArgument slots at *to.
- static inline void put_double(jdouble from, intptr_t *to) { put_int2r((jint *)&from, (jint *)to); }
- static inline void put_double(jdouble from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
- static inline void put_double(jdouble *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
-#endif
// The get_xxx routines, on the other hand, actually _do_ fetch
// java primitive types from the interpreter stack.
static inline jint get_int(intptr_t *from) { return *(jint *)from; }
-#ifdef _LP64
static inline jlong get_long(intptr_t *from) { return *(jlong *)from; }
-#else
- static inline jlong get_long(intptr_t *from) { return ((jlong)(*( signed int *)((jint *)from )) << 32) |
- ((jlong)(*(unsigned int *)((jint *)from + 1)) << 0); }
-#endif
static inline oop get_obj(intptr_t *from) { return *(oop *)from; }
static inline jfloat get_float(intptr_t *from) { return *(jfloat *)from; }
-#ifdef _LP64
static inline jdouble get_double(intptr_t *from) { return *(jdouble *)from; }
-#else
- static inline jdouble get_double(intptr_t *from) { jlong jl = ((jlong)(*( signed int *)((jint *)from )) << 32) |
- ((jlong)(*(unsigned int *)((jint *)from + 1)) << 0);
- return *(jdouble *)&jl; }
-#endif
};
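
Editor's note: with the 32-bit put_int2r variants gone, a long or double always lands in the second of the two JavaCallArgument slots reserved for it (*(to + 1 + pos)) and pos advances by two. A stand-alone sketch of that slot layout, using plain fixed-width types in place of the real jlong/JNITypes declarations:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    typedef int64_t jlong_t;  // stand-in for jlong

    // Mirrors the 64-bit-only put_long: the value occupies the upper slot of
    // the two-slot pair; pos skips past both slots.
    static void put_long_sketch(jlong_t from, intptr_t* to, int& pos) {
      std::memcpy(to + 1 + pos, &from, sizeof(from));
      pos += 2;
    }

    int main() {
      intptr_t slots[4] = {0, 0, 0, 0};
      int pos = 0;
      put_long_sketch(0x1122334455667788LL, slots, pos);
      assert(pos == 2);                // two slots consumed
      jlong_t back;
      std::memcpy(&back, &slots[1], sizeof(back));
      assert(back == 0x1122334455667788LL);
      assert(slots[0] == 0);           // the lower slot is untouched padding
      return 0;
    }
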
diff --git a/hotspot/src/cpu/sparc/vm/jni_sparc.h b/hotspot/src/cpu/sparc/vm/jni_sparc.h
index a7540600488..3402f67c992 100644
--- a/hotspot/src/cpu/sparc/vm/jni_sparc.h
+++ b/hotspot/src/cpu/sparc/vm/jni_sparc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,10 +39,6 @@
typedef int jint;
-#ifdef _LP64
- typedef long jlong;
-#else
- typedef long long jlong;
-#endif
+typedef long jlong;
typedef signed char jbyte;
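
Editor's note: the header now bakes in the LP64 data model, where a plain long is 64 bits. A one-line compile-time check makes that assumption explicit (a sketch, not part of the patch):

    typedef long jlong;   // as in the patched jni_sparc.h
    static_assert(sizeof(jlong) == 8, "LP64-only: jlong must be 64 bits wide");
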
diff --git a/hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp b/hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp
index 4976e1686b2..6c3c5568d48 100644
--- a/hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "jvmci/jvmciJavaClasses.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
@@ -44,16 +45,12 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
- Handle obj = HotSpotObjectConstantImpl::object(constant);
+ Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj());
if (HotSpotObjectConstantImpl::compressed(constant)) {
-#ifdef _LP64
int oop_index = _oop_recorder->find_index(value);
RelocationHolder rspec = oop_Relocation::spec(oop_index);
_instructions->relocate(pc, rspec, 1);
-#else
- JVMCI_ERROR("compressed oop on 32bit");
-#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
move->set_data((intptr_t) value);
@@ -69,14 +66,10 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
-#ifdef _LP64
NativeMovConstReg32* move = nativeMovConstReg32_at(pc);
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
move->set_data((intptr_t)narrowOop);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
-#else
- JVMCI_ERROR("compressed Klass* on 32bit");
-#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);
@@ -98,7 +91,7 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
}
TRACE_jvmci_3("relocating at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset);
}else {
- int const_size = align_size_up(_constants->end()-_constants->start(), CodeEntryAlignment);
+ int const_size = align_up(_constants->end()-_constants->start(), CodeEntryAlignment);
NativeMovRegMem* load = nativeMovRegMem_at(pc);
// This offset must match with SPARCLoadConstantTableBaseOp.emitCode
load->set_offset(- (const_size - data_offset + Assembler::min_simm13()));
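
Editor's note: this changeset also swaps the old round_to/align_size_up helpers for the align_up family from the new utilities/align.hpp include. For a power-of-two alignment such as CodeEntryAlignment, rounding up is the usual add-and-mask; a minimal sketch of that arithmetic (not the HotSpot implementation):

    #include <cassert>
    #include <cstdint>

    // Round 'size' up to the next multiple of a power-of-two 'alignment'.
    static inline intptr_t align_up_sketch(intptr_t size, intptr_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(align_up_sketch(0, 32)  == 0);
      assert(align_up_sketch(1, 32)  == 32);   // a one-byte constant section pads to 32
      assert(align_up_sketch(64, 32) == 64);   // already aligned values are unchanged
      return 0;
    }
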
diff --git a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp
index e0065402b05..51d01936d9b 100644
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
+#include "prims/jvm.h"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
@@ -38,6 +39,7 @@
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
@@ -296,11 +298,6 @@ void MacroAssembler::verify_thread() {
mov(G3, L3); // avoid clobbering G3
mov(G4, L4); // avoid clobbering G4
mov(G5_method, L5); // avoid clobbering G5_method
-#if defined(COMPILER2) && !defined(_LP64)
- // Save & restore possible 64-bit Long arguments in G-regs
- srlx(G1,32,L0);
- srlx(G4,32,L6);
-#endif
call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
delayed()->mov(G2_thread, O0);
@@ -309,15 +306,6 @@ void MacroAssembler::verify_thread() {
mov(L3, G3); // restore G3
mov(L4, G4); // restore G4
mov(L5, G5_method); // restore G5_method
-#if defined(COMPILER2) && !defined(_LP64)
- // Save & restore possible 64-bit Long arguments in G-regs
- sllx(L0,32,G2); // Move old high G1 bits high in G2
- srl(G1, 0,G1); // Clear current high G1 bits
- or3 (G1,G2,G1); // Recover 64-bit G1
- sllx(L6,32,G2); // Move old high G4 bits high in G2
- srl(G4, 0,G4); // Clear current high G4 bits
- or3 (G4,G2,G4); // Recover 64-bit G4
-#endif
restore(O0, 0, G2_thread);
}
}
@@ -387,7 +375,6 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
st_ptr(last_Java_pc, pc_addr);
}
-#ifdef _LP64
#ifdef ASSERT
// Make sure that we have an odd stack
Label StackOk;
@@ -400,9 +387,6 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
add( last_java_sp, STACK_BIAS, G4_scratch );
st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
-#else
- st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
-#endif // _LP64
}
void MacroAssembler::reset_last_Java_frame(void) {
@@ -658,11 +642,7 @@ void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index)
void MacroAssembler::card_table_write(jbyte* byte_map_base,
Register tmp, Register obj) {
-#ifdef _LP64
srlx(obj, CardTableModRefBS::card_shift, obj);
-#else
- srl(obj, CardTableModRefBS::card_shift, obj);
-#endif
assert(tmp != obj, "need separate temp reg");
set((address) byte_map_base, tmp);
stb(G0, tmp, obj);
@@ -672,10 +652,9 @@ void MacroAssembler::card_table_write(jbyte* byte_map_base,
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
address save_pc;
int shiftcnt;
-#ifdef _LP64
-# ifdef CHECK_DELAY
- assert_not_delayed((char*) "cannot put two instructions in delay slot");
-# endif
+#ifdef VALIDATE_PIPELINE
+ assert_no_delay("Cannot put two instructions in delay-slot.");
+#endif
v9_dep();
save_pc = pc();
@@ -719,9 +698,6 @@ void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, b
while (pc() < (save_pc + (7 * BytesPerInstWord)))
nop();
}
-#else
- Assembler::sethi(addrlit.value(), d, addrlit.rspec());
-#endif
}
@@ -736,7 +712,6 @@ void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d)
int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
-#ifdef _LP64
if (worst_case) return 7;
intptr_t iaddr = (intptr_t) a;
int msb32 = (int) (iaddr >> 32);
@@ -756,9 +731,6 @@ int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
}
}
return count;
-#else
- return 1;
-#endif
}
int MacroAssembler::worst_case_insts_for_set() {
@@ -781,7 +753,7 @@ void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, boo
return;
}
}
- assert_not_delayed((char*) "cannot put two instructions in delay slot");
+ assert_no_delay("Cannot put two instructions in delay-slot.");
internal_sethi(addrlit, d, ForceRelocatable);
if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
add(d, addrlit.low10(), d, addrlit.rspec());
@@ -1117,7 +1089,7 @@ void RegistersForDebugging::print(outputStream* s) {
}
void RegistersForDebugging::save_registers(MacroAssembler* a) {
- a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
+ a->sub(FP, align_up(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
a->flushw();
int i;
for (i = 0; i < 8; ++i) {
@@ -1339,7 +1311,7 @@ void MacroAssembler::verify_oop_subroutine() {
wrccr( O5_save_flags ); // Restore CCR's
- save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
+ save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
// stop_subroutine expects message pointer in I1.
mov(I1, O1);
@@ -1368,7 +1340,7 @@ void MacroAssembler::stop(const char* msg) {
// add one word to size in case struct is odd number of words long
// It must be doubleword-aligned for storing doubles into it.
- save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
+ save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
// stop_subroutine expects message pointer in I1.
// Size of set() should stay the same
@@ -1391,7 +1363,7 @@ void MacroAssembler::stop(const char* msg) {
void MacroAssembler::warn(const char* msg) {
- save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
+ save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
RegistersForDebugging::save_registers(this);
mov(O0, L0);
// Size of set() should stay the same
@@ -1422,6 +1394,13 @@ void MacroAssembler::untested(const char* what) {
}
+void MacroAssembler::unimplemented(const char* what) {
+ char* b = new char[1024];
+ jio_snprintf(b, 1024, "unimplemented: %s", what);
+ stop(b);
+}
+
+
void MacroAssembler::stop_subroutine() {
RegistersForDebugging::save_registers(this);
@@ -1488,11 +1467,7 @@ void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresul
void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
-#ifdef _LP64
add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
-#else
- add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
-#endif
bclr(1, Rresult);
sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}
@@ -1531,22 +1506,12 @@ void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a,
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
assert_not_delayed();
-#ifdef _LP64
bpr( rc_z, a, p, s1, L );
-#else
- tst(s1);
- br ( zero, a, p, L );
-#endif
}
void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
assert_not_delayed();
-#ifdef _LP64
bpr( rc_nz, a, p, s1, L );
-#else
- tst(s1);
- br ( notZero, a, p, L );
-#endif
}
// Compare registers and branch with nop in delay slot or cbcond without delay slot.
@@ -1862,14 +1827,12 @@ void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
bind( done );
}
-#ifdef _LP64
void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
cmp(Ra, Rb);
mov(-1, Rresult);
movcc(equal, false, xcc, 0, Rresult);
movcc(greater, false, xcc, 1, Rresult);
}
-#endif
void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
@@ -2668,9 +2631,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// if compare/exchange succeeded we found an unlocked object and we now have locked it
// hence we are done
cmp(Rmark, Rscratch);
-#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
-#endif
brx(Assembler::equal, false, Assembler::pt, done);
delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
@@ -2716,9 +2677,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// Stack-lock attempt failed - check for recursive stack-lock.
// See the comments below about how we might remove this case.
-#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
-#endif
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
andcc(Rscratch, 0xfffff003, Rscratch);
br(Assembler::always, false, Assembler::pt, done);
@@ -2800,9 +2759,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// control to the "slow" operators in synchronizer.cpp.
// RScratch contains the fetched obj->mark value from the failed CAS.
-#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
-#endif
sub(Rscratch, SP, Rscratch);
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
andcc(Rscratch, 0xfffff003, Rscratch);
@@ -3720,11 +3677,7 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
Label not_already_dirty, restart, refill, young_card;
-#ifdef _LP64
__ srlx(O0, CardTableModRefBS::card_shift, O0);
-#else
- __ srl(O0, CardTableModRefBS::card_shift, O0);
-#endif
AddressLiteral addrlit(byte_map_base);
__ set(addrlit, O1); // O1 := <card table>

__ ldub(O0, O1, O2); // O2 := [O0 + O1]
@@ -3826,11 +3779,7 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
if (G1RSBarrierRegionFilter) {
xor3(store_addr, new_val, tmp);
-#ifdef _LP64
srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
-#else
- srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
-#endif
// XXX Should I predict this taken or not? Does it matter?
cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
@@ -4665,7 +4614,7 @@ void MacroAssembler::has_negatives(Register inp, Register size, Register result,
// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
- assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
+ assert(UseBlockZeroing && VM_Version::has_blk_zeroing(), "only works with BIS zeroing");
Register end = count;
int cache_line_size = VM_Version::prefetch_data_size();
assert(cache_line_size > 0, "cache line size should be known for this code");
diff --git a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp
index a401859e774..1bcbc739c3d 100644
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -156,7 +156,6 @@ REGISTER_DECLARATION(Register, O5_savedSP , O5);
REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
// a copy SP, so in 64-bit it's a biased value. The bias
// is added and removed as needed in the frame code.
-REGISTER_DECLARATION(Register, IdispatchTables , I4); // Base address of the bytecode dispatch tables
REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
REGISTER_DECLARATION(Register, ImethodDataPtr , I2); // Pointer to the current method data
@@ -228,7 +227,6 @@ REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is comi
#define O5_savedSP AS_REGISTER(Register, O5_savedSP)
#define IdispatchAddress AS_REGISTER(Register, IdispatchAddress)
#define ImethodDataPtr AS_REGISTER(Register, ImethodDataPtr)
-#define IdispatchTables AS_REGISTER(Register, IdispatchTables)
#define Oexception AS_REGISTER(Register, Oexception)
#define Oissuing_pc AS_REGISTER(Register, Oissuing_pc)
@@ -333,14 +331,12 @@ class AddressLiteral VALUE_OBJ_CLASS_SPEC {
return external_word_Relocation::spec(addr);
case relocInfo::internal_word_type:
return internal_word_Relocation::spec(addr);
-#ifdef _LP64
case relocInfo::opt_virtual_call_type:
return opt_virtual_call_Relocation::spec();
case relocInfo::static_call_type:
return static_call_Relocation::spec();
case relocInfo::runtime_call_type:
return runtime_call_Relocation::spec();
-#endif
case relocInfo::none:
return RelocationHolder();
default:
@@ -396,12 +392,10 @@ class AddressLiteral VALUE_OBJ_CLASS_SPEC {
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
-#ifdef _LP64
// 32-bit complains about a multiple declaration for int*.
AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
-#endif
AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
@@ -464,16 +458,10 @@ class Argument VALUE_OBJ_CLASS_SPEC {
bool _is_in;
public:
-#ifdef _LP64
enum {
n_register_parameters = 6, // only 6 registers may contain integer parameters
n_float_register_parameters = 16 // Can have up to 16 floating registers
};
-#else
- enum {
- n_register_parameters = 6 // only 6 registers may contain integer parameters
- };
-#endif
// creation
Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
@@ -489,7 +477,6 @@ class Argument VALUE_OBJ_CLASS_SPEC {
// locating register-based arguments:
bool is_register() const { return _number < n_register_parameters; }
-#ifdef _LP64
// locating Floating Point register-based arguments:
bool is_float_register() const { return _number < n_float_register_parameters; }
@@ -501,7 +488,6 @@ class Argument VALUE_OBJ_CLASS_SPEC {
assert(is_float_register(), "must be a register argument");
return as_FloatRegister(( number() *2 ));
}
-#endif
Register as_register() const {
assert(is_register(), "must be a register argument");
@@ -604,15 +590,15 @@ class MacroAssembler : public Assembler {
bool check_exception=true // flag which indicates if exception should be checked
);
+ public:
+ MacroAssembler(CodeBuffer* code) : Assembler(code) {}
+
// This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles and ForceEarlyReturn PopFrame requests.
virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg);
- public:
- MacroAssembler(CodeBuffer* code) : Assembler(code) {}
-
// Support for NULL-checks
//
// Generates code that causes a NULL OS exception if the content of reg is NULL.
@@ -676,9 +662,6 @@ class MacroAssembler : public Assembler {
inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
- // get PC the best way
- inline int get_pc( Register d );
-
// Sparc shorthands(pp 85, V8 manual, pp 289 V9 manual)
inline void cmp( Register s1, Register s2 );
inline void cmp( Register s1, int simm13a );
@@ -1144,7 +1127,7 @@ public:
void stop(const char* msg); // prints msg, dumps registers and stops execution
void warn(const char* msg); // prints msg, but don't stop
void untested(const char* what = "");
- void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
+ void unimplemented(const char* what = "");
void should_not_reach_here() { stop("should not reach here"); }
void print_CPU_state();
@@ -1217,9 +1200,7 @@ public:
void lushr( Register Rin_high, Register Rin_low, Register Rcount,
Register Rout_high, Register Rout_low, Register Rtemp );
-#ifdef _LP64
void lcmp( Register Ra, Register Rb, Register Rresult);
-#endif
// Load and store values by size and signed-ness
void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
@@ -1412,7 +1393,7 @@ public:
void movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2);
void movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2);
- // CRC32 code for java.util.zip.CRC32::updateBytes0() instrinsic.
+ // CRC32 code for java.util.zip.CRC32::updateBytes0() intrinsic.
void kernel_crc32(Register crc, Register buf, Register len, Register table);
// Fold 128-bit data chunk
void fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset);
@@ -1420,7 +1401,7 @@ public:
// Fold 8-bit data
void fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp);
void fold_8bit_crc32(Register crc, Register table, Register tmp);
- // CRC32C code for java.util.zip.CRC32C::updateBytes/updateDirectByteBuffer instrinsic.
+ // CRC32C code for java.util.zip.CRC32C::updateBytes/updateDirectByteBuffer intrinsic.
void kernel_crc32c(Register crc, Register buf, Register len, Register table);
};
diff --git a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
index 2f1c949bb7f..679bbd30c29 100644
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,19 +45,11 @@ inline void MacroAssembler::pd_patch_instruction(address branch, address target)
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
Assembler::ldx(s1, s2, d);
-#else
- ld( s1, s2, d);
-#endif
}
inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
-#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
-#else
- ld( s1, simm13a, d);
-#endif
}
#ifdef ASSERT
@@ -68,35 +60,19 @@ inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d )
#endif
inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
-#ifdef _LP64
ldx(s1, s2, d);
-#else
- ld( s1, s2, d);
-#endif
}
inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
-#ifdef _LP64
ldx(a, d, offset);
-#else
- ld( a, d, offset);
-#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
-#ifdef _LP64
Assembler::stx(d, s1, s2);
-#else
- st( d, s1, s2);
-#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
-#ifdef _LP64
Assembler::stx(d, s1, simm13a);
-#else
- st( d, s1, simm13a);
-#endif
}
#ifdef ASSERT
@@ -107,84 +83,44 @@ inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a )
#endif
inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
-#ifdef _LP64
stx(d, s1, s2);
-#else
- st( d, s1, s2);
-#endif
}
inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
-#ifdef _LP64
stx(d, a, offset);
-#else
- st( d, a, offset);
-#endif
}
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
-#ifdef _LP64
Assembler::ldx(s1, s2, d);
-#else
- Assembler::ldd(s1, s2, d);
-#endif
}
inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
-#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
-#else
- Assembler::ldd(s1, simm13a, d);
-#endif
}
inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
-#ifdef _LP64
ldx(s1, s2, d);
-#else
- ldd(s1, s2, d);
-#endif
}
inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
-#ifdef _LP64
ldx(a, d, offset);
-#else
- ldd(a, d, offset);
-#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
-#ifdef _LP64
Assembler::stx(d, s1, s2);
-#else
- Assembler::std(d, s1, s2);
-#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
-#ifdef _LP64
Assembler::stx(d, s1, simm13a);
-#else
- Assembler::std(d, s1, simm13a);
-#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
-#ifdef _LP64
stx(d, s1, s2);
-#else
- std(d, s1, s2);
-#endif
}
inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
-#ifdef _LP64
stx(d, a, offset);
-#else
- std(d, a, offset);
-#endif
}
inline void MacroAssembler::stbool(Register d, const Address& a) { stb(d, a); }
@@ -207,45 +143,25 @@ inline void MacroAssembler::casx( Register s1, Register s2, Register d) { casxa(
// Functions for isolating 64 bit atomic swaps for LP64
// cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
inline void MacroAssembler::cas_ptr( Register s1, Register s2, Register d) {
-#ifdef _LP64
casx( s1, s2, d );
-#else
- cas( s1, s2, d );
-#endif
}
// Functions for isolating 64 bit shifts for LP64
inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
Assembler::sllx(s1, s2, d);
-#else
- Assembler::sll( s1, s2, d);
-#endif
}
inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
-#ifdef _LP64
Assembler::sllx(s1, imm6a, d);
-#else
- Assembler::sll( s1, imm6a, d);
-#endif
}
inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
Assembler::srlx(s1, s2, d);
-#else
- Assembler::srl( s1, s2, d);
-#endif
}
inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
-#ifdef _LP64
Assembler::srlx(s1, imm6a, d);
-#else
- Assembler::srl( s1, imm6a, d);
-#endif
}
inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
@@ -269,7 +185,8 @@ inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, reloc
}
inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
- insert_nop_after_cbcond();
+ // See note[+] on 'avoid_pipeline_stalls()', in "assembler_sparc.inline.hpp".
+ avoid_pipeline_stall();
br(c, a, p, target(L));
}
@@ -277,15 +194,11 @@ inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-#ifdef _LP64
Assembler::bp(c, a, xcc, p, d, rt);
-#else
- MacroAssembler::br(c, a, p, d, rt);
-#endif
}
inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
- insert_nop_after_cbcond();
+ avoid_pipeline_stall();
brx(c, a, p, target(L));
}
@@ -307,7 +220,7 @@ inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, reloc
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
- insert_nop_after_cbcond();
+ avoid_pipeline_stall();
fb(c, a, p, target(L));
}
@@ -338,7 +251,6 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
}
inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
-#ifdef _LP64
intptr_t disp;
// NULL is ok because it will be relocated later.
// Must change NULL to a reachable address in order to
@@ -355,18 +267,14 @@ inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
} else {
Assembler::call(d, rspec);
}
-#else
- Assembler::call( d, rspec );
-#endif
}
-inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
- insert_nop_after_cbcond();
- MacroAssembler::call( target(L), rt);
+inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
+ avoid_pipeline_stall();
+ MacroAssembler::call(target(L), rt);
}
-
inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
@@ -396,13 +304,6 @@ inline void MacroAssembler::retl( bool trace ) {
}
}
-// clobbers o7 on V8!!
-// returns delta from gotten pc to addr after
-inline int MacroAssembler::get_pc( Register d ) {
- int x = offset();
- rdpc(d);
- return offset() - x;
-}
inline void MacroAssembler::cmp( Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
inline void MacroAssembler::cmp( Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }
@@ -414,12 +315,7 @@ inline void MacroAssembler::cmp( Register s1, int simm13a ) { subcc( s1, simm13
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
-#ifdef _LP64
Unimplemented();
-#else
- Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
- add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
-#endif
return thepc;
}
@@ -554,7 +450,6 @@ inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
}
-#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register())
// V9 ABI has F1, F3, F5 are used to pass instead of O0, O1, O2
@@ -579,7 +474,6 @@ inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
else
stx(s, a.as_address());
}
-#endif
inline void MacroAssembler::round_to( Register r, int modulus ) {
assert_not_delayed();
@@ -640,22 +534,13 @@ inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm1
inline void MacroAssembler::clruw( Register s, Register d ) { srl( s, G0, d); }
inline void MacroAssembler::clruwu( Register d ) { srl( d, G0, d); }
-#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
-#else
-inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
-inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
-#endif
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
-# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
-# else
-inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
-# endif
#endif
inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
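
Editor's note: the inline helpers above keep only the 64-bit flavours, and MacroAssembler::ld now always goes through ldsw so a 32-bit load is sign-extended into the full register (the removed 32-bit path used lduw, which zero-extends). In C++ terms the two behaviours differ as in this sketch (illustrative, not the assembler):

    #include <cassert>
    #include <cstdint>

    int64_t  ldsw_like(uint32_t word) { return (int64_t)(int32_t)word; }  // sign-extend
    uint64_t lduw_like(uint32_t word) { return (uint64_t)word; }          // zero-extend

    int main() {
      uint32_t minus_one = 0xffffffffu;
      assert(ldsw_like(minus_one) == -1);             // proper sign kept in the 64-bit register
      assert(lduw_like(minus_one) == 0xffffffffULL);  // high bits cleared instead
      return 0;
    }
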
diff --git a/hotspot/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp b/hotspot/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp
index adacfda4ae6..e442d8f9c41 100644
--- a/hotspot/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp
@@ -26,6 +26,7 @@
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "runtime/prefetch.inline.hpp"
+#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@@ -86,7 +87,7 @@ void memset_with_concurrent_readers(void* to, int value, size_t size) {
void* end = static_cast<char*>(to) + size;
if (size >= (size_t)BytesPerWord) {
// Fill any partial word prefix.
- uintx* aligned_to = static_cast<uintx*>(align_ptr_up(to, BytesPerWord));
+ uintx* aligned_to = static_cast<uintx*>(align_up(to, BytesPerWord));
fill_subword(to, aligned_to, value);
// Compute fill word.
@@ -97,7 +98,7 @@ void memset_with_concurrent_readers(void* to, int value, size_t size) {
xvalue |= (xvalue << 16);
xvalue |= (xvalue << 32);
- uintx* aligned_end = static_cast<uintx*>(align_ptr_down(end, BytesPerWord));
+ uintx* aligned_end = static_cast<uintx*>(align_down(end, BytesPerWord));
assert(aligned_to <= aligned_end, "invariant");
// for ( ; aligned_to < aligned_end; ++aligned_to) {
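
Editor's note: the fill word used by memset_with_concurrent_readers is the low byte of 'value' replicated across the whole 64-bit word by doubling shifts; the masking and the << 8 step presumably sit just above the context shown here, which is why only the << 16 and << 32 lines appear. A sketch of the replication:

    #include <cassert>
    #include <cstdint>

    // Replicate the low byte of 'value' into every byte of a 64-bit word
    // via doubling shifts, as in the fill-word computation above.
    uint64_t fill_word(int value) {
      uint64_t x = (uint64_t)(value & 0xff);
      x |= (x << 8);
      x |= (x << 16);
      x |= (x << 32);
      return x;
    }

    int main() {
      assert(fill_word(0xA5) == 0xA5A5A5A5A5A5A5A5ULL);
      assert(fill_word(0x00) == 0);
      return 0;
    }
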
diff --git a/hotspot/src/cpu/sparc/vm/metaspaceShared_sparc.cpp b/hotspot/src/cpu/sparc/vm/metaspaceShared_sparc.cpp
deleted file mode 100644
index cc0141c283e..00000000000
--- a/hotspot/src/cpu/sparc/vm/metaspaceShared_sparc.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "asm/codeBuffer.hpp"
-#include "memory/metaspaceShared.hpp"
-
-// Generate the self-patching vtable method:
-//
-// This method will be called (as any other Klass virtual method) with
-// the Klass itself as the first argument. Example:
-//
-// oop obj;
-// int size = obj->klass()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-
-//=====================================================================
-
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no relationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-
-#define __ masm->
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
- void** vtable,
- char** md_top,
- char* md_end,
- char** mc_top,
- char* mc_end) {
-
- intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
- *(intptr_t *)(*md_top) = vtable_bytes;
- *md_top += sizeof(intptr_t);
- void** dummy_vtable = (void**)*md_top;
- *vtable = dummy_vtable;
- *md_top += vtable_bytes;
-
- // Get ready to generate dummy methods.
-
- CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
- MacroAssembler* masm = new MacroAssembler(&cb);
-
- Label common_code;
- for (int i = 0; i < vtbl_list_size; ++i) {
- for (int j = 0; j < num_virtuals; ++j) {
- dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
- __ save(SP, -256, SP);
- int offset = (i << 8) + j;
- Register src = G0;
- if (!Assembler::is_simm13(offset)) {
- __ sethi(offset, L0);
- src = L0;
- offset = offset & ((1 << 10) - 1);
- }
- __ brx(Assembler::always, false, Assembler::pt, common_code);
-
- // Load L0 with a value indicating vtable/offset pair.
- // -- bits[ 7..0] (8 bits) which virtual method in table?
- // -- bits[13..8] (6 bits) which virtual method table?
- __ delayed()->or3(src, offset, L0);
- }
- }
-
- __ bind(common_code);
-
- // Expecting to be called with the "this" pointer in O0/I0 (where
- // "this" is a Klass object). In addition, L0 was set (above) to
- // identify the method and table.
-
- // Look up the correct vtable pointer.
-
- __ set((intptr_t)vtbl_list, L2); // L2 = address of new vtable list.
- __ srl(L0, 8, L3); // Isolate L3 = vtable identifier.
- __ sll(L3, LogBytesPerWord, L3);
- __ ld_ptr(L2, L3, L3); // L3 = new (correct) vtable pointer.
- __ st_ptr(L3, Address(I0, 0)); // Save correct vtable ptr in entry.
-
- // Restore registers and jump to the correct method;
-
- __ and3(L0, 255, L4); // Isolate L3 = method offset;.
- __ sll(L4, LogBytesPerWord, L4);
- __ ld_ptr(L3, L4, L4); // Get address of correct virtual method
- __ jmpl(L4, 0, G0); // Jump to correct method.
- __ delayed()->restore(); // Restore registers.
-
- __ flush();
- *mc_top = (char*)__ pc();
-
- guarantee(*mc_top <= mc_end, "Insufficient space for method wrappers.");
-}
diff --git a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp
index f93dbf17212..37ac1024f4e 100644
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "interpreter/interp_masm.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "prims/methodHandles.hpp"
#define __ _masm->
@@ -71,7 +72,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register temp_reg, Register temp2_reg,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
- KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+ Klass* klass = SystemDictionary::well_known_klass(klass_id);
bool did_save = false;
if (temp_reg == noreg || temp2_reg == noreg) {
temp_reg = L1;
@@ -181,8 +182,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), method_temp);
__ verify_oop(method_temp);
- // the following assumes that a Method* is normally compressed in the vmtarget field:
- __ ld_ptr( Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())), method_temp);
+ __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), method_temp);
+ __ verify_oop(method_temp);
+ __ ld_ptr( Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())), method_temp);
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
@@ -332,7 +334,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
- Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
+ Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
+ Address vmtarget_method( G5_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
@@ -384,14 +387,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
}
- __ ld_ptr(member_vmtarget, G5_method);
+ __ load_heap_oop(member_vmtarget, G5_method);
+ __ ld_ptr(vmtarget_method, G5_method);
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
}
- __ ld_ptr(member_vmtarget, G5_method);
+ __ load_heap_oop(member_vmtarget, G5_method);
+ __ ld_ptr(vmtarget_method, G5_method);
break;
case vmIntrinsics::_linkToVirtual:
diff --git a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp
index d58e7d75cd0..6638a194dec 100644
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
// Adapters
enum /* platform_dependent_constants */ {
- adapter_code_size = NOT_LP64(23000 DEBUG_ONLY(+ 40000)) LP64_ONLY(35000 DEBUG_ONLY(+ 50000))
+ adapter_code_size = 35000 DEBUG_ONLY(+ 50000)
};
// Additional helper methods for MethodHandles code generation:
diff --git a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp
index 103c712cee4..950619dacb4 100644
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -236,8 +236,6 @@ void NativeCall::test() {
//-------------------------------------------------------------------
-#ifdef _LP64
-
void NativeFarCall::set_destination(address dest) {
// Address materialized in the instruction stream, so nothing to do.
return;
@@ -290,8 +288,6 @@ void NativeFarCall::test() {
}
// End code for unit testing implementation of NativeFarCall class
-#endif // _LP64
-
//-------------------------------------------------------------------
@@ -304,18 +300,9 @@ void NativeMovConstReg::verify() {
// verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
Register rd = inv_rd(i0);
-#ifndef _LP64
- if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
- is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
- inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
- rd == inv_rs1(i1) && rd == inv_rd(i1))) {
- fatal("not a set_metadata");
- }
-#else
if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
fatal("not a set_metadata");
}
-#endif
}
@@ -324,23 +311,13 @@ void NativeMovConstReg::print() {
}
-#ifdef _LP64
intptr_t NativeMovConstReg::data() const {
return data64(addr_at(sethi_offset), long_at(add_offset));
}
-#else
-intptr_t NativeMovConstReg::data() const {
- return data32(long_at(sethi_offset), long_at(add_offset));
-}
-#endif
void NativeMovConstReg::set_data(intptr_t x) {
-#ifdef _LP64
set_data64_sethi(addr_at(sethi_offset), x);
-#else
- set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
-#endif
set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any
@@ -508,20 +485,12 @@ void NativeMovConstRegPatching::print() {
int NativeMovConstRegPatching::data() const {
-#ifdef _LP64
return data64(addr_at(sethi_offset), long_at(add_offset));
-#else
- return data32(long_at(sethi_offset), long_at(add_offset));
-#endif
}
void NativeMovConstRegPatching::set_data(int x) {
-#ifdef _LP64
set_data64_sethi(addr_at(sethi_offset), x);
-#else
- set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
-#endif
set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any
@@ -758,21 +727,12 @@ void NativeJump::verify() {
assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
// verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
Register rd = inv_rd(i0);
-#ifndef _LP64
- if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
- (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op)) &&
- inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
- rd == inv_rs1(i1))) {
- fatal("not a jump_to instruction");
- }
-#else
// In LP64, the jump instruction location varies for non relocatable
// jumps, for example is could be sethi, xor, jmp instead of the
// 7 instructions for sethi. So let's check sethi only.
if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
fatal("not a jump_to instruction");
}
-#endif
}
diff --git a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp
index d0582f34265..c2a27394a75 100644
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,11 +67,8 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
bool is_illegal();
bool is_zombie() {
int x = long_at(0);
- return is_op3(x,
- Assembler::ldsw_op3,
- Assembler::ldst_op)
- && Assembler::inv_rs1(x) == G0
- && Assembler::inv_rd(x) == O7;
+ return (is_op3(x, Assembler::ldsw_op3, Assembler::ldst_op) &&
+ inv_rs1(x) == G0 && inv_rd(x) == O7);
}
bool is_ic_miss_trap(); // Inline-cache uses a trap to detect a miss
bool is_return() {
@@ -121,11 +118,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
bool is_safepoint_poll() {
int x = long_at(0);
-#ifdef _LP64
return is_op3(x, Assembler::ldx_op3, Assembler::ldst_op) &&
-#else
- return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
-#endif
(inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
}
@@ -133,29 +126,11 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
bool is_load_store_with_small_offset(Register reg);
public:
-#ifdef ASSERT
- static int rdpc_instruction() { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | Assembler::u_field(5, 18, 14) | Assembler::rd(O7); }
-#else
- // Temporary fix: in optimized mode, u_field is a macro for efficiency reasons (see Assembler::u_field) - needs to be fixed
- static int rdpc_instruction() { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | u_field(5, 18, 14) | Assembler::rd(O7); }
-#endif
static int nop_instruction() { return Assembler::op(Assembler::branch_op) | Assembler::op2(Assembler::sethi_op2); }
static int illegal_instruction(); // the output of __ breakpoint_trap()
static int call_instruction(address destination, address pc) { return Assembler::op(Assembler::call_op) | Assembler::wdisp((intptr_t)destination, (intptr_t)pc, 30); }
- static int branch_instruction(Assembler::op2s op2val, Assembler::Condition c, bool a) {
- return Assembler::op(Assembler::branch_op) | Assembler::op2(op2val) | Assembler::annul(a) | Assembler::cond(c);
- }
-
- static int op3_instruction(Assembler::ops opval, Register rd, Assembler::op3s op3val, Register rs1, int simm13a) {
- return Assembler::op(opval) | Assembler::rd(rd) | Assembler::op3(op3val) | Assembler::rs1(rs1) | Assembler::immed(true) | Assembler::simm(simm13a, 13);
- }
-
- static int sethi_instruction(Register rd, int imm22a) {
- return Assembler::op(Assembler::branch_op) | Assembler::rd(rd) | Assembler::op2(Assembler::sethi_op2) | Assembler::hi22(imm22a);
- }
-
- protected:
+protected:
address addr_at(int offset) const { return address(this) + offset; }
int long_at(int offset) const { return *(int*)addr_at(offset); }
void set_long_at(int offset, int i); /* deals with I-cache */
@@ -432,22 +407,6 @@ class NativeCallReg: public NativeInstruction {
// instructions in the sparcv9 vm. Used to call native methods which may be loaded
// anywhere in the address space, possibly out of reach of a call instruction.
-#ifndef _LP64
-
-// On 32-bit systems, a far call is the same as a near one.
-class NativeFarCall;
-inline NativeFarCall* nativeFarCall_at(address instr);
-class NativeFarCall : public NativeCall {
-public:
- friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
- friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
- { return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
- friend NativeFarCall* nativeFarCall_before(address return_address)
- { return (NativeFarCall*)nativeCall_before(return_address); }
-};
-
-#else
-
// The format of this extended-range call is:
// jumpl_to addr, lreg
// == sethi %hi54(addr), O7 ; jumpl O7, %lo10(addr), O7 ;
@@ -515,7 +474,6 @@ class NativeFarCall: public NativeInstruction {
static void replace_mt_safe(address instr_addr, address code_buffer);
};
-#endif // _LP64
// An interface for accessing/manipulating 32 bit native set_metadata imm, reg instructions
// (used to manipulate inlined data references, etc.)
@@ -567,13 +525,8 @@ class NativeMovConstReg: public NativeInstruction {
public:
enum Sparc_specific_constants {
sethi_offset = 0,
-#ifdef _LP64
add_offset = 7 * BytesPerInstWord,
instruction_size = 8 * BytesPerInstWord
-#else
- add_offset = 4,
- instruction_size = 8
-#endif
};
address instruction_address() const { return addr_at(0); }
@@ -626,11 +579,7 @@ inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
public:
enum Sparc_specific_constants {
sethi_offset = 0,
-#ifdef _LP64
nop_offset = 7 * BytesPerInstWord,
-#else
- nop_offset = sethi_offset + BytesPerInstWord,
-#endif
add_offset = nop_offset + BytesPerInstWord,
instruction_size = add_offset + BytesPerInstWord
};
@@ -705,11 +654,7 @@ class NativeMovRegMem: public NativeInstruction {
offset_width = 13,
sethi_offset = 0,
-#ifdef _LP64
add_offset = 7 * BytesPerInstWord,
-#else
- add_offset = 4,
-#endif
ldst_offset = add_offset + BytesPerInstWord
};
bool is_immediate() const {
@@ -720,11 +665,7 @@ class NativeMovRegMem: public NativeInstruction {
address instruction_address() const { return addr_at(0); }
address next_instruction_address() const {
-#ifdef _LP64
return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
-#else
- return addr_at(is_immediate() ? 4 : 12);
-#endif
}
intptr_t offset() const {
return is_immediate()? inv_simm(long_at(0), offset_width) :
@@ -777,19 +718,13 @@ class NativeJump: public NativeInstruction {
public:
enum Sparc_specific_constants {
sethi_offset = 0,
-#ifdef _LP64
jmpl_offset = 7 * BytesPerInstWord,
instruction_size = 9 * BytesPerInstWord // includes delay slot
-#else
- jmpl_offset = 1 * BytesPerInstWord,
- instruction_size = 3 * BytesPerInstWord // includes delay slot
-#endif
};
address instruction_address() const { return addr_at(0); }
address next_instruction_address() const { return addr_at(instruction_size); }
-#ifdef _LP64
address jump_destination() const {
return (address) data64(instruction_address(), long_at(jmpl_offset));
}
@@ -797,15 +732,6 @@ class NativeJump: public NativeInstruction {
set_data64_sethi( instruction_address(), (intptr_t)dest);
set_long_at(jmpl_offset, set_data32_simm13( long_at(jmpl_offset), (intptr_t)dest));
}
-#else
- address jump_destination() const {
- return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
- }
- void set_jump_destination(address dest) {
- set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), (intptr_t)dest));
- set_long_at(jmpl_offset, set_data32_simm13( long_at(jmpl_offset), (intptr_t)dest));
- }
-#endif
// Creation
friend inline NativeJump* nativeJump_at(address address) {
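
The sethi/add and sethi/jmpl pairs declared above carry the low part of an address in the instruction's signed 13-bit immediate field; set_data32_simm13 re-patches that field when a constant or jump target changes. A rough sketch of the idea, with an illustrative helper name and no claim to match the real HotSpot routine:

#include <cstdint>

// Splice the low 10 bits of a value (%lo10) into the simm13 field (bits 12..0)
// of an add/jmpl word, leaving the opcode and register fields untouched.
static inline uint32_t patch_low_immediate(uint32_t insn, uint64_t value) {
  const uint32_t lo10 = (uint32_t)(value & 0x3ff);
  return (insn & ~0x1fffu) | lo10;
}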
diff --git a/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp b/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp
index 8fd22e24670..98cfb72c405 100644
--- a/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp
@@ -166,7 +166,6 @@ REGISTER_DEFINITION(Register, I5_savedSP);
REGISTER_DEFINITION(Register, O5_savedSP);
REGISTER_DEFINITION(Register, IdispatchAddress);
REGISTER_DEFINITION(Register, ImethodDataPtr);
-REGISTER_DEFINITION(Register, IdispatchTables);
REGISTER_DEFINITION(Register, Lmethod);
REGISTER_DEFINITION(Register, Llocals);
REGISTER_DEFINITION(Register, Oexception);
diff --git a/hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp b/hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp
index 9c70bd0ef4d..f75588c8538 100644
--- a/hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -93,7 +93,6 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
case Assembler::branch_op:
{
-#ifdef _LP64
jint inst2;
guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
if (format() != 0) {
@@ -121,17 +120,6 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
} else {
ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
}
-#else
- guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
- inst &= ~Assembler::hi22( -1);
- inst |= Assembler::hi22((intptr_t)x);
- // (ignore offset; it doesn't play into the sethi)
- if (verify_only) {
- guarantee(ip->long_at(0) == inst, "instructions must match");
- } else {
- ip->set_long_at(0, inst);
- }
-#endif
}
break;
diff --git a/hotspot/src/cpu/sparc/vm/relocInfo_sparc.hpp b/hotspot/src/cpu/sparc/vm/relocInfo_sparc.hpp
index a2d3b2c3928..26daae3757b 100644
--- a/hotspot/src/cpu/sparc/vm/relocInfo_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/relocInfo_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,12 +34,8 @@
// There is no need for format bits; the instructions are
// sufficiently self-identifying.
-#ifndef _LP64
- format_width = 0
-#else
// Except narrow oops in 64-bits VM.
format_width = 1
-#endif
};
diff --git a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
index 613e662d65c..4f217e33ac6 100644
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
@@ -33,6 +33,7 @@
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@@ -77,7 +78,7 @@ class RegisterSaver {
call_args_area = frame::register_save_words_sp_offset +
frame::memory_parameter_word_sp_offset*wordSize,
// Make sure save locations are always 8 byte aligned.
- // can't use round_to because it doesn't produce compile time constant
+ // can't use align_up because it doesn't produce compile time constant
start_of_extra_save_area = ((call_args_area + 7) & ~7),
g1_offset = start_of_extra_save_area, // g-regs needing saving
g3_offset = g1_offset+8,
@@ -119,7 +120,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
// (as the stub's I's) when the runtime routine called by the stub creates its frame.
int i;
// Always make the frame size 16 byte aligned.
- int frame_size = round_to(additional_frame_words + register_save_size, 16);
+ int frame_size = align_up(additional_frame_words + register_save_size, 16);
// OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
int frame_size_in_slots = frame_size / sizeof(jint);
// CodeBlob frame size is in words.
@@ -127,56 +128,10 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
// OopMap* map = new OopMap(*total_frame_words, 0);
OopMap* map = new OopMap(frame_size_in_slots, 0);
-#if !defined(_LP64)
-
- // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
- __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
- __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
- __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
- __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
- __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
- __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
-#endif /* _LP64 */
-
__ save(SP, -frame_size, SP);
-#ifndef _LP64
- // Reload the 64 bit Oregs. Although they are now Iregs we load them
- // to Oregs here to avoid interrupts cutting off their heads
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
-
- __ stx(O0, SP, o0_offset+STACK_BIAS);
- map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());
-
- __ stx(O1, SP, o1_offset+STACK_BIAS);
-
- map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());
-
- __ stx(O2, SP, o2_offset+STACK_BIAS);
- map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());
-
- __ stx(O3, SP, o3_offset+STACK_BIAS);
- map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());
-
- __ stx(O4, SP, o4_offset+STACK_BIAS);
- map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());
-
- __ stx(O5, SP, o5_offset+STACK_BIAS);
- map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
-#endif /* _LP64 */
-
-
-#ifdef _LP64
int debug_offset = 0;
-#else
- int debug_offset = 4;
-#endif
// Save the G's
__ stx(G1, SP, g1_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
@@ -192,18 +147,6 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
// This is really a waste but we'll keep things as they were for now
if (true) {
-#ifndef _LP64
- map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
- map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
- map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
- map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
- map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
- map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
- map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
- map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
- map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
- map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
-#endif /* _LP64 */
}
@@ -250,70 +193,22 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
__ ldx(SP, g4_offset+STACK_BIAS, G4);
__ ldx(SP, g5_offset+STACK_BIAS, G5);
-
-#if !defined(_LP64)
- // Restore the 64-bit O's.
- __ ldx(SP, o0_offset+STACK_BIAS, O0);
- __ ldx(SP, o1_offset+STACK_BIAS, O1);
- __ ldx(SP, o2_offset+STACK_BIAS, O2);
- __ ldx(SP, o3_offset+STACK_BIAS, O3);
- __ ldx(SP, o4_offset+STACK_BIAS, O4);
- __ ldx(SP, o5_offset+STACK_BIAS, O5);
-
- // And temporarily place them in TLS
-
- __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
- __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
- __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
- __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
- __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
- __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
-#endif /* _LP64 */
-
// Restore flags
__ ldxfsr(SP, fsr_offset+STACK_BIAS);
__ restore();
-#if !defined(_LP64)
- // Now reload the 64bit Oregs after we've restore the window.
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
-#endif /* _LP64 */
-
}
// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
-#if !defined(_LP64)
- // 32bit build returns longs in G1
- __ ldx(SP, g1_offset+STACK_BIAS, G1);
-
- // Retrieve the 64-bit O's.
- __ ldx(SP, o0_offset+STACK_BIAS, O0);
- __ ldx(SP, o1_offset+STACK_BIAS, O1);
- // and save to TLS
- __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
- __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
-#endif /* _LP64 */
-
__ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
__ restore();
-#if !defined(_LP64)
- // Now reload the 64bit Oregs after we've restore the window.
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
- __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
-#endif /* _LP64 */
-
}
// Is vector's size (in bytes) bigger than a size saved by default?
@@ -410,11 +305,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
case T_CHAR:
case T_BYTE:
case T_BOOLEAN:
-#ifndef _LP64
- case T_OBJECT:
- case T_ARRAY:
- case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
-#endif // _LP64
if (int_reg < int_reg_max) {
Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
regs[i].set1(r->as_VMReg());
@@ -423,7 +313,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
break;
-#ifdef _LP64
case T_LONG:
assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
// fall-through
@@ -434,21 +323,11 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
regs[i].set2(r->as_VMReg());
} else {
- slot = round_to(slot, 2); // align
+ slot = align_up(slot, 2); // align
regs[i].set2(VMRegImpl::stack2reg(slot));
slot += 2;
}
break;
-#else
- case T_LONG:
- assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
- // On 32-bit SPARC put longs always on the stack to keep the pressure off
- // integer argument registers. They should be used for oops.
- slot = round_to(slot, 2); // align
- regs[i].set2(VMRegImpl::stack2reg(slot));
- slot += 2;
-#endif
- break;
case T_FLOAT:
if (flt_reg < flt_reg_max) {
@@ -461,13 +340,13 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
case T_DOUBLE:
assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
- if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
- flt_reg = round_to(flt_reg, 2); // align
+ if (align_up(flt_reg, 2) + 1 < flt_reg_max) {
+ flt_reg = align_up(flt_reg, 2); // align
FloatRegister r = as_FloatRegister(flt_reg);
regs[i].set2(r->as_VMReg());
flt_reg += 2;
} else {
- slot = round_to(slot, 2); // align
+ slot = align_up(slot, 2); // align
regs[i].set2(VMRegImpl::stack2reg(slot));
slot += 2;
}
@@ -554,7 +433,6 @@ void AdapterGenerator::patch_callers_callsite() {
// The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
-#ifdef _LP64
// mov(s,d)
__ mov(G1, L1);
__ mov(G4, L4);
@@ -571,20 +449,6 @@ void AdapterGenerator::patch_callers_callsite() {
__ mov(L1, G1);
__ mov(L4, G4);
__ mov(L5, G5_method);
-#else
- __ stx(G1, FP, -8 + STACK_BIAS);
- __ stx(G4, FP, -16 + STACK_BIAS);
- __ mov(G5_method, L5);
- __ mov(G5_method, O0); // VM needs target method
- __ mov(I7, O1); // VM needs caller's callsite
- // Must be a leaf call...
- __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
- __ delayed()->mov(G2_thread, L7_thread_cache);
- __ mov(L7_thread_cache, G2_thread);
- __ ldx(FP, -8 + STACK_BIAS, G1);
- __ ldx(FP, -16 + STACK_BIAS, G4);
- __ mov(L5, G5_method);
-#endif /* _LP64 */
__ restore(); // Restore args
__ bind(L);
@@ -605,28 +469,9 @@ RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
const int st_off, bool is_stack) {
-#ifdef _LP64
// In V9, longs are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
__ stx(r, base, next_arg_slot(st_off));
-#else
-#ifdef COMPILER2
- // Misaligned store of 64-bit data
- __ stw(r, base, arg_slot(st_off)); // lo bits
- __ srlx(r, 32, r);
- __ stw(r, base, next_arg_slot(st_off)); // hi bits
-#else
- if (is_stack) {
- // Misaligned store of 64-bit data
- __ stw(r, base, arg_slot(st_off)); // lo bits
- __ srlx(r, 32, r);
- __ stw(r, base, next_arg_slot(st_off)); // hi bits
- } else {
- __ stw(r->successor(), base, arg_slot(st_off) ); // lo bits
- __ stw(r , base, next_arg_slot(st_off)); // hi bits
- }
-#endif // COMPILER2
-#endif // _LP64
}
void AdapterGenerator::store_c2i_object(Register r, Register base,
@@ -642,15 +487,9 @@ void AdapterGenerator::store_c2i_int(Register r, Register base,
// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
VMReg r_1, Register base, const int st_off) {
-#ifdef _LP64
// In V9, doubles are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
__ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
-#else
- // Need to marshal 64-bit value from misaligned Lesp loads
- __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
- __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
-#endif
}
void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
@@ -693,7 +532,7 @@ void AdapterGenerator::gen_c2i_adapter(
const int arg_size = total_args_passed * Interpreter::stackElementSize;
const int varargs_area =
(frame::varargs_offset - frame::register_save_words)*wordSize;
- const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
+ const int extraspace = align_up(arg_size + varargs_area, 2*wordSize);
const int bias = STACK_BIAS;
const int interp_arg_offset = frame::varargs_offset*wordSize +
@@ -915,9 +754,9 @@ void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
// in registers, we will commonly have no stack args.
if (comp_args_on_stack > 0) {
// Convert VMReg stack slots to words.
- int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+ int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
// Round up to minimum stack alignment, in wordSize
- comp_words_on_stack = round_to(comp_words_on_stack, 2);
+ comp_words_on_stack = align_up(comp_words_on_stack, 2);
// Now compute the distance from Lesp to SP. This calculation does not
// include the space for total_args_passed because Lesp has not yet popped
// the arguments.
@@ -957,22 +796,17 @@ void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
if (!r_2->is_valid()) {
__ ld(Gargs, arg_slot(ld_off), r);
} else {
-#ifdef _LP64
// In V9, longs are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
next_arg_slot(ld_off) : arg_slot(ld_off);
__ ldx(Gargs, slot, r);
-#else
- fatal("longs should be on stack");
-#endif
}
} else {
assert(r_1->is_FloatRegister(), "");
if (!r_2->is_valid()) {
__ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
} else {
-#ifdef _LP64
// In V9, doubles are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot. This code also handles longs that
// are passed on the stack, but need a stack-to-stack move through a
@@ -980,11 +814,6 @@ void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
next_arg_slot(ld_off) : arg_slot(ld_off);
__ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
-#else
- // Need to marshal 64-bit value from misaligned Lesp loads
- __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
- __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
-#endif
}
}
// Was the argument really intended to be on the stack, but was loaded
@@ -1157,7 +986,6 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// See int_stk_helper for a further discussion.
int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
-#ifdef _LP64
// V9 convention: All things "as-if" on double-wide stack slots.
// Hoist any int/ptr/long's in the first 6 to int regs.
// Hoist any flt/dbl's in the first 16 dbl regs.
@@ -1241,45 +1069,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
if (off > max_stack_slots) max_stack_slots = off;
}
}
-
-#else // _LP64
- // V8 convention: first 6 things in O-regs, rest on stack.
- // Alignment is willy-nilly.
- for (int i = 0; i < total_args_passed; i++) {
- switch (sig_bt[i]) {
- case T_ADDRESS: // raw pointers, like current thread, for VM calls
- case T_ARRAY:
- case T_BOOLEAN:
- case T_BYTE:
- case T_CHAR:
- case T_FLOAT:
- case T_INT:
- case T_OBJECT:
- case T_METADATA:
- case T_SHORT:
- regs[i].set1(int_stk_helper(i));
- break;
- case T_DOUBLE:
- case T_LONG:
- assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
- regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
- break;
- case T_VOID: regs[i].set_bad(); break;
- default:
- ShouldNotReachHere();
- }
- if (regs[i].first()->is_stack()) {
- int off = regs[i].first()->reg2stack();
- if (off > max_stack_slots) max_stack_slots = off;
- }
- if (regs[i].second()->is_stack()) {
- int off = regs[i].second()->reg2stack();
- if (off > max_stack_slots) max_stack_slots = off;
- }
- }
-#endif // _LP64
-
- return round_to(max_stack_slots + 1, 2);
+ return align_up(max_stack_slots + 1, 2);
}
@@ -1406,12 +1196,7 @@ static void object_move(MacroAssembler* masm,
Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
__ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
__ ld_ptr(rHandle, 0, L4);
-#ifdef _LP64
__ movr( Assembler::rc_z, L4, G0, rHandle );
-#else
- __ tst( L4 );
- __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
-#endif
if (dst.first()->is_stack()) {
__ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
}
@@ -1432,12 +1217,7 @@ static void object_move(MacroAssembler* masm,
}
map->set_oop(VMRegImpl::stack2reg(oop_slot));
__ add(SP, offset + STACK_BIAS, rHandle);
-#ifdef _LP64
__ movr( Assembler::rc_z, rOop, G0, rHandle );
-#else
- __ tst( rOop );
- __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
-#endif
if (dst.first()->is_stack()) {
__ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
@@ -1853,7 +1633,7 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
}
static void verify_oop_args(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = G5_method; // not part of any compiled calling seq
@@ -1877,7 +1657,7 @@ static void verify_oop_args(MacroAssembler* masm,
}
static void gen_special_dispatch(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
@@ -2068,11 +1848,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
// Check for a valid (non-zero) hash code and get its value.
-#ifdef _LP64
__ srlx(header, markOopDesc::hash_shift, hash);
-#else
- __ srl(header, markOopDesc::hash_shift, hash);
-#endif
__ andcc(hash, mask, hash);
__ br(Assembler::equal, false, Assembler::pn, slowCase);
__ delayed()->nop();
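
The identity-hash probe above shifts the mark word right by markOopDesc::hash_shift and masks it, branching to the slow path when the result is zero. A self-contained sketch of that extraction, with placeholder constants rather than the real markOopDesc layout:

#include <cstdint>

// Pull the hash bits out of a mark word: shift, then mask. A result of zero
// means no hash has been installed yet (the slowCase branch above).
static inline uint64_t extract_hash(uint64_t mark_word,
                                    unsigned hash_shift, uint64_t hash_mask) {
  return (mark_word >> hash_shift) & hash_mask;
}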
@@ -2214,7 +1990,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now the space for the inbound oop handle area
- int oop_handle_offset = round_to(stack_slots, 2);
+ int oop_handle_offset = align_up(stack_slots, 2);
stack_slots += total_save_slots;
// Now any space we need for handlizing a klass if static method
@@ -2268,7 +2044,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now compute actual number of stack words we need rounding to make
// stack properly aligned.
- stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
+ stack_slots = align_up(stack_slots, 2 * VMRegImpl::slots_per_word);
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
@@ -2408,7 +2184,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// We have all of the arguments setup at this point. We MUST NOT touch any Oregs
// except O6/O7. So if we must call out we must push a new frame. We immediately
// push a new frame and flush the windows.
-#ifdef _LP64
intptr_t thepc = (intptr_t) __ pc();
{
address here = __ pc();
@@ -2416,9 +2191,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ call(here + 8, relocInfo::none);
__ delayed()->nop();
}
-#else
- intptr_t thepc = __ load_pc_address(O7, 0);
-#endif /* _LP64 */
// We use the same pc/oopMap repeatedly when we call out
oop_maps->add_gc_map(thepc - start, map);
@@ -2553,13 +2325,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Transition from _thread_in_Java to _thread_in_native.
__ set(_thread_in_native, G3_scratch);
-#ifdef _LP64
AddressLiteral dest(native_func);
__ relocate(relocInfo::runtime_call_type);
__ jumpl_to(dest, O7, O7);
-#else
- __ call(native_func, relocInfo::runtime_call_type);
-#endif
__ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
__ restore_thread(L7_thread_cache); // restore G2_thread
@@ -2574,9 +2342,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
case T_DOUBLE: break; // Got it where we want it (unless slow-path)
// In the 64-bit build the result is in O0; in the 32-bit build it is in O0, O1
case T_LONG:
-#ifndef _LP64
- __ mov(O1, I1);
-#endif
// Fall thru
case T_OBJECT: // Really a handle
case T_ARRAY:
@@ -2797,16 +2562,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Return
-#ifndef _LP64
- if (ret_type == T_LONG) {
-
- // Must leave proper result in O0,O1 and G1 (c2/tiered only)
- __ sllx(I0, 32, G1); // Shift bits into high G1
- __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
- __ or3 (I1, G1, G1); // OR 64 bits into G1
- }
-#endif
-
__ ret();
__ delayed()->restore();
@@ -2837,7 +2592,7 @@ int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals)
if (callee_locals < callee_parameters)
return 0; // No adjustment for negative locals
int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
- return round_to(diff, WordsPerLong);
+ return align_up(diff, WordsPerLong);
}
// "Top of Stack" slots that may be unused by the calling convention but must
@@ -2868,10 +2623,6 @@ static void gen_new_frame(MacroAssembler* masm, bool deopt) {
#ifdef ASSERT
// make sure that the frames are aligned properly
-#ifndef _LP64
- __ btst(wordSize*2-1, SP);
- __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
-#endif
#endif
// Deopt needs to pass some extra live values from frame to frame
@@ -2989,13 +2740,7 @@ void SharedRuntime::generate_deopt_blob() {
pad += 1000; // Increase the buffer size when compiling for JVMCI
}
#endif
-#ifdef _LP64
CodeBuffer buffer("deopt_blob", 2100+pad, 512);
-#else
- // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
- // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
- CodeBuffer buffer("deopt_blob", 1600+pad, 512);
-#endif /* _LP64 */
MacroAssembler* masm = new MacroAssembler(&buffer);
FloatRegister Freturn0 = F0;
Register Greturn1 = G1;
@@ -3006,9 +2751,6 @@ void SharedRuntime::generate_deopt_blob() {
Register G4deopt_mode = G4_scratch;
int frame_size_words;
Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
-#if !defined(_LP64) && defined(COMPILER2)
- Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
-#endif
Label cont;
OopMapSet *oop_maps = new OopMapSet();
@@ -3220,30 +2962,13 @@ void SharedRuntime::generate_deopt_blob() {
// to the interpreter entry point
__ save(SP, -frame_size_words*wordSize, SP);
__ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
-#if !defined(_LP64)
-#if defined(COMPILER2)
- // 32-bit 1-register longs return longs in G1
- __ stx(Greturn1, saved_Greturn1_addr);
-#endif
- __ set_last_Java_frame(SP, noreg);
- __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
-#else
// LP64 uses g4 in set_last_Java_frame
__ mov(G4deopt_mode, O1);
__ set_last_Java_frame(SP, G0);
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
-#endif
__ reset_last_Java_frame();
__ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
-#if !defined(_LP64) && defined(COMPILER2)
- // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
- // I0/I1 if the return value is long.
- Label not_long;
- __ cmp_and_br_short(O0,T_LONG, Assembler::notEqual, Assembler::pt, not_long);
- __ ldd(saved_Greturn1_addr,I0);
- __ bind(not_long);
-#endif
__ ret();
__ delayed()->restore();
@@ -3273,13 +2998,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
}
#endif
-#ifdef _LP64
CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
-#else
- // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
- // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
- CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
-#endif
MacroAssembler* masm = new MacroAssembler(&buffer);
Register O2UnrollBlock = O2;
Register O2klass_index = O2;
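
The round_to -> align_up renames throughout this file keep the same arithmetic: round a size up to the next multiple of a power-of-two alignment. A minimal sketch under that assumption, which also explains the hand-folded "(call_args_area + 7) & ~7" constant kept in RegisterSaver above:

#include <cstdint>

// Round x up to a power-of-two alignment; align_up_sketch(x, 8) is the
// "(x + 7) & ~7" expression spelled out by hand where a compile-time
// constant is required.
static inline uint64_t align_up_sketch(uint64_t x, uint64_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}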
diff --git a/hotspot/src/cpu/sparc/vm/sparc.ad b/hotspot/src/cpu/sparc/vm/sparc.ad
index 35e90204b68..07f62bac5aa 100644
--- a/hotspot/src/cpu/sparc/vm/sparc.ad
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad
@@ -1,5 +1,5 @@
//
-// Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -311,7 +311,6 @@ reg_class o7_regI(R_O7);
// ----------------------------
// Pointer Register Classes
// ----------------------------
-#ifdef _LP64
// 64-bit build means 64-bit pointers means hi/lo pairs
reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
@@ -344,40 +343,6 @@ reg_class o1_regP(R_O1H,R_O1);
reg_class o2_regP(R_O2H,R_O2);
reg_class o7_regP(R_O7H,R_O7);
-#else // _LP64
-// 32-bit build means 32-bit pointers means 1 register.
-reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5,
- R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
- R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
- R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
-// Lock encodings use G3 and G4 internally
-reg_class lock_ptr_reg(R_G1, R_G5,
- R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
- R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
- R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
-// Special class for storeP instructions, which can store SP or RPC to TLS.
-// It is also used for memory addressing, allowing direct TLS addressing.
-reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5,
- R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
- R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
- R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
-// R_L7 is the lowest-priority callee-save (i.e., NS) register
-// We use it to save R_G2 across calls out of Java.
-reg_class l7_regP(R_L7);
-
-// Other special pointer regs
-reg_class g1_regP(R_G1);
-reg_class g2_regP(R_G2);
-reg_class g3_regP(R_G3);
-reg_class g4_regP(R_G4);
-reg_class g5_regP(R_G5);
-reg_class i0_regP(R_I0);
-reg_class o0_regP(R_O0);
-reg_class o1_regP(R_O1);
-reg_class o2_regP(R_O2);
-reg_class o7_regP(R_O7);
-#endif // _LP64
-
// ----------------------------
// Long Register Classes
@@ -386,12 +351,9 @@ reg_class o7_regP(R_O7);
// Note: O7 is never in this class; it is sometimes used as an encoding temp.
reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
-#ifdef _LP64
// 64-bit, longs in 1 register: use all 64-bit integer registers
-// 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's.
,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
-#endif // _LP64
);
reg_class g1_regL(R_G1H,R_G1);
@@ -533,10 +495,8 @@ static Register reg_to_register_object(int register_encoding);
// instructions which either zero-fill or sign-fill).
bool can_branch_register( Node *bol, Node *cmp ) {
if( !BranchOnRegister ) return false;
-#ifdef _LP64
if( cmp->Opcode() == Op_CmpP )
return true; // No problems with pointer compares
-#endif
if( cmp->Opcode() == Op_CmpL )
return true; // No problems with long compares
@@ -617,15 +577,11 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
}
int MachCallRuntimeNode::ret_addr_offset() {
-#ifdef _LP64
if (MacroAssembler::is_far_target(entry_point())) {
return NativeFarCall::instruction_size;
} else {
return NativeCall::instruction_size;
}
-#else
- return NativeCall::instruction_size; // call; delay slot
-#endif
}
// Indicate if the safepoint node needs the polling page as an input.
@@ -1024,7 +980,6 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, RelocationHolder co
#ifdef ASSERT
if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
-#ifdef _LP64
// Trash argument dump slots.
__ set(0xb0b8ac0db0b8ac0d, G1);
__ mov(G1, G5);
@@ -1034,22 +989,6 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, RelocationHolder co
__ stx(G1, SP, STACK_BIAS + 0x98);
__ stx(G1, SP, STACK_BIAS + 0xA0);
__ stx(G1, SP, STACK_BIAS + 0xA8);
-#else // _LP64
- // this is also a native call, so smash the first 7 stack locations,
- // and the various registers
-
- // Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
- // while [SP+0x44..0x58] are the argument dump slots.
- __ set((intptr_t)0xbaadf00d, G1);
- __ mov(G1, G5);
- __ sllx(G1, 32, G1);
- __ or3(G1, G5, G1);
- __ mov(G1, G5);
- __ stx(G1, SP, 0x40);
- __ stx(G1, SP, 0x48);
- __ stx(G1, SP, 0x50);
- __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
-#endif // _LP64
}
#endif /*ASSERT*/
}
@@ -1133,7 +1072,13 @@ void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
__ rdpc(r);
- if (disp != 0) {
+ if (disp == 0) {
+ // Emitting an additional 'nop' instruction in order not to cause a code
+ // size adjustment in the code following the table setup (if the instruction
+ // immediately following this section is a CTI).
+ __ nop();
+ }
+ else {
assert(r != O7, "need temporary");
__ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
}
@@ -1262,11 +1207,7 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
if(do_polling() && ra_->C->is_method_compilation()) {
st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
-#ifdef _LP64
st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
-#else
- st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
-#endif
}
if(do_polling()) {
@@ -1472,75 +1413,10 @@ static void mach_spill_copy_implementation_helper(const MachNode* mach,
// hardware does the flop for me. Doubles are always aligned, so no problem
// there. Misaligned sources only come from native-long-returns (handled
// special below).
-#ifndef _LP64
- if (src_first_rc == rc_int && // source is already big-endian
- src_second_rc != rc_bad && // 64-bit move
- ((dst_first & 1) != 0 || dst_second != dst_first + 1)) { // misaligned dst
- assert((src_first & 1) == 0 && src_second == src_first + 1, "source must be aligned");
- // Do the big-endian flop.
- OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ;
- enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
- }
-#endif
// --------------------------------------
// Check for integer reg-reg copy
if (src_first_rc == rc_int && dst_first_rc == rc_int) {
-#ifndef _LP64
- if (src_first == R_O0_num && src_second == R_O1_num) { // Check for the evil O0/O1 native long-return case
- // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
- // as stored in memory. On a big-endian machine like SPARC, this means that the _second
- // operand contains the least significant word of the 64-bit value and vice versa.
- OptoReg::Name tmp = OptoReg::Name(R_O7_num);
- assert((dst_first & 1) == 0 && dst_second == dst_first + 1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
- // Shift O0 left in-place, zero-extend O1, then OR them into the dst
- if ( cbuf ) {
- emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020);
- emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000);
- emit3 (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second]);
-#ifndef PRODUCT
- } else {
- print_helper(st, "SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
- print_helper(st, "SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
- print_helper(st, "OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
-#endif
- }
- return;
- } else if (dst_first == R_I0_num && dst_second == R_I1_num) {
- // returning a long value in I0/I1
- // a SpillCopy must be able to target a return instruction's reg_class
- // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
- // as stored in memory. On a big-endian machine like SPARC, this means that the _second
- // operand contains the least significant word of the 64-bit value and vice versa.
- OptoReg::Name tdest = dst_first;
-
- if (src_first == dst_first) {
- tdest = OptoReg::Name(R_O7_num);
- }
-
- if (cbuf) {
- assert((src_first & 1) == 0 && (src_first + 1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
- // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
- // ShrL_reg_imm6
- emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000);
- // ShrR_reg_imm6 src, 0, dst
- emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000);
- if (tdest != dst_first) {
- emit3 (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest]);
- }
- }
-#ifndef PRODUCT
- else {
- print_helper(st, "SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
- print_helper(st, "SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
- if (tdest != dst_first) {
- print_helper(st, "MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
- }
- }
-#endif // PRODUCT
- return size+8;
- }
-#endif // !_LP64
// Else normal reg-reg copy
assert(src_second != dst_first, "smashed second before evacuating it");
impl_mov_helper(cbuf, src_first, dst_first, Assembler::or_op3, 0, "MOV ", st);
@@ -1614,58 +1490,6 @@ static void mach_spill_copy_implementation_helper(const MachNode* mach,
}
assert(src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad");
-#ifndef _LP64
- // In the LP64 build, all registers can be moved as aligned/adjacent
- // pairs, so there's never any need to move the high bits separately.
- // The 32-bit builds have to deal with the 32-bit ABI which can force
- // all sorts of silly alignment problems.
-
- // Check for integer reg-reg copy. Hi bits are stuck up in the top
- // 32-bits of a 64-bit register, but are needed in low bits of another
- // register (else it's a hi-bits-to-hi-bits copy which should have
- // happened already as part of a 64-bit move)
- if (src_second_rc == rc_int && dst_second_rc == rc_int) {
- assert((src_second & 1) == 1, "its the evil O0/O1 native return case");
- assert((dst_second & 1) == 0, "should have moved with 1 64-bit move");
- // Shift src_second down to dst_second's low bits.
- if (cbuf) {
- emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
-#ifndef PRODUCT
- } else {
- print_helper(st, "SRLX R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second - 1), OptoReg::regname(dst_second));
-#endif
- }
- return;
- }
-
- // Check for high word integer store. Must down-shift the hi bits
- // into a temp register, then fall into the case of storing int bits.
- if (src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second & 1) == 1) {
- // Shift src_second down to dst_second's low bits.
- if (cbuf) {
- emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
-#ifndef PRODUCT
- } else {
- print_helper(st, "SRLX R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second-1), OptoReg::regname(R_O7_num));
-#endif
- }
- src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
- }
-
- // Check for high word integer load
- if (dst_second_rc == rc_int && src_second_rc == rc_stack)
- return impl_helper(this, cbuf, ra_, true, ra_->reg2offset(src_second), dst_second, Assembler::lduw_op3, "LDUW", size, st);
-
- // Check for high word integer store
- if (src_second_rc == rc_int && dst_second_rc == rc_stack)
- return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stw_op3, "STW ", size, st);
-
- // Check for high word float store
- if (src_second_rc == rc_float && dst_second_rc == rc_stack)
- return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stf_op3, "STF ", size, st);
-
-#endif // !_LP64
-
Unimplemented();
}
@@ -1743,7 +1567,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
#ifndef PRODUCT
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
-#ifdef _LP64
if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
@@ -1762,11 +1585,6 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
st->print_cr("\tCMP R_G5,R_G3" );
st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
-#else // _LP64
- st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
- st->print_cr("\tCMP R_G5,R_G3" );
- st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
-#endif // _LP64
}
#endif
@@ -1874,9 +1692,7 @@ const bool Matcher::match_rule_supported(int opcode) {
if (!UsePopCountInstruction)
return false;
case Op_CompareAndSwapL:
-#ifdef _LP64
case Op_CompareAndSwapP:
-#endif
if (!VM_Version::supports_cx8())
return false;
break;
@@ -1919,12 +1735,12 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg
-const int Matcher::vector_ideal_reg(int size) {
+const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8, "");
return Op_RegD;
}
-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported");
return Node::NotAMachineReg;
}
@@ -1950,10 +1766,8 @@ const bool Matcher::pass_original_key_for_aes() {
return true;
}
-// USII supports fxtof through the whole range of number, USIII doesn't
-const bool Matcher::convL2FSupported(void) {
- return VM_Version::has_fast_fxtof();
-}
+// NOTE: All currently supported SPARC HW provides fast conversion.
+const bool Matcher::convL2FSupported(void) { return true; }
// Is this branch offset short enough that a short branch can be used?
//
@@ -1979,9 +1793,9 @@ const bool Matcher::init_array_count_is_in_bytes = true;
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }
-// CMOVF/CMOVD are expensive on T4 and on SPARC64.
+// CMOVF/CMOVD are expensive on e.g., T4 and SPARC64.
const int Matcher::float_cmove_cost() {
- return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0;
+ return VM_Version::has_fast_cmove() ? 0 : ConditionalMoveLimit;
}
// Does the CPU require late expand (see block.cpp for description of late expand)?
@@ -1992,13 +1806,11 @@ const bool Matcher::require_postalloc_expand = false;
const bool Matcher::need_masked_shift_count = false;
bool Matcher::narrow_oop_use_complex_address() {
- NOT_LP64(ShouldNotCallThis());
assert(UseCompressedOops, "only for compressed oops code");
return false;
}
bool Matcher::narrow_klass_use_complex_address() {
- NOT_LP64(ShouldNotCallThis());
assert(UseCompressedClassPointers, "only for compressed klass code");
return false;
}
@@ -2027,11 +1839,7 @@ const bool Matcher::rematerialize_float_constants = false;
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
-#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true;
-#else
-const bool Matcher::misaligned_doubles_ok = false;
-#endif
// No-op on SPARC.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
@@ -2050,11 +1858,7 @@ bool Matcher::float_in_double() { return false; }
// The relevant question is how the int is callee-saved. In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
-#ifdef _LP64
const bool Matcher::int_in_long = true;
-#else
-const bool Matcher::int_in_long = false;
-#endif
// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
@@ -2068,7 +1872,6 @@ bool Matcher::can_be_java_arg( int reg ) {
reg == R_I3_num ||
reg == R_I4_num ||
reg == R_I5_num ) return true;
-#ifdef _LP64
// 64-bit builds can pass 64-bit pointers and longs in
// the high I registers
if( reg == R_I0H_num ||
@@ -2082,14 +1885,6 @@ bool Matcher::can_be_java_arg( int reg ) {
return true;
}
-#else
- // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
- // Longs cannot be passed in O regs, because O regs become I regs
- // after a 'save' and I regs get their high bits chopped off on
- // interrupt.
- if( reg == R_G1H_num || reg == R_G1_num ) return true;
- if( reg == R_G4H_num || reg == R_G4_num ) return true;
-#endif
// A few float args in registers
if( reg >= R_F0_num && reg <= R_F7_num ) return true;
@@ -2152,19 +1947,11 @@ void Compile::reshape_address(AddPNode* addp) {
// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
-#ifdef _LP64
#define immX immL
#define immX13 immL13
#define immX13m7 immL13m7
#define iRegX iRegL
#define g1RegX g1RegL
-#else
-#define immX immI
-#define immX13 immI13
-#define immX13m7 immI13m7
-#define iRegX iRegI
-#define g1RegX g1RegI
-#endif
//----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output
@@ -2326,7 +2113,6 @@ encode %{
emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
%}
-#ifdef _LP64
/* %%% merge with enc_to_bool */
enc_class enc_convP2B( iRegI dst, iRegP src ) %{
MacroAssembler _masm(&cbuf);
@@ -2335,7 +2121,6 @@ encode %{
Register dst_reg = reg_to_register_object($dst$$reg);
__ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
%}
-#endif
enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
// (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
@@ -2626,16 +2411,6 @@ encode %{
// to G1 so the register allocator will not have to deal with the misaligned register
// pair.
enc_class adjust_long_from_native_call %{
-#ifndef _LP64
- if (returns_long()) {
- // sllx O0,32,O0
- emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
- // srl O1,0,O1
- emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
- // or O0,O1,G1
- emit3 ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
- }
-#endif
%}
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime
@@ -2852,6 +2627,33 @@ enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{
__ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst);
%}
+
+
+enc_class fmadds (sflt_reg dst, sflt_reg a, sflt_reg b, sflt_reg c) %{
+ MacroAssembler _masm(&cbuf);
+
+ FloatRegister Frd = reg_to_SingleFloatRegister_object($dst$$reg);
+ FloatRegister Fra = reg_to_SingleFloatRegister_object($a$$reg);
+ FloatRegister Frb = reg_to_SingleFloatRegister_object($b$$reg);
+ FloatRegister Frc = reg_to_SingleFloatRegister_object($c$$reg);
+
+ __ fmadd(FloatRegisterImpl::S, Fra, Frb, Frc, Frd);
+%}
+
+enc_class fmaddd (dflt_reg dst, dflt_reg a, dflt_reg b, dflt_reg c) %{
+ MacroAssembler _masm(&cbuf);
+
+ FloatRegister Frd = reg_to_DoubleFloatRegister_object($dst$$reg);
+ FloatRegister Fra = reg_to_DoubleFloatRegister_object($a$$reg);
+ FloatRegister Frb = reg_to_DoubleFloatRegister_object($b$$reg);
+ FloatRegister Frc = reg_to_DoubleFloatRegister_object($c$$reg);
+
+ __ fmadd(FloatRegisterImpl::D, Fra, Frb, Frc, Frd);
+%}
+
+
+
+
enc_class fmovs (dflt_reg dst, dflt_reg src) %{
MacroAssembler _masm(&cbuf);
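
The new fmadds/fmaddd encodings above emit SPARC fused multiply-add: a*b + c computed in one instruction with a single rounding step. The portable C++ analogue, shown only for illustration (the matcher emits the hardware instruction directly):

#include <cmath>

// d = a*b + c with one rounding, matching what FMADDs/FMADDd compute.
double fused_madd(double a, double b, double c) {
  return std::fma(a, b, c);
}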
@@ -3102,11 +2904,7 @@ frame %{
cisc_spilling_operand_name(indOffset);
// Number of stack slots consumed by a Monitor enter
-#ifdef _LP64
sync_stack_slots(2);
-#else
- sync_stack_slots(1);
-#endif
// Compiled code's Frame Pointer
frame_pointer(R_SP);
@@ -3124,13 +2922,8 @@ frame %{
// Number of outgoing stack slots killed above the out_preserve_stack_slots
// for calls to C. Supports the var-args backing area for register parms.
// ADLC doesn't support parsing expressions, so I folded the math by hand.
-#ifdef _LP64
// (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
varargs_C_out_slots_killed(12);
-#else
- // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
- varargs_C_out_slots_killed( 7);
-#endif
// The after-PROLOG location of the return address. Location of
// return address specifies a type (REG or STACK) and a number
@@ -3161,17 +2954,10 @@ frame %{
// opcodes. This simplifies the register allocator.
c_return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
-#ifdef _LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
-#else // !_LP64
- static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
- static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
- static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
- static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
-#endif
return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
(is_outgoing?lo_out:lo_in)[ideal_reg] );
%}
@@ -3179,17 +2965,10 @@ frame %{
// Location of compiled Java return values. Same as C
return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
-#ifdef _LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
-#else // !_LP64
- static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
- static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
- static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
- static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
-#endif
return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
(is_outgoing?lo_out:lo_in)[ideal_reg] );
%}
@@ -3454,10 +3233,9 @@ operand immP() %{
interface(CONST_INTER);
%}
-#ifdef _LP64
// Pointer Immediate: 64-bit
operand immP_set() %{
- predicate(!VM_Version::is_niagara_plus());
+ predicate(!VM_Version::has_fast_ld());
match(ConP);
op_cost(5);
@@ -3469,7 +3247,7 @@ operand immP_set() %{
// Pointer Immediate: 64-bit
// From Niagara2 processors on a load should be better than materializing.
operand immP_load() %{
- predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
+ predicate(VM_Version::has_fast_ld() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
match(ConP);
op_cost(5);
@@ -3480,7 +3258,7 @@ operand immP_load() %{
// Pointer Immediate: 64-bit
operand immP_no_oop_cheap() %{
- predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
+ predicate(VM_Version::has_fast_ld() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
match(ConP);
op_cost(5);
@@ -3488,7 +3266,6 @@ operand immP_no_oop_cheap() %{
format %{ %}
interface(CONST_INTER);
%}
-#endif
operand immP13() %{
predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
@@ -3605,7 +3382,7 @@ operand immL_32bits() %{
// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
- predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3);
+ predicate(!VM_Version::has_fast_ld() || MacroAssembler::insts_for_set64(n->get_long()) <= 3);
match(ConL);
op_cost(0);
@@ -3615,7 +3392,7 @@ operand immL_cheap() %{
// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
- predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3);
+ predicate(VM_Version::has_fast_ld() && MacroAssembler::insts_for_set64(n->get_long()) > 3);
match(ConL);
op_cost(0);
@@ -3929,11 +3706,7 @@ operand flagsRegP() %{
constraint(ALLOC_IN_RC(int_flags));
match(RegFlags);
-#ifdef _LP64
format %{ "xcc_P" %}
-#else
- format %{ "icc_P" %}
-#endif
interface(REG_INTER);
%}
@@ -4519,7 +4292,6 @@ pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr )
MS : R(2);
%}
-#ifdef _LP64
pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
instruction_count(1); multiple_bundles;
dst : C(write)+1;
@@ -4528,7 +4300,6 @@ pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
BR : E(2);
MS : E(2);
%}
-#endif
// Integer ALU reg operation
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
@@ -4633,13 +4404,8 @@ pipe_class loadConP( iRegP dst, immP src ) %{
// Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
-#ifdef _LP64
instruction_count(0); multiple_bundles;
fixed_latency(6);
-#else
- dst : E(write);
- IALU : R;
-#endif
%}
// Long Constant small
@@ -4820,6 +4586,26 @@ pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
FDIV : C(17);
%}
+// Fused floating-point multiply-add float.
+pipe_class fmaF_regx4(regF dst, regF src1, regF src2, regF src3) %{
+ single_instruction;
+ dst : X(write);
+ src1 : E(read);
+ src2 : E(read);
+ src3 : E(read);
+ FM : R;
+%}
+
+// Fused floating-point multiply-add double.
+pipe_class fmaD_regx4(regD dst, regD src1, regD src2, regD src3) %{
+ single_instruction;
+ dst : X(write);
+ src1 : E(read);
+ src2 : E(read);
+ src3 : E(read);
+ FM : R;
+%}
+
// Floating Point Move/Negate/Abs Float
pipe_class faddF_reg(regF dst, regF src) %{
single_instruction;
@@ -5380,7 +5166,6 @@ instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
ins_pipe(istore_mem_reg);
%}
-#ifdef _LP64
// Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
match(Set dst src);
@@ -5400,27 +5185,6 @@ instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg);
%}
-#else // _LP64
-// Load pointer from stack slot, 32-bit encoding
-instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
- match(Set dst src);
- ins_cost(MEMORY_REF_COST);
- format %{ "LDUW $src,$dst\t!ptr" %}
- opcode(Assembler::lduw_op3, Assembler::ldst_op);
- ins_encode(simple_form3_mem_reg( src, dst ) );
- ins_pipe(iload_mem);
-%}
-
-// Store pointer to stack slot
-instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
- match(Set dst src);
- ins_cost(MEMORY_REF_COST);
- format %{ "STW $src,$dst\t!ptr" %}
- opcode(Assembler::stw_op3, Assembler::ldst_op);
- ins_encode(simple_form3_mem_reg( dst, src ) );
- ins_pipe(istore_mem_reg);
-%}
-#endif // _LP64
//------------Special Nop instructions for bundling - no match rules-----------
// Nop using the A0 functional unit
@@ -5877,17 +5641,10 @@ instruct loadP(iRegP dst, memory mem) %{
ins_cost(MEMORY_REF_COST);
size(4);
-#ifndef _LP64
- format %{ "LDUW $mem,$dst\t! ptr" %}
- ins_encode %{
- __ lduw($mem$$Address, $dst$$Register);
- %}
-#else
format %{ "LDX $mem,$dst\t! ptr" %}
ins_encode %{
__ ldx($mem$$Address, $dst$$Register);
%}
-#endif
ins_pipe(iload_mem);
%}
@@ -5910,17 +5667,10 @@ instruct loadKlass(iRegP dst, memory mem) %{
ins_cost(MEMORY_REF_COST);
size(4);
-#ifndef _LP64
- format %{ "LDUW $mem,$dst\t! klass ptr" %}
- ins_encode %{
- __ lduw($mem$$Address, $dst$$Register);
- %}
-#else
format %{ "LDX $mem,$dst\t! klass ptr" %}
ins_encode %{
__ ldx($mem$$Address, $dst$$Register);
%}
-#endif
ins_pipe(iload_mem);
%}
@@ -5988,26 +5738,6 @@ instruct loadConI13( iRegI dst, immI13 src ) %{
ins_pipe(ialu_imm);
%}
-#ifndef _LP64
-instruct loadConP(iRegP dst, immP con) %{
- match(Set dst con);
- ins_cost(DEFAULT_COST * 3/2);
- format %{ "SET $con,$dst\t!ptr" %}
- ins_encode %{
- relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
- intptr_t val = $con$$constant;
- if (constant_reloc == relocInfo::oop_type) {
- __ set_oop_constant((jobject) val, $dst$$Register);
- } else if (constant_reloc == relocInfo::metadata_type) {
- __ set_metadata_constant((Metadata*)val, $dst$$Register);
- } else { // non-oop pointers, e.g. card mark base, heap top
- assert(constant_reloc == relocInfo::none, "unexpected reloc type");
- __ set(val, $dst$$Register);
- }
- %}
- ins_pipe(loadConP);
-%}
-#else
instruct loadConP_set(iRegP dst, immP_set con) %{
match(Set dst con);
ins_cost(DEFAULT_COST * 3/2);
@@ -6051,7 +5781,6 @@ instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
%}
ins_pipe(loadConP);
%}
-#endif // _LP64
instruct loadConP0(iRegP dst, immP0 src) %{
match(Set dst src);
@@ -6205,19 +5934,6 @@ instruct prefetchAlloc_bis( iRegP dst ) %{
%}
// Next code is used for finding next cache line address to prefetch.
-#ifndef _LP64
-instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
- match(Set dst (CastX2P (AndI (CastP2X src) mask)));
- ins_cost(DEFAULT_COST);
- size(4);
-
- format %{ "AND $src,$mask,$dst\t! next cache line address" %}
- ins_encode %{
- __ and3($src$$Register, $mask$$constant, $dst$$Register);
- %}
- ins_pipe(ialu_reg_imm);
-%}
-#else
instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
match(Set dst (CastX2P (AndL (CastP2X src) mask)));
ins_cost(DEFAULT_COST);
@@ -6229,7 +5945,6 @@ instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
%}
ins_pipe(ialu_reg_imm);
%}
-#endif
//----------Store Instructions-------------------------------------------------
// Store Byte
@@ -6341,13 +6056,8 @@ instruct storeP(memory dst, sp_ptr_RegP src) %{
match(Set dst (StoreP dst src));
ins_cost(MEMORY_REF_COST);
-#ifndef _LP64
- format %{ "STW $src,$dst\t! ptr" %}
- opcode(Assembler::stw_op3, 0, REGP_OP);
-#else
format %{ "STX $src,$dst\t! ptr" %}
opcode(Assembler::stx_op3, 0, REGP_OP);
-#endif
ins_encode( form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_spORreg);
%}
@@ -6356,13 +6066,8 @@ instruct storeP0(memory dst, immP0 src) %{
match(Set dst (StoreP dst src));
ins_cost(MEMORY_REF_COST);
-#ifndef _LP64
- format %{ "STW $src,$dst\t! ptr" %}
- opcode(Assembler::stw_op3, 0, REGP_OP);
-#else
format %{ "STX $src,$dst\t! ptr" %}
opcode(Assembler::stx_op3, 0, REGP_OP);
-#endif
ins_encode( form3_mem_reg( dst, R_G0 ) );
ins_pipe(istore_mem_zero);
%}
@@ -7113,13 +6818,8 @@ instruct loadPLocked(iRegP dst, memory mem) %{
match(Set dst (LoadPLocked mem));
ins_cost(MEMORY_REF_COST);
-#ifndef _LP64
- format %{ "LDUW $mem,$dst\t! ptr" %}
- opcode(Assembler::lduw_op3, 0, REGP_OP);
-#else
format %{ "LDX $mem,$dst\t! ptr" %}
opcode(Assembler::ldx_op3, 0, REGP_OP);
-#endif
ins_encode( form3_mem_reg( mem, dst ) );
ins_pipe(iload_mem);
%}
@@ -7190,9 +6890,7 @@ instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI r
%}
instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
-#ifdef _LP64
predicate(VM_Version::supports_cx8());
-#endif
match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp1);
@@ -7203,13 +6901,8 @@ instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI r
"MOV 1,$res\n\t"
"MOVne xcc,R_G0,$res"
%}
-#ifdef _LP64
ins_encode( enc_casx(mem_ptr, oldval, newval),
enc_lflags_ne_to_boolean(res) );
-#else
- ins_encode( enc_casi(mem_ptr, oldval, newval),
- enc_iflags_ne_to_boolean(res) );
-#endif
ins_pipe( long_memory_op );
%}
@@ -7287,17 +6980,6 @@ instruct xchgI( memory mem, iRegI newval) %{
ins_pipe( long_memory_op );
%}
-#ifndef _LP64
-instruct xchgP( memory mem, iRegP newval) %{
- match(Set newval (GetAndSetP mem newval));
- format %{ "SWAP [$mem],$newval" %}
- size(4);
- ins_encode %{
- __ swap($mem$$Address, $newval$$Register);
- %}
- ins_pipe( long_memory_op );
-%}
-#endif
instruct xchgN( memory mem, iRegN newval) %{
match(Set newval (GetAndSetN mem newval));
@@ -7759,7 +7441,6 @@ instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
%}
// Register Shift Right Immediate with a CastP2X
-#ifdef _LP64
instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
match(Set dst (URShiftL (CastP2X src1) src2));
size(4);
@@ -7768,16 +7449,6 @@ instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
ins_pipe(ialu_reg_imm);
%}
-#else
-instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{
- match(Set dst (URShiftI (CastP2X src1) src2));
- size(4);
- format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %}
- opcode(Assembler::srl_op3, Assembler::arith_op);
- ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
- ins_pipe(ialu_reg_imm);
-%}
-#endif
//----------Floating Point Arithmetic Instructions-----------------------------
@@ -7926,6 +7597,24 @@ instruct sqrtD_reg_reg(regD dst, regD src) %{
ins_pipe(fdivD_reg_reg);
%}
+// Single precision fused floating-point multiply-add (d = a * b + c).
+instruct fmaF_regx4(regF dst, regF a, regF b, regF c) %{
+ predicate(UseFMA);
+ match(Set dst (FmaF c (Binary a b)));
+ format %{ "fmadds $a,$b,$c,$dst\t# $dst = $a * $b + $c" %}
+ ins_encode(fmadds(dst, a, b, c));
+ ins_pipe(fmaF_regx4);
+%}
+
+// Double precision fused floating-point multiply-add (d = a * b + c).
+instruct fmaD_regx4(regD dst, regD a, regD b, regD c) %{
+ predicate(UseFMA);
+ match(Set dst (FmaD c (Binary a b)));
+ format %{ "fmaddd $a,$b,$c,$dst\t# $dst = $a * $b + $c" %}
+ ins_encode(fmaddd(dst, a, b, c));
+ ins_pipe(fmaD_regx4);
+%}
+
//----------Logical Instructions-----------------------------------------------
// And Instructions
// Register And
@@ -8020,21 +7709,6 @@ instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
ins_pipe(ialu_reg_imm);
%}
-#ifndef _LP64
-
-// Use sp_ptr_RegP to match G2 (TLS register) without spilling.
-instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
- match(Set dst (OrI src1 (CastP2X src2)));
-
- size(4);
- format %{ "OR $src1,$src2,$dst" %}
- opcode(Assembler::or_op3, Assembler::arith_op);
- ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
- ins_pipe(ialu_reg_reg);
-%}
-
-#else
-
instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
match(Set dst (OrL src1 (CastP2X src2)));
@@ -8046,8 +7720,6 @@ instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
ins_pipe(ialu_reg_reg);
%}
-#endif
-
// Xor Instructions
// Register Xor
instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
@@ -8107,17 +7779,6 @@ instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
ins_pipe(ialu_reg_ialu);
%}
-#ifndef _LP64
-instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
- match(Set dst (Conv2B src));
- effect( KILL ccr );
- ins_cost(DEFAULT_COST*2);
- format %{ "CMP R_G0,$src\n\t"
- "ADDX R_G0,0,$dst" %}
- ins_encode( enc_to_bool( src, dst ) );
- ins_pipe(ialu_reg_ialu);
-%}
-#else
instruct convP2B( iRegI dst, iRegP src ) %{
match(Set dst (Conv2B src));
ins_cost(DEFAULT_COST*2);
@@ -8126,7 +7787,6 @@ instruct convP2B( iRegI dst, iRegP src ) %{
ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
ins_pipe(ialu_clr_and_mover);
%}
-#endif
instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
match(Set dst (CmpLTMask src zero));
@@ -8669,40 +8329,6 @@ instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{
ins_pipe(fmulD_reg_reg);
%}
-instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{
- match(Set dst (ConvL2D src));
- ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6);
-
- expand %{
- regD_low tmpsrc;
- iRegI ix43300000;
- iRegI ix41f00000;
- stackSlotL lx43300000;
- stackSlotL lx41f00000;
- regD_low dx43300000;
- regD dx41f00000;
- regD tmp1;
- regD_low tmp2;
- regD tmp3;
- regD tmp4;
-
- stkL_to_regD(tmpsrc, src);
-
- loadConI_x43300000(ix43300000);
- loadConI_x41f00000(ix41f00000);
- regI_to_stkLHi(lx43300000, ix43300000);
- regI_to_stkLHi(lx41f00000, ix41f00000);
- stkL_to_regD(dx43300000, lx43300000);
- stkL_to_regD(dx41f00000, lx41f00000);
-
- convI2D_regDHi_regD(tmp1, tmpsrc);
- regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc);
- subD_regD_regD(tmp3, tmp2, dx43300000);
- mulD_regD_regD(tmp4, tmp1, dx41f00000);
- addD_regD_regD(dst, tmp3, tmp4);
- %}
-%}
-
// Long to Double conversion using fast fxtof
instruct convL2D_helper(regD dst, regD tmp) %{
effect(DEF dst, USE tmp);
@@ -8714,7 +8340,6 @@ instruct convL2D_helper(regD dst, regD tmp) %{
%}
instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{
- predicate(VM_Version::has_fast_fxtof());
match(Set dst (ConvL2D src));
ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST);
expand %{
@@ -8769,16 +8394,10 @@ instruct convL2F_reg(regF dst, iRegL src) %{
instruct convL2I_reg(iRegI dst, iRegL src) %{
match(Set dst (ConvL2I src));
-#ifndef _LP64
- format %{ "MOV $src.lo,$dst\t! long->int" %}
- ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
- ins_pipe(ialu_move_reg_I_to_L);
-#else
size(4);
format %{ "SRA $src,R_G0,$dst\t! long->int" %}
ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
ins_pipe(ialu_reg);
-#endif
%}
// Register Shift Right Immediate
@@ -9117,7 +8736,7 @@ instruct branch_short(label labl) %{
predicate(UseCBCond);
effect(USE labl);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "BA $labl\t! short branch" %}
ins_encode %{
@@ -9496,7 +9115,7 @@ instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flag
predicate(UseCBCond);
effect(USE labl, KILL icc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
ins_encode %{
@@ -9514,7 +9133,7 @@ instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flag
predicate(UseCBCond);
effect(USE labl, KILL icc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
ins_encode %{
@@ -9532,7 +9151,7 @@ instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, fla
predicate(UseCBCond);
effect(USE labl, KILL icc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
ins_encode %{
@@ -9550,7 +9169,7 @@ instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, fla
predicate(UseCBCond);
effect(USE labl, KILL icc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
ins_encode %{
@@ -9604,7 +9223,7 @@ instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flag
predicate(UseCBCond);
effect(USE labl, KILL xcc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
ins_encode %{
@@ -9622,7 +9241,7 @@ instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flag
predicate(UseCBCond);
effect(USE labl, KILL xcc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
ins_encode %{
@@ -9641,13 +9260,9 @@ instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, fla
predicate(UseCBCond);
effect(USE labl, KILL pcc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
-#ifdef _LP64
format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
-#else
- format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
-#endif
ins_encode %{
Label* L = $labl$$label;
assert(__ use_cbcond(*L), "back to back cbcond");
@@ -9663,13 +9278,9 @@ instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, f
predicate(UseCBCond);
effect(USE labl, KILL pcc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
-#ifdef _LP64
format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
-#else
- format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
-#endif
ins_encode %{
Label* L = $labl$$label;
assert(__ use_cbcond(*L), "back to back cbcond");
@@ -9685,7 +9296,7 @@ instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flag
predicate(UseCBCond);
effect(USE labl, KILL icc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %}
ins_encode %{
@@ -9703,7 +9314,7 @@ instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, fl
predicate(UseCBCond);
effect(USE labl, KILL icc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %}
ins_encode %{
@@ -9722,7 +9333,7 @@ instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label lab
predicate(UseCBCond);
effect(USE labl, KILL icc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
ins_encode %{
@@ -9740,7 +9351,7 @@ instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label lab
predicate(UseCBCond);
effect(USE labl, KILL icc);
- size(4);
+ size(4); // Assuming no NOP inserted.
ins_cost(BRANCH_COST);
format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
ins_encode %{
@@ -9956,11 +9567,7 @@ instruct safePoint_poll(iRegP poll) %{
effect(USE poll);
size(4);
-#ifdef _LP64
format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
-#else
- format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
-#endif
ins_encode %{
__ relocate(relocInfo::poll_type);
__ ld_ptr($poll$$Register, 0, G0);
@@ -10393,15 +10000,15 @@ instruct array_equalsC(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI resul
instruct has_negatives(o0RegP pAryR, g3RegI iSizeR, notemp_iRegI resultR,
iRegL tmp1L, iRegL tmp2L, iRegL tmp3L, iRegL tmp4L,
- flagsReg ccr)
+ flagsReg ccr)
%{
match(Set resultR (HasNegatives pAryR iSizeR));
effect(TEMP resultR, TEMP tmp1L, TEMP tmp2L, TEMP tmp3L, TEMP tmp4L, USE pAryR, USE iSizeR, KILL ccr);
format %{ "has negatives byte[] $pAryR,$iSizeR -> $resultR // KILL $tmp1L,$tmp2L,$tmp3L,$tmp4L" %}
ins_encode %{
- __ has_negatives($pAryR$$Register, $iSizeR$$Register,
+ __ has_negatives($pAryR$$Register, $iSizeR$$Register,
$resultR$$Register,
- $tmp1L$$Register, $tmp2L$$Register,
+ $tmp1L$$Register, $tmp2L$$Register,
$tmp3L$$Register, $tmp4L$$Register);
%}
ins_pipe(long_memory_op);
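The fmaF_regx4/fmaD_regx4 pipe classes and match rules added to sparc.ad above are guarded by UseFMA and let C2 select a single fmadds/fmaddd for the FmaF/FmaD ideal nodes used by the Math.fma intrinsics. A minimal, illustrative Java sketch of the single-rounding semantics these rules implement (the class name is made up for the example and is not part of the patch):

    public class FmaDemo {
        public static void main(String[] args) {
            double x = 1.0 / 3.0;
            double y = 3.0;
            double product  = x * y;                     // multiply rounded once: prints 1.0
            double residual = Math.fma(x, y, -product);  // exact x*y + (-product), then one rounding
            // residual recovers the rounding error of x * y; plain (x * y - product) would be 0.0
            System.out.println(product + " " + residual);
        }
    }

Math.fma must round only once whether or not a fused instruction is available; the new rules simply make that the single-instruction case on SPARC parts where UseFMA is enabled.
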
diff --git a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
index 8babb471137..9c4713e936d 100644
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -216,9 +216,7 @@ class StubGenerator: public StubCodeGenerator {
__ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
__ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes
__ sub(FP, t, Gargs); // setup parameter pointer
-#ifdef _LP64
__ add( Gargs, STACK_BIAS, Gargs ); // Account for LP64 stack bias
-#endif
__ mov(SP, O5_savedSP);
@@ -271,27 +269,8 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
__ BIND(is_long);
-#ifdef _LP64
__ ba(exit);
__ delayed()->st_long(O0, addr, G0); // store entire long
-#else
-#if defined(COMPILER2)
- // All return values are where we want them, except for Longs. C2 returns
- // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
- // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
- // build we simply always use G1.
- // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
- // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
- // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
-
- __ ba(exit);
- __ delayed()->stx(G1, addr, G0); // store entire long
-#else
- __ st(O1, addr, BytesPerInt);
- __ ba(exit);
- __ delayed()->st(O0, addr, G0);
-#endif /* COMPILER2 */
-#endif /* _LP64 */
}
return start;
}
@@ -746,22 +725,10 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
Label miss;
-#if defined(COMPILER2) && !defined(_LP64)
- // Do not use a 'save' because it blows the 64-bit O registers.
- __ add(SP,-4*wordSize,SP); // Make space for 4 temps (stack must be 2 words aligned)
- __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
- __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
- __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
- __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
- Register Rret = O0;
- Register Rsub = O1;
- Register Rsuper = O2;
-#else
__ save_frame(0);
Register Rret = I0;
Register Rsub = I1;
Register Rsuper = I2;
-#endif
Register L0_ary_len = L0;
Register L1_ary_ptr = L1;
@@ -775,32 +742,14 @@ class StubGenerator: public StubCodeGenerator {
// Match falls through here.
__ addcc(G0,0,Rret); // set Z flags, Z result
-#if defined(COMPILER2) && !defined(_LP64)
- __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
- __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
- __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
- __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
- __ retl(); // Result in Rret is zero; flags set to Z
- __ delayed()->add(SP,4*wordSize,SP);
-#else
__ ret(); // Result in Rret is zero; flags set to Z
__ delayed()->restore();
-#endif
__ BIND(miss);
__ addcc(G0,1,Rret); // set NZ flags, NZ result
-#if defined(COMPILER2) && !defined(_LP64)
- __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
- __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
- __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
- __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
- __ retl(); // Result in Rret is != 0; flags set to NZ
- __ delayed()->add(SP,4*wordSize,SP);
-#else
__ ret(); // Result in Rret is != 0; flags set to NZ
__ delayed()->restore();
-#endif
return start;
}
@@ -828,11 +777,11 @@ class StubGenerator: public StubCodeGenerator {
// Rtmp - scratch
//
void assert_clean_int(Register Rint, Register Rtmp) {
-#if defined(ASSERT) && defined(_LP64)
+ #if defined(ASSERT)
__ signx(Rint, Rtmp);
__ cmp(Rint, Rtmp);
__ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
-#endif
+ #endif
}
//
@@ -1019,10 +968,11 @@ class StubGenerator: public StubCodeGenerator {
// than prefetch distance.
__ set(prefetch_count, O4);
__ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
- __ sub(count, prefetch_count, count);
+ __ sub(count, O4, count);
(this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
- __ add(count, prefetch_count, count); // restore count
+ __ set(prefetch_count, O4);
+ __ add(count, O4, count);
} // prefetch_count > 0
@@ -1043,11 +993,12 @@ class StubGenerator: public StubCodeGenerator {
// than prefetch distance.
__ set(prefetch_count, O4);
__ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
- __ sub(count, prefetch_count, count);
+ __ sub(count, O4, count);
Label L_copy_prefetch;
(this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
- __ add(count, prefetch_count, count); // restore count
+ __ set(prefetch_count, O4);
+ __ add(count, O4, count);
} // prefetch_count > 0
@@ -1269,17 +1220,6 @@ class StubGenerator: public StubCodeGenerator {
// Aligned arrays have 4 bytes alignment in 32-bits VM
// and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM
//
-#ifndef _LP64
- // copy a 4-bytes word if necessary to align 'to' to 8 bytes
- __ andcc(to, 7, G0);
- __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
- __ delayed()->ld(from, 0, O3);
- __ inc(from, 4);
- __ inc(to, 4);
- __ dec(count, 4);
- __ st(O3, to, -4);
- __ BIND(L_skip_alignment);
-#endif
} else {
// copy bytes to align 'to' on 8 byte boundary
__ andcc(to, 7, G1); // misaligned bytes
@@ -1296,10 +1236,7 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->inc(to);
__ BIND(L_skip_alignment);
}
-#ifdef _LP64
- if (!aligned)
-#endif
- {
+ if (!aligned) {
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise fall through to the next
// code for aligned copy.
@@ -1395,15 +1332,12 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->stb(O3, end_to, 0);
__ BIND(L_skip_alignment);
}
-#ifdef _LP64
if (aligned) {
// Both arrays are aligned to 8-bytes in 64-bits VM.
// The 'count' is decremented in copy_16_bytes_backward_with_shift()
// in unaligned case.
__ dec(count, 16);
- } else
-#endif
- {
+ } else {
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise jump to the next
// code for aligned copy (and substracting 16 from 'count' before jump).
@@ -1490,17 +1424,6 @@ class StubGenerator: public StubCodeGenerator {
// Aligned arrays have 4 bytes alignment in 32-bits VM
// and 8 bytes - in 64-bits VM.
//
-#ifndef _LP64
- // copy a 2-elements word if necessary to align 'to' to 8 bytes
- __ andcc(to, 7, G0);
- __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
- __ delayed()->ld(from, 0, O3);
- __ inc(from, 4);
- __ inc(to, 4);
- __ dec(count, 2);
- __ st(O3, to, -4);
- __ BIND(L_skip_alignment);
-#endif
} else {
// copy 1 element if necessary to align 'to' on an 4 bytes
__ andcc(to, 3, G0);
@@ -1524,10 +1447,7 @@ class StubGenerator: public StubCodeGenerator {
__ sth(O4, to, -2);
__ BIND(L_skip_alignment2);
}
-#ifdef _LP64
- if (!aligned)
-#endif
- {
+ if (!aligned) {
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise fall through to the next
// code for aligned copy.
@@ -1643,20 +1563,16 @@ class StubGenerator: public StubCodeGenerator {
__ dec(count, 1 << (shift - 1));
__ BIND(L_skip_align2);
}
-#ifdef _LP64
if (!aligned) {
-#endif
- // align to 8 bytes, we know we are 4 byte aligned to start
- __ andcc(to, 7, G0);
- __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
- __ delayed()->nop();
- __ stw(value, to, 0);
- __ inc(to, 4);
- __ dec(count, 1 << shift);
- __ BIND(L_fill_32_bytes);
-#ifdef _LP64
+ // align to 8 bytes, we know we are 4 byte aligned to start
+ __ andcc(to, 7, G0);
+ __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
+ __ delayed()->nop();
+ __ stw(value, to, 0);
+ __ inc(to, 4);
+ __ dec(count, 1 << shift);
+ __ BIND(L_fill_32_bytes);
}
-#endif
if (t == T_INT) {
// Zero extend value
@@ -1857,15 +1773,12 @@ class StubGenerator: public StubCodeGenerator {
__ sth(O4, end_to, 0);
__ BIND(L_skip_alignment2);
}
-#ifdef _LP64
if (aligned) {
// Both arrays are aligned to 8-bytes in 64-bits VM.
// The 'count' is decremented in copy_16_bytes_backward_with_shift()
// in unaligned case.
__ dec(count, 8);
- } else
-#endif
- {
+ } else {
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise jump to the next
// code for aligned copy (and substracting 8 from 'count' before jump).
@@ -1974,10 +1887,7 @@ class StubGenerator: public StubCodeGenerator {
// Aligned arrays have 4 bytes alignment in 32-bits VM
// and 8 bytes - in 64-bits VM.
//
-#ifdef _LP64
- if (!aligned)
-#endif
- {
+ if (!aligned) {
// The next check could be put under 'ifndef' since the code in
// generate_disjoint_long_copy_core() has own checks and set 'offset'.
@@ -2463,16 +2373,12 @@ class StubGenerator: public StubCodeGenerator {
__ mov(to, G1);
__ mov(count, G5);
gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
- #ifdef _LP64
assert_clean_int(count, O3); // Make sure 'count' is clean int.
if (UseCompressedOops) {
generate_disjoint_int_copy_core(aligned);
} else {
generate_disjoint_long_copy_core(aligned);
}
- #else
- generate_disjoint_int_copy_core(aligned);
- #endif
// O0 is used as temp register
gen_write_ref_array_post_barrier(G1, G5, O0);
@@ -2518,15 +2424,11 @@ class StubGenerator: public StubCodeGenerator {
__ mov(count, G5);
gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
- #ifdef _LP64
if (UseCompressedOops) {
generate_conjoint_int_copy_core(aligned);
} else {
generate_conjoint_long_copy_core(aligned);
}
- #else
- generate_conjoint_int_copy_core(aligned);
- #endif
// O0 is used as temp register
gen_write_ref_array_post_barrier(G1, G5, O0);
@@ -3138,7 +3040,6 @@ class StubGenerator: public StubCodeGenerator {
"arrayof_jint_disjoint_arraycopy");
StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
"arrayof_jint_arraycopy");
-#ifdef _LP64
// In 64 bit we need both aligned and unaligned versions of jint arraycopy.
// entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry,
@@ -3146,14 +3047,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry,
&entry_jint_arraycopy,
"jint_arraycopy");
-#else
- // In 32 bit jints are always HeapWordSize aligned, so always use the aligned version
- // (in fact in 32bit we always have a pre-loop part even in the aligned version,
- // because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
- StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
- StubRoutines::_jint_arraycopy = StubRoutines::_arrayof_jint_arraycopy;
-#endif
-
//*** jlong
// It is always aligned
@@ -3178,7 +3071,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, entry, NULL,
"arrayof_oop_arraycopy_uninit",
/*dest_uninitialized*/true);
-#ifdef _LP64
if (UseCompressedOops) {
// With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy.
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry,
@@ -3192,9 +3084,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, entry, NULL,
"oop_arraycopy_uninit",
/*dest_uninitialized*/true);
- } else
-#endif
- {
+ } else {
// oop arraycopy is always aligned on 32bit and 64bit without compressed oops
StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy;
StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy;
@@ -5104,17 +4994,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine();
StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
-#if !defined(COMPILER2) && !defined(_LP64)
- StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
- StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
- StubRoutines::_atomic_add_entry = generate_atomic_add();
- StubRoutines::_atomic_xchg_ptr_entry = StubRoutines::_atomic_xchg_entry;
- StubRoutines::_atomic_cmpxchg_ptr_entry = StubRoutines::_atomic_cmpxchg_entry;
- StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
- StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
- StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry;
-#endif // COMPILER2 !=> _LP64
-
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
@@ -5222,11 +5101,9 @@ class StubGenerator: public StubCodeGenerator {
void stub_prolog(StubCodeDesc* cdesc) {
# ifdef ASSERT
// put extra information in the stub code, to make it more readable
-#ifdef _LP64
-// Write the high part of the address
-// [RGV] Check if there is a dependency on the size of this prolog
+ // Write the high part of the address
+ // [RGV] Check if there is a dependency on the size of this prolog
__ emit_data((intptr_t)cdesc >> 32, relocInfo::none);
-#endif
__ emit_data((intptr_t)cdesc, relocInfo::none);
__ emit_data(++_stub_count, relocInfo::none);
# endif
diff --git a/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp
index fd4e3ffd149..44195840bce 100644
--- a/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp
@@ -44,26 +44,18 @@
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
-#ifndef FAST_DISPATCH
-#define FAST_DISPATCH 1
-#endif
-#undef FAST_DISPATCH
-
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
-#ifdef _LP64
- // The sethi() instruction generates lots more instructions when shell
- // stack limit is unlimited, so that's why this is much bigger.
+// The sethi() instruction generates lots more instructions when shell
+// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K;
-#else
-int TemplateInterpreter::InterpreterCodeSize = 230 * K;
-#endif
// Generation of Interpreter
//
@@ -75,41 +67,6 @@ int TemplateInterpreter::InterpreterCodeSize = 230 * K;
//----------------------------------------------------------------------------------------------------
-#ifndef _LP64
-address TemplateInterpreterGenerator::generate_slow_signature_handler() {
- address entry = __ pc();
- Argument argv(0, true);
-
- // We are in the jni transition frame. Save the last_java_frame corresponding to the
- // outer interpreter frame
- //
- __ set_last_Java_frame(FP, noreg);
- // make sure the interpreter frame we've pushed has a valid return pc
- __ mov(O7, I7);
- __ mov(Lmethod, G3_scratch);
- __ mov(Llocals, G4_scratch);
- __ save_frame(0);
- __ mov(G2_thread, L7_thread_cache);
- __ add(argv.address_in_frame(), O3);
- __ mov(G2_thread, O0);
- __ mov(G3_scratch, O1);
- __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
- __ delayed()->mov(G4_scratch, O2);
- __ mov(L7_thread_cache, G2_thread);
- __ reset_last_Java_frame();
-
- // load the register arguments (the C code packed them as varargs)
- for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
- __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
- }
- __ ret();
- __ delayed()->
- restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
- return entry;
-}
-
-
-#else
// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2 etc..
// Doubles are passed in D0, D2, D4
@@ -197,16 +154,14 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
__ delayed()->srl( G4_scratch, 2, G4_scratch );
__ bind(NextArg);
-
}
__ bind(done);
__ ret();
- __ delayed()->
- restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
+ __ delayed()->restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
+
return entry;
}
-#endif
void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
@@ -222,7 +177,6 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
// returns verified_entry_point or NULL
// we ignore it in any case
__ ba_short(Lcontinue);
-
}
@@ -241,7 +195,6 @@ address TemplateInterpreterGenerator::generate_abstract_entry(void) {
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry;
-
}
void TemplateInterpreterGenerator::save_native_result(void) {
@@ -253,11 +206,7 @@ void TemplateInterpreterGenerator::save_native_result(void) {
// save and restore any potential method result value around the unlocking operation
__ stf(FloatRegisterImpl::D, F0, d_tmp);
-#ifdef _LP64
__ stx(O0, l_tmp);
-#else
- __ std(O0, l_tmp);
-#endif
}
void TemplateInterpreterGenerator::restore_native_result(void) {
@@ -266,11 +215,7 @@ void TemplateInterpreterGenerator::restore_native_result(void) {
// Restore any method result value
__ ldf(FloatRegisterImpl::D, d_tmp, F0);
-#ifdef _LP64
__ ldx(l_tmp, O0);
-#else
- __ ldd(l_tmp, O0);
-#endif
}
address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
@@ -340,22 +285,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ profile_return_type(O0, G3_scratch, G1_scratch);
}
-#if !defined(_LP64) && defined(COMPILER2)
- // All return values are where we want them, except for Longs. C2 returns
- // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
- // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
- // build even if we are returning from interpreted we just do a little
- // stupid shuffing.
- // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
- // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
- // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
-
- if (state == ltos) {
- __ srl (G1, 0, O1);
- __ srlx(G1, 32, O0);
- }
-#endif // !_LP64 && COMPILER2
-
// The callee returns with the stack possibly adjusted by adapter transition
// We remove that possible adjustment here.
// All interpreter local registers are untouched. Any result is passed back
@@ -374,6 +303,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size); // argument size in words
__ sll(parameter_size, Interpreter::logStackElementSize, parameter_size); // each argument size in bytes
__ add(Lesp, parameter_size, Lesp); // pop arguments
+
+ __ check_and_handle_popframe(Gtemp);
+ __ check_and_handle_earlyret(Gtemp);
+
__ dispatch_next(state, step);
return entry;
@@ -438,9 +371,6 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i); break;
case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i); break;
case T_LONG :
-#ifndef _LP64
- __ mov(O1, Itos_l2); // move other half of long
-#endif // ifdef or no ifdef, fall through to the T_INT case
case T_INT : __ mov(O0, Itos_i); break;
case T_VOID : /* nothing to do */ break;
case T_FLOAT : assert(F0 == Ftos_f, "fix this code" ); break;
@@ -466,12 +396,6 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
}
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
- address entry = __ pc();
- __ dispatch_next(state);
- return entry;
-}
-
//
// Helpers for commoning out cases in the various type of method entries.
//
@@ -548,7 +472,6 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label*
__ delayed()->nop();
__ bind(done);
}
-
}
// Allocate monitor and lock method (asm interpreter)
@@ -664,7 +587,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
// pop parameters from the callers stack by adjusting Lesp
// set O0 to Lesp
// compute X = (max_locals - num_parameters)
-// bump SP up by X to accomadate the extra locals
+// bump SP up by X to accommodate the extra locals
// compute X = max_expression_stack
// + vm_local_words
// + 16 words of register save area
@@ -762,7 +685,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// 1) Increase caller's SP by for the extra local space needed:
// (check for overflow)
// Efficient implementation of xload/xstore bytecodes requires
- // that arguments and non-argument locals are in a contigously
+ // that arguments and non-argument locals are in a contiguously
// addressable memory block => non-argument locals must be
// allocated in the caller's frame.
//
@@ -789,7 +712,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// (gri - 2/25/2000)
- int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
+ int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words, WordsPerLong );
const int extra_space =
rounded_vm_local_words + // frame local scratch space
@@ -856,7 +779,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ sub(Gframe_size, Glocals_size, Gframe_size);
//
- // bump SP to accomodate the extra locals
+ // bump SP to accommodate the extra locals
//
__ sub(SP, Glocals_size, SP);
}
@@ -884,22 +807,15 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
Register mirror = LcpoolCache;
__ load_mirror(mirror, Lmethod);
__ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
- __ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache
+ __ get_constant_pool_cache(LcpoolCache); // set LcpoolCache
__ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
-#ifdef _LP64
- __ add( Lmonitors, STACK_BIAS, Lmonitors ); // Account for 64 bit stack bias
-#endif
+ __ add(Lmonitors, STACK_BIAS, Lmonitors); // Account for 64 bit stack bias
__ sub(Lmonitors, BytesPerWord, Lesp); // set Lesp
// setup interpreter activation registers
__ sub(Gargs, BytesPerWord, Llocals); // set Llocals
if (ProfileInterpreter) {
-#ifdef FAST_DISPATCH
- // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
- // they both use I2.
- assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
-#endif // FAST_DISPATCH
__ set_method_data_pointer();
}
@@ -1065,7 +981,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
__ ldx( Gargs, 16, buf);
__ lduw(Gargs, 24, crc);
__ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
- __ add(buf ,offset, buf);
+ __ add(buf, offset, buf);
}
// Call the crc32 kernel
@@ -1138,8 +1054,58 @@ address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(Abstract
return NULL;
}
-// Not supported
-address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+/* Math routines only partially supported.
+ *
+ * Providing support for fma (float/double) only.
+ */
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind)
+{
+ if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
+
+ address entry = __ pc();
+
+ switch (kind) {
+ case Interpreter::java_lang_math_fmaF:
+ if (UseFMA) {
+ // float .fma(float a, float b, float c)
+ const FloatRegister ra = F1;
+ const FloatRegister rb = F2;
+ const FloatRegister rc = F3;
+ const FloatRegister rd = F0; // Result.
+
+ __ ldf(FloatRegisterImpl::S, Gargs, 0, rc);
+ __ ldf(FloatRegisterImpl::S, Gargs, 8, rb);
+ __ ldf(FloatRegisterImpl::S, Gargs, 16, ra);
+
+ __ fmadd(FloatRegisterImpl::S, ra, rb, rc, rd);
+ __ retl(); // Result in F0 (rd).
+ __ delayed()->mov(O5_savedSP, SP);
+
+ return entry;
+ }
+ break;
+ case Interpreter::java_lang_math_fmaD:
+ if (UseFMA) {
+ // double .fma(double a, double b, double c)
+ const FloatRegister ra = F2; // D1
+ const FloatRegister rb = F4; // D2
+ const FloatRegister rc = F6; // D3
+ const FloatRegister rd = F0; // D0 Result.
+
+ __ ldf(FloatRegisterImpl::D, Gargs, 0, rc);
+ __ ldf(FloatRegisterImpl::D, Gargs, 16, rb);
+ __ ldf(FloatRegisterImpl::D, Gargs, 32, ra);
+
+ __ fmadd(FloatRegisterImpl::D, ra, rb, rc, rd);
+ __ retl(); // Result in D0 (rd).
+ __ delayed()->mov(O5_savedSP, SP);
+
+ return entry;
+ }
+ break;
+ default:
+ break;
+ }
return NULL;
}
@@ -1152,7 +1118,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// Doing the banging earlier fails if the caller frame is not an interpreter
// frame.
// (Also, the exception throwing code expects to unlock any synchronized
- // method receiever, so do the banging after locking the receiver.)
+ // method receiver, so do the banging after locking the receiver.)
// Bang each page in the shadow zone. We can't assume it's been done for
// an interpreter frame with greater than a page of locals, so each page
@@ -1193,8 +1159,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
__ ld(G5_method, Method::access_flags_offset(), Gtmp1);
- {
- Label L;
+ { Label L;
__ btst(JVM_ACC_NATIVE, Gtmp1);
__ br(Assembler::notZero, false, Assembler::pt, L);
__ delayed()->nop();
@@ -1443,7 +1408,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// didn't see any synchronization is progress, and escapes.
__ set(_thread_in_native_trans, G3_scratch);
__ st(G3_scratch, thread_state);
- if(os::is_MP()) {
+ if (os::is_MP()) {
if (UseMembar) {
// Force this write out before the read below
__ membar(Assembler::StoreLoad);
@@ -1483,12 +1448,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Move the result handler address
__ mov(Lscratch, G3_scratch);
// return possible result to the outer frame
-#ifndef __LP64
- __ mov(O0, I0);
- __ restore(O1, G0, O1);
-#else
__ restore(O0, G0, O0);
-#endif /* __LP64 */
// Move result handler to expected register
__ mov(G3_scratch, Lscratch);
@@ -1511,8 +1471,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// If we have an oop result store it where it will be safe for any further gc
// until we return now that we've released the handle it might be protected by
- {
- Label no_oop, store_result;
+ { Label no_oop, store_result;
__ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
__ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
@@ -1568,21 +1527,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
restore_native_result();
}
-#if defined(COMPILER2) && !defined(_LP64)
-
- // C2 expects long results in G1 we can't tell if we're returning to interpreted
- // or compiled so just be safe.
-
- __ sllx(O0, 32, G1); // Shift bits into high G1
- __ srl (O1, 0, O1); // Zero extend O1
- __ or3 (O1, G1, G1); // OR 64 bits into G1
-
-#endif /* COMPILER2 && !_LP64 */
-
// dispose of return address and remove activation
#ifdef ASSERT
- {
- Label ok;
+ { Label ok;
__ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
__ stop("bad I5_savedSP value");
__ should_not_reach_here();
@@ -1592,15 +1539,12 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ jmp(Lscratch, 0);
__ delayed()->nop();
-
if (inc_counter) {
// handle invocation counter overflow
__ bind(invocation_counter_overflow);
generate_counter_overflow(Lcontinue);
}
-
-
return entry;
}
@@ -1630,8 +1574,7 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
// rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
__ ld(G5_method, Method::access_flags_offset(), Gtmp1);
- {
- Label L;
+ { Label L;
__ btst(JVM_ACC_NATIVE, Gtmp1);
__ br(Assembler::zero, false, Assembler::pt, L);
__ delayed()->nop();
@@ -1651,11 +1594,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
generate_fixed_frame(false);
-#ifdef FAST_DISPATCH
- __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
- // set bytecode dispatch table base
-#endif
-
//
// Code to initialize the extra (i.e. non-parm) locals
//
@@ -1768,7 +1706,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
generate_counter_overflow(Lcontinue);
}
-
return entry;
}
@@ -1888,8 +1825,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
}
#if INCLUDE_JVMTI
- {
- Label L_done;
+ { Label L_done;
__ ldub(Address(Lbcp, 0), G1_scratch); // Load current bytecode
__ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
@@ -1929,7 +1865,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ get_vm_result(Oexception);
__ verify_oop(Oexception);
- const int return_reg_adjustment = frame::pc_return_offset;
+ const int return_reg_adjustment = frame::pc_return_offset;
Address issuing_pc_addr(I7, return_reg_adjustment);
// We are done with this activation frame; find out where to go next.
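The generate_math_entry change above adds an interpreter fast path for exactly two intrinsics, Math.fma(float, float, float) and Math.fma(double, double, double), and only when UseFMA is set; every other math kind still returns NULL and gets the vanilla entry. The arguments are re-read from the interpreter's argument area, so c, the top-of-stack argument, comes from the lowest Gargs offset and a from the highest; floats step by one 8-byte stack slot and doubles by two, which matches the 0/8/16 and 0/16/32 offsets in the patch. A small, hypothetical Java sketch of calls that would reach these entries when run interpreted (for instance under -Xint on SPARC hardware with UseFMA on); the class and method names are illustrative only:

    public class FmaCalls {
        static float  fmaF(float a, float b, float c)    { return Math.fma(a, b, c); } // java_lang_math_fmaF
        static double fmaD(double a, double b, double c) { return Math.fma(a, b, c); } // java_lang_math_fmaD

        public static void main(String[] args) {
            System.out.println(fmaF(2.0f, 3.0f, 1.0f)); // 7.0
            System.out.println(fmaD(2.0, 3.0, 1.0));    // 7.0
        }
    }
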
diff --git a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
index f828aa0e370..af53a865318 100644
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -248,12 +248,7 @@ void TemplateTable::iconst(int value) {
void TemplateTable::lconst(int value) {
transition(vtos, ltos);
assert(value >= 0, "check this code");
-#ifdef _LP64
__ set(value, Otos_l);
-#else
- __ set(value, Otos_l2);
- __ clr( Otos_l1);
-#endif
}
@@ -406,24 +401,12 @@ void TemplateTable::ldc2_w() {
// Check out Conversions.java for an example.
// Also ConstantPool::header_size() is 20, which makes it very difficult
// to double-align double on the constant pool. SG, 11/7/97
-#ifdef _LP64
__ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
-#else
- FloatRegister f = Ftos_d;
- __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
- __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
- f->successor());
-#endif
__ push(dtos);
__ ba_short(exit);
__ bind(Long);
-#ifdef _LP64
__ ldx(G3_scratch, base_offset, Otos_l);
-#else
- __ ld(G3_scratch, base_offset, Otos_l);
- __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
-#endif
__ push(ltos);
__ bind(exit);
@@ -1128,19 +1111,11 @@ void TemplateTable::lop2(Operation op) {
transition(ltos, ltos);
__ pop_l(O2);
switch (op) {
-#ifdef _LP64
case add: __ add(O2, Otos_l, Otos_l); break;
case sub: __ sub(O2, Otos_l, Otos_l); break;
case _and: __ and3(O2, Otos_l, Otos_l); break;
case _or: __ or3(O2, Otos_l, Otos_l); break;
case _xor: __ xor3(O2, Otos_l, Otos_l); break;
-#else
- case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
- case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
- case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
- case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
- case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
-#endif
default: ShouldNotReachHere();
}
}
@@ -1171,14 +1146,10 @@ void TemplateTable::idiv() {
Label regular;
__ cmp(Otos_i, -1);
__ br(Assembler::notEqual, false, Assembler::pt, regular);
-#ifdef _LP64
// Don't put set in delay slot
// Set will turn into multiple instructions in 64 bit mode
__ delayed()->nop();
__ set(min_int, G4_scratch);
-#else
- __ delayed()->set(min_int, G4_scratch);
-#endif
Label done;
__ cmp(O1, G4_scratch);
__ br(Assembler::equal, true, Assembler::pt, done);
@@ -1202,11 +1173,7 @@ void TemplateTable::irem() {
void TemplateTable::lmul() {
transition(ltos, ltos);
__ pop_l(O2);
-#ifdef _LP64
__ mulx(Otos_l, O2, Otos_l);
-#else
- __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
-#endif
}
@@ -1216,15 +1183,9 @@ void TemplateTable::ldiv() {
// check for zero
__ pop_l(O2);
-#ifdef _LP64
__ tst(Otos_l);
__ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
__ sdivx(O2, Otos_l, Otos_l);
-#else
- __ orcc(Otos_l1, Otos_l2, G0);
- __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
- __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
-#endif
}
@@ -1233,17 +1194,11 @@ void TemplateTable::lrem() {
// check for zero
__ pop_l(O2);
-#ifdef _LP64
__ tst(Otos_l);
__ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
__ sdivx(O2, Otos_l, Otos_l2);
__ mulx (Otos_l2, Otos_l, Otos_l2);
__ sub (O2, Otos_l2, Otos_l);
-#else
- __ orcc(Otos_l1, Otos_l2, G0);
- __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
- __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
-#endif
}
@@ -1251,11 +1206,7 @@ void TemplateTable::lshl() {
transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
__ pop_l(O2); // shift value in O2, O3
-#ifdef _LP64
__ sllx(O2, Otos_i, Otos_l);
-#else
- __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
-#endif
}
@@ -1263,11 +1214,7 @@ void TemplateTable::lshr() {
transition(itos, ltos); // %%%% see lshl comment
__ pop_l(O2); // shift value in O2, O3
-#ifdef _LP64
__ srax(O2, Otos_i, Otos_l);
-#else
- __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
-#endif
}
@@ -1276,11 +1223,7 @@ void TemplateTable::lushr() {
transition(itos, ltos); // %%%% see lshl comment
__ pop_l(O2); // shift value in O2, O3
-#ifdef _LP64
__ srlx(O2, Otos_i, Otos_l);
-#else
- __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
-#endif
}
@@ -1293,15 +1236,9 @@ void TemplateTable::fop2(Operation op) {
case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
case rem:
assert(Ftos_f == F0, "just checking");
-#ifdef _LP64
// LP64 calling conventions use F1, F3 for passing 2 floats
__ pop_f(F1);
__ fmov(FloatRegisterImpl::S, Ftos_f, F3);
-#else
- __ pop_i(O0);
- __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
- __ ld( __ d_tmp, O1 );
-#endif
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
assert( Ftos_f == F0, "fix this code" );
break;
@@ -1319,18 +1256,9 @@ void TemplateTable::dop2(Operation op) {
case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
case rem:
-#ifdef _LP64
// Pass arguments in D0, D2
__ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
__ pop_d( F0 );
-#else
- // Pass arguments in O0O1, O2O3
- __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
- __ ldd( __ d_tmp, O2 );
- __ pop_d(Ftos_f);
- __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
- __ ldd( __ d_tmp, O0 );
-#endif
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
assert( Ftos_d == F0, "fix this code" );
break;
@@ -1348,11 +1276,7 @@ void TemplateTable::ineg() {
void TemplateTable::lneg() {
transition(ltos, ltos);
-#ifdef _LP64
__ sub(G0, Otos_l, Otos_l);
-#else
- __ lneg(Otos_l1, Otos_l2);
-#endif
}
@@ -1437,15 +1361,8 @@ void TemplateTable::convert() {
Label done;
switch (bytecode()) {
case Bytecodes::_i2l:
-#ifdef _LP64
// Sign extend the 32 bits
__ sra ( Otos_i, 0, Otos_l );
-#else
- __ addcc(Otos_i, 0, Otos_l2);
- __ br(Assembler::greaterEqual, true, Assembler::pt, done);
- __ delayed()->clr(Otos_l1);
- __ set(~0, Otos_l1);
-#endif
break;
case Bytecodes::_i2f:
@@ -1476,12 +1393,8 @@ void TemplateTable::convert() {
break;
case Bytecodes::_l2i:
-#ifndef _LP64
- __ mov(Otos_l2, Otos_i);
-#else
// Sign-extend into the high 32 bits
__ sra(Otos_l, 0, Otos_i);
-#endif
break;
case Bytecodes::_l2f:
@@ -1512,11 +1425,7 @@ void TemplateTable::convert() {
case Bytecodes::_f2l:
// must uncache tos
__ push_f();
-#ifdef _LP64
__ pop_f(F1);
-#else
- __ pop_i(O0);
-#endif
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
break;
@@ -1528,13 +1437,8 @@ void TemplateTable::convert() {
case Bytecodes::_d2l:
// must uncache tos
__ push_d();
-#ifdef _LP64
// LP64 calling conventions pass first double arg in D0
__ pop_d( Ftos_d );
-#else
- __ pop_i( O0 );
- __ pop_i( O1 );
-#endif
__ call_VM_leaf(Lscratch,
bytecode() == Bytecodes::_d2i
? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
@@ -1554,13 +1458,8 @@ void TemplateTable::convert() {
void TemplateTable::lcmp() {
transition(ltos, itos);
-#ifdef _LP64
__ pop_l(O1); // pop off value 1, value 2 is in O0
__ lcmp( O1, Otos_l, Otos_i );
-#else
- __ pop_l(O2); // cmp O2,3 to O0,1
- __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
-#endif
}
@@ -1756,7 +1655,6 @@ void TemplateTable::ret() {
__ access_local_returnAddress(G3_scratch, Otos_i);
// Otos_i contains the bci, compute the bcp from that
-#ifdef _LP64
#ifdef ASSERT
// jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
// the result. The return address (really a BCI) was stored with an
@@ -1771,7 +1669,6 @@ void TemplateTable::ret() {
__ stop("BCI is in the wrong register half?");
__ bind (zzz) ;
}
-#endif
#endif
__ profile_ret(vtos, Otos_i, G4_scratch);
@@ -1808,10 +1705,8 @@ void TemplateTable::tableswitch() {
// load lo, hi
__ ld(O1, 1 * BytesPerInt, O2); // Low Byte
__ ld(O1, 2 * BytesPerInt, O3); // High Byte
-#ifdef _LP64
// Sign extend the 32 bits
__ sra ( Otos_i, 0, Otos_i );
-#endif /* _LP64 */
// check against lo & hi
__ cmp( Otos_i, O2);
@@ -3346,9 +3241,7 @@ void TemplateTable::_new() {
__ br(Assembler::notEqual, false, Assembler::pn, slow_case);
__ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
// get InstanceKlass
- //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
- __ add(Roffset, sizeof(ConstantPool), Roffset);
- __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
+ __ load_resolved_klass_at_offset(Rscratch, Roffset, RinstanceKlass);
// make sure klass is fully initialized:
__ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
@@ -3400,11 +3293,7 @@ void TemplateTable::_new() {
// Check if tlab should be discarded (refill_waste_limit >= free)
__ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
__ sub(RendValue, RoldTopValue, RfreeValue);
-#ifdef _LP64
__ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
-#else
- __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
-#endif
__ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
// increment waste limit to prevent getting stuck on this slow path
@@ -3574,8 +3463,9 @@ void TemplateTable::checkcast() {
// Extract target class from constant pool
__ bind(quicked);
- __ add(Roffset, sizeof(ConstantPool), Roffset);
- __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
+ __ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);
+
+
__ bind(resolved);
__ load_klass(Otos_i, RobjKlass); // get value klass
@@ -3631,9 +3521,9 @@ void TemplateTable::instanceof() {
// Extract target class from constant pool
__ bind(quicked);
- __ add(Roffset, sizeof(ConstantPool), Roffset);
__ get_constant_pool(Lscratch);
- __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
+ __ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);
+
__ bind(resolved);
__ load_klass(Otos_i, RobjKlass); // get value klass
diff --git a/hotspot/src/cpu/sparc/vm/vmStructs_sparc.hpp b/hotspot/src/cpu/sparc/vm/vmStructs_sparc.hpp
index 2c84cece3cc..aa21dbdb2db 100644
--- a/hotspot/src/cpu/sparc/vm/vmStructs_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/vmStructs_sparc.hpp
@@ -71,28 +71,43 @@
declare_c2_constant(R_G5_num) \
declare_c2_constant(R_G6_num) \
declare_c2_constant(R_G7_num) \
- declare_constant(VM_Version::vis1_instructions_m) \
- declare_constant(VM_Version::vis2_instructions_m) \
- declare_constant(VM_Version::vis3_instructions_m) \
- declare_constant(VM_Version::cbcond_instructions_m) \
- declare_constant(VM_Version::v8_instructions_m) \
- declare_constant(VM_Version::hardware_mul32_m) \
- declare_constant(VM_Version::hardware_div32_m) \
- declare_constant(VM_Version::hardware_fsmuld_m) \
- declare_constant(VM_Version::hardware_popc_m) \
- declare_constant(VM_Version::v9_instructions_m) \
- declare_constant(VM_Version::sun4v_m) \
- declare_constant(VM_Version::blk_init_instructions_m) \
- declare_constant(VM_Version::fmaf_instructions_m) \
- declare_constant(VM_Version::sparc64_family_m) \
- declare_constant(VM_Version::M_family_m) \
- declare_constant(VM_Version::T_family_m) \
- declare_constant(VM_Version::T1_model_m) \
- declare_constant(VM_Version::sparc5_instructions_m) \
- declare_constant(VM_Version::aes_instructions_m) \
- declare_constant(VM_Version::sha1_instruction_m) \
- declare_constant(VM_Version::sha256_instruction_m) \
- declare_constant(VM_Version::sha512_instruction_m)
+ declare_constant(VM_Version::ISA_V9) \
+ declare_constant(VM_Version::ISA_POPC) \
+ declare_constant(VM_Version::ISA_VIS1) \
+ declare_constant(VM_Version::ISA_VIS2) \
+ declare_constant(VM_Version::ISA_BLK_INIT) \
+ declare_constant(VM_Version::ISA_FMAF) \
+ declare_constant(VM_Version::ISA_VIS3) \
+ declare_constant(VM_Version::ISA_HPC) \
+ declare_constant(VM_Version::ISA_IMA) \
+ declare_constant(VM_Version::ISA_AES) \
+ declare_constant(VM_Version::ISA_DES) \
+ declare_constant(VM_Version::ISA_KASUMI) \
+ declare_constant(VM_Version::ISA_CAMELLIA) \
+ declare_constant(VM_Version::ISA_MD5) \
+ declare_constant(VM_Version::ISA_SHA1) \
+ declare_constant(VM_Version::ISA_SHA256) \
+ declare_constant(VM_Version::ISA_SHA512) \
+ declare_constant(VM_Version::ISA_MPMUL) \
+ declare_constant(VM_Version::ISA_MONT) \
+ declare_constant(VM_Version::ISA_PAUSE) \
+ declare_constant(VM_Version::ISA_CBCOND) \
+ declare_constant(VM_Version::ISA_CRC32C) \
+ declare_constant(VM_Version::ISA_VIS3B) \
+ declare_constant(VM_Version::ISA_ADI) \
+ declare_constant(VM_Version::ISA_SPARC5) \
+ declare_constant(VM_Version::ISA_MWAIT) \
+ declare_constant(VM_Version::ISA_XMPMUL) \
+ declare_constant(VM_Version::ISA_XMONT) \
+ declare_constant(VM_Version::ISA_PAUSE_NSEC) \
+ declare_constant(VM_Version::ISA_VAMASK) \
+ declare_constant(VM_Version::CPU_FAST_IDIV) \
+ declare_constant(VM_Version::CPU_FAST_RDPC) \
+ declare_constant(VM_Version::CPU_FAST_BIS) \
+ declare_constant(VM_Version::CPU_FAST_LD) \
+ declare_constant(VM_Version::CPU_FAST_CMOVE) \
+ declare_constant(VM_Version::CPU_FAST_IND_BR) \
+ declare_constant(VM_Version::CPU_BLK_ZEROING)
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
diff --git a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
index 770ca6bf197..37203221f71 100644
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,13 +25,17 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "logging/log.hpp"
+#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_sparc.hpp"
-unsigned int VM_Version::_L2_data_cache_line_size = 0;
+#include
+
+uint VM_Version::_L2_data_cache_line_size = 0;
void VM_Version::initialize() {
assert(_features != 0, "System pre-initialization is not complete.");
@@ -42,97 +46,110 @@ void VM_Version::initialize() {
PrefetchFieldsAhead = prefetch_fields_ahead();
// Allocation prefetch settings
- intx cache_line_size = prefetch_data_size();
- if( cache_line_size > AllocatePrefetchStepSize )
- AllocatePrefetchStepSize = cache_line_size;
AllocatePrefetchDistance = allocate_prefetch_distance();
AllocatePrefetchStyle = allocate_prefetch_style();
- if (!has_blk_init() || cache_line_size <= 0) {
- if (AllocatePrefetchInstr == 1) {
+ intx cache_line_size = prefetch_data_size();
+
+ if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize)) {
+ AllocatePrefetchStepSize = MAX2(AllocatePrefetchStepSize, cache_line_size);
+ }
+
+ if (AllocatePrefetchInstr == 1) {
+ if (!has_blk_init()) {
warning("BIS instructions required for AllocatePrefetchInstr 1 unavailable");
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
}
+ if (cache_line_size <= 0) {
+ warning("Cache-line size must be known for AllocatePrefetchInstr 1 to work");
+ FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
+ }
}
- UseSSE = 0; // Only on x86 and x64
+ UseSSE = false; // Only used on x86 and x64.
- _supports_cx8 = has_v9();
- _supports_atomic_getset4 = true; // swap instruction
+ _supports_cx8 = true; // All SPARC V9 implementations.
+ _supports_atomic_getset4 = true; // Using the 'swap' instruction.
- if (is_niagara()) {
- // Indirect branch is the same cost as direct
- if (FLAG_IS_DEFAULT(UseInlineCaches)) {
- FLAG_SET_DEFAULT(UseInlineCaches, false);
- }
- // Align loops on a single instruction boundary.
- if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
- FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
- }
-#ifdef _LP64
- // 32-bit oops don't make sense for the 64-bit VM on sparc
- // since the 32-bit VM has the same registers and smaller objects.
- Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
- Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
-#endif // _LP64
-#ifdef COMPILER2
- // Indirect branch is the same cost as direct
- if (FLAG_IS_DEFAULT(UseJumpTables)) {
- FLAG_SET_DEFAULT(UseJumpTables, true);
- }
- // Single-issue, so entry and loop tops are
- // aligned on a single instruction boundary
- if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
- FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
- }
- if (is_niagara_plus()) {
- if (has_blk_init() && (cache_line_size > 0) && UseTLAB &&
- FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
- if (!has_sparc5_instr()) {
- // Use BIS instruction for TLAB allocation prefetch
- // on Niagara plus processors other than those based on CoreS4
- FLAG_SET_DEFAULT(AllocatePrefetchInstr, 1);
- } else {
- // On CoreS4 processors use prefetch instruction
- // to avoid partial RAW issue, also use prefetch style 3
- FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
- if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
- FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
- }
- }
- }
- if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
- if (AllocatePrefetchInstr == 0) {
- // Use different prefetch distance without BIS
- FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
- } else {
- // Use smaller prefetch distance with BIS
- FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
- }
- }
- if (is_T4()) {
- // Double number of prefetched cache lines on T4
- // since L2 cache line size is smaller (32 bytes).
- if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
- FLAG_SET_ERGO(intx, AllocatePrefetchLines, AllocatePrefetchLines*2);
- }
- if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) {
- FLAG_SET_ERGO(intx, AllocateInstancePrefetchLines, AllocateInstancePrefetchLines*2);
- }
- }
- }
-
- if (AllocatePrefetchInstr == 1) {
- // Use allocation prefetch style 3 because BIS instructions
- // require aligned memory addresses.
- FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
- }
-#endif /* COMPILER2 */
+ if (has_fast_ind_br() && FLAG_IS_DEFAULT(UseInlineCaches)) {
+ // Indirect and direct branches are cost equivalent.
+ FLAG_SET_DEFAULT(UseInlineCaches, false);
+ }
+ // Align loops on the proper instruction boundary to fill the instruction
+ // fetch buffer.
+ if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
+ FLAG_SET_DEFAULT(OptoLoopAlignment, VM_Version::insn_fetch_alignment);
}
+ // 32-bit oops don't make sense for the 64-bit VM on SPARC since the 32-bit
+ // VM has the same registers and smaller objects.
+ Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
+ Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
+
+#ifdef COMPILER2
+ if (has_fast_ind_br() && FLAG_IS_DEFAULT(UseJumpTables)) {
+ // Indirect and direct branches are cost equivalent.
+ FLAG_SET_DEFAULT(UseJumpTables, true);
+ }
+ // Entry and loop tops are aligned to fill the instruction fetch buffer.
+ if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
+ FLAG_SET_DEFAULT(InteriorEntryAlignment, VM_Version::insn_fetch_alignment);
+ }
+ if (UseTLAB && cache_line_size > 0 &&
+ FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
+ if (has_fast_bis()) {
+ // Use BIS instruction for TLAB allocation prefetch.
+ FLAG_SET_DEFAULT(AllocatePrefetchInstr, 1);
+ }
+ else if (has_sparc5()) {
+ // Use prefetch instruction to avoid partial RAW issue on Core S4 processors,
+ // also use prefetch style 3.
+ FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
+ if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
+ FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
+ }
+ }
+ }
+ if (AllocatePrefetchInstr == 1) {
+ // Use allocation prefetch style 3 because BIS instructions require
+ // aligned memory addresses.
+ FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
+ }
+ if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+ if (AllocatePrefetchInstr == 0) {
+ // Use different prefetch distance without BIS
+ FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
+ } else {
+ // Use smaller prefetch distance with BIS
+ FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
+ }
+ }
+
+ // We increase the number of prefetched cache lines, to use just a bit more
+ // aggressive approach, when the L2-cache line size is small (32 bytes), or
+ // when running on newer processor implementations, such as the Core S4.
+ bool inc_prefetch = cache_line_size > 0 && (cache_line_size < 64 || has_sparc5());
+
+ if (inc_prefetch) {
+ // We use a factor two for small cache line sizes (as before) but a slightly
+ // more conservative increase when running on more recent hardware that will
+ // benefit from just a bit more aggressive prefetching.
+ if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
+ const int ap_lns = AllocatePrefetchLines;
+ const int ap_inc = cache_line_size < 64 ? ap_lns : (ap_lns + 1) / 2;
+ FLAG_SET_ERGO(intx, AllocatePrefetchLines, ap_lns + ap_inc);
+ }
+ if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) {
+ const int ip_lns = AllocateInstancePrefetchLines;
+ const int ip_inc = cache_line_size < 64 ? ip_lns : (ip_lns + 1) / 2;
+ FLAG_SET_ERGO(intx, AllocateInstancePrefetchLines, ip_lns + ip_inc);
+ }
+ }
+#endif /* COMPILER2 */
+
// Use hardware population count instruction if available.
- if (has_hardware_popc()) {
+ if (has_popc()) {
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
FLAG_SET_DEFAULT(UsePopCountInstruction, true);
}
@@ -141,7 +158,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UsePopCountInstruction, false);
}
- // T4 and newer Sparc cpus have new compare and branch instruction.
+ // Use compare and branch instructions if available.
if (has_cbcond()) {
if (FLAG_IS_DEFAULT(UseCBCond)) {
FLAG_SET_DEFAULT(UseCBCond, true);
@@ -152,7 +169,8 @@ void VM_Version::initialize() {
}
assert(BlockZeroingLowLimit > 0, "invalid value");
- if (has_block_zeroing() && cache_line_size > 0) {
+
+ if (has_blk_zeroing() && cache_line_size > 0) {
if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
FLAG_SET_DEFAULT(UseBlockZeroing, true);
}
@@ -162,7 +180,8 @@ void VM_Version::initialize() {
}
assert(BlockCopyLowLimit > 0, "invalid value");
- if (has_block_zeroing() && cache_line_size > 0) { // has_blk_init() && is_T4(): core's local L2 cache
+
+ if (has_blk_zeroing() && cache_line_size > 0) {
if (FLAG_IS_DEFAULT(UseBlockCopy)) {
FLAG_SET_DEFAULT(UseBlockCopy, true);
}
@@ -172,7 +191,6 @@ void VM_Version::initialize() {
}
#ifdef COMPILER2
- // T4 and newer Sparc cpus have fast RDPC.
if (has_fast_rdpc() && FLAG_IS_DEFAULT(UseRDPCForConstantTableBase)) {
FLAG_SET_DEFAULT(UseRDPCForConstantTableBase, true);
}
@@ -189,44 +207,67 @@ void VM_Version::initialize() {
assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
char buf[512];
- jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
- (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
- (has_hardware_popc() ? ", popc" : ""),
- (has_vis1() ? ", vis1" : ""),
- (has_vis2() ? ", vis2" : ""),
- (has_vis3() ? ", vis3" : ""),
- (has_blk_init() ? ", blk_init" : ""),
- (has_cbcond() ? ", cbcond" : ""),
- (has_aes() ? ", aes" : ""),
- (has_sha1() ? ", sha1" : ""),
- (has_sha256() ? ", sha256" : ""),
- (has_sha512() ? ", sha512" : ""),
- (has_crc32c() ? ", crc32c" : ""),
- (is_ultra3() ? ", ultra3" : ""),
- (has_sparc5_instr() ? ", sparc5" : ""),
- (is_sun4v() ? ", sun4v" : ""),
- (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
- (is_sparc64() ? ", sparc64" : ""),
- (!has_hardware_mul32() ? ", no-mul32" : ""),
- (!has_hardware_div32() ? ", no-div32" : ""),
- (!has_hardware_fsmuld() ? ", no-fsmuld" : ""));
+ jio_snprintf(buf, sizeof(buf),
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ (has_v9() ? "v9" : ""),
+ (has_popc() ? ", popc" : ""),
+ (has_vis1() ? ", vis1" : ""),
+ (has_vis2() ? ", vis2" : ""),
+ (has_blk_init() ? ", blk_init" : ""),
+ (has_fmaf() ? ", fmaf" : ""),
+ (has_hpc() ? ", hpc" : ""),
+ (has_ima() ? ", ima" : ""),
+ (has_aes() ? ", aes" : ""),
+ (has_des() ? ", des" : ""),
+ (has_kasumi() ? ", kas" : ""),
+ (has_camellia() ? ", cam" : ""),
+ (has_md5() ? ", md5" : ""),
+ (has_sha1() ? ", sha1" : ""),
+ (has_sha256() ? ", sha256" : ""),
+ (has_sha512() ? ", sha512" : ""),
+ (has_mpmul() ? ", mpmul" : ""),
+ (has_mont() ? ", mont" : ""),
+ (has_pause() ? ", pause" : ""),
+ (has_cbcond() ? ", cbcond" : ""),
+ (has_crc32c() ? ", crc32c" : ""),
- // buf is started with ", " or is empty
- _features_string = os::strdup(strlen(buf) > 2 ? buf + 2 : buf);
+ (has_athena_plus() ? ", athena_plus" : ""),
+ (has_vis3b() ? ", vis3b" : ""),
+ (has_adi() ? ", adi" : ""),
+ (has_sparc5() ? ", sparc5" : ""),
+ (has_mwait() ? ", mwait" : ""),
+ (has_xmpmul() ? ", xmpmul" : ""),
+ (has_xmont() ? ", xmont" : ""),
+ (has_pause_nsec() ? ", pause_nsec" : ""),
+ (has_vamask() ? ", vamask" : ""),
- // UseVIS is set to the smallest of what hardware supports and what
- // the command line requires. I.e., you cannot set UseVIS to 3 on
- // older UltraSparc which do not support it.
- if (UseVIS > 3) UseVIS=3;
- if (UseVIS < 0) UseVIS=0;
+ (has_fast_idiv() ? ", *idiv" : ""),
+ (has_fast_rdpc() ? ", *rdpc" : ""),
+ (has_fast_bis() ? ", *bis" : ""),
+ (has_fast_ld() ? ", *ld" : ""),
+ (has_fast_cmove() ? ", *cmove" : ""),
+ (has_fast_ind_br() ? ", *ind_br" : ""),
+ (has_blk_zeroing() ? ", *blk_zeroing" : ""));
+
+ assert(strlen(buf) >= 2, "must be");
+
+ _features_string = os::strdup(buf);
+
+ log_info(os, cpu)("SPARC features detected: %s", _features_string);
+
+ // UseVIS is set to the smallest of what hardware supports and what the command
+ // line requires, i.e. you cannot set UseVIS to 3 on older UltraSparc which do
+ // not support it.
+
+ if (UseVIS > 3) UseVIS = 3;
+ if (UseVIS < 0) UseVIS = 0;
if (!has_vis3()) // Drop to 2 if no VIS3 support
- UseVIS = MIN2((intx)2,UseVIS);
+ UseVIS = MIN2((intx)2, UseVIS);
if (!has_vis2()) // Drop to 1 if no VIS2 support
- UseVIS = MIN2((intx)1,UseVIS);
+ UseVIS = MIN2((intx)1, UseVIS);
if (!has_vis1()) // Drop to 0 if no VIS1 support
UseVIS = 0;
- // SPARC T4 and above should have support for AES instructions
if (has_aes()) {
if (FLAG_IS_DEFAULT(UseAES)) {
FLAG_SET_DEFAULT(UseAES, true);
@@ -277,12 +318,16 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
}
- if (UseFMA) {
+ if (has_fmaf()) {
+ if (FLAG_IS_DEFAULT(UseFMA)) {
+ UseFMA = true;
+ }
+ } else if (UseFMA) {
warning("FMA instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseFMA, false);
}
- // SHA1, SHA256, and SHA512 instructions were added to SPARC T-series at different times
+ // SHA1, SHA256, and SHA512 instructions were added to SPARC at different times
if (has_sha1() || has_sha256() || has_sha512()) {
if (UseVIS > 0) { // SHA intrinsics use VIS1 instructions
if (FLAG_IS_DEFAULT(UseSHA)) {
@@ -330,7 +375,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA, false);
}
- // SPARC T4 and above should have support for CRC32C instruction
if (has_crc32c()) {
if (UseVIS > 2) { // CRC32C intrinsics use VIS3 instructions
if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
@@ -383,7 +427,8 @@ void VM_Version::initialize() {
if (log_is_enabled(Info, os, cpu)) {
ResourceMark rm;
- outputStream* log = Log(os, cpu)::info_stream();
+ LogStream ls(Log(os, cpu)::info());
+ outputStream* log = &ls;
log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
log->print_cr("L2 data cache line size: %u", L2_data_cache_line_size());
log->print("Allocation");
@@ -418,96 +463,42 @@ void VM_Version::initialize() {
}
void VM_Version::print_features() {
- tty->print_cr("Version:%s", _features);
+ tty->print("ISA features [0x%0" PRIx64 "]:", _features);
+ if (_features_string != NULL) {
+ tty->print(" %s", _features_string);
+ }
+ tty->cr();
}
-int VM_Version::determine_features() {
- if (UseV8InstrsOnly) {
- log_info(os, cpu)("Version is Forced-V8");
- return generic_v8_m;
+void VM_Version::determine_features() {
+ platform_features(); // platform_features() is os_arch specific.
+
+ assert(has_v9(), "must be");
+
+ if (UseNiagaraInstrs) { // Limit code generation to Niagara.
+ _features &= niagara1_msk;
}
-
- int features = platform_features(unknown_m); // platform_features() is os_arch specific
-
- if (features == unknown_m) {
- features = generic_v9_m;
- log_info(os)("Cannot recognize SPARC version. Default to V9");
- }
-
- assert(is_T_family(features) == is_niagara(features), "Niagara should be T series");
- if (UseNiagaraInstrs) { // Force code generation for Niagara
- if (is_T_family(features)) {
- // Happy to accomodate...
- } else {
- log_info(os, cpu)("Version is Forced-Niagara");
- features |= T_family_m;
- }
- } else {
- if (is_T_family(features) && !FLAG_IS_DEFAULT(UseNiagaraInstrs)) {
- log_info(os, cpu)("Version is Forced-Not-Niagara");
- features &= ~(T_family_m | T1_model_m);
- } else {
- // Happy to accomodate...
- }
- }
-
- return features;
}
static uint64_t saved_features = 0;
void VM_Version::allow_all() {
saved_features = _features;
- _features = all_features_m;
+ _features = full_feature_msk;
}
void VM_Version::revert() {
_features = saved_features;
}
+/* Determine a suitable number of threads on this particular machine.
+ *
+ * FIXME: Simply checking the processor family is insufficient.
+ */
unsigned int VM_Version::calc_parallel_worker_threads() {
- unsigned int result;
- if (is_M_series() || is_S_series()) {
- // for now, use same gc thread calculation for M-series and S-series as for
- // niagara-plus. In future, we may want to tweak parameters for
- // nof_parallel_worker_thread
- result = nof_parallel_worker_threads(5, 16, 8);
- } else if (is_niagara_plus()) {
- result = nof_parallel_worker_threads(5, 16, 8);
- } else {
- result = nof_parallel_worker_threads(5, 8, 8);
- }
- return result;
-}
-
-
-int VM_Version::parse_features(const char* implementation) {
- int features = unknown_m;
- // Convert to UPPER case before compare.
- char* impl = os::strdup_check_oom(implementation);
-
- for (int i = 0; impl[i] != 0; i++)
- impl[i] = (char)toupper((uint)impl[i]);
-
- if (strstr(impl, "SPARC64") != NULL) {
- features |= sparc64_family_m;
- } else if (strstr(impl, "SPARC-M") != NULL) {
- // M-series SPARC is based on T-series.
- features |= (M_family_m | T_family_m);
- } else if (strstr(impl, "SPARC-S") != NULL) {
- // S-series SPARC is based on T-series.
- features |= (S_family_m | T_family_m);
- } else if (strstr(impl, "SPARC-T") != NULL) {
- features |= T_family_m;
- if (strstr(impl, "SPARC-T1") != NULL) {
- features |= T1_model_m;
- }
- } else if (strstr(impl, "SUN4V-CPU") != NULL) {
- // Generic or migration class LDOM
- features |= T_family_m;
- } else {
- log_info(os, cpu)("Failed to parse CPU implementation = '%s'", impl);
- }
- os::free((void*)impl);
- return features;
+ const int num = 5;
+ const int den = is_post_niagara() ? 16 : 8;
+ const int threshold = 8;
+
+ return nof_parallel_worker_threads(num, den, threshold);
}
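
The AllocatePrefetchLines/AllocateInstancePrefetchLines adjustment introduced above boils down to a small piece of arithmetic: double the line count when the L2 cache line is small (32 bytes), and add roughly half again on Core S4 class hardware. The following standalone C++ sketch reproduces that calculation; the flag default of 3 lines and the helper name increased_lines are assumptions made purely for illustration, not HotSpot symbols.

#include <cstdio>

// Sketch of the ergonomic increase applied to the prefetch-line flags above.
static int increased_lines(int lines, int cache_line_size, bool has_sparc5) {
  bool inc = cache_line_size > 0 && (cache_line_size < 64 || has_sparc5);
  if (!inc) {
    return lines;                          // leave the flag default untouched
  }
  // Factor two for small (32-byte) L2 lines, roughly 1.5x on newer cores.
  int delta = (cache_line_size < 64) ? lines : (lines + 1) / 2;
  return lines + delta;
}

int main() {
  std::printf("32-byte L2 lines (T4-like):   %d\n", increased_lines(3, 32, false)); // 6
  std::printf("64-byte L2 lines, Core S4:    %d\n", increased_lines(3, 64, true));  // 5
  std::printf("64-byte L2 lines, older core: %d\n", increased_lines(3, 64, false)); // 3
  return 0;
}
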
diff --git a/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp b/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp
index f5cb00d75a7..58e8283d6ee 100644
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp
@@ -33,185 +33,296 @@ class VM_Version: public Abstract_VM_Version {
friend class JVMCIVMStructs;
protected:
- enum Feature_Flag {
- v8_instructions = 0,
- hardware_mul32 = 1,
- hardware_div32 = 2,
- hardware_fsmuld = 3,
- hardware_popc = 4,
- v9_instructions = 5,
- vis1_instructions = 6,
- vis2_instructions = 7,
- sun4v_instructions = 8,
- blk_init_instructions = 9,
- fmaf_instructions = 10,
- vis3_instructions = 11,
- cbcond_instructions = 12,
- sparc64_family = 13,
- M_family = 14,
- S_family = 15,
- T_family = 16,
- T1_model = 17,
- sparc5_instructions = 18,
- aes_instructions = 19,
- sha1_instruction = 20,
- sha256_instruction = 21,
- sha512_instruction = 22,
- crc32c_instruction = 23
+ enum {
+ ISA_V9,
+ ISA_POPC,
+ ISA_VIS1,
+ ISA_VIS2,
+ ISA_BLK_INIT,
+ ISA_FMAF,
+ ISA_VIS3,
+ ISA_HPC,
+ ISA_IMA,
+ ISA_AES,
+ ISA_DES,
+ ISA_KASUMI,
+ ISA_CAMELLIA,
+ ISA_MD5,
+ ISA_SHA1,
+ ISA_SHA256,
+ ISA_SHA512,
+ ISA_MPMUL,
+ ISA_MONT,
+ ISA_PAUSE,
+ ISA_CBCOND,
+ ISA_CRC32C,
+
+ ISA_FJATHPLUS,
+ ISA_VIS3B,
+ ISA_ADI,
+ ISA_SPARC5,
+ ISA_MWAIT,
+ ISA_XMPMUL,
+ ISA_XMONT,
+ ISA_PAUSE_NSEC,
+ ISA_VAMASK,
+
+ // Synthesised properties:
+
+ CPU_FAST_IDIV,
+ CPU_FAST_RDPC,
+ CPU_FAST_BIS,
+ CPU_FAST_LD,
+ CPU_FAST_CMOVE,
+ CPU_FAST_IND_BR,
+ CPU_BLK_ZEROING
};
- enum Feature_Flag_Set {
- unknown_m = 0,
- all_features_m = -1,
+private:
+ enum { ISA_last_feature = ISA_VAMASK,
+ CPU_last_feature = CPU_BLK_ZEROING };
- v8_instructions_m = 1 << v8_instructions,
- hardware_mul32_m = 1 << hardware_mul32,
- hardware_div32_m = 1 << hardware_div32,
- hardware_fsmuld_m = 1 << hardware_fsmuld,
- hardware_popc_m = 1 << hardware_popc,
- v9_instructions_m = 1 << v9_instructions,
- vis1_instructions_m = 1 << vis1_instructions,
- vis2_instructions_m = 1 << vis2_instructions,
- sun4v_m = 1 << sun4v_instructions,
- blk_init_instructions_m = 1 << blk_init_instructions,
- fmaf_instructions_m = 1 << fmaf_instructions,
- vis3_instructions_m = 1 << vis3_instructions,
- cbcond_instructions_m = 1 << cbcond_instructions,
- sparc64_family_m = 1 << sparc64_family,
- M_family_m = 1 << M_family,
- S_family_m = 1 << S_family,
- T_family_m = 1 << T_family,
- T1_model_m = 1 << T1_model,
- sparc5_instructions_m = 1 << sparc5_instructions,
- aes_instructions_m = 1 << aes_instructions,
- sha1_instruction_m = 1 << sha1_instruction,
- sha256_instruction_m = 1 << sha256_instruction,
- sha512_instruction_m = 1 << sha512_instruction,
- crc32c_instruction_m = 1 << crc32c_instruction,
+ enum {
+ ISA_unknown_msk = 0,
- generic_v8_m = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
- generic_v9_m = generic_v8_m | v9_instructions_m,
- ultra3_m = generic_v9_m | vis1_instructions_m | vis2_instructions_m,
+ ISA_v9_msk = UINT64_C(1) << ISA_V9,
- // Temporary until we have something more accurate
- niagara1_unique_m = sun4v_m,
- niagara1_m = generic_v9_m | niagara1_unique_m
+ ISA_popc_msk = UINT64_C(1) << ISA_POPC,
+ ISA_vis1_msk = UINT64_C(1) << ISA_VIS1,
+ ISA_vis2_msk = UINT64_C(1) << ISA_VIS2,
+ ISA_blk_init_msk = UINT64_C(1) << ISA_BLK_INIT,
+ ISA_fmaf_msk = UINT64_C(1) << ISA_FMAF,
+ ISA_vis3_msk = UINT64_C(1) << ISA_VIS3,
+ ISA_hpc_msk = UINT64_C(1) << ISA_HPC,
+ ISA_ima_msk = UINT64_C(1) << ISA_IMA,
+ ISA_aes_msk = UINT64_C(1) << ISA_AES,
+ ISA_des_msk = UINT64_C(1) << ISA_DES,
+ ISA_kasumi_msk = UINT64_C(1) << ISA_KASUMI,
+ ISA_camellia_msk = UINT64_C(1) << ISA_CAMELLIA,
+ ISA_md5_msk = UINT64_C(1) << ISA_MD5,
+ ISA_sha1_msk = UINT64_C(1) << ISA_SHA1,
+ ISA_sha256_msk = UINT64_C(1) << ISA_SHA256,
+ ISA_sha512_msk = UINT64_C(1) << ISA_SHA512,
+ ISA_mpmul_msk = UINT64_C(1) << ISA_MPMUL,
+ ISA_mont_msk = UINT64_C(1) << ISA_MONT,
+ ISA_pause_msk = UINT64_C(1) << ISA_PAUSE,
+ ISA_cbcond_msk = UINT64_C(1) << ISA_CBCOND,
+ ISA_crc32c_msk = UINT64_C(1) << ISA_CRC32C,
+
+ ISA_fjathplus_msk = UINT64_C(1) << ISA_FJATHPLUS,
+ ISA_vis3b_msk = UINT64_C(1) << ISA_VIS3B,
+ ISA_adi_msk = UINT64_C(1) << ISA_ADI,
+ ISA_sparc5_msk = UINT64_C(1) << ISA_SPARC5,
+ ISA_mwait_msk = UINT64_C(1) << ISA_MWAIT,
+ ISA_xmpmul_msk = UINT64_C(1) << ISA_XMPMUL,
+ ISA_xmont_msk = UINT64_C(1) << ISA_XMONT,
+ ISA_pause_nsec_msk = UINT64_C(1) << ISA_PAUSE_NSEC,
+ ISA_vamask_msk = UINT64_C(1) << ISA_VAMASK,
+
+ CPU_fast_idiv_msk = UINT64_C(1) << CPU_FAST_IDIV,
+ CPU_fast_rdpc_msk = UINT64_C(1) << CPU_FAST_RDPC,
+ CPU_fast_bis_msk = UINT64_C(1) << CPU_FAST_BIS,
+ CPU_fast_ld_msk = UINT64_C(1) << CPU_FAST_LD,
+ CPU_fast_cmove_msk = UINT64_C(1) << CPU_FAST_CMOVE,
+ CPU_fast_ind_br_msk = UINT64_C(1) << CPU_FAST_IND_BR,
+ CPU_blk_zeroing_msk = UINT64_C(1) << CPU_BLK_ZEROING,
+
+ last_feature_msk = CPU_blk_zeroing_msk,
+ full_feature_msk = (last_feature_msk << 1) - 1
};
- static unsigned int _L2_data_cache_line_size;
- static unsigned int L2_data_cache_line_size() { return _L2_data_cache_line_size; }
+/* The following, previously supported, SPARC implementations are no longer
+ * supported.
+ *
+ * UltraSPARC I/II:
+ * SPARC-V9, VIS
+ * UltraSPARC III/+: (Cheetah/+)
+ * SPARC-V9, VIS
+ * UltraSPARC IV: (Jaguar)
+ * SPARC-V9, VIS
+ * UltraSPARC IV+: (Panther)
+ * SPARC-V9, VIS, POPC
+ *
+ * The currently supported SPARC implementations are listed below (including
+ * generic V9 support).
+ *
+ * UltraSPARC T1: (Niagara)
+ * SPARC-V9, VIS, ASI_BIS (Crypto/hash in SPU)
+ * UltraSPARC T2: (Niagara-2)
+ * SPARC-V9, VIS, ASI_BIS, POPC (Crypto/hash in SPU)
+ * UltraSPARC T2+: (Victoria Falls, etc.)
+ * SPARC-V9, VIS, VIS2, ASI_BIS, POPC (Crypto/hash in SPU)
+ *
+ * UltraSPARC T3: (Rainbow Falls/S2)
+ * SPARC-V9, VIS, VIS2, ASI_BIS, POPC (Crypto/hash in SPU)
+ *
+ * Oracle SPARC T4/T5/M5: (Core S3)
+ * SPARC-V9, VIS, VIS2, VIS3, ASI_BIS, HPC, POPC, FMAF, IMA, PAUSE, CBCOND,
+ * AES, DES, Kasumi, Camellia, MD5, SHA1, SHA256, SHA512, CRC32C, MONT, MPMUL
+ *
+ * Oracle SPARC M7: (Core S4)
+ * SPARC-V9, VIS, VIS2, VIS3, ASI_BIS, HPC, POPC, FMAF, IMA, PAUSE, CBCOND,
+ * AES, DES, Camellia, MD5, SHA1, SHA256, SHA512, CRC32C, MONT, MPMUL, VIS3b,
+ * ADI, SPARC5, MWAIT, XMPMUL, XMONT, PAUSE_NSEC, VAMASK
+ *
+ */
+ enum {
+ niagara1_msk = ISA_v9_msk | ISA_vis1_msk | ISA_blk_init_msk,
+ niagara2_msk = niagara1_msk | ISA_popc_msk,
+ core_S2_msk = niagara2_msk | ISA_vis2_msk,
+
+ core_S3_msk = core_S2_msk | ISA_fmaf_msk | ISA_vis3_msk | ISA_hpc_msk |
+ ISA_ima_msk | ISA_aes_msk | ISA_des_msk | ISA_kasumi_msk |
+ ISA_camellia_msk | ISA_md5_msk | ISA_sha1_msk | ISA_sha256_msk |
+ ISA_sha512_msk | ISA_mpmul_msk | ISA_mont_msk | ISA_pause_msk |
+ ISA_cbcond_msk | ISA_crc32c_msk,
+
+ core_S4_msk = core_S3_msk - ISA_kasumi_msk |
+ ISA_vis3b_msk | ISA_adi_msk | ISA_sparc5_msk | ISA_mwait_msk |
+ ISA_xmpmul_msk | ISA_xmont_msk | ISA_pause_nsec_msk | ISA_vamask_msk,
+
+ ultra_sparc_t1_msk = niagara1_msk,
+ ultra_sparc_t2_msk = niagara2_msk,
+ ultra_sparc_t3_msk = core_S2_msk,
+ ultra_sparc_m5_msk = core_S3_msk, // NOTE: First out-of-order pipeline.
+ ultra_sparc_m7_msk = core_S4_msk
+ };
+
+ static uint _L2_data_cache_line_size;
+ static uint L2_data_cache_line_size() { return _L2_data_cache_line_size; }
+
+ static void determine_features();
+ static void platform_features();
static void print_features();
- static int determine_features();
- static int platform_features(int features);
- // Returns true if the platform is in the niagara line (T series)
- static bool is_M_family(int features) { return (features & M_family_m) != 0; }
- static bool is_S_family(int features) { return (features & S_family_m) != 0; }
- static bool is_T_family(int features) { return (features & T_family_m) != 0; }
- static bool is_niagara() { return is_T_family(_features); }
-#ifdef ASSERT
- static bool is_niagara(int features) {
- // 'sun4v_m' may be defined on both Sun/Oracle Sparc CPUs as well as
- // on Fujitsu Sparc64 CPUs, but only Sun/Oracle Sparcs can be 'niagaras'.
- return (features & sun4v_m) != 0 && (features & sparc64_family_m) == 0;
- }
-#endif
-
- // Returns true if it is niagara1 (T1).
- static bool is_T1_model(int features) { return is_T_family(features) && ((features & T1_model_m) != 0); }
-
- static int maximum_niagara1_processor_count() { return 32; }
- static int parse_features(const char* implementation);
public:
- // Initialization
+ enum {
+ // Adopt a conservative behaviour (modelling single-insn-fetch-n-issue) for
+ // Niagara (and SPARC64). While there are at least two entries/slots in the
+ // instruction fetch buffer on any Niagara core (and as many as eight on a
+ // SPARC64), the performance improvement from keeping hot branch targets on
+ // optimally aligned addresses is such a small one (if any) that we choose
+ // not to use the extra code space required.
+
+ insn_fetch_alignment = 4 // Byte alignment in L1 insn. cache.
+ };
+
static void initialize();
- static void init_before_ergo() { _features = determine_features(); }
+ static void init_before_ergo() { determine_features(); }
- // Instruction support
- static bool has_v8() { return (_features & v8_instructions_m) != 0; }
- static bool has_v9() { return (_features & v9_instructions_m) != 0; }
- static bool has_hardware_mul32() { return (_features & hardware_mul32_m) != 0; }
- static bool has_hardware_div32() { return (_features & hardware_div32_m) != 0; }
- static bool has_hardware_fsmuld() { return (_features & hardware_fsmuld_m) != 0; }
- static bool has_hardware_popc() { return (_features & hardware_popc_m) != 0; }
- static bool has_vis1() { return (_features & vis1_instructions_m) != 0; }
- static bool has_vis2() { return (_features & vis2_instructions_m) != 0; }
- static bool has_vis3() { return (_features & vis3_instructions_m) != 0; }
- static bool has_blk_init() { return (_features & blk_init_instructions_m) != 0; }
- static bool has_cbcond() { return (_features & cbcond_instructions_m) != 0; }
- static bool has_sparc5_instr() { return (_features & sparc5_instructions_m) != 0; }
- static bool has_aes() { return (_features & aes_instructions_m) != 0; }
- static bool has_sha1() { return (_features & sha1_instruction_m) != 0; }
- static bool has_sha256() { return (_features & sha256_instruction_m) != 0; }
- static bool has_sha512() { return (_features & sha512_instruction_m) != 0; }
- static bool has_crc32c() { return (_features & crc32c_instruction_m) != 0; }
+ // Instruction feature support:
- static bool supports_compare_and_exchange()
- { return has_v9(); }
+ static bool has_v9() { return (_features & ISA_v9_msk) != 0; }
+ static bool has_popc() { return (_features & ISA_popc_msk) != 0; }
+ static bool has_vis1() { return (_features & ISA_vis1_msk) != 0; }
+ static bool has_vis2() { return (_features & ISA_vis2_msk) != 0; }
+ static bool has_blk_init() { return (_features & ISA_blk_init_msk) != 0; }
+ static bool has_fmaf() { return (_features & ISA_fmaf_msk) != 0; }
+ static bool has_vis3() { return (_features & ISA_vis3_msk) != 0; }
+ static bool has_hpc() { return (_features & ISA_hpc_msk) != 0; }
+ static bool has_ima() { return (_features & ISA_ima_msk) != 0; }
+ static bool has_aes() { return (_features & ISA_aes_msk) != 0; }
+ static bool has_des() { return (_features & ISA_des_msk) != 0; }
+ static bool has_kasumi() { return (_features & ISA_kasumi_msk) != 0; }
+ static bool has_camellia() { return (_features & ISA_camellia_msk) != 0; }
+ static bool has_md5() { return (_features & ISA_md5_msk) != 0; }
+ static bool has_sha1() { return (_features & ISA_sha1_msk) != 0; }
+ static bool has_sha256() { return (_features & ISA_sha256_msk) != 0; }
+ static bool has_sha512() { return (_features & ISA_sha512_msk) != 0; }
+ static bool has_mpmul() { return (_features & ISA_mpmul_msk) != 0; }
+ static bool has_mont() { return (_features & ISA_mont_msk) != 0; }
+ static bool has_pause() { return (_features & ISA_pause_msk) != 0; }
+ static bool has_cbcond() { return (_features & ISA_cbcond_msk) != 0; }
+ static bool has_crc32c() { return (_features & ISA_crc32c_msk) != 0; }
- // Returns true if the platform is in the niagara line (T series)
- // and newer than the niagara1.
- static bool is_niagara_plus() { return is_T_family(_features) && !is_T1_model(_features); }
+ static bool has_athena_plus() { return (_features & ISA_fjathplus_msk) != 0; }
+ static bool has_vis3b() { return (_features & ISA_vis3b_msk) != 0; }
+ static bool has_adi() { return (_features & ISA_adi_msk) != 0; }
+ static bool has_sparc5() { return (_features & ISA_sparc5_msk) != 0; }
+ static bool has_mwait() { return (_features & ISA_mwait_msk) != 0; }
+ static bool has_xmpmul() { return (_features & ISA_xmpmul_msk) != 0; }
+ static bool has_xmont() { return (_features & ISA_xmont_msk) != 0; }
+ static bool has_pause_nsec() { return (_features & ISA_pause_nsec_msk) != 0; }
+ static bool has_vamask() { return (_features & ISA_vamask_msk) != 0; }
- static bool is_M_series() { return is_M_family(_features); }
- static bool is_S_series() { return is_S_family(_features); }
- static bool is_T4() { return is_T_family(_features) && has_cbcond(); }
- static bool is_T7() { return is_T_family(_features) && has_sparc5_instr(); }
+ static bool has_fast_idiv() { return (_features & CPU_fast_idiv_msk) != 0; }
+ static bool has_fast_rdpc() { return (_features & CPU_fast_rdpc_msk) != 0; }
+ static bool has_fast_bis() { return (_features & CPU_fast_bis_msk) != 0; }
+ static bool has_fast_ld() { return (_features & CPU_fast_ld_msk) != 0; }
+ static bool has_fast_cmove() { return (_features & CPU_fast_cmove_msk) != 0; }
- // Fujitsu SPARC64
- static bool is_sparc64() { return (_features & sparc64_family_m) != 0; }
+ // If indirect and direct branching is equally fast.
+ static bool has_fast_ind_br() { return (_features & CPU_fast_ind_br_msk) != 0; }
+ // If SPARC BIS to the beginning of cache line always zeros it.
+ static bool has_blk_zeroing() { return (_features & CPU_blk_zeroing_msk) != 0; }
- static bool is_sun4v() { return (_features & sun4v_m) != 0; }
- static bool is_ultra3() { return (_features & ultra3_m) == ultra3_m && !is_sun4v() && !is_sparc64(); }
+ static bool supports_compare_and_exchange() { return true; }
- static bool has_fast_fxtof() { return is_niagara() || is_sparc64() || has_v9() && !is_ultra3(); }
- static bool has_fast_idiv() { return is_niagara_plus() || is_sparc64(); }
+ // FIXME: To be removed.
+ static bool is_post_niagara() {
+ return (_features & niagara2_msk) == niagara2_msk;
+ }
- // T4 and newer Sparc have fast RDPC instruction.
- static bool has_fast_rdpc() { return is_T4(); }
+ // Default prefetch block size on SPARC.
+ static uint prefetch_data_size() { return L2_data_cache_line_size(); }
- // On T4 and newer Sparc BIS to the beginning of cache line always zeros it.
- static bool has_block_zeroing() { return has_blk_init() && is_T4(); }
-
- // default prefetch block size on sparc
- static intx prefetch_data_size() { return L2_data_cache_line_size(); }
-
- // Prefetch
+ private:
+ // Prefetch policy and characteristics:
+ //
+ // These support routines are used in order to isolate any CPU/core specific
+ // logic from the actual flag/option processing. They should reflect the HW
+ // characteristics for the associated options on the current platform.
+ //
+ // The three Prefetch* options below (assigned -1 in the configuration) are
+ // treated according to (given the accepted range [-1..]):
+ // -1: Determine a proper HW-specific value for the current HW.
+ // 0: Off
+ // >0: Command-line supplied value to use.
+ //
+ // FIXME: The documentation string in the configuration is wrong, saying that
+ // -1 is also interpreted as off.
+ //
static intx prefetch_copy_interval_in_bytes() {
- intx interval = PrefetchCopyIntervalInBytes;
- return interval >= 0 ? interval : (has_v9() ? 512 : 0);
+ intx bytes = PrefetchCopyIntervalInBytes;
+ return bytes < 0 ? 512 : bytes;
}
static intx prefetch_scan_interval_in_bytes() {
- intx interval = PrefetchScanIntervalInBytes;
- return interval >= 0 ? interval : (has_v9() ? 512 : 0);
+ intx bytes = PrefetchScanIntervalInBytes;
+ return bytes < 0 ? 512 : bytes;
}
static intx prefetch_fields_ahead() {
intx count = PrefetchFieldsAhead;
- return count >= 0 ? count : (is_ultra3() ? 1 : 0);
+ return count < 0 ? 0 : count;
}
+ // AllocatePrefetchDistance is treated under the same interpretation as the
+ // Prefetch* options above (i.e., -1, 0, >0).
static intx allocate_prefetch_distance() {
- // This method should be called before allocate_prefetch_style().
intx count = AllocatePrefetchDistance;
- if (count < 0) { // default is not defined ?
- count = 512;
- }
- return count;
- }
- static intx allocate_prefetch_style() {
- assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
- // Return 0 if AllocatePrefetchDistance was not defined.
- return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0;
+ return count < 0 ? 512 : count;
}
+ // AllocatePrefetchStyle is guaranteed to be in range [0..3] defined by the
+ // configuration.
+ static intx allocate_prefetch_style() {
+ intx distance = allocate_prefetch_distance();
+ // Return 0 (off/none) if AllocatePrefetchDistance was not defined.
+ return distance > 0 ? AllocatePrefetchStyle : 0;
+ }
+
+ public:
// Assembler testing
static void allow_all();
static void revert();
// Override the Abstract_VM_Version implementation.
- static uint page_size_count() { return is_sun4v() ? 4 : 2; }
+ //
+ // FIXME: Removed broken test on sun4v (always false when invoked prior to the
+ // proper capability setup), thus always returning 2. Still need to fix
+ // this properly in order to enable complete page size support.
+ static uint page_size_count() { return 2; }
// Calculates the number of parallel threads
static unsigned int calc_parallel_worker_threads();
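
The reworked VM_Version above encodes each ISA extension and each synthesised CPU property as a bit position, derives a UINT64_C(1) << bit mask for it, and answers every has_*() query with a single AND against _features. A self-contained sketch of that pattern follows; the feature names and globals here are invented for illustration and are not the real HotSpot symbols.

#include <cassert>
#include <cstdint>

// Bit positions, mirroring the ISA_*/CPU_* enumerators above.
enum { FEAT_V9, FEAT_POPC, FEAT_CBCOND };

// One single-bit mask per feature, plus a mask covering every defined bit.
static const uint64_t feat_v9_msk     = UINT64_C(1) << FEAT_V9;
static const uint64_t feat_popc_msk   = UINT64_C(1) << FEAT_POPC;
static const uint64_t feat_cbcond_msk = UINT64_C(1) << FEAT_CBCOND;
static const uint64_t full_msk        = (feat_cbcond_msk << 1) - 1;

static uint64_t features = 0;

static bool has_popc()   { return (features & feat_popc_msk) != 0; }
static bool has_cbcond() { return (features & feat_cbcond_msk) != 0; }

int main() {
  features = feat_v9_msk | feat_popc_msk;  // as if filled in by platform probing
  assert(has_popc() && !has_cbcond());
  features = full_msk;                     // the allow_all() idea: every bit set
  assert(has_cbcond());
  return 0;
}
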
diff --git a/hotspot/src/cpu/sparc/vm/vmreg_sparc.hpp b/hotspot/src/cpu/sparc/vm/vmreg_sparc.hpp
index 29f38a8d8ca..e1f4591002d 100644
--- a/hotspot/src/cpu/sparc/vm/vmreg_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/vmreg_sparc.hpp
@@ -27,9 +27,8 @@
inline bool is_Register() { return value() >= 0 && value() < ConcreteRegisterImpl::max_gpr; }
inline bool is_FloatRegister() { return value() >= ConcreteRegisterImpl::max_gpr &&
- value() < ConcreteRegisterImpl::max_fpr; }
+ value() < ConcreteRegisterImpl::max_fpr; }
inline Register as_Register() {
-
assert( is_Register() && is_even(value()), "even-aligned GPR name" );
// Yuk
return ::as_Register(value()>>1);
diff --git a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp
index 9e825aaa79e..3249b3a26f3 100644
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp
@@ -232,7 +232,7 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
return basic + slop;
} else {
- const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
+ const int basic = 34 * BytesPerInstWord +
// shift;add for load_klass (only shift with zero heap based)
(UseCompressedClassPointers ?
MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
@@ -257,7 +257,6 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
// ld [ %g3 + 0xe8 ], %l2
// sll %l2, 2, %l2
// add %l2, 0x134, %l2
- // and %l2, -8, %l2 ! NOT_LP64 only
// add %g3, %l2, %l2
// add %g3, 4, %g3
// ld [ %l2 ], %l5
diff --git a/hotspot/src/cpu/x86/vm/abstractInterpreter_x86.cpp b/hotspot/src/cpu/x86/vm/abstractInterpreter_x86.cpp
index 0865b99a0d6..5c146f84d5e 100644
--- a/hotspot/src/cpu/x86/vm/abstractInterpreter_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/abstractInterpreter_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -160,27 +160,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
}
#endif // _LP64
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
- switch (method_kind(m)) {
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : // fall thru
- case Interpreter::java_lang_math_pow : // fall thru
- case Interpreter::java_lang_math_exp : // fall thru
- case Interpreter::java_lang_math_fmaD : // fall thru
- case Interpreter::java_lang_math_fmaF :
- return false;
- default:
- return true;
- }
-}
-
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
const int entry_size = frame::interpreter_frame_monitor_size();
diff --git a/hotspot/src/cpu/x86/vm/assembler_x86.cpp b/hotspot/src/cpu/x86/vm/assembler_x86.cpp
index 3c6d7b5cdd6..3bdc0ed3c30 100644
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp
@@ -2103,12 +2103,20 @@ void Assembler::jmpb(Label& L) {
}
void Assembler::ldmxcsr( Address src) {
- NOT_LP64(assert(VM_Version::supports_sse(), ""));
- InstructionMark im(this);
- prefix(src);
- emit_int8(0x0F);
- emit_int8((unsigned char)0xAE);
- emit_operand(as_Register(2), src);
+ if (UseAVX > 0 ) {
+ InstructionMark im(this);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xAE);
+ emit_operand(as_Register(2), src);
+ } else {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ InstructionMark im(this);
+ prefix(src);
+ emit_int8(0x0F);
+ emit_int8((unsigned char)0xAE);
+ emit_operand(as_Register(2), src);
+ }
}
void Assembler::leal(Register dst, Address src) {
@@ -3313,6 +3321,11 @@ void Assembler::pause() {
emit_int8((unsigned char)0x90);
}
+void Assembler::ud2() {
+ emit_int8(0x0F);
+ emit_int8(0x0B);
+}
+
void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse4_2(), "");
InstructionMark im(this);
@@ -4416,12 +4429,21 @@ void Assembler::sqrtss(XMMRegister dst, Address src) {
}
void Assembler::stmxcsr( Address dst) {
- NOT_LP64(assert(VM_Version::supports_sse(), ""));
- InstructionMark im(this);
- prefix(dst);
- emit_int8(0x0F);
- emit_int8((unsigned char)0xAE);
- emit_operand(as_Register(3), dst);
+ if (UseAVX > 0 ) {
+ assert(VM_Version::supports_avx(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ vex_prefix(dst, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+ emit_int8((unsigned char)0xAE);
+ emit_operand(as_Register(3), dst);
+ } else {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ InstructionMark im(this);
+ prefix(dst);
+ emit_int8(0x0F);
+ emit_int8((unsigned char)0xAE);
+ emit_operand(as_Register(3), dst);
+ }
}
void Assembler::subl(Address dst, int32_t imm32) {
@@ -5070,6 +5092,42 @@ void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector
emit_operand(dst, src);
}
+void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
+ assert(VM_Version::supports_fma(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8((unsigned char)0xB8);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
+ assert(VM_Version::supports_fma(), "");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8((unsigned char)0xB8);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
+ assert(VM_Version::supports_fma(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
+ vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8((unsigned char)0xB8);
+ emit_operand(dst, src2);
+}
+
+void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
+ assert(VM_Version::supports_fma(), "");
+ InstructionMark im(this);
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
+ vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8((unsigned char)0xB8);
+ emit_operand(dst, src2);
+}
+
void Assembler::divpd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
@@ -6620,10 +6678,11 @@ void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, in
}
void Assembler::vzeroupper() {
- assert(VM_Version::supports_avx(), "");
- InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
- (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
- emit_int8(0x77);
+ if (VM_Version::supports_vzeroupper()) {
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+ (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
+ emit_int8(0x77);
+ }
}
#ifndef _LP64
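
The vfmadd231ps/vfmadd231pd encodings added above emit the packed form of fused multiply-add, i.e. dst = src1 * src2 + dst performed with a single rounding. A scalar stand-in using std::fma illustrates the arithmetic being encoded; this is only an illustration of the operation, not the assembler API itself.

#include <cassert>
#include <cmath>

int main() {
  float acc = 1.0f, a = 2.0f, b = 3.0f;
  acc = std::fma(a, b, acc);   // 2 * 3 + 1, with one rounding step
  assert(acc == 7.0f);
  return 0;
}
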
diff --git a/hotspot/src/cpu/x86/vm/assembler_x86.hpp b/hotspot/src/cpu/x86/vm/assembler_x86.hpp
index ada59ae288c..c4e6645e319 100644
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp
@@ -1554,6 +1554,9 @@ private:
void pause();
+ // Undefined Instruction
+ void ud2();
+
// SSE4.2 string instructions
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);
@@ -1903,6 +1906,11 @@ private:
void vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void vfmadd231pd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void vfmadd231ps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+ void vfmadd231pd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void vfmadd231ps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+
// Divide Packed Floating-Point Values
void divpd(XMMRegister dst, XMMRegister src);
void divps(XMMRegister dst, XMMRegister src);
diff --git a/hotspot/src/cpu/x86/vm/bytes_x86.hpp b/hotspot/src/cpu/x86/vm/bytes_x86.hpp
index eb5ab17ca08..8636e8810fe 100644
--- a/hotspot/src/cpu/x86/vm/bytes_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/bytes_x86.hpp
@@ -26,6 +26,7 @@
#define CPU_X86_VM_BYTES_X86_HPP
#include "memory/allocation.hpp"
+#include "utilities/align.hpp"
#include "utilities/macros.hpp"
class Bytes: AllStatic {
@@ -36,36 +37,85 @@ class Bytes: AllStatic {
#endif // AMD64
public:
- // Returns true if the byte ordering used by Java is different from the native byte ordering
- // of the underlying machine. For example, this is true for Intel x86, but false for Solaris
- // on Sparc.
- static inline bool is_Java_byte_ordering_different(){ return true; }
-
-
// Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
- // (no special code is needed since x86 CPUs can access unaligned data)
- static inline u2 get_native_u2(address p) { return *(u2*)p; }
- static inline u4 get_native_u4(address p) { return *(u4*)p; }
- static inline u8 get_native_u8(address p) { return *(u8*)p; }
+ template <typename T>
+ static inline T get_native(const void* p) {
+ assert(p != NULL, "null pointer");
- static inline void put_native_u2(address p, u2 x) { *(u2*)p = x; }
- static inline void put_native_u4(address p, u4 x) { *(u4*)p = x; }
- static inline void put_native_u8(address p, u8 x) { *(u8*)p = x; }
+ T x;
+ if (is_aligned(p, sizeof(T))) {
+ x = *(T*)p;
+ } else {
+ memcpy(&x, p, sizeof(T));
+ }
+
+ return x;
+ }
+
+ template <typename T>
+ static inline void put_native(void* p, T x) {
+ assert(p != NULL, "null pointer");
+
+ if (is_aligned(p, sizeof(T))) {
+ *(T*)p = x;
+ } else {
+ memcpy(p, &x, sizeof(T));
+ }
+ }
+
+ static inline u2 get_native_u2(address p) { return get_native<u2>((void*)p); }
+ static inline u4 get_native_u4(address p) { return get_native<u4>((void*)p); }
+ static inline u8 get_native_u8(address p) { return get_native<u8>((void*)p); }
+ static inline void put_native_u2(address p, u2 x) { put_native<u2>((void*)p, x); }
+ static inline void put_native_u4(address p, u4 x) { put_native<u4>((void*)p, x); }
+ static inline void put_native_u8(address p, u8 x) { put_native<u8>((void*)p, x); }
// Efficient reading and writing of unaligned unsigned data in Java
// byte ordering (i.e. big-endian ordering). Byte-order reversal is
// needed since x86 CPUs use little-endian format.
- static inline u2 get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
- static inline u4 get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
- static inline u8 get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }
+ template <typename T>
+ static inline T get_Java(const address p) {
+ T x = get_native<T>(p);
- static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, swap_u2(x)); }
- static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, swap_u4(x)); }
- static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, swap_u8(x)); }
+ if (Endian::is_Java_byte_ordering_different()) {
+ x = swap(x);
+ }
+ return x;
+ }
+
+ template <typename T>
+ static inline void put_Java(address p, T x) {
+ if (Endian::is_Java_byte_ordering_different()) {
+ x = swap(x);
+ }
+
+ put_native(p, x);
+ }
+
+ static inline u2 get_Java_u2(address p) { return get_Java<u2>(p); }
+ static inline u4 get_Java_u4(address p) { return get_Java<u4>(p); }
+ static inline u8 get_Java_u8(address p) { return get_Java<u8>(p); }
+
+ static inline void put_Java_u2(address p, u2 x) { put_Java(p, x); }
+ static inline void put_Java_u4(address p, u4 x) { put_Java(p, x); }
+ static inline void put_Java_u8(address p, u8 x) { put_Java(p, x); }
// Efficient swapping of byte ordering
+ template <typename T>
+ static T swap(T x) {
+ switch (sizeof(T)) {
+ case sizeof(u1): return x;
+ case sizeof(u2): return swap_u2(x);
+ case sizeof(u4): return swap_u4(x);
+ case sizeof(u8): return swap_u8(x);
+ default:
+ guarantee(false, "invalid size: " SIZE_FORMAT "\n", sizeof(T));
+ return 0;
+ }
+ }
+
static inline u2 swap_u2(u2 x); // compiler-dependent implementation
static inline u4 swap_u4(u4 x); // compiler-dependent implementation
static inline u8 swap_u8(u8 x);
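The templated accessors above hinge on two details that are easy to miss in diff form: unaligned pointers take the memcpy path, and the Java-order variants byte-swap because Java order is big-endian while x86 is little-endian. A standalone sketch of that pattern, using illustrative names rather than the HotSpot ones:

```c++
#include <cstdint>
#include <cstring>

// Standalone sketch of the new Bytes access pattern; names are illustrative,
// not the HotSpot ones.
static inline bool is_aligned_to(const void* p, std::size_t alignment) {
  return (reinterpret_cast<std::uintptr_t>(p) & (alignment - 1)) == 0;
}

template <typename T>
static inline T load_native(const void* p) {
  T x;
  if (is_aligned_to(p, sizeof(T))) {
    x = *static_cast<const T*>(p);   // aligned: plain load
  } else {
    std::memcpy(&x, p, sizeof(T));   // unaligned: byte-wise copy
  }
  return x;
}

// Java order is big-endian; on a little-endian host such as x86 the value is
// byte-swapped after the native load.
static inline std::uint16_t load_java_u2(const void* p) {
  std::uint16_t x = load_native<std::uint16_t>(p);
  return static_cast<std::uint16_t>((x >> 8) | (x << 8));
}
```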
diff --git a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
index 4f6690fa540..79ab7ddbf33 100644
--- a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
@@ -30,6 +30,7 @@
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
@@ -290,7 +291,7 @@ void PatchingStub::align_patch_site(MacroAssembler* masm) {
// very hard to make a guess about what code might be in the icache.
// Force the instruction to be double word aligned so that it
// doesn't span a cache line.
- masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
+ masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}
void PatchingStub::emit_code(LIR_Assembler* ce) {
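round_to() and the new align_up() both round an integer up to the next multiple of a power-of-two alignment; the rename follows the utilities/align.hpp include added above. A minimal sketch of the rounding, assuming (as with wordSize here) that the alignment is a power of two:

```c++
#include <cassert>
#include <cstddef>

// Sketch of the rounding performed by align_up(); assumes a power-of-two
// alignment, which wordSize always is.
static inline std::size_t align_up_sketch(std::size_t value, std::size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return (value + alignment - 1) & ~(alignment - 1);
}

// e.g. align_up_sketch(5, 8) == 8, so a short jump patched at that site
// starts on an 8-byte boundary and its bytes stay within a single word.
```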
diff --git a/hotspot/src/cpu/x86/vm/c1_FpuStackSim_x86.cpp b/hotspot/src/cpu/x86/vm/c1_FpuStackSim_x86.cpp
index 6d91d1948e9..3ec182a350b 100644
--- a/hotspot/src/cpu/x86/vm/c1_FpuStackSim_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_FpuStackSim_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "c1/c1_FpuStackSim.hpp"
#include "c1/c1_FrameMap.hpp"
-#include "utilities/array.hpp"
+#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"
//--------------------------------------------------------
diff --git a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
index 8aa4aec76b6..f747e41e522 100644
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1160,6 +1160,8 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ xorptr(dest->as_register(), dest->as_register());
}
break;
+ default:
+ break;
}
PatchingStub* patch = NULL;
diff --git a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
index 95ee2d54beb..6995986e4d4 100644
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -649,8 +649,8 @@ void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
case doubleTag: do_ArithmeticOp_FPU(x); return;
case longTag: do_ArithmeticOp_Long(x); return;
case intTag: do_ArithmeticOp_Int(x); return;
+ default: ShouldNotReachHere(); return;
}
- ShouldNotReachHere();
}
diff --git a/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.cpp b/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.cpp
index e291000eab0..54a0c569219 100644
--- a/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -826,6 +826,9 @@ void FpuStackAllocator::check_invalid_lir_op(LIR_Op* op) {
case lir_fld:
assert(false, "operations only inserted by FpuStackAllocator");
break;
+
+ default:
+ break;
}
}
#endif
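Several hunks in this area only add a `default: break;` arm to switches over enum values (c1_LIRAssembler, c1_LinearScan, and more below). The likely motivation, inferred from the pattern rather than stated in the patch, is to keep -Wswitch-style diagnostics quiet when a switch intentionally handles only a subset of enumerators:

```c++
// Minimal reproduction of the diagnostic a bare "default: break;" arm avoids
// (the motivation is inferred, not stated in the patch): with -Wswitch
// enabled, a switch over an enum that names only some enumerators and has no
// default branch is flagged.
enum class ValueTag { intTag, longTag, floatTag, doubleTag };

int stack_slots_for(ValueTag t) {
  switch (t) {
    case ValueTag::intTag:  return 1;
    case ValueTag::longTag: return 2;
    default:                return 0;   // silences -Wswitch for the unnamed tags
  }
}
```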
diff --git a/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.hpp b/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.hpp
index 6d0dbb8756d..a1044b6b2a2 100644
--- a/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -92,6 +92,8 @@ inline void LinearScan::pd_add_temps(LIR_Op* op) {
}
break;
}
+ default:
+ break;
}
}
diff --git a/hotspot/src/cpu/x86/vm/compiledIC_aot_x86_64.cpp b/hotspot/src/cpu/x86/vm/compiledIC_aot_x86_64.cpp
index 6b711506fab..d884ef2a87c 100644
--- a/hotspot/src/cpu/x86/vm/compiledIC_aot_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/compiledIC_aot_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,6 +21,8 @@
* questions.
*/
+#include "precompiled.hpp"
+
#include "aot/compiledIC_aot.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
diff --git a/hotspot/src/cpu/x86/vm/debug_x86.cpp b/hotspot/src/cpu/x86/vm/debug_x86.cpp
deleted file mode 100644
index 0bde483a789..00000000000
--- a/hotspot/src/cpu/x86/vm/debug_x86.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "code/codeCache.hpp"
-#include "code/nmethod.hpp"
-#include "runtime/frame.hpp"
-#include "runtime/init.hpp"
-#include "runtime/os.hpp"
-#include "utilities/debug.hpp"
-
-void pd_ps(frame f) {}
diff --git a/hotspot/src/cpu/x86/vm/frame_x86.cpp b/hotspot/src/cpu/x86/vm/frame_x86.cpp
index 11f28bdb6ca..a64ceb2a2ed 100644
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -680,6 +680,8 @@ intptr_t* frame::real_fp() const {
frame::frame(void* sp, void* fp, void* pc) {
init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
+
+void frame::pd_ps() {}
#endif
void JavaFrameAnchor::make_walkable(JavaThread* thread) {
diff --git a/hotspot/src/cpu/x86/vm/globals_x86.hpp b/hotspot/src/cpu/x86/vm/globals_x86.hpp
index 748a1af0326..80344a3c8cf 100644
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -116,7 +116,7 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
product(bool, UseStoreImmI16, true, \
"Use store immediate 16-bits value instruction on x86") \
\
- product(intx, UseAVX, 2, \
+ product(intx, UseAVX, 3, \
"Highest supported AVX instructions set on x86/x64") \
range(0, 99) \
\
@@ -160,25 +160,31 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
product(bool, UseRTMDeopt, false, \
"Perform deopt and recompilation based on RTM abort ratio") \
\
- product(uintx, RTMRetryCount, 5, \
+ product(int, RTMRetryCount, 5, \
"Number of RTM retries on lock abort or busy") \
- range(0, max_uintx) \
+ range(0, max_jint) \
\
- experimental(intx, RTMSpinLoopCount, 100, \
+ experimental(int, RTMSpinLoopCount, 100, \
"Spin count for lock to become free before RTM retry") \
+ range(0, max_jint) \
\
- experimental(intx, RTMAbortThreshold, 1000, \
+ experimental(int, RTMAbortThreshold, 1000, \
"Calculate abort ratio after this number of aborts") \
+ range(0, max_jint) \
\
- experimental(intx, RTMLockingThreshold, 10000, \
+ experimental(int, RTMLockingThreshold, 10000, \
"Lock count at which to do RTM lock eliding without " \
"abort ratio calculation") \
+ range(0, max_jint) \
\
- experimental(intx, RTMAbortRatio, 50, \
+ experimental(int, RTMAbortRatio, 50, \
"Lock abort ratio at which to stop use RTM lock eliding") \
+ range(0, 100) /* natural range */ \
\
- experimental(intx, RTMTotalCountIncrRate, 64, \
+ experimental(int, RTMTotalCountIncrRate, 64, \
"Increment total RTM attempted lock count once every n times") \
+ range(1, max_jint) \
+ constraint(RTMTotalCountIncrRateConstraintFunc,AfterErgo) \
\
experimental(intx, RTMLockingCalculationDelay, 0, \
"Number of milliseconds to wait before start calculating aborts " \
diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp b/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp
index 331ac5d3405..6d27ee79a0a 100644
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -509,7 +509,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
get_constant_pool(result);
// load pointer for resolved_references[] objArray
- movptr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
+ movptr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
+ movptr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
// JNIHandles::resolve(obj);
movptr(result, Address(result, 0));
// Add in the index
@@ -517,6 +518,14 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
+// load cpool->resolved_klass_at(index)
+void InterpreterMacroAssembler::load_resolved_klass_at_index(Register cpool,
+ Register index, Register klass) {
+ movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
+ Register resolved_klasses = cpool;
+ movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes()));
+ movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
+}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
@@ -1080,7 +1089,6 @@ void InterpreterMacroAssembler::remove_activation(
call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
- push(rthread);
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_delayed_StackOverflowError));
should_not_reach_here();
@@ -1148,7 +1156,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
movl(swap_reg, (int32_t)1);
// Load (object->mark() | 1) into swap_reg %rax
- orptr(swap_reg, Address(obj_reg, 0));
+ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
// Save (object->mark() | 1) into BasicLock's displaced header
movptr(Address(lock_reg, mark_offset), swap_reg);
@@ -1157,7 +1165,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
"displaced header must be first word in BasicObjectLock");
if (os::is_MP()) lock();
- cmpxchgptr(lock_reg, Address(obj_reg, 0));
+ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero,
ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
@@ -1254,7 +1262,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Atomic swap back the old header
if (os::is_MP()) lock();
- cmpxchgptr(header_reg, Address(obj_reg, 0));
+ cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
// zero for simple unlock of a stack-lock case
jcc(Assembler::zero, done);
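The load_resolved_klass_at_index() helper added above emits a three-step load chain. Expressed as rough, standalone pseudocode (the struct layout and names below are readability assumptions; only the load sequence mirrors the assembly):

```c++
#include <cstdint>

struct Klass;   // opaque stand-in

// Assumed, simplified layout: the resolved-klass table pointer is a field of
// the constant pool, and the pointer-sized slot array follows the header.
struct ConstantPoolSketch {
  Klass** resolved_klasses;   // the second movptr loads a field like this
};

static Klass* load_resolved_klass(const ConstantPoolSketch* cp,
                                  const std::intptr_t* slots,  // slot array after the header
                                  int cp_index) {
  // movw: the low 16 bits of the slot hold the index into the resolved-klass table
  std::uint16_t resolved_index = static_cast<std::uint16_t>(slots[cp_index]);
  // final movptr: index the table to obtain the Klass*
  return cp->resolved_klasses[resolved_index];
}
```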
diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86.hpp b/hotspot/src/cpu/x86/vm/interp_masm_x86.hpp
index 9ed5c2c9ad3..0095564c1b0 100644
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,9 +48,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
int number_of_arguments,
bool check_exceptions);
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
-
// base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true);
@@ -61,6 +58,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void jump_to_entry(address entry);
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
+
void load_earlyret_value(TosState state);
// Interpreter-specific registers
@@ -123,6 +123,11 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
+ // load cpool->resolved_klass_at(index)
+ void load_resolved_klass_at_index(Register cpool, // the constant pool (corrupted on return)
+ Register index, // the constant pool index (corrupted on return)
+ Register klass); // contains the Klass on return
+
NOT_LP64(void f2ieee();) // truncate ftos to 32bits
NOT_LP64(void d2ieee();) // truncate dtos to 64bits
diff --git a/hotspot/src/cpu/x86/vm/interpreterRT_x86.hpp b/hotspot/src/cpu/x86/vm/interpreterRT_x86.hpp
index f35f0122d43..dd0aa20d426 100644
--- a/hotspot/src/cpu/x86/vm/interpreterRT_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/interpreterRT_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
public:
// Creation
- SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
#ifdef AMD64
#ifdef _WIN64
diff --git a/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp b/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp
index 0e68dd011d3..18d57924912 100644
--- a/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -124,7 +124,7 @@ class SlowSignatureHandler: public NativeSignatureIterator {
}
public:
- SlowSignatureHandler(methodHandle method, address from, intptr_t* to) :
+ SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to) :
NativeSignatureIterator(method) {
_from = from;
_to = to + (is_static() ? 2 : 1);
diff --git a/hotspot/src/cpu/x86/vm/interpreterRT_x86_64.cpp b/hotspot/src/cpu/x86/vm/interpreterRT_x86_64.cpp
index 40cc9d71aa7..53f03a38b6c 100644
--- a/hotspot/src/cpu/x86/vm/interpreterRT_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/interpreterRT_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -369,7 +369,7 @@ class SlowSignatureHandler
}
public:
- SlowSignatureHandler(methodHandle method, address from, intptr_t* to)
+ SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to)
: NativeSignatureIterator(method)
{
_from = from;
@@ -461,7 +461,7 @@ class SlowSignatureHandler
}
public:
- SlowSignatureHandler(methodHandle method, address from, intptr_t* to)
+ SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to)
: NativeSignatureIterator(method)
{
_from = from;
diff --git a/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp b/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp
index f18aa684266..b078bcf2db3 100644
--- a/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp
@@ -121,7 +121,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_CHAR: slow_case_addr = jni_GetCharField_addr(); break;
case T_SHORT: slow_case_addr = jni_GetShortField_addr(); break;
case T_INT: slow_case_addr = jni_GetIntField_addr(); break;
- case T_LONG: slow_case_addr = jni_GetLongField_addr();
+ case T_LONG: slow_case_addr = jni_GetLongField_addr(); break;
+ default: break;
}
// tail call
__ jump (ExternalAddress(slow_case_addr));
@@ -215,7 +216,8 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
address slow_case_addr = NULL;
switch (type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
- case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr();
+ case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
+ default: break;
}
// tail call
__ jump (ExternalAddress(slow_case_addr));
diff --git a/hotspot/src/cpu/x86/vm/jvmciCodeInstaller_x86.cpp b/hotspot/src/cpu/x86/vm/jvmciCodeInstaller_x86.cpp
index e2ddb71aa9d..acb7034684e 100644
--- a/hotspot/src/cpu/x86/vm/jvmciCodeInstaller_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/jvmciCodeInstaller_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
- Handle obj = HotSpotObjectConstantImpl::object(constant);
+ Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj());
if (HotSpotObjectConstantImpl::compressed(constant)) {
#ifdef _LP64
diff --git a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp
index 6b7f947de6b..bf1943fcae9 100644
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp
@@ -32,6 +32,7 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
+#include "prims/jvm.h"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
@@ -763,11 +764,13 @@ void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
// Always clear the pc because it could have been set by make_walkable()
movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
+ vzeroupper();
}
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
Register last_java_fp,
address last_java_pc) {
+ vzeroupper();
// determine last_java_sp register
if (!last_java_sp->is_valid()) {
last_java_sp = rsp;
@@ -891,7 +894,7 @@ void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
ttyLocker ttyl;
FlagSetting fs(Debugging, true);
- tty->print_cr("rip = 0x%016lx", pc);
+ tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
tty->cr();
findpc(pc);
@@ -920,13 +923,13 @@ void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
int64_t* rsp = (int64_t*) regs[11];
int64_t* dump_sp = rsp;
for (int col1 = 0; col1 < 8; col1++) {
- tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
+ tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
os::print_location(tty, *dump_sp++);
}
for (int row = 0; row < 25; row++) {
- tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
+ tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
for (int col = 0; col < 4; col++) {
- tty->print(" 0x%016lx", *dump_sp++);
+ tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
}
tty->cr();
}
@@ -1482,7 +1485,7 @@ void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Registe
movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
bind(L_rtm_retry);
}
- movptr(tmpReg, Address(objReg, 0));
+ movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
jcc(Assembler::notZero, IsInflated);
@@ -1490,14 +1493,14 @@ void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Registe
Label L_noincrement;
if (RTMTotalCountIncrRate > 1) {
// tmpReg, scrReg and flags are killed
- branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
+ branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
}
assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
bind(L_noincrement);
}
xbegin(L_on_abort);
- movptr(tmpReg, Address(objReg, 0)); // fetch markword
+ movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
jcc(Assembler::equal, DONE_LABEL); // all done if unlocked
@@ -1551,14 +1554,14 @@ void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Regi
Label L_noincrement;
if (RTMTotalCountIncrRate > 1) {
// tmpReg, scrReg and flags are killed
- branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
+ branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
}
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
bind(L_noincrement);
}
xbegin(L_on_abort);
- movptr(tmpReg, Address(objReg, 0));
+ movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
movptr(tmpReg, Address(tmpReg, owner_offset));
testptr(tmpReg, tmpReg);
jcc(Assembler::zero, DONE_LABEL);
@@ -1751,7 +1754,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
}
#endif // INCLUDE_RTM_OPT
- movptr(tmpReg, Address(objReg, 0)); // [FETCH]
+ movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // [FETCH]
testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
jccb(Assembler::notZero, IsInflated);
@@ -1761,7 +1764,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
if (os::is_MP()) {
lock();
}
- cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
+ cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Updates tmpReg
if (counters != NULL) {
cond_inc32(Assembler::equal,
ExternalAddress((address)counters->fast_path_entry_count_addr()));
@@ -1982,7 +1985,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
if (UseRTMForStackLocks && use_rtm) {
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
Label L_regular_unlock;
- movptr(tmpReg, Address(objReg, 0)); // fetch markword
+ movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
@@ -1994,7 +1997,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
- movptr(tmpReg, Address(objReg, 0)); // Examine the object's markword
+ movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword
testptr(tmpReg, markOopDesc::monitor_value); // Inflated?
jccb (Assembler::zero, Stacked);
@@ -2148,7 +2151,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
if (os::is_MP()) {
lock();
}
- cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
+ cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
// Intention fall-thru into DONE_LABEL
// DONE_LABEL is a hot target - we'd really like to place it at the
@@ -2245,7 +2248,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
bind (Stacked);
movptr(tmpReg, Address (boxReg, 0)); // re-fetch
if (os::is_MP()) { lock(); }
- cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
+ cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
if (EmitSync & 65536) {
bind (CheckSucc);
@@ -3163,8 +3166,37 @@ void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegi
}
}
+// dst = c = a * b + c
+void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
+ Assembler::vfmadd231pd(c, a, b, vector_len);
+ if (dst != c) {
+ vmovdqu(dst, c);
+ }
+}
+// dst = c = a * b + c
+void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
+ Assembler::vfmadd231ps(c, a, b, vector_len);
+ if (dst != c) {
+ vmovdqu(dst, c);
+ }
+}
+// dst = c = a * b + c
+void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
+ Assembler::vfmadd231pd(c, a, b, vector_len);
+ if (dst != c) {
+ vmovdqu(dst, c);
+ }
+}
+
+// dst = c = a * b + c
+void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
+ Assembler::vfmadd231ps(c, a, b, vector_len);
+ if (dst != c) {
+ vmovdqu(dst, c);
+ }
+}
void MacroAssembler::incrementl(AddressLiteral dst) {
if (reachable(dst)) {
@@ -3609,6 +3641,12 @@ void MacroAssembler::os_breakpoint() {
call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}
+void MacroAssembler::unimplemented(const char* what) {
+ char* b = new char[1024];
+ jio_snprintf(b, 1024, "unimplemented: %s", what);
+ stop(b);
+}
+
#ifdef _LP64
#define XSTATE_BV 0x200
#endif
@@ -3672,6 +3710,7 @@ void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp)
// Always clear the pc because it could have been set by make_walkable()
movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
+ vzeroupper();
}
void MacroAssembler::restore_rax(Register tmp) {
@@ -3714,6 +3753,7 @@ void MacroAssembler::set_last_Java_frame(Register java_thread,
Register last_java_sp,
Register last_java_fp,
address last_java_pc) {
+ vzeroupper();
// determine java_thread register
if (!java_thread->is_valid()) {
java_thread = rdi;
@@ -6552,10 +6592,8 @@ void MacroAssembler::restore_cpu_control_state_after_jni() {
call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
}
- if (VM_Version::supports_avx()) {
- // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
- vzeroupper();
- }
+ // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
+ vzeroupper();
#ifndef _LP64
// Either restore the x87 floating pointer control word after returning
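The new vfmad/vfmaf helpers in this file wrap the vfmadd231 forms, which overwrite their third operand with a*b + c; the extra vmovdqu only runs when the requested destination register differs from c. Per lane the arithmetic is an ordinary fused multiply-add, sketched here in scalar form (the lane count depends on vector_len and is left as a parameter):

```c++
#include <cstddef>

// Scalar sketch of what vfmadd231ps computes lane by lane: c[i] = a[i]*b[i] + c[i].
// Hardware fuses the multiply and add with a single rounding; the plain
// expression below ignores that rounding difference.
static void vfma231_sketch(float* c, const float* a, const float* b, std::size_t lanes) {
  for (std::size_t i = 0; i < lanes; ++i) {
    c[i] = a[i] * b[i] + c[i];   // "231": the third operand is both addend and destination
  }
}
```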
diff --git a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp
index 2c2b17c06ad..f2bcbb12d30 100644
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp
@@ -71,12 +71,6 @@ class MacroAssembler: public Assembler {
bool check_exceptions // whether to check for pending exceptions after return
);
- // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
- // The implementation is only non-empty for the InterpreterMacroAssembler,
- // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
-
void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
// helpers for FPU flag access
@@ -87,6 +81,12 @@ class MacroAssembler: public Assembler {
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
+ // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
+ // The implementation is only non-empty for the InterpreterMacroAssembler,
+ // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
+
// Support for NULL-checks
//
// Generates code that causes a NULL OS exception if the content of reg is NULL.
@@ -456,6 +456,11 @@ class MacroAssembler: public Assembler {
void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
+ void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
+ void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
+ void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
+ void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
+
// same as fcmp2int, but using SSE2
void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
@@ -623,7 +628,7 @@ class MacroAssembler: public Assembler {
void untested() { stop("untested"); }
- void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
+ void unimplemented(const char* what = "");
void should_not_reach_here() { stop("should not reach here"); }
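Moving check_and_handle_popframe/check_and_handle_earlyret into the public section (here and in interp_masm_x86.hpp above) keeps the usual virtual-hook arrangement: the base assembler declares the hooks, and only the interpreter's subclass emits real code for them. A small illustration of that pattern, with stand-in names:

```c++
#include <cstdio>

// Illustration of the virtual-hook pattern; class and member names are
// stand-ins, not the HotSpot ones.
class BaseAsm {
 public:
  virtual ~BaseAsm() {}
  virtual void popframe_hook() {}   // empty by default
  void emit_call_vm() {
    // ... emit the VM call ...
    popframe_hook();                // dispatches to the override, if any
  }
};

class InterpAsm : public BaseAsm {
 public:
  void popframe_hook() override {
    std::puts("emit JVMTI PopFrame / ForceEarlyReturn handling");
  }
};
```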
diff --git a/hotspot/src/cpu/x86/vm/metaspaceShared_x86_32.cpp b/hotspot/src/cpu/x86/vm/metaspaceShared_x86_32.cpp
deleted file mode 100644
index c2956a52a54..00000000000
--- a/hotspot/src/cpu/x86/vm/metaspaceShared_x86_32.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "asm/codeBuffer.hpp"
-#include "memory/metaspaceShared.hpp"
-
-// Generate the self-patching vtable method:
-//
-// This method will be called (as any other Klass virtual method) with
-// the Klass itself as the first argument. Example:
-//
-// oop obj;
-// int size = obj->klass()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-
-//=====================================================================
-
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no relationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-
-#define __ masm->
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
- void** vtable,
- char** md_top,
- char* md_end,
- char** mc_top,
- char* mc_end) {
-
- intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
- *(intptr_t *)(*md_top) = vtable_bytes;
- *md_top += sizeof(intptr_t);
- void** dummy_vtable = (void**)*md_top;
- *vtable = dummy_vtable;
- *md_top += vtable_bytes;
-
- // Get ready to generate dummy methods.
-
- CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
- MacroAssembler* masm = new MacroAssembler(&cb);
-
- Label common_code;
- for (int i = 0; i < vtbl_list_size; ++i) {
- for (int j = 0; j < num_virtuals; ++j) {
- dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
-
- // Load rax, with a value indicating vtable/offset pair.
- // -- bits[ 7..0] (8 bits) which virtual method in table?
- // -- bits[12..8] (5 bits) which virtual method table?
- // -- must fit in 13-bit instruction immediate field.
- __ movl(rax, (i << 8) + j);
- __ jmp(common_code);
- }
- }
-
- __ bind(common_code);
-
-#ifdef WIN32
- // Expecting to be called with "thiscall" conventions -- the arguments
- // are on the stack, except that the "this" pointer is in rcx.
-#else
- // Expecting to be called with Unix conventions -- the arguments
- // are on the stack, including the "this" pointer.
-#endif
-
- // In addition, rax was set (above) to the offset of the method in the
- // table.
-
-#ifdef WIN32
- __ push(rcx); // save "this"
-#endif
- __ mov(rcx, rax);
- __ shrptr(rcx, 8); // isolate vtable identifier.
- __ shlptr(rcx, LogBytesPerWord);
- Address index(noreg, rcx, Address::times_1);
- ExternalAddress vtbl((address)vtbl_list);
- __ movptr(rdx, ArrayAddress(vtbl, index)); // get correct vtable address.
-#ifdef WIN32
- __ pop(rcx); // restore "this"
-#else
- __ movptr(rcx, Address(rsp, BytesPerWord)); // fetch "this"
-#endif
- __ movptr(Address(rcx, 0), rdx); // update vtable pointer.
-
- __ andptr(rax, 0x00ff); // isolate vtable method index
- __ shlptr(rax, LogBytesPerWord);
- __ addptr(rax, rdx); // address of real method pointer.
- __ jmp(Address(rax, 0)); // get real method pointer.
-
- __ flush();
-
- *mc_top = (char*)__ pc();
-}
diff --git a/hotspot/src/cpu/x86/vm/metaspaceShared_x86_64.cpp b/hotspot/src/cpu/x86/vm/metaspaceShared_x86_64.cpp
deleted file mode 100644
index 4ff6cc955d7..00000000000
--- a/hotspot/src/cpu/x86/vm/metaspaceShared_x86_64.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "asm/codeBuffer.hpp"
-#include "memory/metaspaceShared.hpp"
-
-// Generate the self-patching vtable method:
-//
-// This method will be called (as any other Klass virtual method) with
-// the Klass itself as the first argument. Example:
-//
-// oop obj;
-// int size = obj->klass()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-
-//=====================================================================
-
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no relationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-
-#define __ masm->
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
- void** vtable,
- char** md_top,
- char* md_end,
- char** mc_top,
- char* mc_end) {
-
- intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
- *(intptr_t *)(*md_top) = vtable_bytes;
- *md_top += sizeof(intptr_t);
- void** dummy_vtable = (void**)*md_top;
- *vtable = dummy_vtable;
- *md_top += vtable_bytes;
-
- // Get ready to generate dummy methods.
-
- CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
- MacroAssembler* masm = new MacroAssembler(&cb);
-
- Label common_code;
- for (int i = 0; i < vtbl_list_size; ++i) {
- for (int j = 0; j < num_virtuals; ++j) {
- dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
-
- // Load eax with a value indicating vtable/offset pair.
- // -- bits[ 7..0] (8 bits) which virtual method in table?
- // -- bits[12..8] (5 bits) which virtual method table?
- // -- must fit in 13-bit instruction immediate field.
- __ movl(rax, (i << 8) + j);
- __ jmp(common_code);
- }
- }
-
- __ bind(common_code);
-
- // Expecting to be called with "thiscall" convections -- the arguments
- // are on the stack and the "this" pointer is in c_rarg0. In addition, rax
- // was set (above) to the offset of the method in the table.
-
- __ push(c_rarg1); // save & free register
- __ push(c_rarg0); // save "this"
- __ mov(c_rarg0, rax);
- __ shrptr(c_rarg0, 8); // isolate vtable identifier.
- __ shlptr(c_rarg0, LogBytesPerWord);
- __ lea(c_rarg1, ExternalAddress((address)vtbl_list)); // ptr to correct vtable list.
- __ addptr(c_rarg1, c_rarg0); // ptr to list entry.
- __ movptr(c_rarg1, Address(c_rarg1, 0)); // get correct vtable address.
- __ pop(c_rarg0); // restore "this"
- __ movptr(Address(c_rarg0, 0), c_rarg1); // update vtable pointer.
-
- __ andptr(rax, 0x00ff); // isolate vtable method index
- __ shlptr(rax, LogBytesPerWord);
- __ addptr(rax, c_rarg1); // address of real method pointer.
- __ pop(c_rarg1); // restore register.
- __ movptr(rax, Address(rax, 0)); // get real method pointer.
- __ jmp(rax); // jump to the real method.
-
- __ flush();
-
- *mc_top = (char*)__ pc();
-}
diff --git a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
index 399172b5a15..dd582d9db44 100644
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "prims/methodHandles.hpp"
#define __ _masm->
@@ -65,7 +66,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
- KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+ Klass* klass = SystemDictionary::well_known_klass(klass_id);
Register temp = rdi;
Register temp2 = noreg;
LP64_ONLY(temp2 = rscratch1); // used by MacroAssembler::cmpptr
@@ -169,8 +170,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
__ verify_oop(method_temp);
- // the following assumes that a Method* is normally compressed in the vmtarget field:
- __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
+ __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
+ __ verify_oop(method_temp);
+ __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
@@ -331,7 +333,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
- Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
+ Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
+ Address vmtarget_method( rbx_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
@@ -383,14 +386,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
- __ movptr(rbx_method, member_vmtarget);
+ __ load_heap_oop(rbx_method, member_vmtarget);
+ __ movptr(rbx_method, vmtarget_method);
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
- __ movptr(rbx_method, member_vmtarget);
+ __ load_heap_oop(rbx_method, member_vmtarget);
+ __ movptr(rbx_method, vmtarget_method);
break;
case vmIntrinsics::_linkToVirtual:
diff --git a/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp b/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp
index ef87c17807c..6fba4b8982b 100644
--- a/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -715,8 +715,8 @@ inline bool NativeInstruction::is_safepoint_poll() {
if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl &&
(ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) ||
- ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
- (ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) {
+ (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
+ (ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg)) {
NOT_JVMCI(assert(Assembler::is_polling_page_far(), "unexpected poll encoding");)
return true;
}
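The parentheses added to is_safepoint_poll() do not change the result, because && already binds tighter than ||; they only make the grouping explicit and quiet compilers that warn about mixing the two operators. A quick check of the equivalence:

```c++
#include <cstdio>

// && binds tighter than ||, so both forms group identically; the explicit
// parentheses only document the intent.
static bool implicit_grouping(bool a, bool b, bool c) { return a || b && c; }
static bool explicit_grouping(bool a, bool b, bool c) { return a || (b && c); }

int main() {
  for (int a = 0; a < 2; ++a)
    for (int b = 0; b < 2; ++b)
      for (int c = 0; c < 2; ++c)
        if (implicit_grouping(a, b, c) != explicit_grouping(a, b, c))
          std::printf("mismatch\n");   // never prints
  return 0;
}
```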
diff --git a/hotspot/src/cpu/x86/vm/sharedRuntime_x86.cpp b/hotspot/src/cpu/x86/vm/sharedRuntime_x86.cpp
index ed8cc4acacd..a68cbe811a2 100644
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@
// Since hashCode is usually polymorphic at call sites we can't do this
// optimization at the call site without a lot of work.
void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
Register obj_reg,
Register result) {
Label slowCase;
diff --git a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
index 47b9fe5c627..afd3525e70f 100644
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
@@ -34,6 +34,7 @@
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@@ -41,6 +42,7 @@
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
+#include "vm_version_x86.hpp"
#define __ masm->
@@ -120,8 +122,8 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
int zmm_bytes = num_xmm_regs * 32;
#ifdef COMPILER2
if (save_vectors) {
- assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
- assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
+ assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
+ assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
// Save upper half of YMM registers
int vect_bytes = ymm_bytes;
if (UseAVX > 2) {
@@ -219,6 +221,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
}
}
}
+ __ vzeroupper();
// Set an oopmap for the call site. This oopmap will map all
// oop-registers and debug-info registers as callee-saved. This
@@ -269,8 +272,8 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
int additional_frame_bytes = 0;
#ifdef COMPILER2
if (restore_vectors) {
- assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
- assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
+ assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
+ assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
// Save upper half of YMM registers
additional_frame_bytes = ymm_bytes;
if (UseAVX > 2) {
@@ -285,6 +288,8 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
int off = xmm0_off;
int delta = xmm1_off - off;
+ __ vzeroupper();
+
if (UseSSE == 1) {
// Restore XMM registers
assert(additional_frame_bytes == 0, "");
@@ -502,7 +507,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
// return value can be odd number of VMRegImpl stack slots make multiple of 2
- return round_to(stack, 2);
+ return align_up(stack, 2);
}
// Patch the callers callsite with entry to compiled code if it exists.
@@ -778,9 +783,9 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// number (all values in registers) or the maximum stack slot accessed.
// int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
// Convert 4-byte stack slots to words.
- comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
+ comp_words_on_stack = align_up(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
// Round up to miminum stack alignment, in wordSize
- comp_words_on_stack = round_to(comp_words_on_stack, 2);
+ comp_words_on_stack = align_up(comp_words_on_stack, 2);
__ subptr(rsp, comp_words_on_stack * wordSize);
}
@@ -1402,7 +1407,7 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
}
static void verify_oop_args(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = rbx; // not part of any compiled calling seq
@@ -1424,7 +1429,7 @@ static void verify_oop_args(MacroAssembler* masm,
}
static void gen_special_dispatch(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
@@ -1666,7 +1671,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
total_save_slots = double_slots * 2 + single_slots;
// align the save area
if (double_slots != 0) {
- stack_slots = round_to(stack_slots, 2);
+ stack_slots = align_up(stack_slots, 2);
}
}
@@ -1729,7 +1734,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now compute actual number of stack words we need rounding to make
// stack properly aligned.
- stack_slots = round_to(stack_slots, StackAlignmentInSlots);
+ stack_slots = align_up(stack_slots, StackAlignmentInSlots);
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
@@ -1994,7 +1999,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ movptr(swap_reg, 1);
// Load (object->mark() | 1) into swap_reg %rax,
- __ orptr(swap_reg, Address(obj_reg, 0));
+ __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
// Save (object->mark() | 1) into BasicLock's displaced header
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);
@@ -2005,7 +2010,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// src -> dest iff dest == rax, else rax, <- dest
// *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
- __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
+ __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ jcc(Assembler::equal, lock_done);
// Test if the oopMark is an obvious stack pointer, i.e.,
@@ -2123,6 +2128,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// preserved and correspond to the bcp/locals pointers. So we do a runtime call
// by hand.
//
+ __ vzeroupper();
+
save_native_result(masm, ret_type, stack_slots);
__ push(thread);
if (!is_critical_native) {
@@ -2198,7 +2205,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// src -> dest iff dest == rax, else rax, <- dest
// *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
- __ cmpxchgptr(rbx, Address(obj_reg, 0));
+ __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ jcc(Assembler::notEqual, slow_path_unlock);
// slow path re-enters here
@@ -2304,7 +2311,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// BEGIN Slow path unlock
__ bind(slow_path_unlock);
-
+ __ vzeroupper();
// Slow path unlock
if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
@@ -2349,6 +2356,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// SLOW PATH Reguard the stack if needed
__ bind(reguard);
+ __ vzeroupper();
save_native_result(masm, ret_type, stack_slots);
{
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
diff --git a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
index d81e965d05d..12f5f76a451 100644
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
@@ -37,6 +37,8 @@
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
+#include "vm_version_x86.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@@ -151,15 +153,15 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
}
#if defined(COMPILER2) || INCLUDE_JVMCI
if (save_vectors) {
- assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
- assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
+ assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
+ assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
}
#else
assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
// Always make the frame size 16-byte aligned, both vector and non vector stacks are always allocated
- int frame_size_in_bytes = round_to(reg_save_size*BytesPerInt, num_xmm_regs);
+ int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
// OopMap frame size is in compiler stack slots (jint's) not bytes or words
int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
// CodeBlob frame size is in words.
@@ -206,6 +208,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
}
}
}
+ __ vzeroupper();
if (frame::arg_reg_save_area_bytes != 0) {
// Allocate argument register save area
__ subptr(rsp, frame::arg_reg_save_area_bytes);
@@ -322,13 +325,15 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
#if defined(COMPILER2) || INCLUDE_JVMCI
if (restore_vectors) {
- assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
- assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
+ assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
+ assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
}
#else
assert(!restore_vectors, "vectors are generated only by C2");
#endif
+ __ vzeroupper();
+
// On EVEX enabled targets everything is handled in pop fpu state
if (restore_vectors) {
// Restore upper half of YMM registers (0..15)
@@ -509,7 +514,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
}
- return round_to(stk_args, 2);
+ return align_up(stk_args, 2);
}
// Patch the callers callsite with entry to compiled code if it exists.
@@ -528,7 +533,7 @@ static void patch_callers_callsite(MacroAssembler *masm) {
// align stack so push_CPU_state doesn't fault
__ andptr(rsp, -(StackAlignmentInBytes));
__ push_CPU_state();
-
+ __ vzeroupper();
// VM needs caller's callsite
// VM needs target method
// This needs to be a long call since we will relocate this adapter to
@@ -547,6 +552,7 @@ static void patch_callers_callsite(MacroAssembler *masm) {
__ addptr(rsp, frame::arg_reg_save_area_bytes);
}
+ __ vzeroupper();
__ pop_CPU_state();
// restore sp
__ mov(rsp, r13);
@@ -577,7 +583,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
// stack is aligned, keep it that way
- extraspace = round_to(extraspace, 2*wordSize);
+ extraspace = align_up(extraspace, 2*wordSize);
// Get return address
__ pop(rax);
@@ -777,9 +783,9 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// number (all values in registers) or the maximum stack slot accessed.
// Convert 4-byte c2 stack slots to words.
- comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+ comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
// Round up to miminum stack alignment, in wordSize
- comp_words_on_stack = round_to(comp_words_on_stack, 2);
+ comp_words_on_stack = align_up(comp_words_on_stack, 2);
__ subptr(rsp, comp_words_on_stack * wordSize);
}
@@ -1465,7 +1471,6 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
-
__ bind(cont);
#ifdef ASSERT
if (StressCriticalJNINatives) {
@@ -1735,7 +1740,7 @@ static void verify_oop_args(MacroAssembler* masm,
}
static void gen_special_dispatch(MacroAssembler* masm,
- methodHandle method,
+ const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
@@ -1978,7 +1983,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
total_save_slots = double_slots * 2 + single_slots;
// align the save area
if (double_slots != 0) {
- stack_slots = round_to(stack_slots, 2);
+ stack_slots = align_up(stack_slots, 2);
}
}
@@ -2035,7 +2040,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now compute actual number of stack words we need rounding to make
// stack properly aligned.
- stack_slots = round_to(stack_slots, StackAlignmentInSlots);
+ stack_slots = align_up(stack_slots, StackAlignmentInSlots);
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
@@ -2368,7 +2373,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ movl(swap_reg, 1);
// Load (object->mark() | 1) into swap_reg %rax
- __ orptr(swap_reg, Address(obj_reg, 0));
+ __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
// Save (object->mark() | 1) into BasicLock's displaced header
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);
@@ -2378,7 +2383,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
}
// src -> dest iff dest == rax else rax <- dest
- __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
+ __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ jcc(Assembler::equal, lock_done);
// Hmm should this move to the slow path code area???
@@ -2485,6 +2490,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// preserved and correspond to the bcp/locals pointers. So we do a runtime call
// by hand.
//
+ __ vzeroupper();
save_native_result(masm, ret_type, stack_slots);
__ mov(c_rarg0, r15_thread);
__ mov(r12, rsp); // remember sp
@@ -2555,7 +2561,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
if (os::is_MP()) {
__ lock();
}
- __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
+ __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ jcc(Assembler::notEqual, slow_path_unlock);
// slow path re-enters here
@@ -2658,7 +2664,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// If we haven't already saved the native result we must save it now as xmm registers
// are still exposed.
-
+ __ vzeroupper();
if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
save_native_result(masm, ret_type, stack_slots);
}
@@ -2704,6 +2710,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// SLOW PATH Reguard the stack if needed
__ bind(reguard);
+ __ vzeroupper();
save_native_result(masm, ret_type, stack_slots);
__ mov(r12, rsp); // remember sp
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
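Most of the mechanical churn in this file swaps round_to for align_up from the newly included utilities/align.hpp; the behavior is the usual round-up-to-an-alignment-boundary. A standalone sketch of that helper (the real header's template plumbing and assertions differ):

    // Sketch, not the HotSpot implementation.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    inline uintptr_t align_up_sketch(uintptr_t value, uintptr_t alignment) {
      assert(alignment != 0 && (alignment & (alignment - 1)) == 0);  // power of two expected
      uintptr_t mask = alignment - 1;
      return (value + mask) & ~mask;
    }

    int main() {
      printf("%lu %lu\n",
             (unsigned long)align_up_sketch(13, 8),    // 16
             (unsigned long)align_up_sketch(16, 8));   // 16
      return 0;
    }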
diff --git a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
index d32917a5ab0..25eb6384c6b 100644
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
@@ -1012,6 +1012,7 @@ class StubGenerator: public StubCodeGenerator {
__ pop(rdi);
__ pop(rsi);
__ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ vzeroupper();
__ xorptr(rax, rax); // return 0
__ ret(0);
return start;
@@ -1247,6 +1248,7 @@ class StubGenerator: public StubCodeGenerator {
}
inc_copy_counter_np(T_LONG);
__ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ vzeroupper();
__ xorptr(rax, rax); // return 0
__ ret(0);
return start;
@@ -3365,6 +3367,7 @@ class StubGenerator: public StubCodeGenerator {
__ pop(rbx);
__ pop(rdi);
__ pop(rsi);
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -3422,6 +3425,7 @@ class StubGenerator: public StubCodeGenerator {
__ pop(h);
__ pop(g);
__ pop(d);
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
diff --git a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index 48be33ae526..bbe81875e91 100644
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -402,6 +402,7 @@ class StubGenerator: public StubCodeGenerator {
__ addptr(rsp, -rsp_after_call_off * wordSize);
// return
+ __ vzeroupper();
__ pop(rbp);
__ ret(0);
@@ -1554,6 +1555,7 @@ class StubGenerator: public StubCodeGenerator {
restore_arg_regs();
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1643,6 +1645,7 @@ class StubGenerator: public StubCodeGenerator {
restore_arg_regs();
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1652,6 +1655,7 @@ class StubGenerator: public StubCodeGenerator {
restore_arg_regs();
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1746,6 +1750,7 @@ class StubGenerator: public StubCodeGenerator {
restore_arg_regs();
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1771,6 +1776,7 @@ class StubGenerator: public StubCodeGenerator {
__ generate_fill(t, aligned, to, value, count, rax, xmm0);
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
return start;
@@ -1847,6 +1853,7 @@ class StubGenerator: public StubCodeGenerator {
restore_arg_regs();
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1856,6 +1863,7 @@ class StubGenerator: public StubCodeGenerator {
restore_arg_regs();
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1945,6 +1953,7 @@ class StubGenerator: public StubCodeGenerator {
}
restore_arg_regs();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
+ __ vzeroupper();
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -2030,6 +2039,7 @@ class StubGenerator: public StubCodeGenerator {
restore_arg_regs();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -2043,6 +2053,7 @@ class StubGenerator: public StubCodeGenerator {
restore_arg_regs();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -2120,6 +2131,7 @@ class StubGenerator: public StubCodeGenerator {
restore_arg_regs();
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
}
@@ -2137,6 +2149,7 @@ class StubGenerator: public StubCodeGenerator {
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
}
+ __ vzeroupper();
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -2203,6 +2216,7 @@ class StubGenerator: public StubCodeGenerator {
restore_arg_regs();
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
}
@@ -2220,6 +2234,7 @@ class StubGenerator: public StubCodeGenerator {
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
}
+ __ vzeroupper();
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -3774,7 +3789,7 @@ class StubGenerator: public StubCodeGenerator {
buf, state, ofs, limit, rsp, multi_block, shuf_mask);
}
__ addptr(rsp, 4 * wordSize);
-
+ __ vzeroupper();
__ leave();
__ ret(0);
return start;
@@ -3808,6 +3823,7 @@ class StubGenerator: public StubCodeGenerator {
__ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
buf, state, ofs, limit, rsp, multi_block, shuf_mask);
+ __ vzeroupper();
__ leave();
__ ret(0);
return start;
@@ -4281,7 +4297,6 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_exit);
__ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result
__ movdqu(Address(state, 0), xmm_temp6); // store the result
-
__ leave();
__ ret(0);
return start;
@@ -4321,6 +4336,7 @@ class StubGenerator: public StubCodeGenerator {
__ kernel_crc32(crc, buf, len, table, tmp);
__ movl(rax, crc);
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -4380,6 +4396,7 @@ class StubGenerator: public StubCodeGenerator {
__ pop(z);
__ pop(y);
#endif
+ __ vzeroupper();
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -4494,6 +4511,7 @@ class StubGenerator: public StubCodeGenerator {
__ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);
+ __ vzeroupper();
__ leave();
__ ret(0);
@@ -4618,7 +4636,7 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("Entry:");
__ enter(); // required for proper stackwalking of RuntimeStub frame
- __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);
+ __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
diff --git a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp
index 9c02d44cdb8..e0a03daa358 100644
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp
@@ -171,16 +171,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
return entry;
}
-
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
- address entry = __ pc();
- // NULL last_sp until next java call
- __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
- __ dispatch_next(state);
- return entry;
-}
-
-
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();
@@ -230,6 +220,17 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
__ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
__ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
+
+ const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+ if (JvmtiExport::can_pop_frame()) {
+ NOT_LP64(__ get_thread(java_thread));
+ __ check_and_handle_popframe(java_thread);
+ }
+ if (JvmtiExport::can_force_early_return()) {
+ NOT_LP64(__ get_thread(java_thread));
+ __ check_and_handle_earlyret(java_thread);
+ }
+
__ dispatch_next(state, step);
return entry;
diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86.cpp
index 3d2f57f37bc..60335333076 100644
--- a/hotspot/src/cpu/x86/vm/templateTable_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86.cpp
@@ -2622,6 +2622,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
switch (code) {
case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+ default: break;
}
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
@@ -3287,6 +3288,7 @@ void TemplateTable::jvmti_post_fast_field_mod() {
case Bytecodes::_fast_dputfield: __ pop(dtos); break;
case Bytecodes::_fast_fputfield: __ pop(ftos); break;
case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
+ default: break;
}
__ bind(L2);
}
@@ -3846,7 +3848,7 @@ void TemplateTable::_new() {
__ jcc(Assembler::notEqual, slow_case_no_pop);
// get InstanceKlass
- __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_index(rcx, rdx, rcx);
__ push(rcx); // save the contexts of klass for initializing the header
// make sure klass is initialized & doesn't have finalizer
@@ -4061,8 +4063,7 @@ void TemplateTable::checkcast() {
// Get superklass in rax and subklass in rbx
__ bind(quicked);
__ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
- __ movptr(rax, Address(rcx, rbx,
- Address::times_ptr, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_index(rcx, rbx, rax);
__ bind(resolved);
__ load_klass(rbx, rdx);
@@ -4128,8 +4129,7 @@ void TemplateTable::instanceof() {
// Get superklass in rax and subklass in rdx
__ bind(quicked);
__ load_klass(rdx, rax);
- __ movptr(rax, Address(rcx, rbx,
- Address::times_ptr, sizeof(ConstantPool)));
+ __ load_resolved_klass_at_index(rcx, rbx, rax);
__ bind(resolved);
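Two things happen in this file: load_resolved_klass_at_index replaces the open-coded constant-pool offset arithmetic, and the added "default: break;" arms make the partial switches over the bytecode enum explicit, which likely keeps warnings such as -Wswitch quiet under stricter compilers. A tiny illustration of that switch pattern (hypothetical enum, not HotSpot's Bytecodes):

    // Hypothetical enum; mirrors the shape of the switches above.
    enum class Bc { getfield, putfield, nofast_getfield, nofast_putfield, invokevirtual };

    static Bc normalize(Bc code) {
      switch (code) {
        case Bc::nofast_getfield: return Bc::getfield;
        case Bc::nofast_putfield: return Bc::putfield;
        default:                  break;   // every other bytecode is left unchanged
      }
      return code;
    }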
diff --git a/hotspot/src/cpu/x86/vm/vmStructs_x86.hpp b/hotspot/src/cpu/x86/vm/vmStructs_x86.hpp
index 0307107ad9b..7fc5e451d08 100644
--- a/hotspot/src/cpu/x86/vm/vmStructs_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/vmStructs_x86.hpp
@@ -74,6 +74,7 @@
declare_preprocessor_constant("VM_Version::CPU_AVX512BW", CPU_AVX512BW) \
declare_preprocessor_constant("VM_Version::CPU_AVX512VL", CPU_AVX512VL) \
declare_preprocessor_constant("VM_Version::CPU_SHA", CPU_SHA) \
- declare_preprocessor_constant("VM_Version::CPU_FMA", CPU_FMA)
+ declare_preprocessor_constant("VM_Version::CPU_FMA", CPU_FMA) \
+ declare_preprocessor_constant("VM_Version::CPU_VZEROUPPER", CPU_VZEROUPPER)
#endif // CPU_X86_VM_VMSTRUCTS_X86_HPP
diff --git a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
index 197079ba1c7..99e402f8dee 100644
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
@@ -26,7 +26,9 @@
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "logging/log.hpp"
+#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
+#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
@@ -436,14 +438,14 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movl(rax, 0x10000);
__ andl(rax, Address(rsi, 4));
__ cmpl(rax, 0x10000);
- __ jccb(Assembler::notEqual, legacy_save_restore);
+ __ jcc(Assembler::notEqual, legacy_save_restore);
// check _cpuid_info.xem_xcr0_eax.bits.opmask
// check _cpuid_info.xem_xcr0_eax.bits.zmm512
// check _cpuid_info.xem_xcr0_eax.bits.zmm32
__ movl(rax, 0xE0);
__ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
__ cmpl(rax, 0xE0);
- __ jccb(Assembler::notEqual, legacy_save_restore);
+ __ jcc(Assembler::notEqual, legacy_save_restore);
// If UseAVX is unitialized or is set by the user to include EVEX
if (use_evex) {
@@ -469,11 +471,12 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
__ addptr(rsp, 64);
#endif // _WINDOWS
+ generate_vzeroupper(wrapup);
VM_Version::clean_cpuFeatures();
UseAVX = saved_useavx;
UseSSE = saved_usesse;
__ jmp(wrapup);
- }
+ }
__ bind(legacy_save_restore);
// AVX check
@@ -498,6 +501,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ vmovdqu(xmm7, Address(rsp, 0));
__ addptr(rsp, 32);
#endif // _WINDOWS
+ generate_vzeroupper(wrapup);
VM_Version::clean_cpuFeatures();
UseAVX = saved_useavx;
UseSSE = saved_usesse;
@@ -513,6 +517,21 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
return start;
};
+ void generate_vzeroupper(Label& L_wrapup) {
+# define __ _masm->
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
+ __ cmpl(Address(rsi, 4), 0x756e6547); // 'uneG'
+ __ jcc(Assembler::notEqual, L_wrapup);
+ __ movl(rcx, 0x0FFF0FF0);
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+ __ andl(rcx, Address(rsi, 0));
+ __ cmpl(rcx, 0x00050670); // If it is Xeon Phi 3200/5200/7200
+ __ jcc(Assembler::equal, L_wrapup);
+ __ cmpl(rcx, 0x00080650); // If it is Future Xeon Phi
+ __ jcc(Assembler::equal, L_wrapup);
+ __ vzeroupper();
+# undef __
+ }
};
void VM_Version::get_processor_features() {
@@ -619,17 +638,22 @@ void VM_Version::get_processor_features() {
if (UseAVX < 2)
_features &= ~CPU_AVX2;
- if (UseAVX < 1)
+ if (UseAVX < 1) {
_features &= ~CPU_AVX;
-
- if (!UseAES && !FLAG_IS_DEFAULT(UseAES))
- _features &= ~CPU_AES;
+ _features &= ~CPU_VZEROUPPER;
+ }
if (logical_processors_per_package() == 1) {
// HT processor could be installed on a system which doesn't support HT.
_features &= ~CPU_HT;
}
+ if( is_intel() ) { // Intel cpus specific settings
+ if (is_knights_family()) {
+ _features &= ~CPU_VZEROUPPER;
+ }
+ }
+
char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
@@ -785,7 +809,7 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
}
- if (supports_fma() && UseSSE >= 2) {
+ if (supports_fma() && UseSSE >= 2) { // Check UseSSE since FMA code uses SSE instructions
if (FLAG_IS_DEFAULT(UseFMA)) {
UseFMA = true;
}
@@ -861,7 +885,8 @@ void VM_Version::get_processor_features() {
(_model == CPU_MODEL_BROADWELL && _stepping < 4)) {
// currently a collision between SKL and HSW_E3
if (!UnlockExperimentalVMOptions && UseAVX < 3) {
- vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
+ vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this "
+ "platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
} else {
warning("UseRTMLocking is only available as experimental option on this platform.");
}
@@ -872,14 +897,6 @@ void VM_Version::get_processor_features() {
// high lock contention. For now we do not use it by default.
vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
}
- if (!is_power_of_2(RTMTotalCountIncrRate)) {
- warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
- FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
- }
- if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
- warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
- FLAG_SET_DEFAULT(RTMAbortRatio, 50);
- }
} else { // !UseRTMLocking
if (UseRTMForStackLocks) {
if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
@@ -917,16 +934,36 @@ void VM_Version::get_processor_features() {
warning("MaxVectorSize must be a power of 2");
FLAG_SET_DEFAULT(MaxVectorSize, 64);
}
- if (MaxVectorSize > 64) {
- FLAG_SET_DEFAULT(MaxVectorSize, 64);
- }
- if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
- // 32 bytes vectors (in YMM) are only supported with AVX+
- FLAG_SET_DEFAULT(MaxVectorSize, 16);
- }
if (UseSSE < 2) {
// Vectors (in XMM) are only supported with SSE2+
- FLAG_SET_DEFAULT(MaxVectorSize, 0);
+ if (MaxVectorSize > 0) {
+ if (!FLAG_IS_DEFAULT(MaxVectorSize))
+ warning("MaxVectorSize must be 0");
+ FLAG_SET_DEFAULT(MaxVectorSize, 0);
+ }
+ }
+ else if (UseAVX == 0 || !os_supports_avx_vectors()) {
+ // 32 bytes vectors (in YMM) are only supported with AVX+
+ if (MaxVectorSize > 16) {
+ if (!FLAG_IS_DEFAULT(MaxVectorSize))
+ warning("MaxVectorSize must be <= 16");
+ FLAG_SET_DEFAULT(MaxVectorSize, 16);
+ }
+ }
+ else if (UseAVX == 1 || UseAVX == 2) {
+ // 64 bytes vectors (in ZMM) are only supported with AVX 3
+ if (MaxVectorSize > 32) {
+ if (!FLAG_IS_DEFAULT(MaxVectorSize))
+ warning("MaxVectorSize must be <= 32");
+ FLAG_SET_DEFAULT(MaxVectorSize, 32);
+ }
+ }
+ else if (UseAVX > 2 ) {
+ if (MaxVectorSize > 64) {
+ if (!FLAG_IS_DEFAULT(MaxVectorSize))
+ warning("MaxVectorSize must be <= 64");
+ FLAG_SET_DEFAULT(MaxVectorSize, 64);
+ }
}
#if defined(COMPILER2) && defined(ASSERT)
if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
@@ -1056,18 +1093,18 @@ void VM_Version::get_processor_features() {
if ( cpu_family() == 0x15 ) {
// On family 15h processors default is no sw prefetch
if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
- AllocatePrefetchStyle = 0;
+ FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
}
// Also, if some other prefetch style is specified, default instruction type is PREFETCHW
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
- AllocatePrefetchInstr = 3;
+ FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
// On family 15h processors use XMM and UnalignedLoadStores for Array Copy
if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
- UseXMMForArrayCopy = true;
+ FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
}
if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
- UseUnalignedLoadStores = true;
+ FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
}
}
@@ -1132,10 +1169,7 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
}
}
- if ((cpu_family() == 0x06) &&
- ((extended_cpu_model() == 0x36) || // Centerton
- (extended_cpu_model() == 0x37) || // Silvermont
- (extended_cpu_model() == 0x4D))) {
+ if (is_atom_family() || is_knights_family()) {
#ifdef COMPILER2
if (FLAG_IS_DEFAULT(OptoScheduling)) {
OptoScheduling = true;
@@ -1146,9 +1180,12 @@ void VM_Version::get_processor_features() {
UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
}
}
+ if (FLAG_IS_DEFAULT(UseIncDec)) {
+ FLAG_SET_DEFAULT(UseIncDec, false);
+ }
}
if(FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
- AllocatePrefetchInstr = 3;
+ FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
}
@@ -1244,45 +1281,68 @@ void VM_Version::get_processor_features() {
}
#endif // COMPILER2
- if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0;
- if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3;
+ if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
+ if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
+ FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
+ } else if (!supports_sse() && supports_3dnow_prefetch()) {
+ FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
+ }
+ }
// Allocation prefetch settings
intx cache_line_size = prefetch_data_size();
- if( cache_line_size > AllocatePrefetchStepSize )
- AllocatePrefetchStepSize = cache_line_size;
+ if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize) &&
+ (cache_line_size > AllocatePrefetchStepSize)) {
+ FLAG_SET_DEFAULT(AllocatePrefetchStepSize, cache_line_size);
+ }
- AllocatePrefetchDistance = allocate_prefetch_distance();
- AllocatePrefetchStyle = allocate_prefetch_style();
+ if ((AllocatePrefetchDistance == 0) && (AllocatePrefetchStyle != 0)) {
+ assert(!FLAG_IS_DEFAULT(AllocatePrefetchDistance), "default value should not be 0");
+ if (!FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
+ warning("AllocatePrefetchDistance is set to 0 which disable prefetching. Ignoring AllocatePrefetchStyle flag.");
+ }
+ FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
+ }
+
+ if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+ bool use_watermark_prefetch = (AllocatePrefetchStyle == 2);
+ FLAG_SET_DEFAULT(AllocatePrefetchDistance, allocate_prefetch_distance(use_watermark_prefetch));
+ }
if (is_intel() && cpu_family() == 6 && supports_sse3()) {
- if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
-#ifdef _LP64
- AllocatePrefetchDistance = 384;
-#else
- AllocatePrefetchDistance = 320;
-#endif
- }
- if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
- AllocatePrefetchDistance = 192;
- if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) {
- FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
- }
+ if (FLAG_IS_DEFAULT(AllocatePrefetchLines) &&
+ supports_sse4_2() && supports_ht()) { // Nehalem based cpus
+ FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
}
#ifdef COMPILER2
- if (supports_sse4_2()) {
- if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
- FLAG_SET_DEFAULT(UseFPUForSpilling, true);
- }
+ if (FLAG_IS_DEFAULT(UseFPUForSpilling) && supports_sse4_2()) {
+ FLAG_SET_DEFAULT(UseFPUForSpilling, true);
}
#endif
}
#ifdef _LP64
// Prefetch settings
- PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
- PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
- PrefetchFieldsAhead = prefetch_fields_ahead();
+
+ // Prefetch interval for gc copy/scan == 9 dcache lines. Derived from
+ // 50-warehouse specjbb runs on a 2-way 1.8ghz opteron using a 4gb heap.
+ // Tested intervals from 128 to 2048 in increments of 64 == one cache line.
+ // 256 bytes (4 dcache lines) was the nearest runner-up to 576.
+
+ // gc copy/scan is disabled if prefetchw isn't supported, because
+ // Prefetch::write emits an inlined prefetchw on Linux.
+ // Do not use the 3dnow prefetchw instruction. It isn't supported on em64t.
+ // The used prefetcht0 instruction works for both amd64 and em64t.
+
+ if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes)) {
+ FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 576);
+ }
+ if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes)) {
+ FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 576);
+ }
+ if (FLAG_IS_DEFAULT(PrefetchFieldsAhead)) {
+ FLAG_SET_DEFAULT(PrefetchFieldsAhead, 1);
+ }
#endif
if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
@@ -1296,7 +1356,8 @@ void VM_Version::get_processor_features() {
#ifndef PRODUCT
if (log_is_enabled(Info, os, cpu)) {
- outputStream* log = Log(os, cpu)::info_stream();
+ LogStream ls(Log(os, cpu)::info());
+ outputStream* log = &ls;
log->print_cr("Logical CPUs per core: %u",
logical_processors_per_package());
log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
@@ -1314,7 +1375,7 @@ void VM_Version::get_processor_features() {
#endif
log->cr();
log->print("Allocation");
- if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
+ if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
log->print_cr(": no prefetching");
} else {
log->print(" prefetching: ");
diff --git a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp
index 5a51889e06b..23c2c7c195c 100644
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp
@@ -291,6 +291,7 @@ protected:
#define CPU_AVX512VL ((uint64_t)UCONST64(0x200000000)) // EVEX instructions with smaller vector length
#define CPU_SHA ((uint64_t)UCONST64(0x400000000)) // SHA instructions
#define CPU_FMA ((uint64_t)UCONST64(0x800000000)) // FMA instructions
+#define CPU_VZEROUPPER ((uint64_t)UCONST64(0x1000000000)) // Vzeroupper instruction
enum Extended_Family {
// AMD
@@ -468,6 +469,7 @@ protected:
_cpuid_info.xem_xcr0_eax.bits.sse != 0 &&
_cpuid_info.xem_xcr0_eax.bits.ymm != 0) {
result |= CPU_AVX;
+ result |= CPU_VZEROUPPER;
if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
result |= CPU_AVX2;
if (_cpuid_info.sef_cpuid7_ebx.bits.avx512f != 0 &&
@@ -605,8 +607,8 @@ public:
static address cpuinfo_cont_addr() { return _cpuinfo_cont_addr; }
static void clean_cpuFeatures() { _features = 0; }
- static void set_avx_cpuFeatures() { _features = (CPU_SSE | CPU_SSE2 | CPU_AVX); }
- static void set_evex_cpuFeatures() { _features = (CPU_AVX512F | CPU_SSE | CPU_SSE2 ); }
+ static void set_avx_cpuFeatures() { _features = (CPU_SSE | CPU_SSE2 | CPU_AVX | CPU_VZEROUPPER ); }
+ static void set_evex_cpuFeatures() { _features = (CPU_AVX512F | CPU_SSE | CPU_SSE2 | CPU_VZEROUPPER ); }
// Initialization
@@ -639,6 +641,8 @@ public:
static bool is_P6() { return cpu_family() >= 6; }
static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
+ static bool is_atom_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x36) || (extended_cpu_model() == 0x37) || (extended_cpu_model() == 0x4D))); } //Silvermont and Centerton
+ static bool is_knights_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x57) || (extended_cpu_model() == 0x85))); } // Xeon Phi 3200/5200/7200 and Future Xeon Phi
static bool supports_processor_topology() {
return (_cpuid_info.std_max_function >= 0xB) &&
@@ -730,7 +734,9 @@ public:
static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
static bool supports_avxonly() { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
static bool supports_sha() { return (_features & CPU_SHA) != 0; }
- static bool supports_fma() { return (_features & CPU_FMA) != 0; }
+ static bool supports_fma() { return (_features & CPU_FMA) != 0 && supports_avx(); }
+ static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
+
// Intel features
static bool is_intel_family_core() { return is_intel() &&
extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
@@ -778,9 +784,7 @@ public:
static bool supports_compare_and_exchange() { return true; }
- static intx allocate_prefetch_distance() {
- // This method should be called before allocate_prefetch_style().
- //
+ static intx allocate_prefetch_distance(bool use_watermark_prefetch) {
// Hardware prefetching (distance/size in bytes):
// Pentium 3 - 64 / 32
// Pentium 4 - 256 / 128
@@ -796,58 +800,34 @@ public:
// Core - 256 / prefetchnta
// It will be used only when AllocatePrefetchStyle > 0
- intx count = AllocatePrefetchDistance;
- if (count < 0) { // default ?
- if (is_amd()) { // AMD
- if (supports_sse2())
- count = 256; // Opteron
- else
- count = 128; // Athlon
- } else { // Intel
- if (supports_sse2())
- if (cpu_family() == 6) {
- count = 256; // Pentium M, Core, Core2
- } else {
- count = 512; // Pentium 4
- }
- else
- count = 128; // Pentium 3 (and all other old CPUs)
+ if (is_amd()) { // AMD
+ if (supports_sse2()) {
+ return 256; // Opteron
+ } else {
+ return 128; // Athlon
+ }
+ } else { // Intel
+ if (supports_sse3() && cpu_family() == 6) {
+ if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
+ return 192;
+ } else if (use_watermark_prefetch) { // watermark prefetching on Core
+#ifdef _LP64
+ return 384;
+#else
+ return 320;
+#endif
+ }
+ }
+ if (supports_sse2()) {
+ if (cpu_family() == 6) {
+ return 256; // Pentium M, Core, Core2
+ } else {
+ return 512; // Pentium 4
+ }
+ } else {
+ return 128; // Pentium 3 (and all other old CPUs)
}
}
- return count;
- }
- static intx allocate_prefetch_style() {
- assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
- // Return 0 if AllocatePrefetchDistance was not defined.
- return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0;
- }
-
- // Prefetch interval for gc copy/scan == 9 dcache lines. Derived from
- // 50-warehouse specjbb runs on a 2-way 1.8ghz opteron using a 4gb heap.
- // Tested intervals from 128 to 2048 in increments of 64 == one cache line.
- // 256 bytes (4 dcache lines) was the nearest runner-up to 576.
-
- // gc copy/scan is disabled if prefetchw isn't supported, because
- // Prefetch::write emits an inlined prefetchw on Linux.
- // Do not use the 3dnow prefetchw instruction. It isn't supported on em64t.
- // The used prefetcht0 instruction works for both amd64 and em64t.
- static intx prefetch_copy_interval_in_bytes() {
- intx interval = PrefetchCopyIntervalInBytes;
- return interval >= 0 ? interval : 576;
- }
- static intx prefetch_scan_interval_in_bytes() {
- intx interval = PrefetchScanIntervalInBytes;
- return interval >= 0 ? interval : 576;
- }
- static intx prefetch_fields_ahead() {
- intx count = PrefetchFieldsAhead;
- return count >= 0 ? count : 1;
- }
- static uint32_t get_xsave_header_lower_segment() {
- return _cpuid_info.xem_xcr0_eax.value;
- }
- static uint32_t get_xsave_header_upper_segment() {
- return _cpuid_info.xem_xcr0_edx;
}
// SSE2 and later processors implement a 'pause' instruction
diff --git a/hotspot/src/cpu/x86/vm/x86.ad b/hotspot/src/cpu/x86/vm/x86.ad
index 3dd25561c0d..afaa2da23c7 100644
--- a/hotspot/src/cpu/x86/vm/x86.ad
+++ b/hotspot/src/cpu/x86/vm/x86.ad
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -1387,7 +1387,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
}
// Vector ideal reg corresponding to specidied size in bytes
-const int Matcher::vector_ideal_reg(int size) {
+const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize >= size, "");
switch(size) {
case 4: return Op_VecS;
@@ -1401,7 +1401,7 @@ const int Matcher::vector_ideal_reg(int size) {
}
// Only lowest bits of xmm reg are used for vector shift count.
-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
return Op_VecS;
}
@@ -1804,9 +1804,9 @@ operand cmpOp_vcmppd() %{
instruct ShouldNotReachHere() %{
match(Halt);
- format %{ "int3\t# ShouldNotReachHere" %}
+ format %{ "ud2\t# ShouldNotReachHere" %}
ins_encode %{
- __ int3();
+ __ ud2();
%}
ins_pipe(pipe_slow);
%}
@@ -10520,3 +10520,161 @@ instruct vxor64B_mem(vecZ dst, vecZ src, memory mem) %{
ins_pipe( pipe_slow );
%}
+// --------------------------------- FMA --------------------------------------
+
+// a * b + c
+instruct vfma2D_reg(vecX a, vecX b, vecX c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 2);
+ match(Set c (FmaVD c (Binary a b)));
+ format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed2D" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 0;
+ __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// a * b + c
+instruct vfma2D_mem(vecX a, memory b, vecX c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 2);
+ match(Set c (FmaVD c (Binary a (LoadVector b))));
+ format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed2D" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 0;
+ __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+
+// a * b + c
+instruct vfma4D_reg(vecY a, vecY b, vecY c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 4);
+ match(Set c (FmaVD c (Binary a b)));
+ format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed4D" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 1;
+ __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// a * b + c
+instruct vfma4D_mem(vecY a, memory b, vecY c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 4);
+ match(Set c (FmaVD c (Binary a (LoadVector b))));
+ format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed4D" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 1;
+ __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// a * b + c
+instruct vfma8D_reg(vecZ a, vecZ b, vecZ c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 8);
+ match(Set c (FmaVD c (Binary a b)));
+ format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed8D" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 2;
+ __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// a * b + c
+instruct vfma8D_mem(vecZ a, memory b, vecZ c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 8);
+ match(Set c (FmaVD c (Binary a (LoadVector b))));
+ format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed8D" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 2;
+ __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// a * b + c
+instruct vfma4F_reg(vecX a, vecX b, vecX c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 4);
+ match(Set c (FmaVF c (Binary a b)));
+ format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed4F" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 0;
+ __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// a * b + c
+instruct vfma4F_mem(vecX a, memory b, vecX c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 4);
+ match(Set c (FmaVF c (Binary a (LoadVector b))));
+ format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed4F" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 0;
+ __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// a * b + c
+instruct vfma8F_reg(vecY a, vecY b, vecY c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 8);
+ match(Set c (FmaVF c (Binary a b)));
+ format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed8F" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 1;
+ __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// a * b + c
+instruct vfma8F_mem(vecY a, memory b, vecY c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 8);
+ match(Set c (FmaVF c (Binary a (LoadVector b))));
+ format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed8F" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 1;
+ __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// a * b + c
+instruct vfma16F_reg(vecZ a, vecZ b, vecZ c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 16);
+ match(Set c (FmaVF c (Binary a b)));
+ format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed16F" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 2;
+ __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// a * b + c
+instruct vfma16F_mem(vecZ a, memory b, vecZ c) %{
+ predicate(UseFMA && n->as_Vector()->length() == 16);
+ match(Set c (FmaVF c (Binary a (LoadVector b))));
+ format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed16F" %}
+ ins_cost(150);
+ ins_encode %{
+ int vector_len = 2;
+ __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
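The new vector FMA instructs above select the packed fused multiply-add forms with $c serving as both the addend and the destination, i.e. c = a * b + c; vector_len values 0, 1 and 2 select the 128-, 256- and 512-bit encodings for the vecX/vecY/vecZ operands. The 256-bit packed-double case expressed with compiler intrinsics (illustration only, compile with -mavx -mfma):

    #include <immintrin.h>

    // c[0..3] = a[0..3] * b[0..3] + c[0..3], the same shape as the
    // "Set c (FmaVD c (Binary a b))" match rule for packed4D (vector_len == 1).
    void fma_packed4d(double* c, const double* a, const double* b) {
      __m256d va = _mm256_loadu_pd(a);
      __m256d vb = _mm256_loadu_pd(b);
      __m256d vc = _mm256_loadu_pd(c);
      vc = _mm256_fmadd_pd(va, vb, vc);
      _mm256_storeu_pd(c, vc);
    }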
diff --git a/hotspot/src/cpu/x86/vm/x86_32.ad b/hotspot/src/cpu/x86/vm/x86_32.ad
index 3f045554ba2..cc711d2ec24 100644
--- a/hotspot/src/cpu/x86/vm/x86_32.ad
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad
@@ -290,7 +290,7 @@ static int pre_call_resets_size() {
if (C->in_24_bit_fp_mode()) {
size += 6; // fldcw
}
- if (C->max_vector_size() > 16) {
+ if (VM_Version::supports_vzeroupper()) {
size += 3; // vzeroupper
}
return size;
@@ -329,7 +329,7 @@ bool SafePointNode::needs_polling_address_input() {
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += 1; // skip call opcode byte
- return round_to(current_offset, alignment_required()) - current_offset;
+ return align_up(current_offset, alignment_required()) - current_offset;
}
// The address of the call instruction needs to be 4-byte aligned to
@@ -338,7 +338,7 @@ int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += 5; // skip MOV instruction
current_offset += 1; // skip call opcode byte
- return round_to(current_offset, alignment_required()) - current_offset;
+ return align_up(current_offset, alignment_required()) - current_offset;
}
// EMIT_RM()
@@ -1884,7 +1884,6 @@ encode %{
}
%}
-
enc_class pre_call_resets %{
// If method sets FPU control word restore it here
debug_only(int off0 = cbuf.insts_size());
@@ -1892,12 +1891,10 @@ encode %{
MacroAssembler _masm(&cbuf);
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
- if (ra_->C->max_vector_size() > 16) {
- // Clear upper bits of YMM registers when current compiled code uses
- // wide vectors to avoid AVX <-> SSE transition penalty during call.
- MacroAssembler _masm(&cbuf);
- __ vzeroupper();
- }
+ // Clear upper bits of YMM registers when current compiled code uses
+ // wide vectors to avoid AVX <-> SSE transition penalty during call.
+ MacroAssembler _masm(&cbuf);
+ __ vzeroupper();
debug_only(int off1 = cbuf.insts_size());
assert(off1 - off0 == pre_call_resets_size(), "correct size prediction");
%}
@@ -3278,7 +3275,7 @@ frame %{
// Ret Addr is on stack in slot 0 if no locks or verification or alignment.
// Otherwise, it is above the locks and verification slot and alignment word
return_addr(STACK - 1 +
- round_to((Compile::current()->in_preserve_stack_slots() +
+ align_up((Compile::current()->in_preserve_stack_slots() +
Compile::current()->fixed_slots()),
stack_alignment_in_slots()));
@@ -13222,7 +13219,7 @@ instruct CallLeafNoFPDirect(method meth) %{
ins_cost(300);
format %{ "CALL_LEAF_NOFP,runtime " %}
opcode(0xE8); /* E8 cd */
- ins_encode(Java_To_Runtime(meth));
+ ins_encode(pre_call_resets, Java_To_Runtime(meth));
ins_pipe( pipe_slow );
%}
diff --git a/hotspot/src/cpu/x86/vm/x86_64.ad b/hotspot/src/cpu/x86/vm/x86_64.ad
index 41e3ab1fb9f..c96f28ddcd2 100644
--- a/hotspot/src/cpu/x86/vm/x86_64.ad
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad
@@ -536,7 +536,7 @@ source %{
#define __ _masm.
static int clear_avx_size() {
- return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
+ return (VM_Version::supports_vzeroupper()) ? 3: 0; // vzeroupper
}
// !!!!! Special hack to get all types of calls to specify the byte offset
@@ -579,7 +579,7 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const
{
current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 1; // skip call opcode byte
- return round_to(current_offset, alignment_required()) - current_offset;
+ return align_up(current_offset, alignment_required()) - current_offset;
}
// The address of the call instruction needs to be 4-byte aligned to
@@ -588,7 +588,7 @@ int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
{
current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 11; // skip movq instruction + call opcode byte
- return round_to(current_offset, alignment_required()) - current_offset;
+ return align_up(current_offset, alignment_required()) - current_offset;
}
// EMIT_RM()
@@ -919,7 +919,7 @@ int MachPrologNode::reloc() const
void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
Compile* C = ra_->C;
- if (C->max_vector_size() > 16) {
+ if (VM_Version::supports_vzeroupper()) {
st->print("vzeroupper");
st->cr(); st->print("\t");
}
@@ -955,11 +955,9 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
Compile* C = ra_->C;
MacroAssembler _masm(&cbuf);
- if (C->max_vector_size() > 16) {
- // Clear upper bits of YMM registers when current compiled code uses
- // wide vectors to avoid AVX <-> SSE transition penalty during call.
- __ vzeroupper();
- }
+ // Clear upper bits of YMM registers when current compiled code uses
+ // wide vectors to avoid AVX <-> SSE transition penalty during call.
+ __ vzeroupper();
int framesize = C->frame_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
@@ -2092,12 +2090,11 @@ encode %{
enc_class clear_avx %{
debug_only(int off0 = cbuf.insts_size());
- if (ra_->C->max_vector_size() > 16) {
- // Clear upper bits of YMM registers when current compiled code uses
- // wide vectors to avoid AVX <-> SSE transition penalty during call.
- MacroAssembler _masm(&cbuf);
- __ vzeroupper();
- }
+ // Clear upper bits of YMM registers to avoid AVX <-> SSE transition penalty
+ // Clear upper bits of YMM registers when current compiled code uses
+ // wide vectors to avoid AVX <-> SSE transition penalty during call.
+ MacroAssembler _masm(&cbuf);
+ __ vzeroupper();
debug_only(int off1 = cbuf.insts_size());
assert(off1 - off0 == clear_avx_size(), "correct size prediction");
%}
@@ -2810,7 +2807,7 @@ frame
// Ret Addr is on stack in slot 0 if no locks or verification or alignment.
// Otherwise, it is above the locks and verification slot and alignment word
return_addr(STACK - 2 +
- round_to((Compile::current()->in_preserve_stack_slots() +
+ align_up((Compile::current()->in_preserve_stack_slots() +
Compile::current()->fixed_slots()),
stack_alignment_in_slots()));
@@ -12158,7 +12155,7 @@ instruct CallLeafNoFPDirect(method meth)
ins_cost(300);
format %{ "call_leaf_nofp,runtime " %}
- ins_encode(Java_To_Runtime(meth));
+ ins_encode(clear_avx, Java_To_Runtime(meth));
ins_pipe(pipe_slow);
%}
diff --git a/hotspot/src/cpu/zero/vm/abstractInterpreter_zero.cpp b/hotspot/src/cpu/zero/vm/abstractInterpreter_zero.cpp
index de1a9584aeb..ee5f6aa888f 100644
--- a/hotspot/src/cpu/zero/vm/abstractInterpreter_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/abstractInterpreter_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -29,10 +29,6 @@
#include "runtime/frame.inline.hpp"
#include "utilities/globalDefinitions.hpp"
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
- return true;
-}
-
int AbstractInterpreter::BasicType_as_index(BasicType type) {
int i = 0;
switch (type) {
diff --git a/hotspot/src/cpu/zero/vm/bytes_zero.hpp b/hotspot/src/cpu/zero/vm/bytes_zero.hpp
index 186f09d6937..9b56c01e586 100644
--- a/hotspot/src/cpu/zero/vm/bytes_zero.hpp
+++ b/hotspot/src/cpu/zero/vm/bytes_zero.hpp
@@ -36,16 +36,6 @@ typedef union unaligned {
class Bytes: AllStatic {
public:
- // Returns true if the byte ordering used by Java is different
- // from the native byte ordering of the underlying machine.
- static inline bool is_Java_byte_ordering_different() {
-#ifdef VM_LITTLE_ENDIAN
- return true;
-#else
- return false;
-#endif
- }
-
// Efficient reading and writing of unaligned unsigned data in
// platform-specific byte ordering.
static inline u2 get_native_u2(address p){
diff --git a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
index f7c51092c82..5c5a12a7085 100644
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
@@ -378,14 +378,7 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
thread->set_thread_state(_thread_in_native_trans);
// Make sure new state is visible in the GC thread
- if (os::is_MP()) {
- if (UseMembar) {
- OrderAccess::fence();
- }
- else {
- InterfaceSupport::serialize_memory(thread);
- }
- }
+ InterfaceSupport::serialize_thread_state(thread);
// Handle safepoint operations, pending suspend requests,
// and pending asynchronous exceptions.
diff --git a/hotspot/src/cpu/zero/vm/frame_zero.cpp b/hotspot/src/cpu/zero/vm/frame_zero.cpp
index 19acf03681e..ad4887fb4d6 100644
--- a/hotspot/src/cpu/zero/vm/frame_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/frame_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -444,4 +444,6 @@ intptr_t *frame::initial_deoptimization_info() {
frame::frame(void* sp, void* fp, void* pc) {
Unimplemented();
}
+
+void frame::pd_ps() {}
#endif
diff --git a/hotspot/src/cpu/zero/vm/interpreterFrame_zero.hpp b/hotspot/src/cpu/zero/vm/interpreterFrame_zero.hpp
index dd50aee8fd4..1cb658d18a3 100644
--- a/hotspot/src/cpu/zero/vm/interpreterFrame_zero.hpp
+++ b/hotspot/src/cpu/zero/vm/interpreterFrame_zero.hpp
@@ -30,6 +30,7 @@
#include "oops/method.hpp"
#include "runtime/thread.hpp"
#include "stack_zero.hpp"
+#include "utilities/align.hpp"
#ifdef CC_INTERP
// | ... |
@@ -57,8 +58,8 @@ class InterpreterFrame : public ZeroFrame {
protected:
enum Layout {
istate_off = jf_header_words +
- (align_size_up_(sizeof(BytecodeInterpreter),
- wordSize) >> LogBytesPerWord) - 1,
+ (align_up_(sizeof(BytecodeInterpreter),
+ wordSize) >> LogBytesPerWord) - 1,
header_words
};
diff --git a/hotspot/src/cpu/zero/vm/interpreterRT_zero.cpp b/hotspot/src/cpu/zero/vm/interpreterRT_zero.cpp
index 2e2dc3cab60..5078b40878c 100644
--- a/hotspot/src/cpu/zero/vm/interpreterRT_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/interpreterRT_zero.cpp
@@ -35,6 +35,7 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/signature.hpp"
#include "stack_zero.inline.hpp"
+#include "utilities/align.hpp"
void InterpreterRuntime::SignatureHandlerGeneratorBase::pass_int() {
push(T_INT);
@@ -148,7 +149,7 @@ IRT_ENTRY(address,
ZeroStack *stack = thread->zero_stack();
int required_words =
- (align_size_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
+ (align_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
(method->is_static() ? 2 : 1) + method->size_of_parameters() + 1;
stack->overflow_check(required_words, CHECK_NULL);
diff --git a/hotspot/src/cpu/zero/vm/interpreterRT_zero.hpp b/hotspot/src/cpu/zero/vm/interpreterRT_zero.hpp
index 6330dd4fe4b..4d022bf2ec3 100644
--- a/hotspot/src/cpu/zero/vm/interpreterRT_zero.hpp
+++ b/hotspot/src/cpu/zero/vm/interpreterRT_zero.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -67,7 +67,7 @@ class SignatureHandlerGeneratorBase : public NativeSignatureIterator {
ffi_cif* _cif;
protected:
- SignatureHandlerGeneratorBase(methodHandle method, ffi_cif *cif)
+ SignatureHandlerGeneratorBase(const methodHandle& method, ffi_cif *cif)
: NativeSignatureIterator(method), _cif(cif) {
_cif->nargs = 0;
}
@@ -96,7 +96,7 @@ class SignatureHandlerGenerator : public SignatureHandlerGeneratorBase {
CodeBuffer* _cb;
public:
- SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer)
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer)
: SignatureHandlerGeneratorBase(method, (ffi_cif *) buffer->insts_end()),
_cb(buffer) {
_cb->set_insts_end((address) (cif() + 1));
@@ -115,7 +115,7 @@ class SlowSignatureHandlerGenerator : public SignatureHandlerGeneratorBase {
intptr_t *_dst;
public:
- SlowSignatureHandlerGenerator(methodHandle method, intptr_t* buf)
+ SlowSignatureHandlerGenerator(const methodHandle& method, intptr_t* buf)
: SignatureHandlerGeneratorBase(method, (ffi_cif *) buf) {
_dst = (intptr_t *) (cif() + 1);
}
diff --git a/hotspot/src/cpu/zero/vm/metaspaceShared_zero.cpp b/hotspot/src/cpu/zero/vm/metaspaceShared_zero.cpp
deleted file mode 100644
index 8cb5e15588f..00000000000
--- a/hotspot/src/cpu/zero/vm/metaspaceShared_zero.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "assembler_zero.inline.hpp"
-#include "memory/metaspaceShared.hpp"
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
- void** vtable,
- char** md_top,
- char* md_end,
- char** mc_top,
- char* mc_end) {
- ShouldNotCallThis();
-}
diff --git a/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp b/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp
index 8b7beb15547..b6ae15df6a2 100644
--- a/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -147,8 +147,8 @@ int MethodHandles::method_handle_entry_linkToVirtual(Method* method, intptr_t UN
Klass* clazz = recv->klass();
Klass* klass_part = InstanceKlass::cast(clazz);
ResourceMark rm(THREAD);
- klassVtable* vtable = klass_part->vtable();
- Method* vmtarget = vtable->method_at(vmindex);
+ klassVtable vtable = klass_part->vtable();
+ Method* vmtarget = vtable.method_at(vmindex);
invoke_target(vmtarget, THREAD);
diff --git a/hotspot/src/cpu/zero/vm/stack_zero.cpp b/hotspot/src/cpu/zero/vm/stack_zero.cpp
index 1acb96e97ec..a9bf5309921 100644
--- a/hotspot/src/cpu/zero/vm/stack_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/stack_zero.cpp
@@ -28,6 +28,7 @@
#include "runtime/thread.hpp"
#include "stack_zero.hpp"
#include "stack_zero.inline.hpp"
+#include "utilities/align.hpp"
// Inlined causes circular inclusion with thread.hpp
ZeroStack::ZeroStack()
@@ -39,7 +40,7 @@ int ZeroStack::suggest_size(Thread *thread) const {
assert(needs_setup(), "already set up");
int abi_available = abi_stack_available(thread);
assert(abi_available >= 0, "available abi stack must be >= 0");
- return align_size_down(abi_available / 2, wordSize);
+ return align_down(abi_available / 2, wordSize);
}
void ZeroStack::handle_overflow(TRAPS) {
diff --git a/hotspot/src/cpu/zero/vm/stack_zero.hpp b/hotspot/src/cpu/zero/vm/stack_zero.hpp
index 6048cf2ac83..ca95c55a2b6 100644
--- a/hotspot/src/cpu/zero/vm/stack_zero.hpp
+++ b/hotspot/src/cpu/zero/vm/stack_zero.hpp
@@ -26,6 +26,7 @@
#ifndef CPU_ZERO_VM_STACK_ZERO_HPP
#define CPU_ZERO_VM_STACK_ZERO_HPP
+#include "utilities/align.hpp"
#include "utilities/sizes.hpp"
class ZeroStack {
@@ -88,7 +89,7 @@ class ZeroStack {
}
void *alloc(size_t size) {
- int count = align_size_up(size, wordSize) >> LogBytesPerWord;
+ int count = align_up(size, wordSize) >> LogBytesPerWord;
assert(count <= available_words(), "stack overflow");
return _sp -= count;
}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java
index 6cd7a4ad8b0..f6201f6e341 100644
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,16 +36,19 @@ import java.util.Map;
import jdk.tools.jaotc.binformat.Symbol.Binding;
import jdk.tools.jaotc.binformat.Symbol.Kind;
import jdk.tools.jaotc.binformat.elf.JELFRelocObject;
+import jdk.tools.jaotc.binformat.macho.JMachORelocObject;
+import jdk.tools.jaotc.binformat.pecoff.JPECoffRelocObject;
import org.graalvm.compiler.hotspot.GraalHotSpotVMConfig;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
+import org.graalvm.compiler.options.OptionValues;
/**
* A format-agnostic container class that holds various components of a binary.
*
*
* This class holds information necessary to create platform-specific binary containers such as
- * ELFContainer for Linux and Solaris operating systems or yet-to be created MachOContainer for Mac
- * OS or PEContainer for MS Windows operating systems.
+ * ELFContainer for Linux and Solaris operating systems or MachOContainer for Mac OS or PEContainer
+ * for MS Windows operating systems.
*
*
* Method APIs provided by this class are used to construct and populate platform-independent
@@ -56,6 +59,7 @@ import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
* Methods to record and access code section contents, symbols and relocations are provided.
*/
public class BinaryContainer implements SymbolTable {
+ private final OptionValues graalOptions;
private final int codeSegmentSize;
@@ -257,36 +261,40 @@ public class BinaryContainer implements SymbolTable {
* Allocates a {@code BinaryContainer} object whose content will be generated in a file with the
* prefix {@code prefix}. It also initializes internal code container, symbol table and
* relocation tables.
+ *
+ * @param graalOptions
*/
- public BinaryContainer(GraalHotSpotVMConfig graalHotSpotVMConfig, GraphBuilderConfiguration graphBuilderConfig, String jvmVersion) {
+ public BinaryContainer(OptionValues graalOptions, GraalHotSpotVMConfig graalHotSpotVMConfig, GraphBuilderConfiguration graphBuilderConfig, String jvmVersion) {
+ this.graalOptions = graalOptions;
+
this.codeSegmentSize = graalHotSpotVMConfig.codeSegmentSize;
this.codeEntryAlignment = graalHotSpotVMConfig.codeEntryAlignment;
// read only, code
codeContainer = new CodeContainer(".text", this);
- extLinkageContainer = new CodeContainer(".hotspot.linkage.plt", this);
+ extLinkageContainer = new CodeContainer(".hs.plt.linkage", this);
// read only, info
configContainer = new ReadOnlyDataContainer(".config", this);
- metaspaceNamesContainer = new ReadOnlyDataContainer(".metaspace.names", this);
+ metaspaceNamesContainer = new ReadOnlyDataContainer(".meta.names", this);
methodsOffsetsContainer = new ReadOnlyDataContainer(".methods.offsets", this);
- klassesOffsetsContainer = new ReadOnlyDataContainer(".klasses.offsets", this);
- klassesDependenciesContainer = new ReadOnlyDataContainer(".klasses.dependencies", this);
+ klassesOffsetsContainer = new ReadOnlyDataContainer(".kls.offsets", this);
+ klassesDependenciesContainer = new ReadOnlyDataContainer(".kls.dependencies", this);
headerContainer = new HeaderContainer(jvmVersion, new ReadOnlyDataContainer(".header", this));
stubsOffsetsContainer = new ReadOnlyDataContainer(".stubs.offsets", this);
codeSegmentsContainer = new ReadOnlyDataContainer(".code.segments", this);
- constantDataContainer = new ReadOnlyDataContainer(".method.constdata", this);
+ constantDataContainer = new ReadOnlyDataContainer(".meth.constdata", this);
// needs relocation patching at load time by the loader
- methodMetadataContainer = new ReadOnlyDataContainer(".method.metadata", this);
+ methodMetadataContainer = new ReadOnlyDataContainer(".meth.metadata", this);
// writable sections
- metaspaceGotContainer = new ByteContainer(".metaspace.got", this);
+ metaspaceGotContainer = new ByteContainer(".meta.got", this);
metadataGotContainer = new ByteContainer(".metadata.got", this);
- methodStateContainer = new ByteContainer(".method.state", this);
+ methodStateContainer = new ByteContainer(".meth.state", this);
oopGotContainer = new ByteContainer(".oop.got", this);
- extLinkageGOTContainer = new ByteContainer(".hotspot.linkage.got", this);
+ extLinkageGOTContainer = new ByteContainer(".hs.got.linkage", this);
addGlobalSymbols();
@@ -300,20 +308,19 @@ public class BinaryContainer implements SymbolTable {
graalHotSpotVMConfig.useCompressedClassPointers,
graalHotSpotVMConfig.compactFields,
graalHotSpotVMConfig.useG1GC,
- graalHotSpotVMConfig.useCMSGC,
graalHotSpotVMConfig.useTLAB,
graalHotSpotVMConfig.useBiasedLocking,
- TieredAOT.getValue(),
+ TieredAOT.getValue(graalOptions),
graalHotSpotVMConfig.enableContended,
graalHotSpotVMConfig.restrictContended,
graphBuilderConfig.omitAssertions()
};
- int[] intFlags = { graalHotSpotVMConfig.getOopEncoding().shift,
- graalHotSpotVMConfig.getKlassEncoding().shift,
+ int[] intFlags = { graalHotSpotVMConfig.getOopEncoding().getShift(),
+ graalHotSpotVMConfig.getKlassEncoding().getShift(),
graalHotSpotVMConfig.contendedPaddingWidth,
graalHotSpotVMConfig.fieldsAllocationStyle,
- 1 << graalHotSpotVMConfig.getOopEncoding().alignment,
+ 1 << graalHotSpotVMConfig.logMinObjAlignment(),
graalHotSpotVMConfig.codeSegmentSize,
};
// @formatter:on
@@ -497,11 +504,20 @@ public class BinaryContainer implements SymbolTable {
switch (osName) {
case "Linux":
case "SunOS":
- JELFRelocObject elfso = new JELFRelocObject(this, outputFileName, aotVersion);
- elfso.createELFRelocObject(relocationTable, symbolTable.values());
+ JELFRelocObject elfobj = new JELFRelocObject(this, outputFileName, aotVersion);
+ elfobj.createELFRelocObject(relocationTable, symbolTable.values());
+ break;
+ case "Mac OS X":
+ JMachORelocObject machobj = new JMachORelocObject(this, outputFileName);
+ machobj.createMachORelocObject(relocationTable, symbolTable.values());
break;
default:
- throw new InternalError("Unsupported platform: " + osName);
+ if (osName.startsWith("Windows")) {
+ JPECoffRelocObject pecoffobj = new JPECoffRelocObject(this, outputFileName, aotVersion);
+ pecoffobj.createPECoffRelocObject(relocationTable, symbolTable.values());
+ break;
+ } else
+ throw new InternalError("Unsupported platform: " + osName);
}
}
@@ -742,11 +758,11 @@ public class BinaryContainer implements SymbolTable {
}
/**
- * Add constant data as follows. - Adding the data to the method.constdata section
+ * Add constant data as follows. - Adding the data to the meth.constdata section
*
* @param data
* @param alignment
- * @return the offset in the method.constdata of the data
+ * @return the offset in the meth.constdata of the data
*/
public int addConstantData(byte[] data, int alignment) {
// Get the current length of the metaspaceNameContainer
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/ByteContainer.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/ByteContainer.java
index 12daa9f356f..3d44ab0cf72 100644
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/ByteContainer.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/ByteContainer.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@ package jdk.tools.jaotc.binformat;
import jdk.tools.jaotc.binformat.Symbol.Binding;
import jdk.tools.jaotc.binformat.Symbol.Kind;
-import jdk.tools.jaotc.jnilibelf.ELFContainer;
+import jdk.tools.jaotc.binformat.Container;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
@@ -41,7 +41,7 @@ import java.util.Arrays;
* The method {@code putIntAt} updates the content of {@code contentBytes}. Changes are not
* reflected in {@code contentStream}.
*/
-public class ByteContainer implements ELFContainer {
+public class ByteContainer implements Container {
/**
* {@code ByteBuffer} representation of {@code BinaryContainer}.
*/
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/ELFContainer.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/Container.java
similarity index 87%
rename from hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/ELFContainer.java
rename to hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/Container.java
index b65b6ae8d25..ad2b405ab90 100644
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/ELFContainer.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/Container.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,9 +21,9 @@
* questions.
*/
-package jdk.tools.jaotc.jnilibelf;
+package jdk.tools.jaotc.binformat;
-public interface ELFContainer {
+public interface Container {
String getContainerName();
diff --git a/hotspot/test/compiler/testlibrary/rtm/predicate/SupportedCPU.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/NativeSymbol.java
similarity index 65%
rename from hotspot/test/compiler/testlibrary/rtm/predicate/SupportedCPU.java
rename to hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/NativeSymbol.java
index eab1f977723..e03870ebd40 100644
--- a/hotspot/test/compiler/testlibrary/rtm/predicate/SupportedCPU.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/NativeSymbol.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,17 +21,31 @@
* questions.
*/
-package compiler.testlibrary.rtm.predicate;
+package jdk.tools.jaotc.binformat;
-import jdk.test.lib.Platform;
-import sun.hotspot.cpuinfo.CPUInfo;
+/**
+ * This class represents a native OS specific Symbol
+ */
+public abstract class NativeSymbol {
-import java.util.function.BooleanSupplier;
+ /** String table index. */
+ private int index;
-public class SupportedCPU implements BooleanSupplier {
- @Override
- public boolean getAsBoolean() {
- if (Platform.isPPC()) { return CPUInfo.hasFeature("tcheck"); }
- return CPUInfo.hasFeature("rtm");
+ public NativeSymbol(int index) {
+ this.index = index;
+ }
+
+ /**
+ * @return the index
+ */
+ public int getIndex() {
+ return index;
+ }
+
+ /**
+ * @param index the string table index to set
+ */
+ public void setIndex(int index) {
+ this.index = index;
}
}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/Symbol.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/Symbol.java
index ba0f9090ee7..8fbc0dd23a6 100644
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/Symbol.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/Symbol.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@ package jdk.tools.jaotc.binformat;
import java.util.Objects;
-import jdk.tools.jaotc.jnilibelf.ELFSymbol;
+import jdk.tools.jaotc.binformat.NativeSymbol;
public class Symbol {
@@ -51,7 +51,7 @@ public class Symbol {
private final Kind kind;
private ByteContainer section;
- private ELFSymbol elfSymbol;
+ private NativeSymbol nativeSymbol;
/**
* Create symbol info.
@@ -77,12 +77,12 @@ public class Symbol {
return name;
}
- public ELFSymbol getElfSymbol() {
- return elfSymbol;
+ public NativeSymbol getNativeSymbol() {
+ return nativeSymbol;
}
- public void setElfSymbol(ELFSymbol elfSymbol) {
- this.elfSymbol = elfSymbol;
+ public void setNativeSymbol(NativeSymbol nativeSym) {
+ this.nativeSymbol = nativeSym;
}
public Binding getBinding() {
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/Elf.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/Elf.java
new file mode 100644
index 00000000000..e61176d07e7
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/Elf.java
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.elf;
+
+/**
+ *
+ * Support for the creation of Elf Object files.
+ * Current support is limited to 64 bit x86_64.
+ *
+ */
+
+public class Elf {
+
+ /**
+ * Elf64_Ehdr structure defines
+ */
+ public enum Elf64_Ehdr {
+ e_ident( 0,16),
+ e_type(16, 2),
+ e_machine(18, 2),
+ e_version(20, 4),
+ e_entry(24, 8),
+ e_phoff(32, 8),
+ e_shoff(40, 8),
+ e_flags(48, 4),
+ e_ehsize(52, 2),
+ e_phentsize(54, 2),
+ e_phnum(56, 2),
+ e_shentsize(58, 2),
+ e_shnum(60, 2),
+ e_shstrndx(62, 2);
+
+ public final int off;
+ public final int sz;
+
+ Elf64_Ehdr(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 64;
+
+ /**
+ * Elf64_Ehdr defines
+ */
+
+ /**
+ * e_ident
+ */
+ public static final int EI_MAG0 = 0;
+ public static final byte ELFMAG0 = 0x7f;
+ public static final int EI_MAG1 = 1;
+ public static final byte ELFMAG1 = 0x45;
+ public static final int EI_MAG2 = 2;
+ public static final byte ELFMAG2 = 0x4c;
+ public static final int EI_MAG3 = 3;
+ public static final byte ELFMAG3 = 0x46;
+
+ public static final int EI_CLASS = 4;
+ public static final byte ELFCLASS64 = 0x2;
+
+ public static final int EI_DATA = 5;
+ public static final byte ELFDATA2LSB = 0x1;
+
+ public static final int EI_VERSION = 6;
+ public static final byte EV_CURRENT = 0x1;
+
+ public static final int EI_OSABI = 7;
+ public static final byte ELFOSABI_NONE = 0x0;
+
+ /**
+ * e_type
+ */
+ public static final char ET_REL = 0x1;
+
+ /**
+ * e_machine
+ */
+ public static final char EM_NONE = 0;
+ public static final char EM_X86_64 = 62;
+ public static final char EM_AARCH64 = 183;
+
+ /**
+ * e_version
+ */
+ // public static final int EV_CURRENT = 1;
+
+ }
+
+ /**
+ * Elf64_Shdr structure defines
+ */
+ public enum Elf64_Shdr {
+ sh_name( 0, 4),
+ sh_type( 4, 4),
+ sh_flags( 8, 8),
+ sh_addr(16, 8),
+ sh_offset(24, 8),
+ sh_size(32, 8),
+ sh_link(40, 4),
+ sh_info(44, 4),
+ sh_addralign(48, 8),
+ sh_entsize(56, 8);
+
+ public final int off;
+ public final int sz;
+
+ Elf64_Shdr(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 64;
+
+ /**
+ * Elf64_Shdr defines
+ */
+
+ /**
+ * sh_type
+ */
+ public static final int SHT_PROGBITS = 0x1;
+ public static final int SHT_SYMTAB = 0x2;
+ public static final int SHT_STRTAB = 0x3;
+ public static final int SHT_RELA = 0x4;
+ public static final int SHT_NOBITS = 0x8;
+ public static final int SHT_REL = 0x9;
+
+ public static final byte SHN_UNDEF = 0x0;
+
+ /**
+ * sh_flag
+ */
+ public static final int SHF_WRITE = 0x1;
+ public static final int SHF_ALLOC = 0x2;
+ public static final int SHF_EXECINSTR = 0x4;
+
+ }
+
+ /**
+ * Symbol table entry definitions
+ *
+ * Elf64_Sym structure defines
+ */
+ public enum Elf64_Sym {
+ st_name( 0, 4),
+ st_info( 4, 1),
+ st_other( 5, 1),
+ st_shndx( 6, 2),
+ st_value( 8, 8),
+ st_size(16, 8);
+
+ public final int off;
+ public final int sz;
+
+ Elf64_Sym(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 24;
+
+ /* ST_BIND is in bits 4-7 of st_info. ST_TYPE is in low 4 bits */
+ public static final byte STB_LOCAL = 0x0;
+ public static final byte STB_GLOBAL = 0x1;
+
+ public static final byte STT_NOTYPE = 0x0;
+ public static final byte STT_OBJECT = 0x1;
+ public static final byte STT_FUNC = 0x2;
+
+ public static byte ELF64_ST_INFO(byte bind, byte type) {
+ return (byte)(((bind) << 4) + ((type) & 0xf));
+ }
+
+ }
+
+ /**
+ * Elf64_Rel structure defines
+ */
+ public enum Elf64_Rel {
+ r_offset( 0, 8),
+ r_info( 8, 8);
+
+ public final int off;
+ public final int sz;
+
+ Elf64_Rel(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 16;
+
+ /**
+ * Relocation types
+ */
+ public static final int R_X86_64_NONE = 0x0;
+ public static final int R_X86_64_64 = 0x1;
+ public static final int R_X86_64_PC32 = 0x2;
+ public static final int R_X86_64_PLT32 = 0x4;
+ public static final int R_X86_64_GOTPCREL = 0x9;
+
+ }
+
+ /**
+ * Elf64_Rela structure defines
+ */
+ public enum Elf64_Rela {
+ r_offset( 0, 8),
+ r_info( 8, 8),
+ r_addend(16, 8);
+
+ public final int off;
+ public final int sz;
+
+ Elf64_Rela(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 24;
+
+ public static final int R_X86_64_NONE = 0x0;
+ public static final int R_X86_64_64 = 0x1;
+ public static final int R_X86_64_PC32 = 0x2;
+ public static final int R_X86_64_PLT32 = 0x4;
+ public static final int R_X86_64_GOTPCREL = 0x9;
+
+ public static long ELF64_R_INFO(int symidx, int type) {
+ return (((long)symidx << 32) + ((long)type));
+ }
+
+ }
+
+}
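For reference, the ELF64_ST_INFO and ELF64_R_INFO helpers defined above use the standard ELF64 packing: the binding occupies the high nibble of st_info, and the symbol table index occupies the upper 32 bits of r_info. The following standalone sketch (illustrative only, not part of the patch; all values are made up) reproduces that arithmetic with plain literals:

    // Standalone sketch mirroring Elf64_Sym.ELF64_ST_INFO and Elf64_Rela.ELF64_R_INFO.
    public class ElfPackingSketch {
        public static void main(String[] args) {
            byte bind = 0x1;                                   // STB_GLOBAL
            byte type = 0x2;                                   // STT_FUNC
            byte stInfo = (byte) ((bind << 4) + (type & 0xf));
            System.out.printf("st_info = 0x%02x%n", stInfo);   // prints 0x12

            int symidx = 5;                                    // hypothetical symbol table index
            int relocType = 0x1;                               // R_X86_64_64
            long rInfo = ((long) symidx << 32) + (long) relocType;
            System.out.printf("r_info  = 0x%016x%n", rInfo);   // prints 0x0000000500000001
        }
    }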
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfByteBuffer.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfByteBuffer.java
new file mode 100644
index 00000000000..8d7c8e7d41d
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfByteBuffer.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.elf;
+
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Ehdr;
+import jdk.tools.jaotc.binformat.elf.ElfTargetInfo;
+
+public class ElfByteBuffer {
+
+ public static ByteBuffer allocate(int size) {
+ ByteBuffer buf = ByteBuffer.allocate(size);
+ if (ElfTargetInfo.getElfEndian() == Elf64_Ehdr.ELFDATA2LSB)
+ buf.order(ByteOrder.LITTLE_ENDIAN);
+ else
+ buf.order(ByteOrder.BIG_ENDIAN);
+ return (buf);
+ }
+
+}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfContainer.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfContainer.java
new file mode 100644
index 00000000000..03b8682c95e
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfContainer.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.elf;
+
+import java.io.File;
+import java.io.FileOutputStream;
+
+public class ElfContainer {
+
+ File outputFile;
+ FileOutputStream outputStream;
+ long fileOffset;
+
+ public ElfContainer(String fileName, String aotVersion) {
+ String baseName;
+
+ outputFile = new File(fileName);
+ if (outputFile.exists()) {
+ outputFile.delete();
+ }
+
+ try {
+ outputStream = new FileOutputStream(outputFile);
+ } catch (Exception e) {
+ System.out.println("ElfContainer: Can't create file " + fileName);
+ }
+ fileOffset = 0;
+ }
+
+ public void close() {
+ try {
+ outputStream.close();
+ } catch (Exception e) {
+ System.out.println("ElfContainer: close failed");
+ }
+ }
+
+ public void writeBytes(byte [] bytes) {
+ if (bytes == null) return;
+ try {
+ outputStream.write(bytes);
+ } catch (Exception e) {
+ System.out.println("ElfContainer: writeBytes failed");
+ }
+ fileOffset += bytes.length;
+ }
+
+ // Write bytes to output file with up front alignment padding
+ public void writeBytes(byte [] bytes, int alignment) {
+ if (bytes == null) return;
+ try {
+ // Pad to alignment
+ while ((fileOffset & (long)(alignment-1)) != 0) {
+ outputStream.write(0);
+ fileOffset++;
+ }
+ outputStream.write(bytes);
+ } catch (Exception e) {
+ System.out.println("ElfContainer: writeBytes failed");
+ }
+ fileOffset += bytes.length;
+ }
+}
+
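The padding loop in writeBytes(byte[], int) above assumes the requested alignment is a power of two, so the test (fileOffset & (alignment - 1)) != 0 is equivalent to fileOffset % alignment != 0. A small standalone sketch (hypothetical offsets, not part of the patch) shows the resulting padding count:

    // Standalone sketch of the power-of-two padding used by ElfContainer.writeBytes(byte[], int).
    public class AlignmentSketch {
        public static void main(String[] args) {
            long fileOffset = 13;   // hypothetical current file offset
            int alignment = 8;      // must be a power of two
            long padding = 0;
            while (((fileOffset + padding) & (long) (alignment - 1)) != 0) {
                padding++;          // writeBytes() emits one zero byte per iteration
            }
            System.out.println("padding bytes: " + padding);  // prints 3 (13 is padded up to 16)
        }
    }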
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfHeader.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfHeader.java
new file mode 100644
index 00000000000..e930d5580a3
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfHeader.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.elf;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.elf.Elf;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Ehdr;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Shdr;
+import jdk.tools.jaotc.binformat.elf.ElfTargetInfo;
+import jdk.tools.jaotc.binformat.elf.ElfByteBuffer;
+
+public class ElfHeader {
+ ByteBuffer header;
+
+ public ElfHeader() {
+ header = ElfByteBuffer.allocate(Elf64_Ehdr.totalsize);
+
+ header.put(Elf64_Ehdr.e_ident.off+Elf64_Ehdr.EI_MAG0, Elf64_Ehdr.ELFMAG0);
+ header.put(Elf64_Ehdr.e_ident.off+Elf64_Ehdr.EI_MAG1, Elf64_Ehdr.ELFMAG1);
+ header.put(Elf64_Ehdr.e_ident.off+Elf64_Ehdr.EI_MAG2, Elf64_Ehdr.ELFMAG2);
+ header.put(Elf64_Ehdr.e_ident.off+Elf64_Ehdr.EI_MAG3, Elf64_Ehdr.ELFMAG3);
+ header.put(Elf64_Ehdr.e_ident.off+Elf64_Ehdr.EI_CLASS, Elf64_Ehdr.ELFCLASS64);
+ header.put(Elf64_Ehdr.e_ident.off+Elf64_Ehdr.EI_DATA, Elf64_Ehdr.ELFDATA2LSB);
+ header.put(Elf64_Ehdr.e_ident.off+Elf64_Ehdr.EI_VERSION, Elf64_Ehdr.EV_CURRENT);
+ header.put(Elf64_Ehdr.e_ident.off+Elf64_Ehdr.EI_OSABI, Elf64_Ehdr.ELFOSABI_NONE);
+
+ header.putChar(Elf64_Ehdr.e_type.off, Elf64_Ehdr.ET_REL);
+ header.putChar(Elf64_Ehdr.e_machine.off, ElfTargetInfo.getElfArch());
+ header.putInt(Elf64_Ehdr.e_version.off, Elf64_Ehdr.EV_CURRENT);
+ header.putChar(Elf64_Ehdr.e_ehsize.off, (char)Elf64_Ehdr.totalsize);
+ header.putChar(Elf64_Ehdr.e_shentsize.off, (char)Elf64_Shdr.totalsize);
+
+ }
+
+ // Update header with file offset of first section
+ public void setSectionOff(int offset) {
+ header.putLong(Elf64_Ehdr.e_shoff.off, offset);
+ }
+
+ // Update header with the number of total sections
+ public void setSectionNum(int count) {
+ header.putChar(Elf64_Ehdr.e_shnum.off, (char)count);
+ }
+
+ // Update header with the section index containing the
+ // string table for section names
+ public void setSectionStrNdx(int index) {
+ header.putChar(Elf64_Ehdr.e_shstrndx.off, (char)index);
+ }
+
+ public byte[] getArray() {
+ return header.array();
+ }
+}
+
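Assuming the patch is applied and built, the expected call sequence for ElfHeader is to construct it first, patch in the section header offset, count and .shstrtab index once those are known, and finally serialize it. A hedged usage sketch with made-up numbers:

    // Usage sketch only; the offsets, counts and indexes below are invented for illustration.
    import jdk.tools.jaotc.binformat.elf.ElfHeader;

    public class ElfHeaderSketch {
        public static void main(String[] args) {
            ElfHeader eh = new ElfHeader();
            eh.setSectionOff(64);    // hypothetical: section headers right after the 64-byte Ehdr
            eh.setSectionNum(5);     // hypothetical total section count
            eh.setSectionStrNdx(4);  // hypothetical .shstrtab index
            System.out.println("Ehdr bytes: " + eh.getArray().length);  // 64 (Elf64_Ehdr.totalsize)
        }
    }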
diff --git a/hotspot/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.salver/src/org/graalvm/compiler/salver/writer/DumpWriter.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfRelocEntry.java
similarity index 55%
rename from hotspot/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.salver/src/org/graalvm/compiler/salver/writer/DumpWriter.java
rename to hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfRelocEntry.java
index 47a7ca3905e..817648ec65b 100644
--- a/hotspot/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.salver/src/org/graalvm/compiler/salver/writer/DumpWriter.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfRelocEntry.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -20,32 +20,32 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
-package org.graalvm.compiler.salver.writer;
-import java.io.Closeable;
-import java.io.Flushable;
-import java.io.IOException;
+package jdk.tools.jaotc.binformat.elf;
+
import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
-public interface DumpWriter extends Closeable, Flushable, AutoCloseable {
+import jdk.tools.jaotc.binformat.elf.Elf;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Rela;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Ehdr;
+import jdk.tools.jaotc.binformat.elf.ElfTargetInfo;
+import jdk.tools.jaotc.binformat.elf.ElfByteBuffer;
- DumpWriter write(byte b) throws IOException;
+public class ElfRelocEntry {
+ ByteBuffer entry;
- DumpWriter write(byte[] arr) throws IOException;
+ public ElfRelocEntry(int offset, int symno, int type, int addend) {
- DumpWriter write(ByteBuffer buf) throws IOException;
+ entry = ElfByteBuffer.allocate(Elf64_Rela.totalsize);
- DumpWriter write(CharSequence csq) throws IOException;
+ entry.putLong(Elf64_Rela.r_offset.off, offset);
+ entry.putLong(Elf64_Rela.r_info.off, Elf64_Rela.ELF64_R_INFO(symno,type));
+ entry.putLong(Elf64_Rela.r_addend.off, addend);
+ }
- DumpWriter writeChar(char v) throws IOException;
-
- DumpWriter writeShort(short v) throws IOException;
-
- DumpWriter writeInt(int v) throws IOException;
-
- DumpWriter writeLong(long v) throws IOException;
-
- DumpWriter writeFloat(float v) throws IOException;
-
- DumpWriter writeDouble(double v) throws IOException;
+ public byte[] getArray() {
+ return entry.array();
+ }
}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfRelocTable.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfRelocTable.java
new file mode 100644
index 00000000000..0e28a9f384e
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfRelocTable.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.elf;
+
+import java.util.ArrayList;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.elf.ElfRelocEntry;
+import jdk.tools.jaotc.binformat.elf.ElfTargetInfo;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Rela;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Ehdr;
+import jdk.tools.jaotc.binformat.elf.ElfByteBuffer;
+
+public class ElfRelocTable {
+ ArrayList<ArrayList<ElfRelocEntry>> relocEntries;
+
+ public ElfRelocTable(int numsects) {
+ relocEntries = new ArrayList<ArrayList<ElfRelocEntry>>(numsects);
+ for (int i = 0; i < numsects; i++)
+ relocEntries.add(new ArrayList<ElfRelocEntry>());
+ }
+
+ public void createRelocationEntry(int sectindex,
+ int offset,
+ int symno,
+ int type,
+ int addend) {
+
+ ElfRelocEntry entry = new ElfRelocEntry(offset,
+ symno,
+ type,
+ addend);
+ relocEntries.get(sectindex).add(entry);
+ }
+
+ public int getNumRelocs(int section_index) {
+ return relocEntries.get(section_index).size();
+ }
+
+ // Return the relocation entries for a single section
+ // or null if no entries added to section
+ public byte [] getRelocData(int section_index) {
+ ArrayList<ElfRelocEntry> entryList = relocEntries.get(section_index);
+
+ if (entryList.size() == 0)
+ return null;
+
+ ByteBuffer relocData = ElfByteBuffer.allocate(entryList.size() * Elf64_Rela.totalsize);
+
+ // Copy each entry to a single ByteBuffer
+ for (int i = 0; i < entryList.size(); i++) {
+ ElfRelocEntry entry = entryList.get(i);
+ relocData.put(entry.getArray());
+ }
+
+ return (relocData.array());
+ }
+}
+
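A hedged usage sketch of ElfRelocTable (assuming the patch is built; the section index, symbol number and offset below are made up), showing that sections without entries return null from getRelocData and that each entry serializes to Elf64_Rela.totalsize bytes:

    // Usage sketch only; section index 1, symbol 7 and offset 0x40 are invented values.
    import jdk.tools.jaotc.binformat.elf.ElfRelocTable;
    import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Rela;

    public class RelocTableSketch {
        public static void main(String[] args) {
            ElfRelocTable table = new ElfRelocTable(3);                       // three sections
            table.createRelocationEntry(1, 0x40, 7, Elf64_Rela.R_X86_64_64, 0);
            System.out.println(table.getNumRelocs(1));                        // 1
            System.out.println(table.getRelocData(0));                        // null: no entries
            System.out.println(table.getRelocData(1).length);                 // 24 bytes (one Elf64_Rela)
        }
    }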
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfSection.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfSection.java
new file mode 100644
index 00000000000..86415d23cca
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfSection.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.elf;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.elf.Elf;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Ehdr;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Shdr;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Rel;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Rela;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Sym;
+import jdk.tools.jaotc.binformat.elf.ElfByteBuffer;
+
+public class ElfSection {
+ String name;
+ ByteBuffer section;
+ byte [] data;
+ boolean hasrelocations;
+ int sectionIndex;
+
+ /**
+ * String holding section name strings
+ */
+ private static StringBuilder sectNameTab = new StringBuilder();
+
+ /**
+ * Keeps track of bytes in section string table since strTabContent.length()
+ * is number of chars, not bytes.
+ */
+ private static int shStrTabNrOfBytes = 0;
+
+ public ElfSection(String sectName, byte [] sectData, int sectFlags,
+ int sectType, boolean hasRelocations, int align,
+ int sectIndex) {
+
+ section = ElfByteBuffer.allocate(Elf64_Shdr.totalsize);
+
+ // Return all 0's for NULL section
+ if (sectIndex == 0) {
+ sectNameTab.append('\0');
+ shStrTabNrOfBytes += 1;
+ data = null;
+ hasrelocations = false;
+ sectionIndex = 0;
+ return;
+ }
+
+ section.putInt(Elf64_Shdr.sh_name.off, shStrTabNrOfBytes);
+ sectNameTab.append(sectName).append('\0');
+ shStrTabNrOfBytes += (sectName.getBytes().length + 1);
+ name = sectName;
+
+ section.putInt(Elf64_Shdr.sh_type.off, sectType);
+ section.putLong(Elf64_Shdr.sh_flags.off, sectFlags);
+ section.putLong(Elf64_Shdr.sh_addr.off, 0);
+ section.putLong(Elf64_Shdr.sh_offset.off, 0);
+
+ if (sectName.equals(".shstrtab")) {
+ section.putLong(Elf64_Shdr.sh_size.off, shStrTabNrOfBytes);
+ data = sectNameTab.toString().getBytes();
+ }
+ else {
+ data = sectData;
+ section.putLong(Elf64_Shdr.sh_size.off, sectData.length);
+ }
+
+ section.putLong(Elf64_Shdr.sh_entsize.off, 0);
+
+ // Determine the entrysize
+ // based on type of section
+ switch (sectType) {
+ case Elf64_Shdr.SHT_SYMTAB:
+ section.putLong(Elf64_Shdr.sh_entsize.off, Elf64_Sym.totalsize);
+ break;
+ case Elf64_Shdr.SHT_RELA:
+ section.putLong(Elf64_Shdr.sh_entsize.off, Elf64_Rela.totalsize);
+ break;
+ case Elf64_Shdr.SHT_REL:
+ section.putLong(Elf64_Shdr.sh_entsize.off, Elf64_Rel.totalsize);
+ break;
+ default:
+ break;
+ }
+ section.putLong(Elf64_Shdr.sh_addralign.off, align);
+
+ hasrelocations = hasRelocations;
+ sectionIndex = sectIndex;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public long getSize() {
+ return section.getLong(Elf64_Shdr.sh_size.off);
+ }
+
+ public int getDataAlign() {
+ return ((int)section.getLong(Elf64_Shdr.sh_addralign.off));
+ }
+
+ // Alignment requirements for the Elf64_Shdr structures
+ public static int getShdrAlign() {
+ return (4);
+ }
+
+ public byte[] getArray() {
+ return section.array();
+ }
+
+ public byte[] getDataArray() {
+ return data;
+ }
+
+ public void setOffset(long offset) {
+ section.putLong(Elf64_Shdr.sh_offset.off, offset);
+ }
+
+ public void setLink(int link) {
+ section.putInt(Elf64_Shdr.sh_link.off, link);
+ }
+
+ public void setInfo(int info) {
+ section.putInt(Elf64_Shdr.sh_info.off, info);
+ }
+
+ public long getOffset() {
+ return (section.getLong(Elf64_Shdr.sh_offset.off));
+ }
+
+ public boolean hasRelocations() {
+ return hasrelocations;
+ }
+
+ public int getSectionId() {
+ return sectionIndex;
+ }
+
+}
+
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfSymbol.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfSymbol.java
new file mode 100644
index 00000000000..6a22019f6bf
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfSymbol.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.elf;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.NativeSymbol;
+import jdk.tools.jaotc.binformat.elf.Elf;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Sym;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Ehdr;
+import jdk.tools.jaotc.binformat.elf.ElfByteBuffer;
+
+public class ElfSymbol extends NativeSymbol {
+ ByteBuffer sym;
+
+ public ElfSymbol(int symbolindex, int strindex, byte type, byte bind,
+ byte sectindex, long offset, long size) {
+ super(symbolindex);
+ sym = ElfByteBuffer.allocate(Elf64_Sym.totalsize);
+
+ sym.putInt(Elf64_Sym.st_name.off, strindex);
+ sym.put(Elf64_Sym.st_info.off, Elf64_Sym.ELF64_ST_INFO(bind, type));
+ sym.put(Elf64_Sym.st_other.off, (byte)0);
+ // Section indexes start at 1 but we manage the index internally
+ // as 0 relative
+ sym.putChar(Elf64_Sym.st_shndx.off, (char)(sectindex));
+ sym.putLong(Elf64_Sym.st_value.off, offset);
+ sym.putLong(Elf64_Sym.st_size.off, size);
+ }
+
+ public byte[] getArray() {
+ return sym.array();
+ }
+}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfSymtab.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfSymtab.java
new file mode 100644
index 00000000000..4105c152693
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfSymtab.java
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.elf;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.ArrayList;
+
+import jdk.tools.jaotc.binformat.elf.Elf;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Sym;
+import jdk.tools.jaotc.binformat.elf.ElfSymbol;
+import jdk.tools.jaotc.binformat.elf.ElfByteBuffer;
+
+public class ElfSymtab {
+
+ ArrayList<ElfSymbol> localSymbols = new ArrayList<ElfSymbol>();
+ ArrayList<ElfSymbol> globalSymbols = new ArrayList<ElfSymbol>();
+
+ /**
+ * number of symbols added
+ */
+ int symbolCount;
+
+ /**
+ * String holding symbol table strings
+ */
+ private StringBuilder strTabContent = new StringBuilder();
+
+ /**
+ * Keeps track of bytes in string table since strTabContent.length()
+ * is number of chars, not bytes.
+ */
+ private int strTabNrOfBytes = 0;
+
+ public ElfSymtab() {
+ symbolCount = 0;
+ }
+
+ public ElfSymbol addSymbolEntry(String name, byte type, byte bind,
+ byte secHdrIndex, long offset, long size) {
+ // Get the current symbol index and append symbol name to string table.
+ int index;
+ ElfSymbol sym;
+
+ if (name.isEmpty()) {
+ index = 0;
+ strTabContent.append('\0');
+ strTabNrOfBytes += 1;
+ sym = new ElfSymbol(symbolCount, index, type, bind, secHdrIndex, offset, size);
+ localSymbols.add(sym);
+ } else {
+ // We can't trust strTabContent.length() since that is
+ // chars (UTF16), keep track of bytes on our own.
+ index = strTabNrOfBytes;
+ // strTabContent.append("_").append(name).append('\0');
+ strTabContent.append(name).append('\0');
+ // + 1 for null, + 1 for "_"
+ //strTabNrOfBytes += (name.getBytes().length + 1 + 1);
+ strTabNrOfBytes += (name.getBytes().length + 1);
+
+ sym = new ElfSymbol(symbolCount, index, type, bind, secHdrIndex, offset, size);
+ if ((bind & Elf64_Sym.STB_GLOBAL) != 0)
+ globalSymbols.add(sym);
+ else
+ localSymbols.add(sym);
+ }
+ symbolCount++;
+ return (sym);
+ }
+
+ // Update the symbol indexes once all symbols have been added.
+ // This is required since we'll be reordering the symbols in the
+ // file to be in the order of Local then global.
+ public void updateIndexes() {
+ int index = 0;
+
+ // Update the local symbol indexes
+ for (int i = 0; i < localSymbols.size(); i++ ) {
+ ElfSymbol sym = localSymbols.get(i);
+ sym.setIndex(index++);
+ }
+
+ // Update the global symbol indexes
+ for (int i = 0; i < globalSymbols.size(); i++ ) {
+ ElfSymbol sym = globalSymbols.get(i);
+ sym.setIndex(index++);
+ }
+ }
+
+ public int getNumLocalSyms() { return localSymbols.size(); }
+ public int getNumGlobalSyms() { return globalSymbols.size(); }
+
+
+ // Create a single byte array that contains the symbol table entries
+ public byte[] getSymtabArray() {
+ int index = 0;
+ ByteBuffer symtabData = ElfByteBuffer.allocate(symbolCount*Elf64_Sym.totalsize);
+ byte [] retarray;
+
+ updateIndexes();
+
+ // Add the local symbols
+ for (int i = 0; i < localSymbols.size(); i++ ) {
+ ElfSymbol sym = localSymbols.get(i);
+ byte [] arr = sym.getArray();
+ symtabData.put(arr);
+ }
+ // Add the global symbols
+ for (int i = 0; i < globalSymbols.size(); i++ ) {
+ ElfSymbol sym = globalSymbols.get(i);
+ byte [] arr = sym.getArray();
+ symtabData.put(arr);
+ }
+ retarray = symtabData.array();
+
+ return (retarray);
+ }
+
+ // Return the string table array
+ public byte[] getStrtabArray() {
+ byte [] strs = strTabContent.toString().getBytes();
+ return (strs);
+ }
+}
+
+
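A hedged usage sketch of ElfSymtab (assuming the patch is built; the symbol name, section index, offset and size are made up), illustrating how local and global symbols are kept apart and how the string table grows by name length plus a terminating NUL:

    // Usage sketch only; "foo", section index 1, offset 0x100 and size 32 are invented.
    import jdk.tools.jaotc.binformat.elf.ElfSymtab;
    import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Sym;

    public class SymtabSketch {
        public static void main(String[] args) {
            ElfSymtab symtab = new ElfSymtab();
            symtab.addSymbolEntry("", Elf64_Sym.STT_NOTYPE, Elf64_Sym.STB_LOCAL, (byte) 0, 0, 0);
            symtab.addSymbolEntry("foo", Elf64_Sym.STT_FUNC, Elf64_Sym.STB_GLOBAL, (byte) 1, 0x100, 32);
            System.out.println(symtab.getNumLocalSyms());        // 1 (the empty/NULL symbol)
            System.out.println(symtab.getNumGlobalSyms());       // 1 ("foo")
            System.out.println(symtab.getSymtabArray().length);  // 48 = 2 * Elf64_Sym.totalsize
            System.out.println(symtab.getStrtabArray().length);  // 5  = "\0" + "foo\0"
        }
    }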
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfTargetInfo.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfTargetInfo.java
new file mode 100644
index 00000000000..2c9a6a3c4a0
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfTargetInfo.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.elf;
+
+import java.nio.ByteOrder;
+import jdk.tools.jaotc.binformat.elf.Elf;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Ehdr;
+
+/**
+ * Class that abstracts ELF target details.
+ *
+ */
+public class ElfTargetInfo {
+ /**
+ * Target architecture.
+ */
+ private static final char arch;
+
+ /**
+ * Architecture endian-ness.
+ */
+ private static final int endian = Elf64_Ehdr.ELFDATA2LSB;
+
+ /**
+ * Target OS string.
+ */
+ private static String osName;
+
+ static {
+ // Find the target arch details
+ String archStr = System.getProperty("os.arch").toLowerCase();
+ if (ByteOrder.nativeOrder() != ByteOrder.LITTLE_ENDIAN) {
+ System.out.println("Only Little Endian byte order supported!");
+ }
+
+ if (archStr.equals("amd64") || archStr.equals("x86_64")) {
+ arch = Elf64_Ehdr.EM_X86_64;
+ } else {
+ System.out.println("Unsupported architecture " + archStr);
+ arch = Elf64_Ehdr.EM_NONE;
+ }
+
+ osName = System.getProperty("os.name").toLowerCase();
+ if (!osName.equals("linux") && !osName.equals("sunos")) {
+ System.out.println("Unsupported Operating System " + osName);
+ osName = "Unknown";
+ }
+ }
+
+ public static char getElfArch() {
+ return arch;
+ }
+
+ public static int getElfEndian() {
+ return endian;
+ }
+
+ public static String getOsName() {
+ return osName;
+ }
+}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/JELFRelocObject.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/JELFRelocObject.java
index 82bac0c7170..b288b554708 100644
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/JELFRelocObject.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/JELFRelocObject.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@ import java.util.Collection;
import java.util.List;
import java.util.Map;
+import jdk.tools.jaotc.binformat.Container;
import jdk.tools.jaotc.binformat.BinaryContainer;
import jdk.tools.jaotc.binformat.ByteContainer;
import jdk.tools.jaotc.binformat.CodeContainer;
@@ -37,36 +38,62 @@ import jdk.tools.jaotc.binformat.ReadOnlyDataContainer;
import jdk.tools.jaotc.binformat.Relocation;
import jdk.tools.jaotc.binformat.Relocation.RelocType;
import jdk.tools.jaotc.binformat.Symbol;
+import jdk.tools.jaotc.binformat.NativeSymbol;
import jdk.tools.jaotc.binformat.Symbol.Binding;
import jdk.tools.jaotc.binformat.Symbol.Kind;
-import jdk.tools.jaotc.jnilibelf.ELFContainer;
-import jdk.tools.jaotc.jnilibelf.ELFSymbol;
-import jdk.tools.jaotc.jnilibelf.JNIELFContainer;
-import jdk.tools.jaotc.jnilibelf.JNIELFRelocation;
-import jdk.tools.jaotc.jnilibelf.JNIELFTargetInfo;
-import jdk.tools.jaotc.jnilibelf.JNILibELFAPI.ELF;
-import jdk.tools.jaotc.jnilibelf.JNILibELFAPI.LibELF.Elf_Cmd;
-import jdk.tools.jaotc.jnilibelf.JNILibELFAPI.LibELF.Elf_Type;
-import jdk.tools.jaotc.jnilibelf.Pointer;
+
+import jdk.tools.jaotc.binformat.elf.Elf;
+import jdk.tools.jaotc.binformat.elf.ElfSymbol;
+import jdk.tools.jaotc.binformat.elf.ElfTargetInfo;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Ehdr;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Shdr;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Sym;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Rel;
+import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Rela;
public class JELFRelocObject {
private final BinaryContainer binContainer;
- private final JNIELFContainer elfContainer;
+ private final ElfContainer elfContainer;
private final int segmentSize;
public JELFRelocObject(BinaryContainer binContainer, String outputFileName, String aotVersion) {
this.binContainer = binContainer;
- this.elfContainer = new JNIELFContainer(outputFileName, aotVersion);
+ this.elfContainer = new ElfContainer(outputFileName, aotVersion);
this.segmentSize = binContainer.getCodeSegmentSize();
}
- private void createByteSection(ByteContainer c, int scnFlags) {
+ private ElfSection createByteSection(ArrayList<ElfSection> sections,
+ String sectName,
+ byte [] scnData,
+ boolean hasRelocs,
+ int align,
+ int scnFlags,
+ int scnType) {
+
+ ElfSection sect = new ElfSection(sectName,
+ scnData,
+ scnFlags,
+ scnType,
+ hasRelocs,
+ align,
+ sections.size());
+ // Add this section to our list
+ sections.add(sect);
+
+ return (sect);
+ }
+
+ private void createByteSection(ArrayList<ElfSection> sections,
+ ByteContainer c, int scnFlags) {
+ ElfSection sect;
+ boolean hasRelocs = c.hasRelocations();
byte[] scnData = c.getByteArray();
- int scnType = ELF.SHT_PROGBITS;
- boolean zeros = !c.hasRelocations();
+
+ int scnType = Elf64_Shdr.SHT_PROGBITS;
+ boolean zeros = !hasRelocs;
if (zeros) {
for (byte b : scnData) {
if (b != 0) {
@@ -75,30 +102,30 @@ public class JELFRelocObject {
}
}
if (zeros) {
- scnType = ELF.SHT_NOBITS;
+ scnType = Elf64_Shdr.SHT_NOBITS;
}
}
- int sectionId = elfContainer.createSection(c.getContainerName(), scnData, Elf_Type.ELF_T_BYTE, segmentSize, scnType, scnFlags, ELF.SHN_UNDEF, 0);
- c.setSectionId(sectionId);
- // Clear out code section data to allow for GC
- c.clear();
+ sect = createByteSection(sections, c.getContainerName(),
+ scnData, hasRelocs, segmentSize,
+ scnFlags, scnType);
+ c.setSectionId(sect.getSectionId());
}
- private void createCodeSection(CodeContainer c) {
- createByteSection(c, ELF.SHF_ALLOC | ELF.SHF_EXECINSTR);
+ private void createCodeSection(ArrayList<ElfSection> sections, CodeContainer c) {
+ createByteSection(sections, c, Elf64_Shdr.SHF_ALLOC | Elf64_Shdr.SHF_EXECINSTR);
}
- private void createReadOnlySection(ReadOnlyDataContainer c) {
- createByteSection(c, ELF.SHF_ALLOC);
+ private void createReadOnlySection(ArrayList<ElfSection> sections, ReadOnlyDataContainer c) {
+ createByteSection(sections, c, Elf64_Shdr.SHF_ALLOC);
}
- private void createReadWriteSection(ByteContainer c) {
- createByteSection(c, ELF.SHF_ALLOC | ELF.SHF_WRITE);
+ private void createReadWriteSection(ArrayList<ElfSection> sections, ByteContainer c) {
+ createByteSection(sections, c, Elf64_Shdr.SHF_ALLOC | Elf64_Shdr.SHF_WRITE);
}
/**
- * Create an ELF relocatable object using jdk.tools.jaotc.jnilibelf API.
+ * Create an ELF relocatable object
*
* @param relocationTable
* @param symbols
@@ -106,145 +133,174 @@ public class JELFRelocObject {
*/
public void createELFRelocObject(Map<Symbol, List<Relocation>> relocationTable, Collection<Symbol> symbols) throws IOException {
// Allocate ELF Header
- elfContainer.createELFHeader(ELF.ET_REL);
+ ElfHeader eh = new ElfHeader();
+
+ ArrayList<ElfSection> sections = new ArrayList<ElfSection>();
+
+ // Create the null section
+ createByteSection(sections, null, null, false, 1, 0, 0);
// Create text section
- createCodeSection(binContainer.getCodeContainer());
- createReadOnlySection(binContainer.getMetaspaceNamesContainer());
- createReadOnlySection(binContainer.getKlassesOffsetsContainer());
- createReadOnlySection(binContainer.getMethodsOffsetsContainer());
- createReadOnlySection(binContainer.getKlassesDependenciesContainer());
- createReadWriteSection(binContainer.getMetaspaceGotContainer());
- createReadWriteSection(binContainer.getMetadataGotContainer());
- createReadWriteSection(binContainer.getMethodStateContainer());
- createReadWriteSection(binContainer.getOopGotContainer());
- createReadWriteSection(binContainer.getMethodMetadataContainer());
- createReadOnlySection(binContainer.getStubsOffsetsContainer());
- createReadOnlySection(binContainer.getHeaderContainer().getContainer());
- createReadOnlySection(binContainer.getCodeSegmentsContainer());
- createReadOnlySection(binContainer.getConstantDataContainer());
- createReadOnlySection(binContainer.getConfigContainer());
+ createCodeSection(sections, binContainer.getCodeContainer());
+ createReadOnlySection(sections, binContainer.getMetaspaceNamesContainer());
+ createReadOnlySection(sections, binContainer.getKlassesOffsetsContainer());
+ createReadOnlySection(sections, binContainer.getMethodsOffsetsContainer());
+ createReadOnlySection(sections, binContainer.getKlassesDependenciesContainer());
+ createReadWriteSection(sections, binContainer.getMetaspaceGotContainer());
+ createReadWriteSection(sections, binContainer.getMetadataGotContainer());
+ createReadWriteSection(sections, binContainer.getMethodStateContainer());
+ createReadWriteSection(sections, binContainer.getOopGotContainer());
+ createReadWriteSection(sections, binContainer.getMethodMetadataContainer());
+ createReadOnlySection(sections, binContainer.getStubsOffsetsContainer());
+ createReadOnlySection(sections, binContainer.getHeaderContainer().getContainer());
+ createReadOnlySection(sections, binContainer.getCodeSegmentsContainer());
+ createReadOnlySection(sections, binContainer.getConstantDataContainer());
+ createReadOnlySection(sections, binContainer.getConfigContainer());
// createExternalLinkage();
- createCodeSection(binContainer.getExtLinkageContainer());
- createReadWriteSection(binContainer.getExtLinkageGOTContainer());
+ createCodeSection(sections, binContainer.getExtLinkageContainer());
+ createReadWriteSection(sections, binContainer.getExtLinkageGOTContainer());
// Get ELF symbol data from BinaryContainer object's symbol tables
- createELFSymbolTables(symbols);
+ ElfSymtab symtab = createELFSymbolTables(sections, symbols);
// Create string table section and symbol table sections in
- // that order since symtab section needs to set the index of strtab in sh_link field
- int strTabSectionIndex = elfContainer.createSection(".strtab", elfContainer.getStrTabContent().getBytes(StandardCharsets.UTF_8), Elf_Type.ELF_T_BYTE, 1, ELF.SHT_STRTAB, 0, ELF.SHN_UNDEF, 0);
+ // that order since symtab section needs to set the index of
+ // strtab in sh_link field
+ ElfSection strTabSection = createByteSection(sections,
+ ".strtab",
+ symtab.getStrtabArray(),
+ false,
+ 1,
+ 0,
+ Elf64_Shdr.SHT_STRTAB);
- // Now create .symtab section with the symtab data constructed. On Linux, sh_link of symtab
- // contains the index of string table its symbols reference and
- // sh_info contains the index of first non-local symbol
- int scnInfo = elfContainer.getFirstNonLocalSymbolIndex();
- int symTabSectionIndex = elfContainer.createSection(".symtab", getELFSymbolTableData(), Elf_Type.ELF_T_SYM, 8, ELF.SHT_SYMTAB, ELF.SHF_ALLOC, strTabSectionIndex, scnInfo);
+ // Now create .symtab section with the symtab data constructed.
+ // On Linux, sh_link of symtab contains the index of string table
+ // its symbols reference and sh_info contains the index of first
+ // non-local symbol
+ ElfSection symTabSection = createByteSection(sections,
+ ".symtab",
+ symtab.getSymtabArray(),
+ false,
+ 8,
+ 0,
+ Elf64_Shdr.SHT_SYMTAB);
+ symTabSection.setLink(strTabSection.getSectionId());
+ symTabSection.setInfo(symtab.getNumLocalSyms());
- buildRelocations(relocationTable, symTabSectionIndex);
+ ElfRelocTable elfRelocTable = createElfRelocTable(sections,
+ relocationTable);
+
+ createElfRelocSections(sections, elfRelocTable, symTabSection.getSectionId());
// Now, finally, after creating all sections, create shstrtab section
- elfContainer.createSection(".shstrtab", elfContainer.getShStrTabContent().getBytes(StandardCharsets.UTF_8), Elf_Type.ELF_T_BYTE, 1, ELF.SHT_STRTAB, 0, ELF.SHN_UNDEF, 0);
+ ElfSection shStrTabSection = createByteSection(sections,
+ ".shstrtab",
+ null,
+ false,
+ 1,
+ 0,
+ Elf64_Shdr.SHT_STRTAB);
+ eh.setSectionStrNdx(shStrTabSection.getSectionId());
- // Run elf_update
- elfContainer.elfUpdate(Elf_Cmd.ELF_C_NULL);
+ // Update all section offsets and the Elf header section offset
+ // Write the Header followed by the contents of each section
+ // and then the section structures (section table).
+ int file_offset = Elf64_Ehdr.totalsize;
- // Run elfUpdate again to write it out.
- elfContainer.elfUpdate(Elf_Cmd.ELF_C_WRITE);
- // Finish ELF processing
- elfContainer.elfEnd();
+ // and round it up
+ file_offset = (file_offset + (sections.get(1).getDataAlign()-1)) &
+ ~((sections.get(1).getDataAlign()-1));
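+ // Note: (off + (align-1)) & ~(align-1) rounds "off" up to the next
+ // multiple of "align", assuming the alignment is a power of two.
+ // For example, off = 100 with align = 16 yields (100 + 15) & ~15 = 112.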
+
+ // Calc file offsets for section data skipping null section
+ for (int i = 1; i < sections.size(); i++) {
+ ElfSection sect = sections.get(i);
+ file_offset = (file_offset + (sect.getDataAlign()-1)) &
+ ~((sect.getDataAlign()-1));
+ sect.setOffset(file_offset);
+ file_offset += sect.getSize();
+ }
+
+ // Align the section table
+ file_offset = (file_offset + (ElfSection.getShdrAlign()-1)) &
+ ~((ElfSection.getShdrAlign()-1));
+
+ // Update the Elf Header with the offset of the first Elf64_Shdr
+ // and the number of sections.
+ eh.setSectionOff(file_offset);
+ eh.setSectionNum(sections.size());
+
+ // Write out the Header
+ elfContainer.writeBytes(eh.getArray());
+
+ // Write out each section contents skipping null section
+ for (int i = 1; i < sections.size(); i++) {
+ ElfSection sect = sections.get(i);
+ elfContainer.writeBytes(sect.getDataArray(), sect.getDataAlign());
+ }
+
+ // Write out the section table
+ for (int i = 0; i < sections.size(); i++) {
+ ElfSection sect = sections.get(i);
+ elfContainer.writeBytes(sect.getArray(), ElfSection.getShdrAlign());
+ }
+
+ elfContainer.close();
}
-
- private void buildRelocations(Map<Symbol, List<Relocation>> relocationTable, final int symTabSectionIndex) {
- /*
- * Create relocation sections. This needs to be done after symbol table sections were
- * created since relocation entries will need indices of sections to which they apply.
- */
- createELFRelocationTables(relocationTable);
- createAllRelocationSections(new SymTabELFContainer(symTabSectionIndex));
- }
-
/**
* Construct ELF symbol data from BinaryContainer object's symbol tables. Both dynamic ELF
* symbol table and ELF symbol table are created from BinaryContainer's symbol info.
*
* @param symbols
*/
- private void createELFSymbolTables(Collection<Symbol> symbols) {
+ private ElfSymtab createELFSymbolTables(ArrayList<ElfSection> sections, Collection<Symbol> symbols) {
+ ElfSymtab symtab = new ElfSymtab();
+
// First, create the initial null symbol. This is a local symbol.
- elfContainer.createELFSymbolEntry("", 0, 0, ELF.SHN_UNDEF, 0, 0, true);
+ symtab.addSymbolEntry("", (byte)0, (byte)0, Elf64_Shdr.SHN_UNDEF, 0, 0);
// Now create ELF symbol entries for all symbols.
for (Symbol symbol : symbols) {
// Get the index of section this symbol is defined in.
int secHdrIndex = symbol.getSection().getSectionId();
- boolean isLocal = (symbol.getBinding() == Binding.LOCAL);
- ELFSymbol elfSymbol = elfContainer.createELFSymbolEntry(symbol.getName(), getELFTypeOf(symbol), getELFBindOf(symbol), secHdrIndex, symbol.getSize(), symbol.getOffset(), isLocal);
- symbol.setElfSymbol(elfSymbol);
+ ElfSymbol elfSymbol = symtab.addSymbolEntry(symbol.getName(), getELFTypeOf(symbol), getELFBindOf(symbol), (byte)secHdrIndex, symbol.getOffset(), symbol.getSize());
+ symbol.setNativeSymbol((NativeSymbol)elfSymbol);
}
+ return (symtab);
}
- /**
- * Construct ELF symbol data from BinaryContainer object's symbol tables.
- *
- * @return a byte array containing the symbol table
- */
- private byte[] getELFSymbolTableData() {
- final int entrySize = JNIELFTargetInfo.sizeOfSymtabEntry();
-
- // First, add all local symbols.
- List<ELFSymbol> localSymbols = elfContainer.getLocalSymbols();
- List<ELFSymbol> globalSymbols = elfContainer.getGlobalSymbols();
-
- int localSymCount = localSymbols.size();
- int globalSymCount = globalSymbols.size();
- byte[] sectionDataArray = new byte[(localSymCount + globalSymCount) * entrySize];
-
- for (int i = 0; i < localSymCount; i++) {
- ELFSymbol symbol = localSymbols.get(i);
- Pointer address = symbol.getAddress();
- address.copyBytesTo(sectionDataArray, entrySize, i * entrySize);
- }
-
- // Next, add all global symbols.
-
- for (int i = 0; i < globalSymCount; i++) {
- ELFSymbol symbol = globalSymbols.get(i);
- Pointer address = symbol.getAddress();
- address.copyBytesTo(sectionDataArray, entrySize, (localSymCount + i) * entrySize);
- }
-
- return sectionDataArray;
- }
-
- private static int getELFTypeOf(Symbol sym) {
+ private static byte getELFTypeOf(Symbol sym) {
Kind kind = sym.getKind();
if (kind == Symbol.Kind.NATIVE_FUNCTION || kind == Symbol.Kind.JAVA_FUNCTION) {
- return ELF.STT_FUNC;
+ return Elf64_Sym.STT_FUNC;
} else if (kind == Symbol.Kind.OBJECT) {
- return ELF.STT_OBJECT;
+ return Elf64_Sym.STT_OBJECT;
}
- return ELF.STT_NOTYPE;
+ return Elf64_Sym.STT_NOTYPE;
}
- private static int getELFBindOf(Symbol sym) {
+ private static byte getELFBindOf(Symbol sym) {
Binding binding = sym.getBinding();
if (binding == Symbol.Binding.GLOBAL) {
- return ELF.STB_GLOBAL;
+ return Elf64_Sym.STB_GLOBAL;
}
- return ELF.STB_LOCAL;
+ return Elf64_Sym.STB_LOCAL;
}
/**
- * Construct ELF relocation section data from BinaryContainer object's relocation tables.
+ * Construct an ELF relocation table from BinaryContainer object's relocation tables.
*
+ * @param sections
* @param relocationTable
*/
- private void createELFRelocationTables(Map<Symbol, List<Relocation>> relocationTable) {
+ private ElfRelocTable createElfRelocTable(ArrayList<ElfSection> sections,
+ Map<Symbol, List<Relocation>> relocationTable) {
+
+ ElfRelocTable elfRelocTable = new ElfRelocTable(sections.size());
/*
- * For each of the symbols with associated relocation records, create an ELF relocation
+ * For each of the symbols with associated relocation records, create a Elf relocation
* entry.
*/
for (Map.Entry<Symbol, List<Relocation>> entry : relocationTable.entrySet()) {
@@ -252,18 +308,26 @@ public class JELFRelocObject {
Symbol symbol = entry.getKey();
for (Relocation reloc : relocs) {
- createRelocation(symbol, reloc);
+ createRelocation(symbol, reloc, elfRelocTable);
}
}
for (Map.Entry<Symbol, Relocation> entry : binContainer.getUniqueRelocationTable().entrySet()) {
- createRelocation(entry.getKey(), entry.getValue());
+ createRelocation(entry.getKey(), entry.getValue(), elfRelocTable);
}
+
+ return (elfRelocTable);
}
- private void createRelocation(Symbol symbol, Relocation reloc) {
+ private void createRelocation(Symbol symbol, Relocation reloc, ElfRelocTable elfRelocTable) {
RelocType relocType = reloc.getType();
+
int elfRelocType = getELFRelocationType(relocType);
+ ElfSymbol sym = (ElfSymbol)symbol.getNativeSymbol();
+ int symno = sym.getIndex();
+ int sectindex = reloc.getSection().getSectionId();
+ int offset = reloc.getOffset();
+ int addend = 0;
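+
+ // In the cases below, "offset" is adjusted to address the immediate field
+ // being patched (the negative addend reflects the field size), and the
+ // addend itself is emitted in the RELA entry for the linker to apply.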
switch (relocType) {
case FOREIGN_CALL_DIRECT:
@@ -271,85 +335,89 @@ public class JELFRelocObject {
case STUB_CALL_DIRECT:
case FOREIGN_CALL_INDIRECT_GOT: {
// Create relocation entry
- int addend = -4; // Size in bytes of the patch location
+ // System.out.println("getELFRelocationType: PLT relocation type using X86_64_RELOC_BRANCH");
+ addend = -4; // Size in bytes of the patch location
// Relocation should be applied at the location after call operand
- int offset = reloc.getOffset() + reloc.getSize() + addend;
- elfContainer.createELFRelocationEntry(reloc.getSection(), offset, elfRelocType, addend, symbol.getElfSymbol());
+ offset = offset + reloc.getSize() + addend;
break;
}
case FOREIGN_CALL_DIRECT_FAR: {
// Create relocation entry
- int addend = -8; // Size in bytes of the patch location
+ addend = -8; // Size in bytes of the patch location
// Relocation should be applied at the location after call operand
// 10 = 2 (jmp [r]) + 8 (imm64)
- int offset = reloc.getOffset() + reloc.getSize() + addend - 2;
- elfContainer.createELFRelocationEntry(reloc.getSection(), offset, elfRelocType, addend, symbol.getElfSymbol());
+ offset = offset + reloc.getSize() + addend - 2;
break;
}
case FOREIGN_CALL_INDIRECT:
case JAVA_CALL_INDIRECT:
case STUB_CALL_INDIRECT: {
// Do nothing.
- break;
+ return;
}
case EXTERNAL_DATA_REFERENCE_FAR: {
// Create relocation entry
- int addend = -4; // Size of 32-bit address of the GOT
+ addend = -4; // Size of 32-bit address of the GOT
/*
* Relocation should be applied before the test instruction to the move instruction.
- * reloc.getOffset() points to the test instruction after the instruction that loads
+ * offset points to the test instruction after the instruction that loads
* the address of polling page. So set the offset appropriately.
*/
- int offset = reloc.getOffset() + addend;
- elfContainer.createELFRelocationEntry(reloc.getSection(), offset, elfRelocType, addend, symbol.getElfSymbol());
+ offset = offset + addend;
break;
}
case METASPACE_GOT_REFERENCE:
case EXTERNAL_PLT_TO_GOT:
case STATIC_STUB_TO_STATIC_METHOD:
case STATIC_STUB_TO_HOTSPOT_LINKAGE_GOT: {
- int addend = -4; // Size of 32-bit address of the GOT
+ addend = -4; // Size of 32-bit address of the GOT
/*
- * Relocation should be applied before the test instruction to the move instruction.
- * reloc.getOffset() points to the test instruction after the instruction that loads
- * the address of polling page. So set the offset appropriately.
+ * Relocation should be applied before the test instruction to
+ * the move instruction. reloc.getOffset() points to the
+ * test instruction after the instruction that loads the
+ * address of polling page. So set the offset appropriately.
*/
- int offset = reloc.getOffset() + addend;
- elfContainer.createELFRelocationEntry(reloc.getSection(), offset, elfRelocType, addend, symbol.getElfSymbol());
+ offset = offset + addend;
break;
}
case EXTERNAL_GOT_TO_PLT:
case LOADTIME_ADDRESS: {
// this is load time relocations
- elfContainer.createELFRelocationEntry(reloc.getSection(), reloc.getOffset(), elfRelocType, 0, symbol.getElfSymbol());
break;
}
default:
throw new InternalError("Unhandled relocation type: " + relocType);
}
+ elfRelocTable.createRelocationEntry(sectindex, offset, symno, elfRelocType, addend);
}
- // TODO: Populate the mapping of RelocType to ELF relocation types
private static int getELFRelocationType(RelocType relocType) {
int elfRelocType = 0; // R_<ARCH>_NONE if #define'd to 0 for all values of ARCH
- switch (JNIELFTargetInfo.getELFArch()) {
- case ELF.EM_X64_64:
+ switch (ElfTargetInfo.getElfArch()) {
+ case Elf64_Ehdr.EM_X86_64:
// Return R_X86_64_* entries based on relocType
- if (relocType == RelocType.FOREIGN_CALL_DIRECT || relocType == RelocType.JAVA_CALL_DIRECT || relocType == RelocType.FOREIGN_CALL_INDIRECT_GOT) {
- elfRelocType = JNIELFRelocation.X86_64.R_X86_64_PLT32;
+ if (relocType == RelocType.FOREIGN_CALL_DIRECT ||
+ relocType == RelocType.JAVA_CALL_DIRECT ||
+ relocType == RelocType.FOREIGN_CALL_INDIRECT_GOT) {
+ elfRelocType = Elf64_Rela.R_X86_64_PLT32;
} else if (relocType == RelocType.STUB_CALL_DIRECT) {
- elfRelocType = JNIELFRelocation.X86_64.R_X86_64_PC32;
+ elfRelocType = Elf64_Rela.R_X86_64_PC32;
} else if (relocType == RelocType.FOREIGN_CALL_DIRECT_FAR) {
- elfRelocType = JNIELFRelocation.X86_64.R_X86_64_64;
- } else if (relocType == RelocType.FOREIGN_CALL_INDIRECT || relocType == RelocType.JAVA_CALL_INDIRECT || relocType == RelocType.STUB_CALL_INDIRECT) {
- elfRelocType = JNIELFRelocation.X86_64.R_X86_64_NONE;
+ elfRelocType = Elf64_Rela.R_X86_64_64;
+ } else if (relocType == RelocType.FOREIGN_CALL_INDIRECT ||
+ relocType == RelocType.JAVA_CALL_INDIRECT ||
+ relocType == RelocType.STUB_CALL_INDIRECT) {
+ elfRelocType = Elf64_Rela.R_X86_64_NONE;
} else if ((relocType == RelocType.EXTERNAL_DATA_REFERENCE_FAR)) {
- elfRelocType = JNIELFRelocation.X86_64.R_X86_64_GOTPCREL;
- } else if (relocType == RelocType.METASPACE_GOT_REFERENCE || relocType == RelocType.EXTERNAL_PLT_TO_GOT || relocType == RelocType.STATIC_STUB_TO_STATIC_METHOD ||
- relocType == RelocType.STATIC_STUB_TO_HOTSPOT_LINKAGE_GOT) {
- elfRelocType = JNIELFRelocation.X86_64.R_X86_64_PC32;
- } else if (relocType == RelocType.EXTERNAL_GOT_TO_PLT || relocType == RelocType.LOADTIME_ADDRESS) {
- elfRelocType = JNIELFRelocation.X86_64.R_X86_64_64;
+ elfRelocType = Elf64_Rela.R_X86_64_GOTPCREL;
+ } else if (relocType == RelocType.METASPACE_GOT_REFERENCE ||
+ relocType == RelocType.EXTERNAL_PLT_TO_GOT ||
+ relocType == RelocType.STATIC_STUB_TO_STATIC_METHOD ||
+ relocType == RelocType.STATIC_STUB_TO_HOTSPOT_LINKAGE_GOT) {
+ elfRelocType = Elf64_Rela.R_X86_64_PC32;
+ } else if (relocType == RelocType.EXTERNAL_GOT_TO_PLT ||
+ relocType == RelocType.LOADTIME_ADDRESS) {
+ elfRelocType = Elf64_Rela.R_X86_64_64;
} else {
assert false : "Unhandled relocation type: " + relocType;
}
@@ -360,61 +428,27 @@ public class JELFRelocObject {
return elfRelocType;
}
- private void createAllRelocationSections(ELFContainer symtab) {
- for (Map.Entry<ELFContainer, ArrayList<Pointer>> entry : elfContainer.getRelocTables().entrySet()) {
- createRelocationSection(entry.getKey(), entry.getValue(), symtab);
- }
- }
+ private void createElfRelocSections(ArrayList<ElfSection> sections,
+ ElfRelocTable elfRelocTable,
+ int symtabsectidx) {
- private void createRelocationSection(ELFContainer container, ArrayList<Pointer> relocations, ELFContainer symtab) {
- String secName = container.getContainerName();
- int entrySize = JNIELFTargetInfo.sizeOfRelocEntry();
- int numEntries = relocations.size();
- byte[] sectionDataBytes = new byte[numEntries * entrySize];
+ // Grab count before we create new sections
+ int count = sections.size();
- for (int index = 0; index < relocations.size(); index++) {
- Pointer entry = relocations.get(index);
- entry.copyBytesTo(sectionDataBytes, entrySize, index * entrySize);
- }
- String fullSecName;
- // If relocDat is non-null create section
- if (sectionDataBytes.length > 0) {
- int scnType;
- Elf_Type dataType;
- if (JNIELFTargetInfo.createReloca() == 0) {
- scnType = ELF.SHT_REL;
- dataType = Elf_Type.ELF_T_REL;
- fullSecName = ".rel" + secName;
- } else {
- scnType = ELF.SHT_RELA;
- dataType = Elf_Type.ELF_T_RELA;
- fullSecName = ".rela" + secName;
+ for (int i = 0; i < count; i++) {
+ if (elfRelocTable.getNumRelocs(i) > 0) {
+ ElfSection sect = sections.get(i);
+ String relname = ".rela" + sect.getName();
+ ElfSection relocSection = createByteSection(sections,
+ relname,
+ elfRelocTable.getRelocData(i),
+ false,
+ 8,
+ 0,
+ Elf64_Shdr.SHT_RELA);
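+ // For a SHT_RELA section, sh_link holds the section header index of the
+ // associated symbol table and sh_info holds the index of the section to
+ // which these relocations apply.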
+ relocSection.setLink(symtabsectidx);
+ relocSection.setInfo(sect.getSectionId());
}
- // assert compareBytes(relocData.toByteArray(), sectionDataBytes) : "******* Bad array
- // copy";
- // sh_link holds the index of section header of symbol table associated with this
- // relocation table.
- // sh_info holds the index of section header to which this relocation table applies
- // to.
- elfContainer.createSection(fullSecName, sectionDataBytes, dataType, 8, scnType, ELF.SHF_ALLOC, symtab.getSectionId(), container.getSectionId());
- }
- }
-
- private static class SymTabELFContainer implements ELFContainer {
- private final int symTabSectionIndex;
-
- public SymTabELFContainer(int symTabSectionIndex) {
- this.symTabSectionIndex = symTabSectionIndex;
- }
-
- @Override
- public String getContainerName() {
- return ".symtab";
- }
-
- @Override
- public int getSectionId() {
- return symTabSectionIndex;
}
}
}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/JMachORelocObject.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/JMachORelocObject.java
new file mode 100644
index 00000000000..39941757672
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/JMachORelocObject.java
@@ -0,0 +1,475 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ *
+ * File Layout generated by JMachORelocObject
+ *
+ * MachO Header
+ * Load Commands
+ * LC_SEGMENT_64
+ * - Sections
+ * LC_VERSION_MIN_MACOSX
+ * LC_SYMTAB
+ * LC_DYSYMTAB
+ * Section Data
+ * Relocation entries
+ * Symbol table
+ *
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import jdk.tools.jaotc.binformat.BinaryContainer;
+import jdk.tools.jaotc.binformat.ByteContainer;
+import jdk.tools.jaotc.binformat.CodeContainer;
+import jdk.tools.jaotc.binformat.ReadOnlyDataContainer;
+import jdk.tools.jaotc.binformat.Relocation;
+import jdk.tools.jaotc.binformat.Relocation.RelocType;
+import jdk.tools.jaotc.binformat.Symbol;
+import jdk.tools.jaotc.binformat.NativeSymbol;
+import jdk.tools.jaotc.binformat.Symbol.Binding;
+import jdk.tools.jaotc.binformat.Symbol.Kind;
+
+import jdk.tools.jaotc.binformat.macho.MachO;
+import jdk.tools.jaotc.binformat.macho.MachO.section_64;
+import jdk.tools.jaotc.binformat.macho.MachO.mach_header_64;
+import jdk.tools.jaotc.binformat.macho.MachO.segment_command_64;
+import jdk.tools.jaotc.binformat.macho.MachO.version_min_command;
+import jdk.tools.jaotc.binformat.macho.MachO.symtab_command;
+import jdk.tools.jaotc.binformat.macho.MachO.dysymtab_command;
+import jdk.tools.jaotc.binformat.macho.MachO.nlist_64;
+import jdk.tools.jaotc.binformat.macho.MachO.reloc_info;
+import jdk.tools.jaotc.binformat.macho.MachOContainer;
+import jdk.tools.jaotc.binformat.macho.MachOTargetInfo;
+import jdk.tools.jaotc.binformat.macho.MachOSymtab;
+import jdk.tools.jaotc.binformat.macho.MachORelocTable;
+
+public class JMachORelocObject {
+
+ private final BinaryContainer binContainer;
+
+ private final MachOContainer machoContainer;
+
+ private final int segmentSize;
+
+ public JMachORelocObject(BinaryContainer binContainer, String outputFileName) {
+ this.binContainer = binContainer;
+ this.machoContainer = new MachOContainer(outputFileName);
+ this.segmentSize = binContainer.getCodeSegmentSize();
+ }
+
+ private void createByteSection(ArrayList<MachOSection> sections,
+ ByteContainer c, String sectName, String segName, int scnFlags) {
+
+ if (c.getByteArray().length == 0) {
+ // System.out.println("Skipping creation of " + sectName + " section, no data\n");
+ }
+
+ MachOSection sect = new MachOSection(sectName,
+ segName,
+ c.getByteArray(),
+ scnFlags,
+ c.hasRelocations(),
+ segmentSize);
+ // Add this section to our list
+ sections.add(sect);
+
+ // Record the section Id (0 relative)
+ c.setSectionId(sections.size()-1);
+
+ // TODO: Clear out code section data to allow for GC
+ // c.clear();
+ }
+
+ private void createCodeSection(ArrayList<MachOSection> sections, CodeContainer c) {
+ createByteSection(sections, c, /*c.getContainerName()*/ "__text", "__TEXT",
+ section_64.S_ATTR_PURE_INSTRUCTIONS|
+ section_64.S_ATTR_SOME_INSTRUCTIONS);
+ }
+
+ private void createReadOnlySection(ArrayList<MachOSection> sections, ReadOnlyDataContainer c) {
+ createByteSection(sections, c, c.getContainerName(), "__TEXT",
+ section_64.S_ATTR_SOME_INSTRUCTIONS);
+ }
+
+ private void createReadWriteSection(ArrayList<MachOSection> sections, ByteContainer c) {
+ createByteSection(sections, c, c.getContainerName(), "__DATA", section_64.S_REGULAR);
+ }
+
+ /**
+ * Create a MachO relocatable object
+ *
+ * @param relocationTable
+ * @param symbols
+ * @throws IOException throws {@code IOException} as a result of file system access failures.
+ */
+ public void createMachORelocObject(Map<Symbol, List<Relocation>> relocationTable, Collection<Symbol> symbols) throws IOException {
+ // Allocate MachO Header
+ // with 4 load commands
+ // LC_SEGMENT_64
+ // LC_VERSION_MIN_MACOSX
+ // LC_SYMTAB
+ // LC_DYSYMTAB
+
+ MachOHeader mh = new MachOHeader();
+
+ ArrayList<MachOSection> sections = new ArrayList<>();
+
+ // Create Sections contained in the main Segment LC_SEGMENT_64
+
+ createCodeSection(sections, binContainer.getCodeContainer());
+ createReadOnlySection(sections, binContainer.getMetaspaceNamesContainer());
+ createReadOnlySection(sections, binContainer.getKlassesOffsetsContainer());
+ createReadOnlySection(sections, binContainer.getMethodsOffsetsContainer());
+ createReadOnlySection(sections, binContainer.getKlassesDependenciesContainer());
+ createReadWriteSection(sections, binContainer.getMetaspaceGotContainer());
+ createReadWriteSection(sections, binContainer.getMetadataGotContainer());
+ createReadWriteSection(sections, binContainer.getMethodStateContainer());
+ createReadWriteSection(sections, binContainer.getOopGotContainer());
+ createReadWriteSection(sections, binContainer.getMethodMetadataContainer());
+ createReadOnlySection(sections, binContainer.getStubsOffsetsContainer());
+ createReadOnlySection(sections, binContainer.getHeaderContainer().getContainer());
+ createReadOnlySection(sections, binContainer.getCodeSegmentsContainer());
+ createReadOnlySection(sections, binContainer.getConstantDataContainer());
+ createReadOnlySection(sections, binContainer.getConfigContainer());
+
+ // createExternalLinkage();
+
+ createCodeSection(sections, binContainer.getExtLinkageContainer());
+ createReadWriteSection(sections, binContainer.getExtLinkageGOTContainer());
+ // Update the Header sizeofcmds size.
+ // This doesn't include the Header struct size
+ mh.setCmdSizes(4, segment_command_64.totalsize +
+ (section_64.totalsize * sections.size()) +
+ version_min_command.totalsize +
+ symtab_command.totalsize +
+ dysymtab_command.totalsize);
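+
+ // For example, with 17 sections the load commands occupy
+ // 72 + 17*80 + 16 + 24 + 80 = 1552 bytes
+ // (segment_command_64 + section_64 entries + version_min + symtab + dysymtab).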
+
+ // Initialize file offset for data past commands
+ int file_offset = mach_header_64.totalsize + mh.getCmdSize();
+ // and round it up
+ file_offset = (file_offset + (sections.get(0).getAlign()-1)) & ~((sections.get(0).getAlign()-1));
+ long address = 0;
+ int segment_offset = file_offset;
+
+ for (int i = 0; i < sections.size(); i++) {
+ MachOSection sect = sections.get(i);
+ file_offset = (file_offset + (sect.getAlign()-1)) & ~((sect.getAlign()-1));
+ address = (address + (sect.getAlign()-1)) & ~((sect.getAlign()-1));
+ sect.setOffset(file_offset);
+ sect.setAddr(address);
+ file_offset += sect.getSize();
+ address += sect.getSize();
+ }
+
+ // File size for Segment data
+ int segment_size = file_offset - segment_offset;
+
+ // Create the LC_SEGMENT_64 Segment which contains the MachOSections
+ MachOSegment seg = new MachOSegment(segment_command_64.totalsize +
+ (section_64.totalsize * sections.size()),
+ segment_offset,
+ segment_size,
+ sections.size());
+
+
+ MachOVersion vers = new MachOVersion();
+
+ // Get symbol data from BinaryContainer object's symbol tables
+ MachOSymtab symtab = createMachOSymbolTables(sections, symbols);
+
+ // Create LC_DYSYMTAB command
+ MachODySymtab dysymtab = new MachODySymtab(symtab.getNumLocalSyms(),
+ symtab.getNumGlobalSyms(),
+ symtab.getNumUndefSyms());
+
+ // Create the Relocation Tables
+ MachORelocTable machORelocs = createMachORelocTable(sections, relocationTable, symtab);
+ // Calculate file offset for relocation data
+ file_offset = (file_offset + (machORelocs.getAlign()-1)) & ~((machORelocs.getAlign()-1));
+
+ // Update relocation sizing information in each section
+ for (int i = 0; i < sections.size(); i++) {
+ MachOSection sect = sections.get(i);
+ if (sect.hasRelocations()) {
+ int nreloc = machORelocs.getNumRelocs(i);
+ sect.setReloff(file_offset);
+ sect.setRelcount(nreloc);
+ file_offset += (nreloc * reloc_info.totalsize);
+ }
+ }
+
+ // Calculate and set file offset for symbol table data
+ file_offset = (file_offset + (symtab.getAlign()-1)) & ~((symtab.getAlign()-1));
+ symtab.setOffset(file_offset);
+
+
+ // Write Out Header
+ machoContainer.writeBytes(mh.getArray());
+ // Write out first Segment
+ machoContainer.writeBytes(seg.getArray());
+ // Write out sections within first Segment
+ for (int i = 0; i < sections.size(); i++) {
+ MachOSection sect = sections.get(i);
+ machoContainer.writeBytes(sect.getArray());
+ }
+
+ // Write out LC_VERSION_MIN_MACOSX command
+ machoContainer.writeBytes(vers.getArray());
+
+ // Write out LC_SYMTAB command
+ symtab.calcSizes();
+ machoContainer.writeBytes(symtab.getCmdArray());
+
+ // Write out LC_DYSYMTAB command
+ machoContainer.writeBytes(dysymtab.getArray());
+
+ // Write out data associated with each Section
+ for (int i = 0; i < sections.size(); i++) {
+ MachOSection sect = sections.get(i);
+ machoContainer.writeBytes(sect.getDataArray(), sect.getAlign());
+ }
+
+ // Write out the relocation tables for all sections
+ for (int i = 0; i < sections.size(); i++) {
+ if (machORelocs.getNumRelocs(i) > 0)
+ machoContainer.writeBytes(machORelocs.getRelocData(i), machORelocs.getAlign());
+ }
+
+ // Write out data associated with LC_SYMTAB
+ machoContainer.writeBytes(symtab.getDataArray(), symtab.getAlign());
+
+ machoContainer.close();
+ }
+
+ /**
+ * Construct MachO symbol data from BinaryContainer object's symbol tables. Both dynamic MachO
+ * symbol table and MachO symbol table are created from BinaryContainer's symbol info.
+ *
+ * @param sections
+ * @param symbols
+ */
+ private MachOSymtab createMachOSymbolTables(ArrayList<MachOSection> sections,
+ Collection<Symbol> symbols) {
+ MachOSymtab symtab = new MachOSymtab();
+ // First, create the initial null symbol. This is a local symbol.
+ symtab.addSymbolEntry("", (byte)nlist_64.N_UNDF, (byte)0, (long)0);
+
+ // Now create MachO symbol entries for all symbols.
+ for (Symbol symbol : symbols) {
+ int sectionId = symbol.getSection().getSectionId();
+
+ // Symbol offsets are relative to the section memory addr
+ long sectionAddr = sections.get(sectionId).getAddr();
+
+ MachOSymbol machoSymbol = symtab.addSymbolEntry(symbol.getName(),
+ getMachOTypeOf(symbol),
+ (byte)sectionId,
+ symbol.getOffset() + sectionAddr);
+ symbol.setNativeSymbol((NativeSymbol)machoSymbol);
+ }
+
+ // Now that all symbols are entered, update the
+ // symbol indexes. This is necessary since they will
+ // be reordered based on local, global and undefined.
+ symtab.updateIndexes();
+
+ return (symtab);
+ }
+
+ private static byte getMachOTypeOf(Symbol sym) {
+ Kind kind = sym.getKind();
+ byte type = nlist_64.N_UNDF;
+
+ // Global or Local
+ if (sym.getBinding() == Symbol.Binding.GLOBAL)
+ type = nlist_64.N_EXT;
+
+ // If Function or Data, add section type
+ if (kind == Symbol.Kind.NATIVE_FUNCTION ||
+ kind == Symbol.Kind.JAVA_FUNCTION ||
+ kind == Symbol.Kind.OBJECT) {
+ type |= (nlist_64.N_SECT);
+ }
+
+ return (type);
+ }
+
+ /**
+ * Construct a MachO relocation table from BinaryContainer object's relocation tables.
+ *
+ * @param sections
+ * @param relocationTable
+ * @param symtab
+ */
+ private MachORelocTable createMachORelocTable(ArrayList<MachOSection> sections,
+ Map<Symbol, List<Relocation>> relocationTable,
+ MachOSymtab symtab) {
+
+ MachORelocTable machORelocTable = new MachORelocTable(sections.size());
+ /*
+ * For each of the symbols with associated relocation records, create a MachO relocation
+ * entry.
+ */
+ for (Map.Entry<Symbol, List<Relocation>> entry : relocationTable.entrySet()) {
+ List<Relocation> relocs = entry.getValue();
+ Symbol symbol = entry.getKey();
+
+ for (Relocation reloc : relocs) {
+ createRelocation(symbol, reloc, machORelocTable);
+ }
+ }
+
+ for (Map.Entry<Symbol, Relocation> entry : binContainer.getUniqueRelocationTable().entrySet()) {
+ createRelocation(entry.getKey(), entry.getValue(), machORelocTable);
+ }
+
+ return (machORelocTable);
+ }
+
+ private void createRelocation(Symbol symbol, Relocation reloc, MachORelocTable machORelocTable) {
+ RelocType relocType = reloc.getType();
+
+ int machORelocType = getMachORelocationType(relocType);
+ MachOSymbol sym = (MachOSymbol)symbol.getNativeSymbol();
+ int symno = sym.getIndex();
+ int sectindex = reloc.getSection().getSectionId();
+ int offset = reloc.getOffset();
+ int pcrel = 0;
+ int length = 0;
+ int isextern = 1;
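+
+ // Mach-O r_length is the log2 of the patched field size (2 = 4 bytes,
+ // 3 = 8 bytes); r_pcrel marks the fixup as PC-relative.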
+
+/*
+ System.out.println("reloctype: " + relocType + " size is " +
+ reloc.getSize() + " offset is " + offset +
+ " Section Index is " + (sectindex) +
+ " Symbol Index is " + symno +
+ " Symbol Name is " + symbol.getName() + "\n");
+*/
+
+ switch (relocType) {
+ case FOREIGN_CALL_DIRECT:
+ case JAVA_CALL_DIRECT:
+ case STUB_CALL_DIRECT:
+ case FOREIGN_CALL_INDIRECT_GOT: {
+ // Create relocation entry
+ // System.out.println("getMachORelocationType: PLT relocation type using X86_64_RELOC_BRANCH");
+ int addend = -4; // Size in bytes of the patch location
+ // Relocation should be applied at the location after call operand
+ offset = offset + reloc.getSize() + addend;
+ pcrel = 1; length = 2;
+ break;
+ }
+ case FOREIGN_CALL_DIRECT_FAR: {
+ // Create relocation entry
+ int addend = -8; // Size in bytes of the patch location
+ // Relocation should be applied at the location after call operand
+ // 10 = 2 (jmp [r]) + 8 (imm64)
+ offset = offset + reloc.getSize() + addend - 2;
+ pcrel = 0; length = 3;
+ break;
+ }
+ case FOREIGN_CALL_INDIRECT:
+ case JAVA_CALL_INDIRECT:
+ case STUB_CALL_INDIRECT: {
+ // Do nothing.
+ return;
+ }
+ case EXTERNAL_DATA_REFERENCE_FAR: {
+ // Create relocation entry
+ int addend = -4; // Size of 32-bit address of the GOT
+ /*
+ * Relocation should be applied before the test instruction to the move instruction.
+ * offset points to the test instruction after the instruction that loads
+ * the address of polling page. So set the offset appropriately.
+ */
+ offset = offset + addend;
+ pcrel = 0; length = 2;
+ break;
+ }
+ case METASPACE_GOT_REFERENCE:
+ case EXTERNAL_PLT_TO_GOT:
+ case STATIC_STUB_TO_STATIC_METHOD:
+ case STATIC_STUB_TO_HOTSPOT_LINKAGE_GOT: {
+ int addend = -4; // Size of 32-bit address of the GOT
+ /*
+ * Relocation should be applied before the test instruction to
+ * the move instruction. reloc.getOffset() points to the
+ * test instruction after the instruction that loads the
+ * address of polling page. So set the offset appropriately.
+ */
+ offset = offset + addend;
+ pcrel = 1; length = 2;
+ break;
+ }
+ case EXTERNAL_GOT_TO_PLT:
+ case LOADTIME_ADDRESS: {
+ // this is load time relocations
+ pcrel = 0; length = 3;
+ break;
+ }
+ default:
+ throw new InternalError("Unhandled relocation type: " + relocType);
+ }
+ machORelocTable.createRelocationEntry(sectindex, offset, symno,
+ pcrel, length, isextern,
+ machORelocType);
+ }
+
+ private static int getMachORelocationType(RelocType relocType) {
+ int machORelocType = 0;
+ switch (MachOTargetInfo.getMachOArch()) {
+ case mach_header_64.CPU_TYPE_X86_64:
+ // Return X86_64_RELOC_* entries based on relocType
+ if (relocType == RelocType.FOREIGN_CALL_DIRECT || relocType == RelocType.JAVA_CALL_DIRECT || relocType == RelocType.FOREIGN_CALL_INDIRECT_GOT) {
+ machORelocType = reloc_info.X86_64_RELOC_BRANCH;
+ } else if (relocType == RelocType.STUB_CALL_DIRECT) {
+ machORelocType = reloc_info.X86_64_RELOC_BRANCH;
+ } else if (relocType == RelocType.FOREIGN_CALL_DIRECT_FAR) {
+ machORelocType = reloc_info.X86_64_RELOC_UNSIGNED;
+ } else if (relocType == RelocType.FOREIGN_CALL_INDIRECT || relocType == RelocType.JAVA_CALL_INDIRECT || relocType == RelocType.STUB_CALL_INDIRECT) {
+ machORelocType = reloc_info.X86_64_RELOC_NONE;
+ } else if ((relocType == RelocType.EXTERNAL_DATA_REFERENCE_FAR)) {
+ machORelocType = reloc_info.X86_64_RELOC_GOT;
+ } else if (relocType == RelocType.METASPACE_GOT_REFERENCE || relocType == RelocType.EXTERNAL_PLT_TO_GOT || relocType == RelocType.STATIC_STUB_TO_STATIC_METHOD ||
+ relocType == RelocType.STATIC_STUB_TO_HOTSPOT_LINKAGE_GOT) {
+ machORelocType = reloc_info.X86_64_RELOC_BRANCH;
+ } else if (relocType == RelocType.EXTERNAL_GOT_TO_PLT || relocType == RelocType.LOADTIME_ADDRESS) {
+ machORelocType = reloc_info.X86_64_RELOC_UNSIGNED;
+ } else {
+ assert false : "Unhandled relocation type: " + relocType;
+ }
+ break;
+ default:
+ System.out.println("Relocation Type mapping: Unhandled architecture");
+ }
+ return machORelocType;
+ }
+}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachO.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachO.java
new file mode 100644
index 00000000000..752723f25dc
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachO.java
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+/**
+ *
+ * Support for the creation of Mach-O object files.
+ * Current support is limited to 64 bit x86_64.
+ *
+ * File Format Overview:
+ *
+ * mach_header
+ * load_commands
+ * Typical Mac OSX 64-bit object files have these 4 load_commands
+ * (LC_SEGMENT_64, LC_SYMTAB, LC_VERSION_MIN_MACOSX, LC_DYSYMTAB)
+ * Segments corresponding to load_commands
+ * (which each include multiple Sections)
+ */
+
+public class MachO {
+
+ /**
+ * mach_header_64 structure defines
+ */
+ public enum mach_header_64 {
+ magic( 0, 4),
+ cputype( 4, 4),
+ cpusubtype( 8, 4),
+ filetype(12, 4),
+ ncmds(16, 4),
+ sizeofcmds(20, 4),
+ flags(24, 4),
+ reserved(28, 4);
+
+ public final int off;
+ public final int sz;
+
+ mach_header_64(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 32;
+
+ /**
+ * mach_header_64 defines
+ */
+ public static final int MH_MAGIC = 0xfeedface;
+ public static final int MH_MAGIC_64 = 0xfeedfacf;
+ public static final int MH_SUBSECTIONS_VIA_SYMBOLS = 0x2000;
+
+ /**
+ * filetype
+ */
+ public static final int MH_OBJECT = 0x1;
+
+ /**
+ * cputype
+ */
+ public static final int CPU_TYPE_ANY = -1;
+ public static final int CPU_ARCH_ABI64 = 0x1000000;
+ public static final int CPU_TYPE_X86_64 = 0x1000007;
+ public static final int CPU_TYPE_ARM64 = 0x100000c;
+ /**
+ * cpusubtype
+ */
+ public static final int CPU_SUBTYPE_I386_ALL = 3;
+ public static final int CPU_SUBTYPE_ARM64_ALL = 0;
+ public static final int CPU_SUBTYPE_LITTLE_ENDIAN = 0;
+ public static final int CPU_SUBTYPE_BIG_ENDIAN = 1;
+
+ }
+
+ /**
+ * segment_command_64 structure defines
+ */
+ public enum segment_command_64 {
+ cmd( 0, 4),
+ cmdsize( 4, 4),
+ segname( 8,16),
+ vmaddr(24, 8),
+ vmsize(32, 8),
+ fileoff(40, 8),
+ filesize(48, 8),
+ maxprot(56, 4),
+ initprot(60, 4),
+ nsects(64, 4),
+ flags(68, 4);
+
+ public final int off;
+ public final int sz;
+
+ segment_command_64(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 72;
+
+ public static final int LC_SEGMENT_64 = 0x19;
+ }
+
+ /**
+ * section_64 structure defines
+ */
+ public enum section_64 {
+ sectname( 0,16),
+ segname(16,16),
+ addr(32, 8),
+ size(40, 8),
+ offset(48, 4),
+ align(52, 4),
+ reloff(56, 4),
+ nreloc(60, 4),
+ flags(64, 4),
+ reserved1(68, 4),
+ reserved2(72, 4),
+ reserved3(76, 4);
+
+ public final int off;
+ public final int sz;
+
+ section_64(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 80;
+
+ public static int S_REGULAR = 0x0;
+ public static int S_CSTRING_LITERALS = 0x2;
+ public static int S_ATTR_PURE_INSTRUCTIONS = 0x80000000;
+ public static int S_ATTR_SOME_INSTRUCTIONS = 0x400;
+ }
+
+ /**
+ * version_min_command structure defines
+ */
+ public enum version_min_command {
+ cmd( 0, 4),
+ cmdsize( 4, 4),
+ version( 8, 4),
+ sdk(12, 4);
+
+ public final int off;
+ public final int sz;
+
+ version_min_command(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 16;
+
+ public static final int LC_VERSION_MIN_MACOSX = 0x24;
+ public static final int LC_VERSION_MIN_IPHONEOS = 0x25;
+ }
+
+ /**
+ * symtab_command structure defines
+ */
+ public enum symtab_command {
+ cmd( 0, 4),
+ cmdsize( 4, 4),
+ symoff( 8, 4),
+ nsyms(12, 4),
+ stroff(16, 4),
+ strsize(20, 4);
+
+ public final int off;
+ public final int sz;
+
+ symtab_command(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 24;
+
+ public static final int LC_SYMTAB = 0x2;
+ }
+
+ /**
+ * Symbol table entry definitions
+ *
+ * nlist_64 structure defines
+ */
+ public enum nlist_64 {
+ n_strx( 0, 4),
+ n_type( 4, 1),
+ n_sect( 5, 1),
+ n_desc( 6, 2),
+ n_value( 8, 8);
+
+ public final int off;
+ public final int sz;
+
+ nlist_64(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 16;
+
+ public static final int N_EXT = 0x1;
+ public static final int N_TYPE = 0xe;
+ public static final int N_UNDF = 0x0;
+ public static final int N_SECT = 0xe;
+ }
+
+ /**
+ * dysymtab_command structure defines
+ */
+ public enum dysymtab_command {
+ cmd( 0, 4),
+ cmdsize( 4, 4),
+ ilocalsym( 8, 4),
+ nlocalsym(12, 4),
+ iextdefsym(16, 4),
+ nextdefsym(20, 4),
+ iundefsym(24, 4),
+ nundefsym(28, 4),
+ tocoff(32, 4),
+ ntoc(36, 4),
+ modtaboff(40, 4),
+ nmodtab(44, 4),
+ extrefsymoff(48, 4),
+ nextrefsyms(52, 4),
+ indirectsymoff(56, 4),
+ nindirectsyms(60, 4),
+ extreloff(64, 4),
+ nextrel(68, 4),
+ locreloff(72, 4),
+ nlocrel(76, 4);
+
+ public final int off;
+ public final int sz;
+
+ dysymtab_command(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 80;
+
+ public static final int LC_DYSYMTAB = 0xb;
+ }
+
+ /**
+ * relocation_info structure defines
+ */
+ public enum reloc_info {
+ r_address( 0, 4),
+ r_relocinfo( 4, 4);
+
+ public final int off;
+ public final int sz;
+
+ reloc_info(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 8;
+
+ public static final int REL_SYMNUM_MASK = 0xffffff;
+ public static final int REL_SYMNUM_SHIFT = 0x0;
+ public static final int REL_PCREL_MASK = 0x1;
+ public static final int REL_PCREL_SHIFT = 0x18;
+ public static final int REL_LENGTH_MASK = 0x3;
+ public static final int REL_LENGTH_SHIFT = 0x19;
+ public static final int REL_EXTERN_MASK = 0x1;
+ public static final int REL_EXTERN_SHIFT = 0x1b;
+ public static final int REL_TYPE_MASK = 0xf;
+ public static final int REL_TYPE_SHIFT = 0x1c;
+
+ /* reloc_type_x86_64 defines */
+
+ public static final int X86_64_RELOC_NONE = 0x0;
+ public static final int X86_64_RELOC_BRANCH = 0x2;
+ public static final int X86_64_RELOC_GOT = 0x4;
+ public static final int X86_64_RELOC_GOT_LOAD = 0x3;
+ public static final int X86_64_RELOC_SIGNED = 0x1;
+ public static final int X86_64_RELOC_UNSIGNED = 0x0;
+ }
+}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOByteBuffer.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOByteBuffer.java
new file mode 100644
index 00000000000..14b75387277
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOByteBuffer.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import jdk.tools.jaotc.binformat.macho.MachOTargetInfo;
+import jdk.tools.jaotc.binformat.macho.MachO.mach_header_64;
+
+public class MachOByteBuffer {
+
+ public static ByteBuffer allocate(int size) {
+ ByteBuffer buf = ByteBuffer.allocate(size);
+ if (MachOTargetInfo.getMachOEndian() ==
+ MachO.mach_header_64.CPU_SUBTYPE_LITTLE_ENDIAN)
+ buf.order(ByteOrder.LITTLE_ENDIAN);
+ else
+ buf.order(ByteOrder.BIG_ENDIAN);
+ return (buf);
+ }
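+
+ // Illustrative use: allocate a header-sized buffer in the target byte order
+ // and fill it with absolute puts, e.g.
+ // ByteBuffer b = MachOByteBuffer.allocate(mach_header_64.totalsize);
+ // b.putInt(mach_header_64.magic.off, mach_header_64.MH_MAGIC_64);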
+
+}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOContainer.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOContainer.java
new file mode 100644
index 00000000000..bcb46c368c7
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOContainer.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.io.File;
+import java.io.FileOutputStream;
+
+public class MachOContainer {
+
+ File outputFile;
+ FileOutputStream outputStream;
+ long fileOffset;
+
+ public MachOContainer(String fileName) {
+ String baseName;
+
+ outputFile = new File(fileName);
+ if (outputFile.exists()) {
+ outputFile.delete();
+ }
+
+ try {
+ outputStream = new FileOutputStream(outputFile);
+ } catch (Exception e) {
+ System.out.println("MachOContainer: Can't create file " + fileName);
+ }
+ fileOffset = 0;
+ }
+
+ public void close() {
+ try {
+ outputStream.close();
+ } catch (Exception e) {
+ System.out.println("MachOContainer: close failed");
+ }
+ }
+
+ public void writeBytes(byte [] bytes) {
+ try {
+ outputStream.write(bytes);
+ } catch (Exception e) {
+ System.out.println("MachOContainer: writeBytes failed");
+ }
+ fileOffset += bytes.length;
+ }
+
+ // Write bytes to output file with up front alignment padding
+ public void writeBytes(byte [] bytes, int alignment) {
+ try {
+ // Pad to alignment
+ while ((fileOffset & (long)(alignment-1)) != 0) {
+ outputStream.write(0);
+ fileOffset++;
+ }
+ outputStream.write(bytes);
+ } catch (Exception e) {
+ System.out.println("MachOContainer: writeBytes failed");
+ }
+ fileOffset += bytes.length;
+ }
+}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachODySymtab.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachODySymtab.java
new file mode 100644
index 00000000000..4ede3869087
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachODySymtab.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.macho.MachO;
+import jdk.tools.jaotc.binformat.macho.MachO.dysymtab_command;
+import jdk.tools.jaotc.binformat.macho.MachOByteBuffer;
+
+public class MachODySymtab {
+ ByteBuffer dysymtab;
+
+ public MachODySymtab(int nlocal, int nglobal, int nundef) {
+ dysymtab = MachOByteBuffer.allocate(dysymtab_command.totalsize);
+
+ dysymtab.putInt(dysymtab_command.cmd.off, dysymtab_command.LC_DYSYMTAB);
+ dysymtab.putInt(dysymtab_command.cmdsize.off, dysymtab_command.totalsize);
+ dysymtab.putInt(dysymtab_command.ilocalsym.off, 0);
+ dysymtab.putInt(dysymtab_command.nlocalsym.off, nlocal);
+ dysymtab.putInt(dysymtab_command.iextdefsym.off, nlocal);
+ dysymtab.putInt(dysymtab_command.nextdefsym.off, nglobal);
+ dysymtab.putInt(dysymtab_command.iundefsym.off, nlocal+nglobal);
+ dysymtab.putInt(dysymtab_command.nundefsym.off, nundef);
+ }
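+
+ // Illustration: with nlocal = 3, nglobal = 5 and nundef = 2, the symbol
+ // table is partitioned as indices 0-2 (local), 3-7 (externally defined)
+ // and 8-9 (undefined).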
+
+ public byte[] getArray() {
+ return dysymtab.array();
+ }
+}
+
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOHeader.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOHeader.java
new file mode 100644
index 00000000000..ae50f0186d9
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOHeader.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.macho.MachO;
+import jdk.tools.jaotc.binformat.macho.MachO.mach_header_64;
+import jdk.tools.jaotc.binformat.macho.MachOTargetInfo;
+import jdk.tools.jaotc.binformat.macho.MachOByteBuffer;
+
+public class MachOHeader {
+ ByteBuffer header;
+
+ public MachOHeader() {
+ header = MachOByteBuffer.allocate(mach_header_64.totalsize);
+
+ header.putInt(mach_header_64.magic.off, mach_header_64.MH_MAGIC_64);
+ header.putInt(mach_header_64.cputype.off, MachOTargetInfo.getMachOArch());
+ header.putInt(mach_header_64.cpusubtype.off, MachOTargetInfo.getMachOSubArch());
+ header.putInt(mach_header_64.flags.off, 0x2000);
+ header.putInt(mach_header_64.filetype.off, mach_header_64.MH_OBJECT);
+ }
+
+ public void setCmdSizes(int ncmds, int sizeofcmds) {
+ header.putInt(mach_header_64.ncmds.off, ncmds);
+ header.putInt(mach_header_64.sizeofcmds.off, sizeofcmds);
+ }
+
+ public int getCmdSize() {
+ return (header.getInt(mach_header_64.sizeofcmds.off));
+ }
+
+ public byte[] getArray() {
+ return header.array();
+ }
+}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachORelocEntry.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachORelocEntry.java
new file mode 100644
index 00000000000..f32cfda3d78
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachORelocEntry.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.macho.MachO;
+import jdk.tools.jaotc.binformat.macho.MachO.reloc_info;
+import jdk.tools.jaotc.binformat.macho.MachOByteBuffer;
+
+public class MachORelocEntry {
+ ByteBuffer entry;
+
+ public MachORelocEntry(int offset,
+ int symno,
+ int pcrel,
+ int length,
+ int isextern,
+ int type) {
+
+ entry = MachOByteBuffer.allocate(reloc_info.totalsize);
+
+ entry.putInt(reloc_info.r_address.off, offset);
+
+ // Encode and store the relocation entry bitfields
+ entry.putInt(reloc_info.r_relocinfo.off,
+ ((symno & reloc_info.REL_SYMNUM_MASK)
+ << reloc_info.REL_SYMNUM_SHIFT) |
+ ((pcrel & reloc_info.REL_PCREL_MASK)
+ << reloc_info.REL_PCREL_SHIFT) |
+ ((length & reloc_info.REL_LENGTH_MASK)
+ << reloc_info.REL_LENGTH_SHIFT) |
+ ((isextern & reloc_info.REL_EXTERN_MASK)
+ << reloc_info.REL_EXTERN_SHIFT) |
+ ((type & reloc_info.REL_TYPE_MASK)
+ << reloc_info.REL_TYPE_SHIFT));
+ }
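+
+ // Bit layout produced above (per the REL_*_SHIFT/MASK constants): symbol
+ // number in bits 0-23, r_pcrel in bit 24, r_length in bits 25-26,
+ // r_extern in bit 27 and r_type in bits 28-31.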
+
+ public byte[] getArray() {
+ return entry.array();
+ }
+}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachORelocTable.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachORelocTable.java
new file mode 100644
index 00000000000..196ae4615a8
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachORelocTable.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.util.ArrayList;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.macho.MachORelocEntry;
+import jdk.tools.jaotc.binformat.macho.MachOTargetInfo;
+import jdk.tools.jaotc.binformat.macho.MachO.reloc_info;
+import jdk.tools.jaotc.binformat.macho.MachOByteBuffer;
+
+public class MachORelocTable {
+ ArrayList<ArrayList<MachORelocEntry>> relocEntries;
+ int fileOffset;
+
+ public MachORelocTable(int numsects) {
+ relocEntries = new ArrayList<ArrayList<MachORelocEntry>>(numsects);
+ for (int i = 0; i < numsects; i++)
+ relocEntries.add(new ArrayList<MachORelocEntry>());
+ }
+
+ public void createRelocationEntry(int sectindex,
+ int offset,
+ int symno,
+ int pcrel,
+ int length,
+ int isextern,
+ int type) {
+
+ MachORelocEntry entry = new MachORelocEntry(offset,
+ symno,
+ pcrel,
+ length,
+ isextern,
+ type);
+ relocEntries.get(sectindex).add(entry);
+ }
+
+ public int getAlign() {
+ return (4);
+ }
+
+ public int getNumRelocs(int section_index) {
+ return relocEntries.get(section_index).size();
+ }
+
+ // Return the relocation entries for a single section
+ // or null if no entries added to section
+ public byte [] getRelocData(int section_index) {
+ ArrayList<MachORelocEntry> entryList = relocEntries.get(section_index);
+
+ if (entryList.size() == 0)
+ return null;
+
+ ByteBuffer relocData = MachOByteBuffer.allocate(entryList.size() * reloc_info.totalsize);
+
+ // Copy each entry to a single ByteBuffer
+ for (int i = 0; i < entryList.size(); i++) {
+ MachORelocEntry entry = entryList.get(i);
+ relocData.put(entry.getArray());
+ }
+
+ return (relocData.array());
+ }
+}
+
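MachORelocTable keeps one ArrayList of entries per section and only materializes a section's bytes on demand, returning null for sections that collected nothing. A standalone sketch of that group-then-flatten pattern, using placeholder fixed-size records instead of real relocation entries:

    import java.nio.ByteBuffer;
    import java.util.ArrayList;

    public class PerSectionTableDemo {
        static final int RECORD_SIZE = 8;                        // illustrative fixed record size
        static ArrayList<ArrayList<byte[]>> entries = new ArrayList<>();

        static byte[] flatten(int section) {
            ArrayList<byte[]> list = entries.get(section);
            if (list.isEmpty())
                return null;                                     // mirrors getRelocData() above
            ByteBuffer buf = ByteBuffer.allocate(list.size() * RECORD_SIZE);
            for (byte[] record : list)
                buf.put(record);                                 // concatenate records back to back
            return buf.array();
        }

        public static void main(String[] args) {
            for (int i = 0; i < 3; i++)                          // three sections
                entries.add(new ArrayList<byte[]>());
            entries.get(0).add(new byte[RECORD_SIZE]);           // two records for section 0
            entries.get(0).add(new byte[RECORD_SIZE]);

            System.out.println(flatten(0).length);               // 16
            System.out.println(flatten(1));                      // null: nothing was added
        }
    }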
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSection.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSection.java
new file mode 100644
index 00000000000..61b814a795a
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSection.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.macho.MachO;
+import jdk.tools.jaotc.binformat.macho.MachO.section_64;
+import jdk.tools.jaotc.binformat.macho.MachOByteBuffer;
+
+public class MachOSection {
+ ByteBuffer section;
+ byte [] data;
+ boolean hasrelocations;
+
+ public MachOSection(String sectName, String segName, byte [] sectData, int sectFlags, boolean hasRelocations, int align) {
+ section = MachOByteBuffer.allocate(section_64.totalsize);
+
+ // TODO: Hotspot uses long section names.
+ // They are getting truncated.
+ // Is this a problem??
+ byte[] sectNameBytes = sectName.getBytes();
+ int sectNameMax = section_64.sectname.sz < sectNameBytes.length ?
+ section_64.sectname.sz : sectNameBytes.length;
+
+ for (int i = 0; i < sectNameMax; i++)
+ section.put(section_64.sectname.off+i, sectNameBytes[i]);
+
+ byte[] segNameBytes = segName.getBytes();
+ int segNameMax = section_64.segname.sz < segNameBytes.length ?
+ section_64.segname.sz : segNameBytes.length;
+
+ for (int i = 0; i < segNameMax; i++)
+ section.put(section_64.segname.off+i, segNameBytes[i]);
+
+ section.putLong(section_64.size.off, sectData.length);
+
+ section.putInt(section_64.align.off,
+ 31 - Integer.numberOfLeadingZeros(align));
+
+ section.putInt(section_64.flags.off, sectFlags);
+
+ data = sectData;
+
+ hasrelocations = hasRelocations;
+ }
+
+ public long getSize() {
+ return section.getLong(section_64.size.off);
+ }
+
+ public int getAlign() {
+ return (1 << section.getInt(section_64.align.off));
+ }
+
+ public byte[] getArray() {
+ return section.array();
+ }
+
+ public byte[] getDataArray() {
+ return data;
+ }
+
+ public void setAddr(long addr) {
+ section.putLong(section_64.addr.off, addr);
+ }
+
+ public long getAddr() {
+ return (section.getLong(section_64.addr.off));
+ }
+
+ public void setOffset(int offset) {
+ section.putInt(section_64.offset.off, offset);
+ }
+
+ public int getOffset() {
+ return (section.getInt(section_64.offset.off));
+ }
+
+ public void setReloff(int offset) {
+ section.putInt(section_64.reloff.off, offset);
+ }
+
+ public void setRelcount(int count) {
+ section.putInt(section_64.nreloc.off, count);
+ }
+
+ public boolean hasRelocations() {
+ return hasrelocations;
+ }
+}
+
+
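MachOSection stores the align field as a power-of-two exponent, computed as 31 - Integer.numberOfLeadingZeros(align), and getAlign() recovers the byte alignment by shifting 1 back up. A quick standalone check of that round trip for power-of-two alignments:

    public class AlignExponentDemo {
        public static void main(String[] args) {
            for (int align : new int[] {1, 4, 16, 64, 4096}) {
                int exp = 31 - Integer.numberOfLeadingZeros(align); // log2(align) for powers of two
                int back = 1 << exp;                                // what getAlign() recomputes
                System.out.println(align + " -> exponent " + exp + " -> " + back);
            }
        }
    }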
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSegment.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSegment.java
new file mode 100644
index 00000000000..3f588512783
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSegment.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.macho.MachO;
+import jdk.tools.jaotc.binformat.macho.MachO.segment_command_64;
+import jdk.tools.jaotc.binformat.macho.MachOByteBuffer;
+
+public class MachOSegment {
+ ByteBuffer segment;
+
+ public MachOSegment(int size, int fileoff, int filesize, int nsects) {
+ segment = MachOByteBuffer.allocate(segment_command_64.totalsize);
+
+ segment.putInt(segment_command_64.cmd.off, segment_command_64.LC_SEGMENT_64);
+ segment.putInt(segment_command_64.cmdsize.off, size);
+ segment.putInt(segment_command_64.maxprot.off, 7);
+ segment.putInt(segment_command_64.initprot.off, 7);
+ segment.putInt(segment_command_64.nsects.off, nsects);
+ segment.putInt(segment_command_64.flags.off, 0);
+ segment.putLong(segment_command_64.vmaddr.off, 0);
+ segment.putLong(segment_command_64.vmsize.off, filesize);
+ segment.putLong(segment_command_64.fileoff.off, fileoff);
+ segment.putLong(segment_command_64.filesize.off, filesize);
+ }
+
+ public byte[] getArray() {
+ return segment.array();
+ }
+}
+
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSymbol.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSymbol.java
new file mode 100644
index 00000000000..7c4444d9cab
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSymbol.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.NativeSymbol;
+import jdk.tools.jaotc.binformat.macho.MachO;
+import jdk.tools.jaotc.binformat.macho.MachO.nlist_64;
+import jdk.tools.jaotc.binformat.macho.MachOByteBuffer;
+
+public class MachOSymbol extends NativeSymbol {
+ ByteBuffer sym;
+
+ public MachOSymbol(int symbolindex, int strindex, byte type, byte sectindex, long offset) {
+ super(symbolindex);
+ sym = MachOByteBuffer.allocate(nlist_64.totalsize);
+
+ sym.putInt(nlist_64.n_strx.off, strindex);
+ sym.put(nlist_64.n_type.off, type);
+ // Section indexes start at 1 but we manage the index internally
+ // as 0 relative
+ sym.put(nlist_64.n_sect.off, (byte)(sectindex+1));
+ sym.putChar(nlist_64.n_desc.off, (char )0);
+ sym.putLong(nlist_64.n_value.off, offset);
+ }
+
+ public byte[] getArray() {
+ return sym.array();
+ }
+}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSymtab.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSymtab.java
new file mode 100644
index 00000000000..be24bc83cd8
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOSymtab.java
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.ArrayList;
+
+import jdk.tools.jaotc.binformat.macho.MachO;
+import jdk.tools.jaotc.binformat.macho.MachO.symtab_command;
+import jdk.tools.jaotc.binformat.macho.MachO.nlist_64;
+import jdk.tools.jaotc.binformat.macho.MachOSymbol;
+import jdk.tools.jaotc.binformat.macho.MachOByteBuffer;
+
+public class MachOSymtab {
+
+ /**
+ * ByteBuffer holding the LC_SYMTAB command contents
+ */
+ ByteBuffer symtabCmd;
+
+ /**
+ * ByteBuffer holding the symbol table entries and strings
+ */
+ ByteBuffer symtabData;
+
+ int symtabDataSize;
+
+ ArrayList<MachOSymbol> localSymbols = new ArrayList<MachOSymbol>();
+ ArrayList<MachOSymbol> globalSymbols = new ArrayList<MachOSymbol>();
+ ArrayList<MachOSymbol> undefSymbols = new ArrayList<MachOSymbol>();
+
+ /**
+ * number of symbols added
+ */
+ int symbolCount;
+
+ /**
+ * String holding symbol table strings
+ */
+ private StringBuilder strTabContent = new StringBuilder();
+
+ /**
+ * Keeps track of bytes in string table since strTabContent.length()
+ * is number of chars, not bytes.
+ */
+ private int strTabNrOfBytes = 0;
+
+ public MachOSymtab() {
+ symtabCmd = MachOByteBuffer.allocate(symtab_command.totalsize);
+
+ symtabCmd.putInt(symtab_command.cmd.off, symtab_command.LC_SYMTAB);
+ symtabCmd.putInt(symtab_command.cmdsize.off, symtab_command.totalsize);
+
+ symbolCount = 0;
+
+ }
+
+ public int getAlign() {
+ return (4);
+ }
+
+ public MachOSymbol addSymbolEntry(String name, byte type, byte secHdrIndex, long offset) {
+ // Get the current symbol index and append symbol name to string table.
+ int index;
+ MachOSymbol sym;
+
+ if (name.isEmpty()) {
+ index = 0;
+ strTabContent.append('\0');
+ strTabNrOfBytes += 1;
+ sym = new MachOSymbol(symbolCount, index, type, secHdrIndex, offset);
+ localSymbols.add(sym);
+ } else {
+ // We can't trust strTabContent.length() since that is
+ // chars (UTF16), keep track of bytes on our own.
+ index = strTabNrOfBytes;
+ strTabContent.append("_").append(name).append('\0');
+ // + 1 for null, + 1 for "_"
+ strTabNrOfBytes += (name.getBytes().length + 1 + 1);
+
+ sym = new MachOSymbol(symbolCount, index, type, secHdrIndex, offset);
+ switch (type) {
+ case nlist_64.N_EXT:
+ undefSymbols.add(sym);
+ break;
+ case nlist_64.N_SECT:
+ case nlist_64.N_UNDF: // null symbol
+ localSymbols.add(sym);
+ break;
+ case nlist_64.N_SECT|nlist_64.N_EXT:
+ globalSymbols.add(sym);
+ break;
+ default:
+ System.out.println("Unsupported Symbol type " + type);
+ break;
+ }
+ }
+ symbolCount++;
+ return (sym);
+ }
+
+ public void setOffset(int symoff) {
+ symtabCmd.putInt(symtab_command.symoff.off, symoff);
+ }
+
+ // Update the symbol indexes once all symbols have been added.
+ // This is required since we'll be reordering the symbols in the
+ // file to be in the order of Local, global and Undefined.
+ public void updateIndexes() {
+ int index = 0;
+
+ // Update the local symbol indexes
+ for (int i = 0; i < localSymbols.size(); i++ ) {
+ MachOSymbol sym = localSymbols.get(i);
+ sym.setIndex(index++);
+ }
+
+ // Update the global symbol indexes
+ for (int i = 0; i < globalSymbols.size(); i++ ) {
+ MachOSymbol sym = globalSymbols.get(i);
+ sym.setIndex(index++);
+ }
+
+ // Update the undefined symbol indexes
+ for (int i = index; i < undefSymbols.size(); i++ ) {
+ MachOSymbol sym = undefSymbols.get(i);
+ sym.setIndex(index++);
+ }
+ }
+
+ // Update LC_SYMTAB command fields based on the number of symbols added
+ // return the file size taken up by symbol table entries and strings
+ public int calcSizes() {
+ int stroff;
+
+ stroff = symtabCmd.getInt(symtab_command.symoff.off) + (nlist_64.totalsize * symbolCount);
+ symtabCmd.putInt(symtab_command.nsyms.off, symbolCount);
+ symtabCmd.putInt(symtab_command.stroff.off, stroff);
+ symtabCmd.putInt(symtab_command.strsize.off, strTabNrOfBytes);
+ symtabDataSize = (nlist_64.totalsize * symbolCount) + strTabNrOfBytes;
+
+ return (symtabDataSize);
+ }
+
+ public int getNumLocalSyms() { return localSymbols.size(); }
+ public int getNumGlobalSyms() { return globalSymbols.size(); }
+ public int getNumUndefSyms() { return undefSymbols.size(); }
+
+ public byte[] getCmdArray() {
+ return symtabCmd.array();
+ }
+
+ // Create a single byte array that contains the symbol table entries
+ // and string table
+ public byte[] getDataArray() {
+ int index = 0;
+ symtabData = MachOByteBuffer.allocate(symtabDataSize);
+ byte [] retarray;
+
+ // Add the local symbols
+ for (int i = 0; i < localSymbols.size(); i++ ) {
+ MachOSymbol sym = localSymbols.get(i);
+ byte [] arr = sym.getArray();
+ symtabData.put(arr);
+ }
+ // Add the global symbols
+ for (int i = 0; i < globalSymbols.size(); i++ ) {
+ MachOSymbol sym = globalSymbols.get(i);
+ byte [] arr = sym.getArray();
+ symtabData.put(arr);
+ }
+ // Add the undefined symbols
+ for (int i = 0; i < undefSymbols.size(); i++ ) {
+ MachOSymbol sym = undefSymbols.get(i);
+ byte [] arr = sym.getArray();
+ symtabData.put(arr);
+ }
+
+ // Add the stringtable
+ byte [] strs = strTabContent.toString().getBytes();
+ symtabData.put(strs);
+
+ retarray = symtabData.array();
+
+ return (retarray);
+ }
+}
+
+
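MachOSymtab counts string-table bytes itself because StringBuilder.length() reports UTF-16 chars, not encoded bytes, and every non-empty name is stored as "_" + name + '\0'. A standalone illustration of why the two counts can differ (the name with a non-ASCII character is hypothetical; the patch itself encodes names with the platform default charset):

    import java.nio.charset.StandardCharsets;

    public class StrTabBytesDemo {
        public static void main(String[] args) {
            String ascii = "stub_1";
            String nonAscii = "caf\u00e9";      // hypothetical name with one non-ASCII char

            // char count and encoded byte count agree for ASCII names...
            System.out.println(ascii.length() + " chars, "
                    + ascii.getBytes(StandardCharsets.UTF_8).length + " bytes");     // 6, 6
            // ...but diverge otherwise, which is why MachOSymtab keeps its own byte
            // counter instead of trusting StringBuilder.length().
            System.out.println(nonAscii.length() + " chars, "
                    + nonAscii.getBytes(StandardCharsets.UTF_8).length + " bytes");  // 4, 5
        }
    }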
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOTargetInfo.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOTargetInfo.java
new file mode 100644
index 00000000000..1a0a199d499
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOTargetInfo.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.nio.ByteOrder;
+import jdk.tools.jaotc.binformat.macho.MachO;
+import jdk.tools.jaotc.binformat.macho.MachO.mach_header_64;
+
+/**
+ * Class that abstracts MACH-O target details.
+ *
+ */
+public class MachOTargetInfo {
+ /**
+ * Target architecture and subtype.
+ */
+ private static final int arch;
+ private static final int subarch;
+
+ /**
+ * Architecture endian-ness.
+ */
+ private static final int endian = mach_header_64.CPU_SUBTYPE_LITTLE_ENDIAN;
+
+ /**
+ * Target OS string.
+ */
+ private static final String osName;
+
+ static {
+ // Find the target arch details
+ String archStr = System.getProperty("os.arch").toLowerCase();
+
+ if (ByteOrder.nativeOrder() != ByteOrder.LITTLE_ENDIAN) {
+ System.out.println("Only Little Endian byte order supported!");
+ }
+
+ if (archStr.equals("amd64") || archStr.equals("x86_64")) {
+ arch = mach_header_64.CPU_TYPE_X86_64;
+ subarch = mach_header_64.CPU_SUBTYPE_I386_ALL;
+ } else {
+ System.out.println("Unsupported architecture " + archStr);
+ arch = mach_header_64.CPU_TYPE_ANY;
+ subarch = 0;
+ }
+
+ osName = System.getProperty("os.name").toLowerCase();
+ }
+
+ public static int getMachOArch() {
+ return arch;
+ }
+
+ public static int getMachOSubArch() {
+ return subarch;
+ }
+
+ public static int getMachOEndian() {
+ return endian;
+ }
+
+ public static String getOsName() {
+ return osName;
+ }
+}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOVersion.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOVersion.java
new file mode 100644
index 00000000000..57475aeb0eb
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/macho/MachOVersion.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.macho;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.macho.MachO;
+import jdk.tools.jaotc.binformat.macho.MachO.version_min_command;
+import jdk.tools.jaotc.binformat.macho.MachOByteBuffer;
+
+public class MachOVersion {
+ ByteBuffer version;
+
+ public MachOVersion() {
+ version = MachOByteBuffer.allocate(version_min_command.totalsize);
+
+ version.putInt(version_min_command.cmd.off, version_min_command.LC_VERSION_MIN_MACOSX);
+ version.putInt(version_min_command.cmdsize.off, version_min_command.totalsize);
+ version.putInt(version_min_command.version.off, (10 << 16) | (10 << 8)); /* MacOSX 10.10 */
+ version.putInt(version_min_command.sdk.off, 0); /* N/A SDK */
+ }
+
+ public byte[] getArray() {
+ return version.array();
+ }
+}
+
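The LC_VERSION_MIN_MACOSX payload above encodes the minimum OS version as major << 16 | minor << 8 | patch, here 10.10.0. A standalone sketch of decoding that packed value:

    public class VersionMinDemo {
        public static void main(String[] args) {
            int encoded = (10 << 16) | (10 << 8);       // 10.10.0, as written by MachOVersion
            int major = (encoded >> 16) & 0xFFFF;
            int minor = (encoded >> 8) & 0xFF;
            int patch = encoded & 0xFF;
            System.out.println(major + "." + minor + "." + patch);  // 10.10.0
        }
    }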
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/JPECoffRelocObject.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/JPECoffRelocObject.java
new file mode 100644
index 00000000000..89c853e5ac9
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/JPECoffRelocObject.java
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.pecoff;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import jdk.tools.jaotc.binformat.Container;
+import jdk.tools.jaotc.binformat.BinaryContainer;
+import jdk.tools.jaotc.binformat.ByteContainer;
+import jdk.tools.jaotc.binformat.CodeContainer;
+import jdk.tools.jaotc.binformat.ReadOnlyDataContainer;
+import jdk.tools.jaotc.binformat.Relocation;
+import jdk.tools.jaotc.binformat.Relocation.RelocType;
+import jdk.tools.jaotc.binformat.Symbol;
+import jdk.tools.jaotc.binformat.NativeSymbol;
+import jdk.tools.jaotc.binformat.Symbol.Binding;
+import jdk.tools.jaotc.binformat.Symbol.Kind;
+
+import jdk.tools.jaotc.binformat.pecoff.PECoff;
+import jdk.tools.jaotc.binformat.pecoff.PECoffSymbol;
+import jdk.tools.jaotc.binformat.pecoff.PECoffTargetInfo;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_FILE_HEADER;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_SECTION_HEADER;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_SYMBOL;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_RELOCATION;
+
+public class JPECoffRelocObject {
+
+ private final BinaryContainer binContainer;
+
+ private final PECoffContainer pecoffContainer;
+
+ private final int segmentSize;
+
+ public JPECoffRelocObject(BinaryContainer binContainer, String outputFileName, String aotVersion) {
+ this.binContainer = binContainer;
+ this.pecoffContainer = new PECoffContainer(outputFileName, aotVersion);
+ this.segmentSize = binContainer.getCodeSegmentSize();
+ if (segmentSize != 64) {
+ System.out.println("binContainer alignment size not 64 bytes, update JPECoffRelocObject");
+ }
+ }
+
+ private PECoffSection createByteSection(ArrayList<PECoffSection> sections,
+ String sectName,
+ byte [] scnData,
+ boolean hasRelocs,
+ int scnFlags) {
+
+ PECoffSection sect = new PECoffSection(sectName,
+ scnData,
+ scnFlags,
+ hasRelocs,
+ sections.size());
+ // Add this section to our list
+ sections.add(sect);
+
+ return (sect);
+ }
+
+ private void createByteSection(ArrayList<PECoffSection> sections,
+ ByteContainer c, int scnFlags) {
+ PECoffSection sect;
+ boolean hasRelocs = c.hasRelocations();
+ byte[] scnData = c.getByteArray();
+
+ sect = createByteSection(sections, c.getContainerName(),
+ scnData, hasRelocs,
+ scnFlags);
+
+ c.setSectionId(sect.getSectionId());
+ }
+
+ private void createCodeSection(ArrayList<PECoffSection> sections, CodeContainer c) {
+ createByteSection(sections, c, IMAGE_SECTION_HEADER.IMAGE_SCN_MEM_READ |
+ IMAGE_SECTION_HEADER.IMAGE_SCN_MEM_EXECUTE |
+ IMAGE_SECTION_HEADER.IMAGE_SCN_ALIGN_64BYTES |
+ IMAGE_SECTION_HEADER.IMAGE_SCN_CNT_CODE);
+ }
+
+ private void createReadOnlySection(ArrayList<PECoffSection> sections, ReadOnlyDataContainer c) {
+ createByteSection(sections, c, IMAGE_SECTION_HEADER.IMAGE_SCN_MEM_READ |
+ IMAGE_SECTION_HEADER.IMAGE_SCN_ALIGN_64BYTES |
+ IMAGE_SECTION_HEADER.IMAGE_SCN_CNT_INITIALIZED_DATA);
+ }
+
+ private void createReadWriteSection(ArrayList<PECoffSection> sections, ByteContainer c) {
+ int scnFlags = IMAGE_SECTION_HEADER.IMAGE_SCN_MEM_READ |
+ IMAGE_SECTION_HEADER.IMAGE_SCN_MEM_WRITE |
+ IMAGE_SECTION_HEADER.IMAGE_SCN_ALIGN_64BYTES;
+
+ if (c.getByteArray().length > 0)
+ scnFlags |= IMAGE_SECTION_HEADER.IMAGE_SCN_CNT_INITIALIZED_DATA;
+ else
+ scnFlags |= IMAGE_SECTION_HEADER.IMAGE_SCN_CNT_UNINITIALIZED_DATA;
+
+ createByteSection(sections, c, scnFlags);
+ }
+
+ /**
+ * Create an PECoff relocatable object
+ *
+ * @param relocationTable
+ * @param symbols
+ * @throws IOException throws {@code IOException} as a result of file system access failures.
+ */
+ public void createPECoffRelocObject(Map<Symbol, List<Relocation>> relocationTable, Collection<Symbol> symbols) throws IOException {
+ ArrayList<PECoffSection> sections = new ArrayList<PECoffSection>();
+
+ // Create text section
+ createCodeSection(sections, binContainer.getCodeContainer());
+ createReadOnlySection(sections, binContainer.getMetaspaceNamesContainer());
+ createReadOnlySection(sections, binContainer.getKlassesOffsetsContainer());
+ createReadOnlySection(sections, binContainer.getMethodsOffsetsContainer());
+ createReadOnlySection(sections, binContainer.getKlassesDependenciesContainer());
+ createReadWriteSection(sections, binContainer.getMetaspaceGotContainer());
+ createReadWriteSection(sections, binContainer.getMetadataGotContainer());
+ createReadWriteSection(sections, binContainer.getMethodStateContainer());
+ createReadWriteSection(sections, binContainer.getOopGotContainer());
+ createReadWriteSection(sections, binContainer.getMethodMetadataContainer());
+ createReadOnlySection(sections, binContainer.getStubsOffsetsContainer());
+ createReadOnlySection(sections, binContainer.getHeaderContainer().getContainer());
+ createReadOnlySection(sections, binContainer.getCodeSegmentsContainer());
+ createReadOnlySection(sections, binContainer.getConstantDataContainer());
+ createReadOnlySection(sections, binContainer.getConfigContainer());
+
+ // createExternalLinkage();
+
+ createCodeSection(sections, binContainer.getExtLinkageContainer());
+ createReadWriteSection(sections, binContainer.getExtLinkageGOTContainer());
+
+ // Allocate PECoff Header
+ PECoffHeader header = new PECoffHeader();
+
+ // Get PECoff symbol data from BinaryContainer object's symbol tables
+ PECoffSymtab symtab = createPECoffSymbolTables(sections, symbols);
+
+ // Add Linker Directives Section
+ createByteSection(sections, ".drectve",
+ symtab.getDirectiveArray(), false,
+ IMAGE_SECTION_HEADER.IMAGE_SCN_LNK_INFO |
+ IMAGE_SECTION_HEADER.IMAGE_SCN_LNK_REMOVE |
+ IMAGE_SECTION_HEADER.IMAGE_SCN_ALIGN_1BYTES);
+
+ // Create the Relocation Tables
+ PECoffRelocTable pecoffRelocs = createPECoffRelocTable(sections, relocationTable);
+
+ // File Output Order
+ //
+ // HEADER (Need address of Symbol Table + symbol count)
+ // SECTIONS (Need pointer to Section Data, Relocation Table)
+ // DIRECTIVES
+ // SYMBOL TABLE
+ // SYMBOLS
+ // SECTION DATA
+ // RELOCATION TABLE
+
+ // Calculate Offset for Symbol table
+ int file_offset = IMAGE_FILE_HEADER.totalsize +
+ (IMAGE_SECTION_HEADER.totalsize*sections.size());
+
+ // Update Header fields
+ header.setSectionCount(sections.size());
+ header.setSymbolCount(symtab.getSymtabCount());
+ header.setSymbolOff(file_offset);
+
+ // Calculate file offset for first section
+ file_offset += ((symtab.getSymtabCount() * IMAGE_SYMBOL.totalsize) +
+ symtab.getStrtabSize());
+ // And round it up
+ file_offset = (file_offset + (sections.get(0).getDataAlign()-1)) &
+ ~((sections.get(0).getDataAlign()-1));
+
+ // Calc file offsets for section data
+ for (int i = 0; i < sections.size(); i++) {
+ PECoffSection sect = sections.get(i);
+ file_offset = (file_offset + (sect.getDataAlign()-1)) &
+ ~((sect.getDataAlign()-1));
+ sect.setOffset(file_offset);
+ file_offset += sect.getSize();
+ }
+
+ // Update relocation sizing information in each section
+ for (int i = 0; i < sections.size(); i++) {
+ PECoffSection sect = sections.get(i);
+ if (sect.hasRelocations()) {
+ int nreloc = pecoffRelocs.getNumRelocs(i);
+ sect.setReloff(file_offset);
+ sect.setRelcount(nreloc);
+ // extended relocations add an addition entry
+ if (nreloc > 0xFFFF) nreloc++;
+ file_offset += (nreloc * IMAGE_RELOCATION.totalsize);
+ }
+ }
+
+ // Write out the Header
+ pecoffContainer.writeBytes(header.getArray());
+
+ // Write out the section table
+ for (int i = 0; i < sections.size(); i++) {
+ PECoffSection sect = sections.get(i);
+ pecoffContainer.writeBytes(sect.getArray(), PECoffSection.getShdrAlign());
+ }
+
+ // Write out the symbol table and string table
+ pecoffContainer.writeBytes(symtab.getSymtabArray(), 4);
+ pecoffContainer.writeBytes(symtab.getStrtabArray(), 1);
+
+ // Write out each section contents
+ for (int i = 0; i < sections.size(); i++) {
+ PECoffSection sect = sections.get(i);
+ pecoffContainer.writeBytes(sect.getDataArray(), sect.getDataAlign());
+ }
+
+ // Write out Relocation Tables
+ for (int i = 0; i < sections.size(); i++) {
+ if (pecoffRelocs.getNumRelocs(i) > 0) {
+ pecoffContainer.writeBytes(pecoffRelocs.getRelocData(i));
+ }
+ }
+ pecoffContainer.close();
+ }
+
+ /**
+ * Construct PECoff symbol data from BinaryContainer object's symbol tables. Both dynamic PECoff
+ * symbol table and PECoff symbol table are created from BinaryContainer's symbol info.
+ *
+ * @param symbols
+ */
+ private PECoffSymtab createPECoffSymbolTables(ArrayList<PECoffSection> sections, Collection<Symbol> symbols) {
+ PECoffSymtab symtab = new PECoffSymtab();
+
+ // First, create the initial null symbol. This is a local symbol.
+ // symtab.addSymbolEntry("", (byte)0, (byte)0, (byte)0, 0, 0);
+
+ // Now create PECoff symbol entries for all symbols.
+ for (Symbol symbol : symbols) {
+ // Get the index of section this symbol is defined in.
+ int secHdrIndex = symbol.getSection().getSectionId();
+ PECoffSymbol pecoffSymbol = symtab.addSymbolEntry(symbol.getName(), getPECoffTypeOf(symbol), getPECoffClassOf(symbol), (byte)secHdrIndex, symbol.getOffset(), symbol.getSize());
+ symbol.setNativeSymbol((NativeSymbol)pecoffSymbol);
+ }
+ return (symtab);
+ }
+
+ private static byte getPECoffTypeOf(Symbol sym) {
+ Kind kind = sym.getKind();
+ if (kind == Symbol.Kind.NATIVE_FUNCTION || kind == Symbol.Kind.JAVA_FUNCTION) {
+ return IMAGE_SYMBOL.IMAGE_SYM_DTYPE_FUNCTION;
+ }
+ return IMAGE_SYMBOL.IMAGE_SYM_DTYPE_NONE;
+ }
+
+ private static byte getPECoffClassOf(Symbol sym) {
+ Binding binding = sym.getBinding();
+ if (binding == Symbol.Binding.GLOBAL) {
+ return IMAGE_SYMBOL.IMAGE_SYM_CLASS_EXTERNAL;
+ }
+ return IMAGE_SYMBOL.IMAGE_SYM_CLASS_STATIC;
+ }
+
+ /**
+ * Construct a PECoff relocation table from BinaryContainer object's relocation tables.
+ *
+ * @param sections
+ * @param relocationTable
+ */
+ private PECoffRelocTable createPECoffRelocTable(ArrayList<PECoffSection> sections,
+ Map<Symbol, List<Relocation>> relocationTable) {
+
+ PECoffRelocTable pecoffRelocTable = new PECoffRelocTable(sections.size());
+ /*
+ * For each of the symbols with associated relocation records, create a PECoff relocation
+ * entry.
+ */
+ for (Map.Entry<Symbol, List<Relocation>> entry : relocationTable.entrySet()) {
+ List<Relocation> relocs = entry.getValue();
+ Symbol symbol = entry.getKey();
+
+ for (Relocation reloc : relocs) {
+ createRelocation(symbol, reloc, pecoffRelocTable);
+ }
+ }
+
+ for (Map.Entry<Symbol, Relocation> entry : binContainer.getUniqueRelocationTable().entrySet()) {
+ createRelocation(entry.getKey(), entry.getValue(), pecoffRelocTable);
+ }
+
+ return (pecoffRelocTable);
+ }
+
+ private void createRelocation(Symbol symbol, Relocation reloc, PECoffRelocTable pecoffRelocTable) {
+ RelocType relocType = reloc.getType();
+
+ int pecoffRelocType = getPECoffRelocationType(relocType);
+ PECoffSymbol sym = (PECoffSymbol)symbol.getNativeSymbol();
+ int symno = sym.getIndex();
+ int sectindex = reloc.getSection().getSectionId();
+ int offset = reloc.getOffset();
+ int addend = 0;
+
+ switch (relocType) {
+ case FOREIGN_CALL_DIRECT:
+ case JAVA_CALL_DIRECT:
+ case STUB_CALL_DIRECT:
+ case FOREIGN_CALL_INDIRECT_GOT: {
+ // Create relocation entry
+ addend = -4; // Size in bytes of the patch location
+ // Relocation should be applied at the location after call operand
+ offset = offset + reloc.getSize() + addend;
+ break;
+ }
+ case FOREIGN_CALL_DIRECT_FAR: {
+ // Create relocation entry
+ addend = -8; // Size in bytes of the patch location
+ // Relocation should be applied at the location after call operand
+ // 10 = 2 (jmp [r]) + 8 (imm64)
+ offset = offset + reloc.getSize() + addend - 2;
+ break;
+ }
+ case FOREIGN_CALL_INDIRECT:
+ case JAVA_CALL_INDIRECT:
+ case STUB_CALL_INDIRECT: {
+ // Do nothing.
+ return;
+ }
+ case EXTERNAL_DATA_REFERENCE_FAR: {
+ // Create relocation entry
+ addend = -4; // Size of 32-bit address of the GOT
+ /*
+ * Relocation should be applied before the test instruction to the move instruction.
+ * offset points to the test instruction after the instruction that loads
+ * the address of polling page. So set the offset appropriately.
+ */
+ offset = offset + addend;
+ break;
+ }
+ case METASPACE_GOT_REFERENCE:
+ case EXTERNAL_PLT_TO_GOT:
+ case STATIC_STUB_TO_STATIC_METHOD:
+ case STATIC_STUB_TO_HOTSPOT_LINKAGE_GOT: {
+ addend = -4; // Size of 32-bit address of the GOT
+ /*
+ * Relocation should be applied before the test instruction to
+ * the move instruction. reloc.getOffset() points to the
+ * test instruction after the instruction that loads the
+ * address of polling page. So set the offset appropriately.
+ */
+ offset = offset + addend;
+ break;
+ }
+ case EXTERNAL_GOT_TO_PLT:
+ case LOADTIME_ADDRESS: {
+ // this is load time relocations
+ break;
+ }
+ default:
+ throw new InternalError("Unhandled relocation type: " + relocType);
+ }
+ pecoffRelocTable.createRelocationEntry(sectindex, offset, symno, pecoffRelocType);
+ }
+
+ // Return IMAGE_RELOCATION Type based on relocType
+ private static int getPECoffRelocationType(RelocType relocType) {
+ int pecoffRelocType = 0; // R_<ARCH>_NONE if #define'd to 0 for all values of ARCH
+ switch (PECoffTargetInfo.getPECoffArch()) {
+ case IMAGE_FILE_HEADER.IMAGE_FILE_MACHINE_AMD64:
+ if (relocType == RelocType.FOREIGN_CALL_DIRECT ||
+ relocType == RelocType.JAVA_CALL_DIRECT ||
+ relocType == RelocType.FOREIGN_CALL_INDIRECT_GOT) {
+ pecoffRelocType = IMAGE_RELOCATION.IMAGE_REL_AMD64_REL32;
+ } else if (relocType == RelocType.STUB_CALL_DIRECT) {
+ pecoffRelocType = IMAGE_RELOCATION.IMAGE_REL_AMD64_REL32;
+ } else if (relocType == RelocType.FOREIGN_CALL_DIRECT_FAR) {
+ pecoffRelocType = IMAGE_RELOCATION.IMAGE_REL_AMD64_ADDR64;
+ } else if (relocType == RelocType.FOREIGN_CALL_INDIRECT ||
+ relocType == RelocType.JAVA_CALL_INDIRECT ||
+ relocType == RelocType.STUB_CALL_INDIRECT) {
+ pecoffRelocType = IMAGE_RELOCATION.IMAGE_REL_AMD64_ABSOLUTE;
+ } else if ((relocType == RelocType.EXTERNAL_DATA_REFERENCE_FAR)) {
+ pecoffRelocType = IMAGE_RELOCATION.IMAGE_REL_AMD64_REL32;
+ } else if (relocType == RelocType.METASPACE_GOT_REFERENCE ||
+ relocType == RelocType.EXTERNAL_PLT_TO_GOT ||
+ relocType == RelocType.STATIC_STUB_TO_STATIC_METHOD ||
+ relocType == RelocType.STATIC_STUB_TO_HOTSPOT_LINKAGE_GOT) {
+ pecoffRelocType = IMAGE_RELOCATION.IMAGE_REL_AMD64_REL32;
+ } else if (relocType == RelocType.EXTERNAL_GOT_TO_PLT ||
+ relocType == RelocType.LOADTIME_ADDRESS) {
+ pecoffRelocType = IMAGE_RELOCATION.IMAGE_REL_AMD64_ADDR64;
+ } else {
+ assert false : "Unhandled relocation type: " + relocType;
+ }
+ break;
+ default:
+ System.out.println("Relocation Type mapping: Unhandled architecture");
+ }
+ return pecoffRelocType;
+ }
+}
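createPECoffRelocObject repeatedly rounds the running file offset up to a section's data alignment with the expression (offset + (align - 1)) & ~(align - 1), which is valid only for power-of-two alignments. A standalone worked example of that rounding:

    public class AlignUpDemo {
        static int alignUp(int offset, int align) {
            // Works only for power-of-two alignments, as used for the section data here.
            return (offset + (align - 1)) & ~(align - 1);
        }

        public static void main(String[] args) {
            System.out.println(alignUp(20, 16));   // 32
            System.out.println(alignUp(64, 64));   // 64 (already aligned)
            System.out.println(alignUp(65, 64));   // 128
        }
    }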
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoff.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoff.java
new file mode 100644
index 00000000000..f71b2a7dd7c
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoff.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.pecoff;
+
+/**
+ *
+ * Support for the creation of Coff files.
+ * Current support is limited to 64 bit x86_64.
+ *
+ */
+
+public class PECoff {
+
+ /**
+ * IMAGE_FILE_HEADER structure defines
+ */
+ public enum IMAGE_FILE_HEADER {
+ Machine( 0, 2),
+ NumberOfSections( 2, 2),
+ TimeDateStamp( 4, 4),
+ PointerToSymbolTable( 8, 4),
+ NumberOfSymbols(12, 4),
+ SizeOfOptionalHeader(16, 2),
+ Characteristics(18, 2);
+
+ public final int off;
+ public final int sz;
+
+ IMAGE_FILE_HEADER(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 20;
+
+ /**
+ * IMAGE_FILE_HEADER defines
+ */
+
+ /**
+ * Machine
+ */
+ public static final char IMAGE_FILE_MACHINE_UNKNOWN = 0x0;
+ public static final char IMAGE_FILE_MACHINE_AMD64 = 0x8664;
+
+ }
+
+ /**
+ * IMAGE_SECTION_HEADER structure defines
+ */
+ public enum IMAGE_SECTION_HEADER {
+ Name( 0, 8),
+ PhysicalAddress( 8, 4),
+ VirtualSize( 8, 4),
+ VirtualAddress(12, 4),
+ SizeOfRawData(16, 4),
+ PointerToRawData(20, 4),
+ PointerToRelocations(24, 4),
+ PointerToLinenumbers(28, 4),
+ NumberOfRelocations(32, 2),
+ NumberOfLinenumbers(34, 2),
+ Characteristics(36, 4);
+
+ public final int off;
+ public final int sz;
+
+ IMAGE_SECTION_HEADER(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 40;
+
+ /**
+ * IMAGE_SECTION_HEADER defines
+ */
+
+ /**
+ * Characteristics
+ */
+ public static final int IMAGE_SCN_CNT_CODE = 0x20;
+ public static final int IMAGE_SCN_CNT_INITIALIZED_DATA = 0x40;
+ public static final int IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x80;
+ public static final int IMAGE_SCN_LNK_COMDAT = 0x1000;
+ public static final int IMAGE_SCN_LNK_INFO = 0x200;
+ public static final int IMAGE_SCN_LNK_REMOVE = 0x800;
+
+ public static final int IMAGE_SCN_ALIGN_1BYTES = 0x100000;
+ public static final int IMAGE_SCN_ALIGN_2BYTES = 0x200000;
+ public static final int IMAGE_SCN_ALIGN_4BYTES = 0x300000;
+ public static final int IMAGE_SCN_ALIGN_8BYTES = 0x400000;
+ public static final int IMAGE_SCN_ALIGN_16BYTES = 0x500000;
+ public static final int IMAGE_SCN_ALIGN_32BYTES = 0x600000;
+ public static final int IMAGE_SCN_ALIGN_64BYTES = 0x700000;
+ public static final int IMAGE_SCN_ALIGN_MASK = 0xf00000;
+ public static final int IMAGE_SCN_ALIGN_SHIFT = 20;
+
+ public static final int IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000;
+
+ public static final int IMAGE_SCN_MEM_SHARED = 0x10000000;
+ public static final int IMAGE_SCN_MEM_EXECUTE = 0x20000000;
+ public static final int IMAGE_SCN_MEM_READ = 0x40000000;
+ public static final int IMAGE_SCN_MEM_WRITE = 0x80000000;
+
+ }
+
+ /**
+ * Symbol table entry definitions
+ *
+ * IMAGE_SYMBOL structure defines
+ */
+ public enum IMAGE_SYMBOL {
+ ShortName( 0, 8),
+ Short( 0, 4),
+ Long( 4, 4),
+ Value( 8, 4),
+ SectionNumber(12, 2),
+ Type(14, 2),
+ StorageClass(16, 1),
+ NumberOfAuxSymbols(17, 1);
+
+ public final int off;
+ public final int sz;
+
+ IMAGE_SYMBOL(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 18;
+
+ /**
+ * Type
+ */
+ public static final int IMAGE_SYM_DTYPE_NONE = 0x0;
+ public static final int IMAGE_SYM_DTYPE_FUNCTION = 0x20;
+
+ /**
+ * StorageClass
+ */
+ public static final int IMAGE_SYM_CLASS_NULL = 0x0;
+ public static final int IMAGE_SYM_CLASS_EXTERNAL = 0x2;
+ public static final int IMAGE_SYM_CLASS_STATIC = 0x3;
+ public static final int IMAGE_SYM_CLASS_LABEL = 0x6;
+
+ }
+
+ /**
+ * IMAGE_RELOCATION structure defines
+ */
+ public enum IMAGE_RELOCATION {
+ VirtualAddress( 0, 4),
+ SymbolTableIndex( 4, 4),
+ Type( 8, 2);
+
+ public final int off;
+ public final int sz;
+
+ IMAGE_RELOCATION(int offset, int size) {
+ this.off = offset;
+ this.sz = size;
+ }
+
+ public static int totalsize = 10;
+
+ /**
+ * Relocation types
+ */
+ public static final int IMAGE_REL_AMD64_ABSOLUTE = 0x0;
+ public static final int IMAGE_REL_AMD64_ADDR32 = 0x2;
+ public static final int IMAGE_REL_AMD64_ADDR64 = 0x1;
+ public static final int IMAGE_REL_AMD64_REL32 = 0x4;
+ public static final int IMAGE_REL_AMD64_REL32_1 = 0x5;
+ public static final int IMAGE_REL_AMD64_REL32_2 = 0x6;
+ public static final int IMAGE_REL_AMD64_REL32_3 = 0x7;
+ public static final int IMAGE_REL_AMD64_REL32_4 = 0x8;
+ public static final int IMAGE_REL_AMD64_REL32_5 = 0x9;
+
+ }
+
+}
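PECoff describes each on-disk structure as an enum whose constants carry (offset, size) pairs plus a totalsize, so writers can address fields as buffer.putInt(Field.off, value) without hard-coding offsets. A standalone sketch of the same layout-as-enum pattern for a hypothetical two-field record (not a real PE/COFF structure):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class StructLayoutDemo {
        // Hypothetical fixed-layout record: a 4-byte id followed by a 2-byte flags field.
        enum REC {
            id(0, 4), flags(4, 2);

            public final int off;
            public final int sz;
            REC(int off, int sz) { this.off = off; this.sz = sz; }
            public static final int totalsize = 6;
        }

        public static void main(String[] args) {
            ByteBuffer buf = ByteBuffer.allocate(REC.totalsize).order(ByteOrder.LITTLE_ENDIAN);
            buf.putInt(REC.id.off, 42);                     // write each field at its declared offset
            buf.putChar(REC.flags.off, (char) 0x8664);
            System.out.printf("id=%d flags=0x%04x%n",
                    buf.getInt(REC.id.off), (int) buf.getChar(REC.flags.off));
        }
    }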
diff --git a/hotspot/test/compiler/cpuflags/predicate/AESSupportPredicate.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffByteBuffer.java
similarity index 70%
rename from hotspot/test/compiler/cpuflags/predicate/AESSupportPredicate.java
rename to hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffByteBuffer.java
index c2ac3e24316..e212c6b64d7 100644
--- a/hotspot/test/compiler/cpuflags/predicate/AESSupportPredicate.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffByteBuffer.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,18 +21,18 @@
* questions.
*/
-package compiler.cpuflags.predicate;
+package jdk.tools.jaotc.binformat.pecoff;
-import sun.hotspot.cpuinfo.CPUInfo;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
-import java.util.function.BooleanSupplier;
+public class PECoffByteBuffer {
-public class AESSupportPredicate implements BooleanSupplier {
-
- private static final String AES = "aes";
-
- @Override
- public boolean getAsBoolean() {
- return CPUInfo.getFeatures().contains(AES);
+ public static ByteBuffer allocate(int size) {
+ ByteBuffer buf = ByteBuffer.allocate(size);
+ // Only support Little Endian on Windows
+ buf.order(ByteOrder.LITTLE_ENDIAN);
+ return (buf);
}
+
}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffContainer.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffContainer.java
new file mode 100644
index 00000000000..75158fd219f
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffContainer.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.pecoff;
+
+import java.io.File;
+import java.io.FileOutputStream;
+
+public class PECoffContainer {
+
+ File outputFile;
+ FileOutputStream outputStream;
+ long fileOffset;
+
+ public PECoffContainer(String fileName, String aotVersion) {
+ String baseName;
+
+ outputFile = new File(fileName);
+ if (outputFile.exists()) {
+ outputFile.delete();
+ }
+
+ try {
+ outputStream = new FileOutputStream(outputFile);
+ } catch (Exception e) {
+ System.out.println("PECoffContainer: Can't create file " + fileName);
+ }
+ fileOffset = 0;
+ }
+
+ public void close() {
+ try {
+ outputStream.close();
+ } catch (Exception e) {
+ System.out.println("PECoffContainer: close failed");
+ }
+ }
+
+ public void writeBytes(byte [] bytes) {
+ if (bytes == null) return;
+ try {
+ outputStream.write(bytes);
+ } catch (Exception e) {
+ System.out.println("PECoffContainer: writeBytes failed");
+ }
+ fileOffset += bytes.length;
+ }
+
+ // Write bytes to output file with up front alignment padding
+ public void writeBytes(byte [] bytes, int alignment) {
+ if (bytes == null) return;
+ try {
+ // Pad to alignment
+ while ((fileOffset & (long)(alignment-1)) != 0) {
+ outputStream.write(0);
+ fileOffset++;
+ }
+ outputStream.write(bytes);
+ } catch (Exception e) {
+ System.out.println("PECoffContainer: writeBytes failed");
+ }
+ fileOffset += bytes.length;
+ }
+}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffHeader.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffHeader.java
new file mode 100644
index 00000000000..7d2c84f841d
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffHeader.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.pecoff;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.pecoff.PECoff;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_FILE_HEADER;
+import jdk.tools.jaotc.binformat.pecoff.PECoffTargetInfo;
+import jdk.tools.jaotc.binformat.pecoff.PECoffByteBuffer;
+
+public class PECoffHeader {
+ ByteBuffer header;
+
+ public PECoffHeader() {
+ header = PECoffByteBuffer.allocate(IMAGE_FILE_HEADER.totalsize);
+
+ header.putChar(IMAGE_FILE_HEADER.Machine.off, IMAGE_FILE_HEADER.IMAGE_FILE_MACHINE_AMD64);
+ header.putInt(IMAGE_FILE_HEADER.TimeDateStamp.off, (int)(System.currentTimeMillis()/1000));
+ header.putInt(IMAGE_FILE_HEADER.PointerToSymbolTable.off, 0);
+ header.putInt(IMAGE_FILE_HEADER.NumberOfSymbols.off, 0);
+ header.putChar(IMAGE_FILE_HEADER.SizeOfOptionalHeader.off, (char)0);
+ header.putChar(IMAGE_FILE_HEADER.Characteristics.off, (char)0);
+
+ }
+
+ // Update header with the number of total sections
+ public void setSectionCount(int count) {
+ header.putChar(IMAGE_FILE_HEADER.NumberOfSections.off, (char)count);
+ }
+
+ // Update header with the number of total symbols
+ public void setSymbolCount(int count) {
+ header.putInt(IMAGE_FILE_HEADER.NumberOfSymbols.off, count);
+ }
+
+ // Update header with the offset of symbol table
+ public void setSymbolOff(int offset) {
+ header.putInt(IMAGE_FILE_HEADER.PointerToSymbolTable.off, offset);
+ }
+
+ public byte[] getArray() {
+ return header.array();
+ }
+}
+
diff --git a/hotspot/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/util/ArraySet.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffRelocEntry.java
similarity index 56%
rename from hotspot/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/util/ArraySet.java
rename to hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffRelocEntry.java
index cf4a8824b6d..11284dc77a4 100644
--- a/hotspot/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/util/ArraySet.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffRelocEntry.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -20,38 +20,30 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
-package org.graalvm.compiler.core.common.util;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Set;
+package jdk.tools.jaotc.binformat.pecoff;
-/**
- * Mimic a set implementation with an ArrayList. Beneficial for small sets (compared to
- * {@link HashSet}).
- */
-public class ArraySet<E> extends ArrayList<E> implements Set<E> {
- private static final long serialVersionUID = 4476957522387436654L;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
- public ArraySet() {
- super();
+import jdk.tools.jaotc.binformat.pecoff.PECoff;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_RELOCATION;
+import jdk.tools.jaotc.binformat.pecoff.PECoffByteBuffer;
+
+public class PECoffRelocEntry {
+ ByteBuffer entry;
+
+ public PECoffRelocEntry(int offset, int symno, int type) {
+
+ entry = PECoffByteBuffer.allocate(IMAGE_RELOCATION.totalsize);
+
+ entry.putInt(IMAGE_RELOCATION.VirtualAddress.off, offset);
+ entry.putInt(IMAGE_RELOCATION.SymbolTableIndex.off, symno);
+ entry.putChar(IMAGE_RELOCATION.Type.off, (char)type);
}
- public ArraySet(int i) {
- super(i);
- }
-
- public ArraySet(Collection<? extends E> c) {
- super(c);
- }
-
- @Override
- public boolean add(E e) {
- // avoid duplicated entries
- if (contains(e)) {
- return false;
- }
- return super.add(e);
+ public byte[] getArray() {
+ return entry.array();
}
}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffRelocTable.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffRelocTable.java
new file mode 100644
index 00000000000..c51b9e8710a
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffRelocTable.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.pecoff;
+
+import java.util.ArrayList;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.pecoff.PECoff;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_RELOCATION;
+import jdk.tools.jaotc.binformat.pecoff.PECoffRelocEntry;
+import jdk.tools.jaotc.binformat.pecoff.PECoffByteBuffer;
+
+public class PECoffRelocTable {
+ ArrayList<ArrayList<PECoffRelocEntry>> relocEntries;
+
+ public PECoffRelocTable(int numsects) {
+ relocEntries = new ArrayList<ArrayList<PECoffRelocEntry>>(numsects);
+ for (int i = 0; i < numsects; i++)
+ relocEntries.add(new ArrayList<PECoffRelocEntry>());
+ }
+
+ public void createRelocationEntry(int sectindex,
+ int offset,
+ int symno,
+ int type) {
+
+ PECoffRelocEntry entry = new PECoffRelocEntry(offset,
+ symno,
+ type);
+ relocEntries.get(sectindex).add(entry);
+ }
+
+ public int getAlign() { return (4); }
+
+ public int getNumRelocs(int section_index) {
+ return relocEntries.get(section_index).size();
+ }
+
+ // Return the relocation entries for a single section
+ // or null if no entries added to section
+ public byte [] getRelocData(int section_index) {
+ ArrayList<PECoffRelocEntry> entryList = relocEntries.get(section_index);
+ int entryCount = entryList.size();
+ int allocCount = entryCount;
+
+ if (entryCount == 0)
+ return null;
+
+ if (entryCount > 0xFFFF)
+ allocCount++;
+
+ ByteBuffer relocData = PECoffByteBuffer.allocate(allocCount * IMAGE_RELOCATION.totalsize);
+
+ // If number of relocs exceeds 65K, add the real size
+ // in a dummy first reloc entry
+ if (entryCount > 0xFFFF) {
+ PECoffRelocEntry entry = new PECoffRelocEntry(allocCount, 0, 0);
+ relocData.put(entry.getArray());
+ }
+
+ // Copy each entry to a single ByteBuffer
+ for (int i = 0; i < entryCount; i++) {
+ PECoffRelocEntry entry = entryList.get(i);
+ relocData.put(entry.getArray());
+ }
+
+ return (relocData.array());
+ }
+}
+
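getRelocData above applies the COFF convention for sections with more than 0xFFFF relocations: the section header can only hold a 16-bit count, so an extra leading relocation record carries the real count and the header is flagged with the overflow bit (see PECoffSection.setRelcount below). A minimal sketch of the resulting blob size, assuming the 10-byte IMAGE_RELOCATION record size from the PE/COFF spec rather than the totalsize constant used in the patch:

```java
// Sketch only: size in bytes of the relocation data emitted for one section,
// assuming 10-byte IMAGE_RELOCATION records (spec value, not read from PECoff).
class RelocBlobSizeSketch {
    static int relocBlobSize(int entryCount) {
        int records = entryCount;
        if (entryCount > 0xFFFF) {
            records++;                 // dummy first record holds the real count
        }
        return records * 10;
    }

    public static void main(String[] args) {
        System.out.println(relocBlobSize(3));        // 30
        System.out.println(relocBlobSize(0x10000));  // (65536 + 1) * 10 = 655370
    }
}
```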
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSection.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSection.java
new file mode 100644
index 00000000000..0e05fdb830a
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSection.java
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.pecoff;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.pecoff.PECoff;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_SECTION_HEADER;
+import jdk.tools.jaotc.binformat.pecoff.PECoffByteBuffer;
+
+public class PECoffSection {
+ ByteBuffer section;
+ byte [] data;
+ boolean hasrelocations;
+ int sectionIndex;
+ int align;
+
+ public PECoffSection(String sectName, byte [] sectData, int sectFlags,
+ boolean hasRelocations, int sectIndex) {
+
+ section = PECoffByteBuffer.allocate(IMAGE_SECTION_HEADER.totalsize);
+
+ // bug: If JVM.oop.got section is empty, VM exits since JVM.oop.got
+ // symbol ends up as external forwarded reference.
+ if (sectData.length == 0) sectData = new byte[8];
+
+ // Copy only Max allowed bytes to Section Entry
+ byte [] Name = sectName.getBytes();
+ int max = Name.length <= IMAGE_SECTION_HEADER.Name.sz ?
+ Name.length : IMAGE_SECTION_HEADER.Name.sz;
+
+ section.put(Name, IMAGE_SECTION_HEADER.Name.off, max);
+
+ section.putInt(IMAGE_SECTION_HEADER.VirtualSize.off, 0);
+ section.putInt(IMAGE_SECTION_HEADER.VirtualAddress.off, 0);
+ section.putInt(IMAGE_SECTION_HEADER.SizeOfRawData.off, sectData.length);
+ section.putInt(IMAGE_SECTION_HEADER.PointerToLinenumbers.off, 0);
+ section.putChar(IMAGE_SECTION_HEADER.NumberOfLinenumbers.off, (char)0);
+
+ section.putInt(IMAGE_SECTION_HEADER.Characteristics.off, sectFlags);
+
+ // Extract alignment from Characteristics field
+ int alignshift = (sectFlags & IMAGE_SECTION_HEADER.IMAGE_SCN_ALIGN_MASK) >>
+ IMAGE_SECTION_HEADER.IMAGE_SCN_ALIGN_SHIFT;
+
+ // Use 8 byte alignment if not specified
+ if (alignshift == 0)
+ alignshift = 3;
+ else
+ --alignshift;
+
+ align = 1 << alignshift;
+
+ data = sectData;
+ hasrelocations = hasRelocations;
+ sectionIndex = sectIndex;
+ }
+
+ public long getSize() {
+ return section.getInt(IMAGE_SECTION_HEADER.SizeOfRawData.off);
+ }
+
+ public int getDataAlign() {
+ return (align);
+ }
+
+ // Alignment requirements for the IMAGE_SECTION_HEADER structures
+ public static int getShdrAlign() {
+ return (4);
+ }
+
+ public byte[] getArray() {
+ return section.array();
+ }
+
+ public byte[] getDataArray() {
+ return data;
+ }
+
+ public void setOffset(long offset) {
+ section.putInt(IMAGE_SECTION_HEADER.PointerToRawData.off, (int)offset);
+ }
+
+ public long getOffset() {
+ return (section.getInt(IMAGE_SECTION_HEADER.PointerToRawData.off));
+ }
+
+ public void setReloff(int offset) {
+ section.putInt(IMAGE_SECTION_HEADER.PointerToRelocations.off, offset);
+ }
+
+ public void setRelcount(int count) {
+ // If the number of relocs is larger than 65K, then set
+ // the overflow bit. The real count will be written to
+ // the first reloc entry for this section.
+ if (count > 0xFFFF) {
+ int flags;
+ section.putChar(IMAGE_SECTION_HEADER.NumberOfRelocations.off, (char)0xFFFF);
+ flags = section.getInt(IMAGE_SECTION_HEADER.Characteristics.off);
+ flags |= IMAGE_SECTION_HEADER.IMAGE_SCN_LNK_NRELOC_OVFL;
+ section.putInt(IMAGE_SECTION_HEADER.Characteristics.off, flags);
+ }
+ else {
+ section.putChar(IMAGE_SECTION_HEADER.NumberOfRelocations.off, (char)count);
+ }
+ }
+
+ public boolean hasRelocations() {
+ return hasrelocations;
+ }
+
+ public int getSectionId() {
+ return sectionIndex;
+ }
+
+}
+
+
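The PECoffSection constructor above recovers the section alignment from the Characteristics flags: the PE/COFF IMAGE_SCN_ALIGN field stores log2(alignment) + 1, with 0 meaning unspecified, which the constructor defaults to 8-byte alignment. A sketch of that decoding, with the mask and shift values taken from the PE/COFF spec as an assumption (the patch reads them from IMAGE_SECTION_HEADER):

```java
// Sketch of the alignment decoding in the PECoffSection constructor.
// 0x00F00000 / 20 are the spec values for the IMAGE_SCN_ALIGN field; the patch
// uses IMAGE_SECTION_HEADER.IMAGE_SCN_ALIGN_MASK / _SHIFT instead.
class SectionAlignSketch {
    static int sectionAlignment(int characteristics) {
        int alignshift = (characteristics & 0x00F00000) >> 20;
        if (alignshift == 0) {
            alignshift = 3;            // unspecified: default to 8-byte alignment
        } else {
            --alignshift;              // stored value is log2(alignment) + 1
        }
        return 1 << alignshift;
    }

    public static void main(String[] args) {
        System.out.println(sectionAlignment(0x00500000)); // IMAGE_SCN_ALIGN_16BYTES -> 16
        System.out.println(sectionAlignment(0));          // unspecified -> 8
    }
}
```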
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSymbol.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSymbol.java
new file mode 100644
index 00000000000..c305dbe071d
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSymbol.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.pecoff;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import jdk.tools.jaotc.binformat.NativeSymbol;
+import jdk.tools.jaotc.binformat.pecoff.PECoff;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_SYMBOL;
+import jdk.tools.jaotc.binformat.pecoff.PECoffByteBuffer;
+
+public class PECoffSymbol extends NativeSymbol {
+ ByteBuffer sym;
+
+ public PECoffSymbol(int symbolindex, int strindex, byte type, byte storageclass,
+ byte sectindex, long offset, long size) {
+ super(symbolindex);
+ sym = PECoffByteBuffer.allocate(IMAGE_SYMBOL.totalsize);
+
+ // We don't use short names
+ sym.putInt(IMAGE_SYMBOL.Short.off, 0);
+
+ sym.putInt(IMAGE_SYMBOL.Long.off, strindex);
+ sym.putInt(IMAGE_SYMBOL.Value.off, (int)offset);
+
+ // Section indexes start at 1 but we manage the index internally
+ // as 0 relative except in this structure
+ sym.putChar(IMAGE_SYMBOL.SectionNumber.off, (char)(sectindex+1));
+
+ sym.putChar(IMAGE_SYMBOL.Type.off, (char)type);
+ sym.put(IMAGE_SYMBOL.StorageClass.off, storageclass);
+ sym.put(IMAGE_SYMBOL.NumberOfAuxSymbols.off, (byte)0);
+ }
+
+ public byte[] getArray() {
+ return sym.array();
+ }
+}
+
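PECoffSymbol always uses the COFF "long name" form: the 8-byte Name field is split into a zero Short half and a Long half holding the string-table offset, and SectionNumber is stored 1-based while the rest of the patch tracks sections 0-based. A sketch of that record layout, with the 18-byte size and field offsets taken from the PE/COFF spec rather than from PECoff.IMAGE_SYMBOL:

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Sketch of the 18-byte IMAGE_SYMBOL record written by PECoffSymbol, using
// spec offsets (an assumption; the patch reads them from PECoff.IMAGE_SYMBOL).
class SymbolRecordSketch {
    static byte[] encode(int strTabOffset, int value, int sectionZeroBased,
                         char type, byte storageClass) {
        ByteBuffer b = ByteBuffer.allocate(18).order(ByteOrder.LITTLE_ENDIAN);
        b.putInt(0, 0);                                  // Name.Short == 0 marks a long name
        b.putInt(4, strTabOffset);                       // Name.Long: offset into string table
        b.putInt(8, value);                              // Value: offset within the section
        b.putShort(12, (short) (sectionZeroBased + 1));  // SectionNumber is 1-based
        b.putChar(14, type);                             // Type
        b.put(16, storageClass);                         // StorageClass
        b.put(17, (byte) 0);                             // NumberOfAuxSymbols
        return b.array();
    }

    public static void main(String[] args) {
        System.out.println(encode(4, 0x40, 0, (char) 0x20, (byte) 2).length); // 18
    }
}
```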
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSymtab.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSymtab.java
new file mode 100644
index 00000000000..34e0ef73f5e
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSymtab.java
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.pecoff;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.ArrayList;
+
+import jdk.tools.jaotc.binformat.pecoff.PECoff;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_SYMBOL;
+import jdk.tools.jaotc.binformat.pecoff.PECoffSymbol;
+import jdk.tools.jaotc.binformat.pecoff.PECoffByteBuffer;
+
+public class PECoffSymtab {
+ ArrayList<PECoffSymbol> symbols = new ArrayList<PECoffSymbol>();
+
+ /**
+ * number of symbols added
+ */
+ int symbolCount;
+
+ /**
+ * String holding symbol table strings
+ */
+ private StringBuilder strTabContent;
+
+ /**
+ * Keeps track of bytes in string table since strTabContent.length()
+ * is number of chars, not bytes.
+ */
+ private int strTabNrOfBytes;
+
+ /**
+ * String holding Linker Directives
+ */
+ private StringBuilder directives;
+
+ public PECoffSymtab() {
+ symbolCount = 0;
+ strTabContent = new StringBuilder();
+ directives = new StringBuilder();
+
+ // The first 4 bytes of the string table contain
+ // the length of the table (including this length field).
+ strTabNrOfBytes = 4;
+
+ // Make room for the 4 byte length field
+ strTabContent.append('\0').append('\0').append('\0').append('\0');
+
+ // Linker Directives start with 3 spaces to signify ANSI
+ directives.append(" ");
+ }
+
+ public PECoffSymbol addSymbolEntry(String name, byte type, byte storageclass,
+ byte secHdrIndex, long offset, long size) {
+ // Get the current symbol index and append symbol name to string table.
+ int index;
+ PECoffSymbol sym;
+
+ if (name.isEmpty()) {
+ index = 0;
+ strTabContent.append('\0');
+ strTabNrOfBytes += 1;
+ sym = new PECoffSymbol(symbolCount, index, type, storageclass, secHdrIndex, offset, size);
+ symbols.add(sym);
+ } else {
+ int nameSize = name.getBytes().length;
+
+ // We can't trust strTabContent.length() since that is
+ // chars (UTF16), keep track of bytes on our own.
+ index = strTabNrOfBytes;
+ // strTabContent.append('_').append(name).append('\0');
+ strTabContent.append(name).append('\0');
+ strTabNrOfBytes += (nameSize + 1);
+
+ sym = new PECoffSymbol(symbolCount, index, type, storageclass, secHdrIndex, offset, size);
+ symbols.add(sym);
+ if (storageclass == IMAGE_SYMBOL.IMAGE_SYM_CLASS_EXTERNAL)
+ addDirective(name, type);
+ }
+ symbolCount++;
+ return (sym);
+ }
+
+ private void addDirective(String name, byte type) {
+ directives.append("/EXPORT:" + name);
+ if(type != IMAGE_SYMBOL.IMAGE_SYM_DTYPE_FUNCTION) {
+ directives.append(",DATA");
+ }
+ directives.append(" ");
+ }
+
+ public int getSymtabCount() {
+ return symbolCount;
+ }
+
+ public int getStrtabSize() {
+ return strTabNrOfBytes;
+ }
+
+ // Return a byte array that contains the symbol table entries
+ public byte[] getSymtabArray() {
+ ByteBuffer symtabData = PECoffByteBuffer.allocate(symbolCount*IMAGE_SYMBOL.totalsize);
+ symtabData.order(ByteOrder.LITTLE_ENDIAN);
+
+ // copy all symbols
+ for (int i = 0; i < symbolCount; i++ ) {
+ PECoffSymbol sym = symbols.get(i);
+ byte [] arr = sym.getArray();
+ symtabData.put(arr);
+ }
+ return (symtabData.array());
+ }
+
+ // Return the string table array
+ public byte[] getStrtabArray() {
+ byte [] strs = strTabContent.toString().getBytes();
+
+ // Update the size of the string table
+ ByteBuffer buff = ByteBuffer.wrap(strs);
+ buff.order(ByteOrder.LITTLE_ENDIAN);
+ buff.putInt(0, strTabNrOfBytes);
+
+ return (strs);
+ }
+
+ public byte[] getDirectiveArray() {
+ return (directives.toString().getBytes());
+ }
+}
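PECoffSymtab builds two auxiliary blobs alongside the symbol records: a COFF string table whose first 4 bytes hold the total table size including the length field itself (which is why strTabNrOfBytes starts at 4), and a linker-directive string of /EXPORT:name[,DATA] entries for a linker-directives section (where that section is emitted is not shown here). A minimal sketch of the string-table layout under those assumptions:

```java
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;

// Sketch of the COFF string table PECoffSymtab maintains: a 4-byte total-size
// prefix followed by NUL-terminated names, referenced by byte offset.
class StrtabSketch {
    private final ByteArrayOutputStream names = new ByteArrayOutputStream();

    /** Appends a NUL-terminated name and returns its byte offset in the table. */
    int add(String name) {
        int offset = 4 + names.size();               // length field occupies bytes 0..3
        byte[] bytes = name.getBytes(StandardCharsets.UTF_8);
        names.write(bytes, 0, bytes.length);
        names.write(0);                              // terminating NUL
        return offset;
    }

    byte[] toArray() {
        byte[] body = names.toByteArray();
        ByteBuffer out = ByteBuffer.allocate(4 + body.length).order(ByteOrder.LITTLE_ENDIAN);
        out.putInt(4 + body.length);                 // size includes the length field
        out.put(body);
        return out.array();
    }

    public static void main(String[] args) {
        StrtabSketch t = new StrtabSketch();
        System.out.println(t.add("foo"));            // 4
        System.out.println(t.add("bar"));            // 8
        System.out.println(t.toArray().length);      // 12
    }
}
```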
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffTargetInfo.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffTargetInfo.java
new file mode 100644
index 00000000000..9c40f64733f
--- /dev/null
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffTargetInfo.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.tools.jaotc.binformat.pecoff;
+
+import java.nio.ByteOrder;
+import jdk.tools.jaotc.binformat.pecoff.PECoff;
+import jdk.tools.jaotc.binformat.pecoff.PECoff.IMAGE_FILE_HEADER;
+
+/**
+ * Class that abstracts PECOFF target details.
+ *
+ */
+public class PECoffTargetInfo {
+ /**
+ * Target architecture.
+ */
+ private static final char arch;
+
+ /**
+ * Target OS string.
+ */
+ private static String osName;
+
+ static {
+ // Find the target arch details
+ String archStr = System.getProperty("os.arch").toLowerCase();
+ if (ByteOrder.nativeOrder() != ByteOrder.LITTLE_ENDIAN) {
+ System.out.println("Only Little Endian byte order supported!");
+ }
+
+ if (archStr.equals("amd64") || archStr.equals("x86_64")) {
+ arch = IMAGE_FILE_HEADER.IMAGE_FILE_MACHINE_AMD64;
+ } else {
+ System.out.println("Unsupported architecture " + archStr);
+ arch = IMAGE_FILE_HEADER.IMAGE_FILE_MACHINE_UNKNOWN;
+ }
+
+ osName = System.getProperty("os.name").toLowerCase();
+ if (!osName.contains("windows")) {
+ System.out.println("Unsupported Operating System " + osName);
+ osName = "Unknown";
+ }
+ }
+
+ public static char getPECoffArch() {
+ return arch;
+ }
+
+ public static String getOsName() {
+ return osName;
+ }
+}
+
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/ELFSymbol.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/ELFSymbol.java
deleted file mode 100644
index ddad5a018da..00000000000
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/ELFSymbol.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.tools.jaotc.jnilibelf;
-
-/**
- * This class represents a {@code Elf32_Sym} or {@code Elf64_Sym} as defined in {@code elf.h}.
- */
-public class ELFSymbol {
- /** Symbol name. */
- private final String name;
-
- /** String table index. */
- private final int index;
-
- /** Native memory address of ELF sym entry. */
- private final Pointer address;
- private final boolean isLocal;
-
- public ELFSymbol(String name, int index, Pointer address, boolean isLocal) {
- this.name = name;
- this.index = index;
- this.address = address;
- this.isLocal = isLocal;
- }
-
- /**
- * @return the name
- */
- public String getName() {
- return name;
- }
-
- /**
- * @return the index
- */
- public int getIndex() {
- return index;
- }
-
- /**
- * @return the address
- */
- public Pointer getAddress() {
- return address;
- }
-
- @Override
- public String toString() {
- return "name=" + name + ", index=" + index + ", address=" + address;
- }
-
- public boolean isLocal() {
- return isLocal;
- }
-}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNIELFContainer.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNIELFContainer.java
deleted file mode 100644
index 64d699f0371..00000000000
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNIELFContainer.java
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.tools.jaotc.jnilibelf;
-
-import static jdk.tools.jaotc.jnilibelf.UnsafeAccess.UNSAFE;
-
-import java.io.File;
-import java.nio.ByteBuffer;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import jdk.tools.jaotc.jnilibelf.JNILibELFAPI.ELF;
-import jdk.tools.jaotc.jnilibelf.JNILibELFAPI.LibELF;
-import jdk.tools.jaotc.jnilibelf.JNILibELFAPI.LibELF.Elf_Type;
-
-/**
- * A class abstraction of an ELF file.
- *
- */
-public class JNIELFContainer {
-
- private String outputFileName;
- private File outFile;
- private int outFileDesc;
-
- /**
- * Pointer to Elf file. This is the same as struct Elf found in libelf.h
- */
- private Pointer elfPtr;
-
- /**
- * Class of the ELF container - one of ELFCLASS32 or ELFCLASS64.
- */
- private final int elfClass;
-
- /**
- * Pointer to ELF Header.
- */
- private Pointer ehdrPtr;
-
- /**
- * Pointer to Program Header.
- */
- private Pointer phdrPtr;
-
- /**
- * String holding .shstrtab contents.
- */
- private String shStrTabContent = "";
-
- /**
- * Map of local symbol indexes to ELF symbol entries.
- */
- private List<ELFSymbol> localSymbolIndex = new ArrayList<>();
-
- /**
- * Map of global symbol indexes to ELF symbol entries.
- */
- private List<ELFSymbol> globalSymbolIndex = new ArrayList<>();
-
- /**
- * String holding .strtab contents.
- */
- private StringBuilder strTabContent = new StringBuilder();
-
- /**
- * Keeps track of nr of bytes in .strtab since strTabContent.length() is number of chars, not
- * bytes.
- */
- private int strTabNrOfBytes = 0;
-
- /**
- * A hashtable that holds (section-name, relocation-table) pairs. For example, [(".rela.text",
- * rela-text-reloc-entries), (".rela.plt", rela-plt-reloc-entries), ...].
- */
- private Map<ELFContainer, ArrayList<Pointer>> relocTables = new HashMap<>();
-
- /**
- * Create reloca; 0 => false and non-zero => true.
- */
- private final int createReloca;
-
- /**
- * Construct an ELFContainer in preparation for a disk image with file {@code prefix}.
- *
- * @param fileName name of ELF file to be created
- */
- public JNIELFContainer(String fileName, String aotVersion) {
- // Check for version compatibility
- if (!JNILibELFAPI.elfshim_version().equals(aotVersion)) {
- throw new InternalError("libelfshim version mismatch: " + JNILibELFAPI.elfshim_version() + " vs " + aotVersion);
- }
-
- elfClass = JNIELFTargetInfo.getELFClass();
- createReloca = JNIELFTargetInfo.createReloca();
- outputFileName = fileName;
- }
-
- /**
- * Get the local ELF symbol table.
- *
- * @return local symbol table
- */
- public List<ELFSymbol> getLocalSymbols() {
- return localSymbolIndex;
- }
-
- /**
- * Get the global ELF symbol table.
- *
- * @return list of global ELF symbol table entries
- */
- public List<ELFSymbol> getGlobalSymbols() {
- return globalSymbolIndex;
- }
-
- /**
- * Get string table content (.strtab).
- *
- * @return string table content
- */
- public String getStrTabContent() {
- return strTabContent.toString();
- }
-
- /**
- * Get section header string table content (.shstrtab).
- *
- * @return section header string table content
- */
- public String getShStrTabContent() {
- return shStrTabContent;
- }
-
- /**
- * Get relocation tables.
- *
- * @return relocation tables
- */
- public Map<ELFContainer, ArrayList<Pointer>> getRelocTables() {
- return relocTables;
- }
-
- /**
- * Get the index of first non-local symbol in symbol table.
- *
- * @return symbol table index
- */
- public int getFirstNonLocalSymbolIndex() {
- return localSymbolIndex.size();
- }
-
- /**
- * Create ELF header of type {@code ececType}.
- *
- * @param type type of ELF executable
- */
- public void createELFHeader(int type) {
- // Check for version compatibility
- if (JNILibELFAPI.elf_version(ELF.EV_CURRENT) == ELF.EV_NONE) {
- throw new InternalError("ELF version mismatch");
- }
-
- outFile = constructRelocFile(outputFileName);
- // Open a temporary file for the shared library to be created
- // TODO: Revisit file permissions; need to add execute permission
- outFileDesc = JNILibELFAPI.open_rw(outFile.getPath());
-
- if (outFileDesc == -1) {
- System.out.println("Failed to open file " + outFile.getPath() + " to write relocatable object.");
- }
-
- elfPtr = JNILibELFAPI.elf_begin(outFileDesc, LibELF.Elf_Cmd.ELF_C_WRITE.intValue(), new Pointer(0L));
- if (elfPtr == null) {
- throw new InternalError("elf_begin failed");
- }
-
- // Allocate new Ehdr of current architecture class
-
- ehdrPtr = JNILibELFAPI.gelf_newehdr(elfPtr, elfClass);
-
- JNILibELFAPI.ehdr_set_data_encoding(ehdrPtr, JNIELFTargetInfo.getELFEndian());
- JNILibELFAPI.set_Ehdr_e_machine(elfClass, ehdrPtr, JNIELFTargetInfo.getELFArch());
- JNILibELFAPI.set_Ehdr_e_type(elfClass, ehdrPtr, type);
- JNILibELFAPI.set_Ehdr_e_version(elfClass, ehdrPtr, ELF.EV_CURRENT);
- }
-
- /**
- * If the file name has a .so extension, replace it with .o extension. Else just add .o
- * extension
- *
- * @param fileName
- * @return File object
- */
- private static File constructRelocFile(String fileName) {
- File relocFile = new File(fileName);
- if (relocFile.exists()) {
- if (!relocFile.delete()) {
- throw new InternalError("Failed to delete existing " + fileName + " file");
- }
- }
- return relocFile;
- }
-
- /**
- * Create {@code count} number of Program headers.
- *
- * @param count number of program headers to create
- * @return true upon success; false upon failure
- */
- public boolean createProgramHeader(int count) {
- phdrPtr = JNILibELFAPI.gelf_newphdr(elfPtr, count);
- if (phdrPtr == null) {
- System.out.println("gelf_newphdr error");
- return false;
- }
- return true;
- }
-
- /**
- * Set program header to be of type self.
- *
- * @return true
- */
- public boolean setProgHdrTypeToSelf() {
- // Set program header to be of type self
- JNILibELFAPI.phdr_set_type_self(elfClass, ehdrPtr, phdrPtr);
- // And thus mark it as dirty so that elfUpdate can recompute the structures
- JNILibELFAPI.elf_flagphdr(elfPtr, LibELF.Elf_Cmd.ELF_C_SET.intValue(), LibELF.ELF_F_DIRTY);
- // TODO: Error checking; look at the return value of elf_update
- // and call elf_errmsg appropriately.
- return true;
- }
-
- /**
- * Create a section. The corresponding section header and section data are created by calling
- * the necessary libelf APIs. The section that is created is inserted into the ELF container.
- *
- * @param secName name of the section
- * @param scnData section data
- * @param dataType data type
- * @param align section alignment
- * @param scnType section type
- * @param scnFlags section flags
- * @param scnLink sh_link field of Elf{32,64}_Shdr
- * @param scnInfo sh_info field of Elf{32,64}_Shdr
- * @return section index
- */
- public int createSection(String secName, byte[] scnData, Elf_Type dataType, int align, int scnType, int scnFlags, int scnLink, int scnInfo) {
- // Create a new section
- Pointer scnPtr = JNILibELFAPI.elf_newscn(elfPtr);
- if (scnPtr == null) {
- throw new InternalError("elf_newscn error");
- }
-
- // Allocate section data for the section
- Pointer scnDataPtr = JNILibELFAPI.elf_newdata(scnPtr);
- if (scnDataPtr == null) {
- String errMsg = JNILibELFAPI.elf_errmsg(-1);
- throw new InternalError("elf_newdata error: " + errMsg);
- }
-
- // Get the pointer to section header associated with the new section
- Pointer scnHdrPtr = JNILibELFAPI.elf64_getshdr(scnPtr);
-
- // Add name of the section to section name string
- // If secName is null, point the name to the 0th index
- // that holds `\0'
- byte[] modScnData;
- if (secName.isEmpty()) {
- JNILibELFAPI.set_Shdr_sh_name(elfClass, scnHdrPtr, 0);
- modScnData = scnData;
- } else {
- if (secName.equals(".shstrtab")) {
- // Modify .shstrtab data by inserting '\0' at index 0
- String shstrtabSecName = ".shstrtab" + '\0';
- // Additional byte for the '\0' at position 0
- ByteBuffer nbuf = ByteBuffer.allocate(scnData.length + 1 + shstrtabSecName.length());
- nbuf.put(0, (byte) 0);
- nbuf.position(1);
- nbuf.put(scnData);
- nbuf.position(scnData.length + 1);
- // Add the section name ".shstrtab" to its own data
- nbuf.put(shstrtabSecName.getBytes(StandardCharsets.UTF_8));
- modScnData = nbuf.array();
- JNILibELFAPI.set_Shdr_sh_name(elfClass, scnHdrPtr, scnData.length + 1);
- // Set strtab section index
- JNILibELFAPI.set_Ehdr_e_shstrndx(elfClass, ehdrPtr, JNILibELFAPI.elf_ndxscn(scnPtr));
- } else if (secName.equals(".strtab")) {
- // Modify strtab section data to insert '\0' at position 0.
- // Additional byte for the '\0' at position 0
- ByteBuffer nbuf = ByteBuffer.allocate(scnData.length + 1);
- nbuf.put(0, (byte) 0);
- nbuf.position(1);
- nbuf.put(scnData);
- modScnData = nbuf.array();
- // Set the sh_name
- JNILibELFAPI.set_Shdr_sh_name(elfClass, scnHdrPtr, shStrTabContent.length() + 1);
- // Add scnName to stringList
- shStrTabContent += secName + '\0';
- } else {
- // Set the sh_name
- JNILibELFAPI.set_Shdr_sh_name(elfClass, scnHdrPtr, shStrTabContent.length() + 1);
- // Add scnName to stringList
- shStrTabContent += secName + '\0';
- modScnData = scnData;
- }
- }
-
- final int scnDataBufSize = modScnData.length;
-
- Pointer scnDataBufPtr = null;
- if (scnType != ELF.SHT_NOBITS) {
- // Allocate native memory for section data
- final long address = UNSAFE.allocateMemory(scnDataBufSize + 1);
- scnDataBufPtr = new Pointer(address);
- scnDataBufPtr.put(modScnData);
- } else {
- scnDataBufPtr = new Pointer(0L);
- }
-
- // Set data descriptor fields
- JNILibELFAPI.set_Data_d_align(scnDataPtr, align);
- JNILibELFAPI.set_Data_d_buf(scnDataPtr, scnDataBufPtr);
- JNILibELFAPI.set_Data_d_size(scnDataPtr, scnDataBufSize);
- JNILibELFAPI.set_Data_d_off(scnDataPtr, 0);
- JNILibELFAPI.set_Data_d_type(scnDataPtr, dataType.intValue());
- JNILibELFAPI.set_Data_d_version(scnDataPtr, ELF.EV_CURRENT);
-
- JNILibELFAPI.set_Shdr_sh_type(elfClass, scnHdrPtr, scnType);
- JNILibELFAPI.set_Shdr_sh_flags(elfClass, scnHdrPtr, scnFlags);
- JNILibELFAPI.set_Shdr_sh_entsize(elfClass, scnHdrPtr, 0); // TODO: Is this right??
- JNILibELFAPI.set_Shdr_sh_link(elfClass, scnHdrPtr, scnLink);
- JNILibELFAPI.set_Shdr_sh_info(elfClass, scnHdrPtr, scnInfo);
-
- // Add hash section to section pointer list
- int index = JNILibELFAPI.elf_ndxscn(scnPtr);
- return index;
- }
-
- /**
- * Create an ELF symbol entry for a symbol with the given properties.
- *
- * @param name name of the section in which symName is referenced
- * @param type type of symName
- * @param bind binding of symName
- * @param secHdrIndex section header index of the section in which symName is referenced
- * (st_shndx of ELF symbol entry)
- * @param size symName size (st_size of ELF symbol entry)
- * @param value symName value (st_value of ELF symbol entry)
- * @param isLocal true if symbol is local.
- */
- public ELFSymbol createELFSymbolEntry(String name, int type, int bind, int secHdrIndex, int size, int value, boolean isLocal) {
- // Get the current symbol index and append symbol name to string table.
- int index;
- if (name.isEmpty()) {
- index = 0;
- } else {
- // NOTE: The +1 comes from the null symbol!
- // We can't trust strTabContent.length() since that is chars (UTF16), keep track of
- // bytes on our own.
- index = strTabNrOfBytes + 1;
- strTabContent.append(name).append('\0');
- strTabNrOfBytes += name.getBytes(StandardCharsets.UTF_8).length + 1;
- }
-
- // Create ELF symbol entry
- long address = JNILibELFAPI.create_sym_entry(elfClass, index, type, bind, secHdrIndex, size, value);
- if (address == 0) {
- throw new InternalError("create_sym_entry failed");
- }
- Pointer ptr = new Pointer(address);
-
- if (isLocal) {
- final int localIndex = localSymbolIndex.size();
- ELFSymbol symbol = new ELFSymbol(name, localIndex, ptr, isLocal);
- localSymbolIndex.add(symbol);
- return symbol;
- } else {
- final int globalIndex = globalSymbolIndex.size();
- ELFSymbol symbol = new ELFSymbol(name, globalIndex, ptr, isLocal);
- globalSymbolIndex.add(symbol);
- return symbol;
- }
- }
-
- /**
- * Create an ELF relocation entry for given symbol {@code name} to section {@code secname}.
- *
- * @param container the section
- * @param offset offset into the section contents at which the relocation needs to be applied
- * @param type ELF type of the relocation entry
- * @param addend Addend for for relocation of type reloca
- */
- public void createELFRelocationEntry(ELFContainer container, int offset, int type, int addend, ELFSymbol elfSymbol) {
- // Get the index of the symbol.
- int index;
- if (elfSymbol.isLocal()) {
- index = elfSymbol.getIndex();
- } else {
- /*
- * For global symbol entries the index will be offset by the number of local symbols
- * which will be listed first in the symbol table.
- */
- index = elfSymbol.getIndex() + localSymbolIndex.size();
- }
-
- long address = JNILibELFAPI.create_reloc_entry(elfClass, offset, index, type, addend, createReloca);
- if (address == 0) {
- throw new InternalError("create_reloc_entry failed");
- }
- Pointer ptr = new Pointer(address);
- /*
- * If section name associated with this symbol is set to undefined i.e., secname is null,
- * symIndex is undef i.e., 0.
- */
- if (relocTables.get(container) == null) {
- // Allocate a new table and add it to the hash table of reloc tables
- relocTables.put(container, new ArrayList<>());
- }
-
- // Add the entry
- relocTables.get(container).add(ptr);
- }
-
- /**
- * Invokes native libelf function loff_t elf_update (Elf *elfPtr, Elf_Cmd cmd).
- *
- * @param cmd command
- * @return return value of the native function called
- */
- public boolean elfUpdate(LibELF.Elf_Cmd cmd) {
- JNILibELFAPI.elf_update(elfPtr, cmd.intValue());
- // TODO: Error checking; look at the return value of elf_update
- // and call elf_errmsg appropriately.
- return true;
- }
-
- /**
- * Wrapper function that invokes int elf_end (Elf *elfPtr). and closes ELF output file
- * descriptor
- *
- * @return true
- */
- public boolean elfEnd() {
- // Finish ELF processing
- JNILibELFAPI.elf_end(elfPtr);
- // Close file descriptor
- JNILibELFAPI.close(outFileDesc);
- return true;
- }
-}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNIELFRelocation.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNIELFRelocation.java
deleted file mode 100644
index 4c4cffd7abe..00000000000
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNIELFRelocation.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.tools.jaotc.jnilibelf;
-
-/**
- * Class that abstracts ELF relocations.
- *
- */
-public interface JNIELFRelocation {
- int R_UNDEF = -1;
-
- /**
- * x86-specific relocation types.
- *
- */
- public interface I386 {
- /* i386 relocs. */
-
- int R_386_NONE = 0; /* No reloc */
- int R_386_32 = 1; /* Direct 32 bit */
- int R_386_PC32 = 2; /* PC relative 32 bit */
- int R_386_GOT32 = 3; /* 32 bit GOT entry */
- int R_386_PLT32 = 4; /* 32 bit PLT address */
- int R_386_COPY = 5; /* Copy symbol at runtime */
- int R_386_GLOB_DAT = 6; /* Create GOT entry */
- int R_386_JMP_SLOT = 7; /* Create PLT entry */
- int R_386_RELATIVE = 8; /* Adjust by program base */
- int R_386_GOTOFF = 9; /* 32 bit offset to GOT */
- int R_386_GOTPC = 10; /* 32 bit PC relative offset to GOT */
- int R_386_32PLT = 11;
- int R_386_TLS_TPOFF = 14; /* Offset in static TLS block */
- int R_386_TLS_IE = 15; /* Address of GOT entry for static TLS block offset */
- int R_386_TLS_GOTIE = 16; /* GOT entry for static TLS block offset */
- int R_386_TLS_LE = 17; /* Offset relative to static TLS block */
- int R_386_TLS_GD = 18; /* Direct 32 bit for GNU version of general dynamic thread local data */
- int R_386_TLS_LDM = 19; /*
- * Direct 32 bit for GNU version of local dynamic thread local data
- * in LE code
- */
- int R_386_16 = 20;
- int R_386_PC16 = 21;
- int R_386_8 = 22;
- int R_386_PC8 = 23;
- int R_386_TLS_GD_32 = 24; /* Direct 32 bit for general dynamic thread local data */
- int R_386_TLS_GD_PUSH = 25; /* Tag for pushl in GD TLS code */
- int R_386_TLS_GD_CALL = 26; /* Relocation for call to __tls_get_addr() */
- int R_386_TLS_GD_POP = 27; /* Tag for popl in GD TLS code */
- int R_386_TLS_LDM_32 = 28; /* Direct 32 bit for local dynamic thread local data in LE code */
- int R_386_TLS_LDM_PUSH = 29; /* Tag for pushl in LDM TLS code */
- int R_386_TLS_LDM_CALL = 30; /* Relocation for call to __tls_get_addr() in LDM code */
- int R_386_TLS_LDM_POP = 31; /* Tag for popl in LDM TLS code */
- int R_386_TLS_LDO_32 = 32; /* Offset relative to TLS block */
- int R_386_TLS_IE_32 = 33; /* GOT entry for negated static TLS block offset */
- int R_386_TLS_LE_32 = 34; /* Negated offset relative to static TLS block */
- int R_386_TLS_DTPMOD32 = 35; /* ID of module containing symbol */
- int R_386_TLS_DTPOFF32 = 36; /* Offset in TLS block */
- int R_386_TLS_TPOFF32 = 37; /* Negated offset in static TLS block */
- int R_386_SIZE32 = 38; /* 32-bit symbol size */
- int R_386_TLS_GOTDESC = 39; /* GOT offset for TLS descriptor. */
- int R_386_TLS_DESC_CALL = 40; /* Marker of call through TLS descriptor for relaxation. */
- int R_386_TLS_DESC = 41; /*
- * TLS descriptor containing pointer to code and to argument,
- * returning the TLS offset for the symbol.
- */
- int R_386_IRELATIVE = 42; /* Adjust indirectly by program base */
- /* Keep this the last entry. */
- int R_386_NUM = 43;
- }
-
- /**
- * x86_64-specific relocation types.
- */
- public interface X86_64 {
- /* AMD x86-64 relocations. */
- int R_X86_64_NONE = 0; /* No reloc */
- int R_X86_64_64 = 1; /* Direct 64 bit */
- int R_X86_64_PC32 = 2; /* PC relative 32 bit signed */
- int R_X86_64_GOT32 = 3; /* 32 bit GOT entry */
- int R_X86_64_PLT32 = 4; /* 32 bit PLT address */
- int R_X86_64_COPY = 5; /* Copy symbol at runtime */
- int R_X86_64_GLOB_DAT = 6; /* Create GOT entry */
- int R_X86_64_JUMP_SLOT = 7; /* Create PLT entry */
- int R_X86_64_RELATIVE = 8; /* Adjust by program base */
- int R_X86_64_GOTPCREL = 9; /* 32 bit signed PC relative offset to GOT */
- int R_X86_64_32 = 10; /* Direct 32 bit zero extended */
- int R_X86_64_32S = 11; /* Direct 32 bit sign extended */
- int R_X86_64_16 = 12; /* Direct 16 bit zero extended */
- int R_X86_64_PC16 = 13; /* 16 bit sign extended pc relative */
- int R_X86_64_8 = 14; /* Direct 8 bit sign extended */
- int R_X86_64_PC8 = 15; /* 8 bit sign extended pc relative */
- int R_X86_64_DTPMOD64 = 16; /* ID of module containing symbol */
- int R_X86_64_DTPOFF64 = 17; /* Offset in module's TLS block */
- int R_X86_64_TPOFF64 = 18; /* Offset in initial TLS block */
- int R_X86_64_TLSGD = 19; /*
- * 32 bit signed PC relative offset to two GOT entries for GD
- * symbol
- */
- int R_X86_64_TLSLD = 20; /*
- * 32 bit signed PC relative offset to two GOT entries for LD
- * symbol
- */
- int R_X86_64_DTPOFF32 = 21; /* Offset in TLS block */
- int R_X86_64_GOTTPOFF = 22; /*
- * 32 bit signed PC relative offset to GOT entry for IE symbol
- */
- int R_X86_64_TPOFF32 = 23; /* Offset in initial TLS block */
- int R_X86_64_PC64 = 24; /* PC relative 64 bit */
- int R_X86_64_GOTOFF64 = 25; /* 64 bit offset to GOT */
- int R_X86_64_GOTPC32 = 26; /* 32 bit signed pc relative offset to GOT */
- int R_X86_64_GOT64 = 27; /* 64-bit GOT entry offset */
- int R_X86_64_GOTPCREL64 = 28; /* 64-bit PC relative offset to GOT entry */
- int R_X86_64_GOTPC64 = 29; /* 64-bit PC relative offset to GOT */
- int R_X86_64_GOTPLT64 = 30; /* like GOT64, says PLT entry needed */
- int R_X86_64_PLTOFF64 = 31; /* 64-bit GOT relative offset to PLT entry */
- int R_X86_64_SIZE32 = 32; /* Size of symbol plus 32-bit addend */
- int R_X86_64_SIZE64 = 33; /* Size of symbol plus 64-bit addend */
- int R_X86_64_GOTPC32_TLSDESC = 34; /* GOT offset for TLS descriptor. */
- int R_X86_64_TLSDESC_CALL = 35; /*
- * Marker for call through TLS descriptor.
- */
- int R_X86_64_TLSDESC = 36; /* TLS descriptor. */
- int R_X86_64_IRELATIVE = 37; /* Adjust indirectly by program base */
- int R_X86_64_RELATIVE64 = 38; /* 64-bit adjust by program base */
-
- int R_X86_64_NUM = 39;
- }
-}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNIELFTargetInfo.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNIELFTargetInfo.java
deleted file mode 100644
index 8248d6fba29..00000000000
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNIELFTargetInfo.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.tools.jaotc.jnilibelf;
-
-import java.nio.ByteOrder;
-
-import jdk.tools.jaotc.jnilibelf.JNILibELFAPI.ELF;
-
-/**
- * Class that abstracts ELF target details.
- *
- */
-public class JNIELFTargetInfo {
- /**
- * ELF Class of the target.
- */
- private static final int elfClass;
- /**
- * Target architecture.
- */
- private static final int arch;
- /**
- * Architecture endian-ness.
- */
- private static final int endian;
-
- /**
- * Target OS string.
- */
- private static final String osName;
-
- static {
- // Find the target arch details
- String archStr = System.getProperty("os.arch").toLowerCase();
- String datamodelStr = System.getProperty("sun.arch.data.model");
-
- if (datamodelStr.equals("32")) {
- elfClass = ELF.ELFCLASS32;
- } else if (datamodelStr.equals("64")) {
- elfClass = ELF.ELFCLASS64;
- } else {
- System.out.println("Failed to discover ELF class!");
- elfClass = ELF.ELFCLASSNONE;
- }
-
- ByteOrder bo = ByteOrder.nativeOrder();
- if (bo == ByteOrder.LITTLE_ENDIAN) {
- endian = ELF.ELFDATA2LSB;
- } else if (bo == ByteOrder.BIG_ENDIAN) {
- endian = ELF.ELFDATA2MSB;
- } else {
- System.out.println("Failed to discover endian-ness!");
- endian = ELF.ELFDATANONE;
- }
-
- if (archStr.equals("x86")) {
- arch = ELF.EM_386;
- } else if (archStr.equals("amd64") || archStr.equals("x86_64")) {
- arch = ELF.EM_X64_64;
- } else if (archStr.equals("sparcv9")) {
- arch = ELF.EM_SPARCV9;
- } else {
- System.out.println("Unsupported architecture " + archStr);
- arch = ELF.EM_NONE;
- }
-
- osName = System.getProperty("os.name").toLowerCase();
- }
-
- public static int getELFArch() {
- return arch;
- }
-
- public static int getELFClass() {
- return elfClass;
- }
-
- public static int getELFEndian() {
- return endian;
- }
-
- public static String getOsName() {
- return osName;
- }
-
- public static int createReloca() {
- switch (arch) {
- case ELF.EM_X64_64:
- return 1;
- default:
- return 0;
- }
- }
-
- public static int sizeOfSymtabEntry() {
- return JNILibELFAPI.size_of_Sym(elfClass);
- }
-
- public static int sizeOfRelocEntry() {
- if (createReloca() == 1) {
- return JNILibELFAPI.size_of_Rela(elfClass);
- } else {
- return JNILibELFAPI.size_of_Rel(elfClass);
- }
- }
-}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNILibELFAPI.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNILibELFAPI.java
deleted file mode 100644
index 5dc277b16ed..00000000000
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/JNILibELFAPI.java
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.tools.jaotc.jnilibelf;
-
-public class JNILibELFAPI {
-
- static {
- System.loadLibrary("jelfshim");
- }
-
- /**
- * Definitions for file open.
- */
- public static enum OpenFlags {
- O_RDONLY(0x0),
- O_WRONLY(0x1),
- O_RDWR(0x2),
- O_CREAT(0x40);
-
- private final int intVal;
-
- private OpenFlags(int v) {
- intVal = v;
- }
-
- public int intValue() {
- return intVal;
- }
- }
-
- /**
- * Definitions reflecting those in elf.h.
- *
- */
- public interface ELF {
- int EI_NIDENT = 16;
-
- int EI_CLASS = 4; /* File class byte index */
- int ELFCLASSNONE = 0; /* Invalid class */
- int ELFCLASS32 = 1; /* 32-bit objects */
- int ELFCLASS64 = 2; /* 64-bit objects */
- int ELFCLASSNUM = 3;
-
- int EI_DATA = 5; /* Data encoding byte index */
- int ELFDATANONE = 0; /* Invalid data encoding */
- int ELFDATA2LSB = 1; /* 2's complement, little endian */
- int ELFDATA2MSB = 2; /* 2's complement, big endian */
- int ELFDATANUM = 3;
-
- // Legal architecture values for e_machine (add others as needed)
- int EM_NONE = 0; /* No machine */
- int EM_SPARC = 2; /* SUN SPARC */
- int EM_386 = 3; /* Intel 80386 */
- int EM_SPARCV9 = 43; /* SPARC v9 64-bit */
- int EM_X64_64 = 62; /* AMD x86-64 architecture */
-
- /* Legal values for e_type (object file type). */
-
- int ET_NONE = 0; /* No file type */
- int ET_REL = 1; /* Relocatable file */
- int ET_EXEC = 2; /* Executable file */
- int ET_DYN = 3; /* Shared object file */
- int ET_CORE = 4; /* Core file */
- int ET_NUM = 5; /* Number of defined types */
- int ET_LOOS = 0xfe00; /* OS-specific range start */
- int ET_HIOS = 0xfeff; /* OS-specific range end */
- int ET_LOPROC = 0xff00; /* Processor-specific range start */
- int ET_HIPROC = 0xffff; /* Processor-specific range end */
-
- /* Legal values for e_version (version). */
-
- int EV_NONE = 0; /* Invalid ELF version */
- int EV_CURRENT = 1; /* Current version */
- int EV_NUM = 2;
-
- /* Legal values for p_type (segment type). */
-
- int PT_NULL = 0; /* Program header table entry unused */
- int PT_LOAD = 1; /* Loadable program segment */
- int PT_DYNAMIC = 2; /* Dynamic linking information */
- int PT_INTERP = 3; /* Program interpreter */
- int PT_NOTE = 4; /* Auxiliary information */
- int PT_SHLIB = 5; /* Reserved */
- int PT_PHDR = 6; /* Entry for header table itself */
- int PT_TLS = 7; /* Thread-local storage segment */
- int PT_NUM = 8; /* Number of defined types */
- int PT_LOOS = 0x60000000; /* Start of OS-specific */
- int PT_GNU_EH_FRAME = 0x6474e550; /* GCC .eh_frame_hdr segment */
- int PT_GNU_STACK = 0x6474e551; /* Indicates stack executability */
- int PT_GNU_RELRO = 0x6474e552; /* Read-only after relocation */
- int PT_LOSUNW = 0x6ffffffa;
- int PT_SUNWBSS = 0x6ffffffa; /* Sun Specific segment */
- int PT_SUNWSTACK = 0x6ffffffb; /* Stack segment */
- int PT_HISUNW = 0x6fffffff;
- int PT_HIOS = 0x6fffffff; /* End of OS-specific */
- int PT_LOPROC = 0x70000000; /* Start of processor-specific */
- int PT_HIPROC = 0x7fffffff; /* End of processor-specific */
-
- /* Special section indices. */
-
- int SHN_UNDEF = 0; /* Undefined section */
- int SHN_LORESERVE = 0xff00; /* Start of reserved indices */
- int SHN_LOPROC = 0xff00; /* Start of processor-specific */
- int SHN_BEFORE = 0xff00; /* Order section before all others (Solaris). */
- int SHN_AFTER = 0xff01; /* Order section after all others (Solaris). */
- int SHN_HIPROC = 0xff1f; /* End of processor-specific */
- int SHN_LOOS = 0xff20; /* Start of OS-specific */
- int SHN_HIOS = 0xff3f; /* End of OS-specific */
- int SHN_ABS = 0xfff1; /* Associated symbol is absolute */
- int SHN_COMMON = 0xfff2; /* Associated symbol is common */
- int SHN_XINDEX = 0xffff; /* Index is in extra table. */
- int SHN_HIRESERVE = 0xffff; /* End of reserved indices */
-
- /* Legal values for sh_type (section type). */
-
- int SHT_NULL = 0; /* Section header table entry unused */
- int SHT_PROGBITS = 1; /* Program data */
- int SHT_SYMTAB = 2; /* Symbol table */
- int SHT_STRTAB = 3; /* String table */
- int SHT_RELA = 4; /* Relocation entries with addends */
- int SHT_HASH = 5; /* Symbol hash table */
- int SHT_DYNAMIC = 6; /* Dynamic linking information */
- int SHT_NOTE = 7; /* Notes */
- int SHT_NOBITS = 8; /* Program space with no data (bss) */
- int SHT_REL = 9; /* Relocation entries, no addends */
- int SHT_SHLIB = 10; /* Reserved */
- int SHT_DYNSYM = 11; /* Dynamic linker symbol table */
- int SHT_INIT_ARRAY = 14; /* Array of constructors */
- int SHT_FINI_ARRAY = 15; /* Array of destructors */
- int SHT_PREINIT_ARRAY = 16; /* Array of pre-constructors */
- int SHT_GROUP = 17; /* Section group */
- int SHT_SYMTAB_SHNDX = 18; /* Extended section indeces */
- int SHT_NUM = 19; /* Number of defined types. */
- int SHT_LOOS = 0x60000000; /* Start OS-specific. */
- int SHT_GNU_ATTRIBUTES = 0x6ffffff5; /* Object attributes. */
- int SHT_GNU_HASH = 0x6ffffff6; /* GNU-style hash table. */
- int SHT_GNU_LIBLIST = 0x6ffffff7; /* Prelink library list */
- int SHT_CHECKSUM = 0x6ffffff8; /* Checksum for DSO content. */
- int SHT_LOSUNW = 0x6ffffffa; /* Sun-specific low bound. */
- int SHT_SUNW_move = 0x6ffffffa;
- int SHT_SUNW_COMDAT = 0x6ffffffb;
- int SHT_SUNW_syminfo = 0x6ffffffc;
- int SHT_GNU_verdef = 0x6ffffffd; /* Version definition section. */
- int SHT_GNU_verneed = 0x6ffffffe; /* Version needs section. */
- int SHT_GNU_versym = 0x6fffffff; /* Version symbol table. */
- int SHT_HISUNW = 0x6fffffff; /* Sun-specific high bound. */
- int SHT_HIOS = 0x6fffffff; /* End OS-specific type */
- int SHT_LOPROC = 0x70000000; /* Start of processor-specific */
- int SHT_HIPROC = 0x7fffffff; /* End of processor-specific */
- int SHT_LOUSER = 0x80000000; /* Start of application-specific */
- int SHT_HIUSER = 0x8fffffff; /* End of application-specific */
-
- /* Legal values for sh_flags (section flags). */
-
- int SHF_WRITE = (1 << 0); /* Writable */
- int SHF_ALLOC = (1 << 1); /* Occupies memory during execution */
- int SHF_EXECINSTR = (1 << 2); /* Executable */
- int SHF_MERGE = (1 << 4); /* Might be merged */
- int SHF_STRINGS = (1 << 5); /* Contains nul-terminated strings */
- int SHF_INFO_LINK = (1 << 6); /* `sh_info' contains SHT index */
- int SHF_LINK_ORDER = (1 << 7); /* Preserve order after combining */
- int SHF_OS_NONCONFORMING = (1 << 8); /* Non-standard OS specific handling required */
- int SHF_GROUP = (1 << 9); /* Section is member of a group. */
- int SHF_TLS = (1 << 10); /* Section hold thread-local data. */
- int SHF_MASKOS = 0x0ff00000; /* OS-specific. */
- int SHF_MASKPROC = 0xf0000000; /* Processor-specific */
- int SHF_ORDERED = (1 << 30); /* Special ordering requirement (Solaris). */
- int SHF_EXCLUDE = (1 << 31); /*
- * Section is excluded unless referenced or allocated
- * (Solaris).
- */
-
- /* Legal values for ST_BIND subfield of st_info (symbol binding). */
-
- int STB_LOCAL = 0; /* Local symbol */
- int STB_GLOBAL = 1; /* Global symbol */
- int STB_WEAK = 2; /* Weak symbol */
- int STB_NUM = 3; /* Number of defined types. */
- int STB_LOOS = 10; /* Start of OS-specific */
- int STB_GNU_UNIQUE = 10; /* Unique symbol. */
- int STB_HIOS = 12; /* End of OS-specific */
- int STB_LOPROC = 13; /* Start of processor-specific */
- int STB_HIPROC = 15; /* End of processor-specific */
-
- /* Legal values for ST_TYPE subfield of st_info (symbol type). */
-
- int STT_NOTYPE = 0; /* Symbol type is unspecified */
- int STT_OBJECT = 1; /* Symbol is a data object */
- int STT_FUNC = 2; /* Symbol is a code object */
- int STT_SECTION = 3; /* Symbol associated with a section */
- int STT_FILE = 4; /* Symbol's name is file name */
- int STT_COMMON = 5; /* Symbol is a common data object */
- int STT_TLS = 6; /* Symbol is thread-local data object */
- int STT_NUM = 7; /* Number of defined types. */
- int STT_LOOS = 10; /* Start of OS-specific */
- int STT_GNU_IFUNC = 10; /* Symbol is indirect code object */
- int STT_HIOS = 12; /* End of OS-specific */
- int STT_LOPROC = 13; /* Start of processor-specific */
- int STT_HIPROC = 15; /* End of processor-specific */
- }
-
- /**
- * Definitions reflecting those in libelf.h.
- *
- */
- public interface LibELF {
-
- public static enum Elf_Cmd {
- ELF_C_NULL("NULL"), /* Nothing, terminate, or compute only. */
- ELF_C_READ("READ"), /* Read .. */
- ELF_C_RDWR("RDWR"), /* Read and write .. */
- ELF_C_WRITE("WRITE"), /* Write .. */
- ELF_C_CLR("CLR"), /* Clear flag. */
- ELF_C_SET("SET"), /* Set flag. */
- ELF_C_FDDONE("FDDONE"), /*
- * Signal that file descriptor will not be used anymore.
- */
- ELF_C_FDREAD("FDREAD"), /*
- * Read rest of data so that file descriptor is not used
- * anymore.
- */
- /* The following are Linux-only extensions. */
- ELF_C_READ_MMAP("READ_MMAP"), /* Read, but mmap the file if possible. */
- ELF_C_RDWR_MMAP("RDWR_MMAP"), /* Read and write, with mmap. */
- ELF_C_WRITE_MMAP("WRITE_MMAP"), /* Write, with mmap. */
- ELF_C_READ_MMAP_PRIVATE("READ_MMAP_PRIVATE"), /*
- * Read, but memory is writable, results
- * are not written to the file.
- */
- ELF_C_EMPTY("EMPTY"), /* Copy basic file data but not the content. */
- /* The following are SunOS-only enums */
- ELF_C_WRIMAGE("WRIMAGE"),
- ELF_C_IMAGE("IMAGE"),
- /* Common last entry. */
- ELF_C_NUM("NUM");
- private final int intVal;
- private final String name;
-
- private Elf_Cmd(String cmd) {
- name = "ELF_C_" + cmd;
- switch (cmd) {
- case "NULL":
- // ELF_C_NULL has the same enum ordinal on both Linux and SunOS
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_NULL.ordinal();
- break;
-
- case "READ":
- // ELF_C_READ has the same enum ordinal on both Linux and SunOS
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_READ.ordinal();
- break;
-
- // Enums defined in libelf.h of both Linux and SunOS
- // but with different ordinals
- case "RDWR":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_RDWR.ordinal();
- } else if (JNIELFTargetInfo.getOsName().equals("sunos")) {
- intVal = jdk.tools.jaotc.jnilibelf.sunos.Elf_Cmd.ELF_C_RDWR.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- case "WRITE":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_WRITE.ordinal();
- } else if (JNIELFTargetInfo.getOsName().equals("sunos")) {
- intVal = jdk.tools.jaotc.jnilibelf.sunos.Elf_Cmd.ELF_C_WRITE.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- case "CLR":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_CLR.ordinal();
- } else if (JNIELFTargetInfo.getOsName().equals("sunos")) {
- intVal = jdk.tools.jaotc.jnilibelf.sunos.Elf_Cmd.ELF_C_CLR.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- case "SET":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_SET.ordinal();
- } else if (JNIELFTargetInfo.getOsName().equals("sunos")) {
- intVal = jdk.tools.jaotc.jnilibelf.sunos.Elf_Cmd.ELF_C_SET.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- case "FDDONE":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_FDDONE.ordinal();
- } else if (JNIELFTargetInfo.getOsName().equals("sunos")) {
- intVal = jdk.tools.jaotc.jnilibelf.sunos.Elf_Cmd.ELF_C_FDDONE.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- case "FDREAD":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_FDREAD.ordinal();
- } else if (JNIELFTargetInfo.getOsName().equals("sunos")) {
- intVal = jdk.tools.jaotc.jnilibelf.sunos.Elf_Cmd.ELF_C_FDREAD.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- case "NUM":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_NUM.ordinal();
- } else if (JNIELFTargetInfo.getOsName().equals("sunos")) {
- intVal = jdk.tools.jaotc.jnilibelf.sunos.Elf_Cmd.ELF_C_NUM.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- // Linux-only Elf_Cmd enums
- case "READ_MMAP":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_READ_MMAP.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- case "RDWR_MMAP":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_RDWR_MMAP.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- case "WRITE_MMAP":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_WRITE_MMAP.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- case "READ_MMAP_PRIVATE":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_READ_MMAP_PRIVATE.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
-
- case "EMPTY":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.linux.Elf_Cmd.ELF_C_EMPTY.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
- // SunOS-only Elf_Cmd enums
- case "WRIMAGE":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.sunos.Elf_Cmd.ELF_C_WRIMAGE.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
- case "IMAGE":
- if (JNIELFTargetInfo.getOsName().equals("linux")) {
- intVal = jdk.tools.jaotc.jnilibelf.sunos.Elf_Cmd.ELF_C_IMAGE.ordinal();
- } else {
- // Unsupported platform
- intVal = -1;
- }
- break;
- default:
- intVal = -1;
- }
- }
-
- public int intValue() {
- assert intVal != -1 : "enum " + name + "not supported on " + JNIELFTargetInfo.getOsName();
- return intVal;
- }
-
- public String getName() {
- return name;
- }
- }
-
- public static enum Elf_Type {
- ELF_T_BYTE(0), /* unsigned char */
- ELF_T_ADDR(1), /* Elf32_Addr, Elf64_Addr, ... */
- ELF_T_DYN(2), /* Dynamic section record. */
- ELF_T_EHDR(3), /* ELF header. */
- ELF_T_HALF(4), /* Elf32_Half, Elf64_Half, ... */
- ELF_T_OFF(5), /* Elf32_Off, Elf64_Off, ... */
- ELF_T_PHDR(6), /* Program header. */
- ELF_T_RELA(7), /* Relocation entry with addend. */
- ELF_T_REL(8), /* Relocation entry. */
- ELF_T_SHDR(9), /* Section header. */
- ELF_T_SWORD(10), /* Elf32_Sword, Elf64_Sword, ... */
- ELF_T_SYM(11), /* Symbol record. */
- ELF_T_WORD(12), /* Elf32_Word, Elf64_Word, ... */
- ELF_T_XWORD(13), /* Elf32_Xword, Elf64_Xword, ... */
- ELF_T_SXWORD(14), /* Elf32_Sxword, Elf64_Sxword, ... */
- ELF_T_VDEF(15), /* Elf32_Verdef, Elf64_Verdef, ... */
- ELF_T_VDAUX(16), /* Elf32_Verdaux, Elf64_Verdaux, ... */
- ELF_T_VNEED(17), /* Elf32_Verneed, Elf64_Verneed, ... */
- ELF_T_VNAUX(18), /* Elf32_Vernaux, Elf64_Vernaux, ... */
- ELF_T_NHDR(19), /* Elf32_Nhdr, Elf64_Nhdr, ... */
- ELF_T_SYMINFO(20), /* Elf32_Syminfo, Elf64_Syminfo, ... */
- ELF_T_MOVE(21), /* Elf32_Move, Elf64_Move, ... */
- ELF_T_LIB(22), /* Elf32_Lib, Elf64_Lib, ... */
- ELF_T_GNUHASH(23), /* GNU-style hash section. */
- ELF_T_AUXV(24), /* Elf32_auxv_t, Elf64_auxv_t, ... */
- /* Keep this the last entry. */
- ELF_T_NUM(25);
-
- private final int intVal;
-
- private Elf_Type(int v) {
- intVal = v;
- }
-
- public int intValue() {
- return intVal;
- }
- }
-
- /* Flags for the ELF structures. */
- int ELF_F_DIRTY = 0x1;
- int ELF_F_LAYOUT = 0x4;
- int ELF_F_PERMISSIVE = 0x8;
-
- public static enum Elf_Kind {
- ELF_K_NONE(0), /* Unknown. */
- ELF_K_AR(1), /* Archive. */
- ELF_K_COFF(2), /* Stupid old COFF. */
- ELF_K_ELF(3), /* ELF file. */
- /* Keep this the last entry. */
- ELF_K_NUM(4);
- private final int intVal;
-
- private Elf_Kind(int v) {
- intVal = v;
- }
-
- public int intValue() {
- return intVal;
- }
- }
- }
-
- /**
- * Invoke native libelf function unsigned int elf_version (unsigned int v).
- *
- * @param v version
- * @return return value of native call
- */
- // Checkstyle: stop method name check
- static native int elf_version(int v);
-
- /**
- * Return version recorded in libelfshim.
- *
- * @return return version string
- */
- // Checkstyle: stop method name check
- static native String elfshim_version();
-
- /**
- * Invoke native libelf function Elf *elf_begin (int fildes, Elf_Cmd cmd, Elf *elfPtr).
- *
- * @param fildes open file descriptor
- * @param elfCRead command
- * @param elfHdrPtr pointer to ELF header
- * @return return value of native call
- */
- static native Pointer elf_begin(int fildes, int elfCRead, Pointer elfHdrPtr);
-
- /**
- * Invoke native libelf function elf_end (Elf *elfPtr).
- *
- * @param elfPtr pointer to ELF header
- * @return return value of native call
- */
- static native int elf_end(Pointer elfPtr);
-
- /**
- * Invoke native libelf function elf_end (Elf *elfPtr).
- *
- * @param elfPtr pointer to ELF header
- * @return return value of native call
- */
- static native int elf_kind(Pointer elfPtr);
-
- /**
- * Invoke native libelf function unsigned int elf_flagphdr (Elf *elf, Elf_Cmd cmd, unsigned int
- * flags).
- *
- * @param elfPtr Pointer to ELF descriptor
- * @param cmd command
- * @param flags flags
- * @return return value of native call
- */
- static native int elf_flagphdr(Pointer elfPtr, int cmd, int flags);
-
- /**
- * Invoke native libelf function Elf_Scn *elf_newscn (Elf *elfPtr).
- *
- * @param elfPtr Elf header pointer
- * @return return value of native call
- */
- static native Pointer elf_newscn(Pointer elfPtr);
-
- /**
- * Invoke native libelf function Elf_Data *elf_newdata (Elf_Scn *scn).
- *
- * @param scnPtr pointer to section for which the new data descriptor is to be created
- * @return return value of native call
- */
- static native Pointer elf_newdata(Pointer scnPtr);
-
- /**
- * Invoke native libelf function Elf64_Shdr *elf64_getshdr (Elf_Scn *scnPtr).
- *
- * @param scnPtr pointer to section whose header information is to be retrieved
- * @return return value of native call
- */
- static native Pointer elf64_getshdr(Pointer scnPtr);
-
- /**
- * Invoke native libelf function loff_t elf_update (Elf *elfPtr, Elf_Cmd cmd).
- *
- * @param elfPtr Pointer to ELF descriptor
- * @param cmd command
- * @return return value of native call
- */
- static native long elf_update(Pointer elfPtr, int cmd);
-
- /**
- * Invoke native libelf function char *elf_errmsg (int error).
- *
- * @param error error
- * @return return value of native call
- */
- static native String elf_errmsg(int error);
-
- /**
- * Invoke native libelf function size_t elf_ndxscn (Elf_Scn *scn).
- *
- * @param scn section pointer
- * @return return value of native call
- */
- static native int elf_ndxscn(Pointer scn);
-
- /**
- * GELF interfaces
- */
- /**
- * Invoke native libelf function unsigned long int gelf_newehdr (Elf *elf, int elfClass).
- *
- * @param elf ELF Header pointer
- * @param elfclass ELF class
- * @return return value of native call boxed as a pointer
- */
- static native Pointer gelf_newehdr(Pointer elf, int elfclass);
-
- /**
- * Invoke native libelf function unsigned long int gelf_newphdr (Elf *elf, size_t phnum).
- *
- * @param elf ELF header pointer
- * @param phnum number of program headers
- * @return return value of native call boxed as a pointer
- */
- static native Pointer gelf_newphdr(Pointer elf, int phnum);
-
- /**
- * Miscellaneous convenience native methods that help peek and poke ELF data structures.
- */
- static native int size_of_Sym(int elfClass);
-
- static native int size_of_Rela(int elfClass);
-
- static native int size_of_Rel(int elfClass);
-
- static native void ehdr_set_data_encoding(Pointer ehdr, int val);
-
- static native void set_Ehdr_e_machine(int elfclass, Pointer structPtr, int val);
-
- static native void set_Ehdr_e_type(int elfclass, Pointer structPtr, int val);
-
- static native void set_Ehdr_e_version(int elfclass, Pointer structPtr, int val);
-
- static native void set_Ehdr_e_shstrndx(int elfclass, Pointer structPtr, int val);
-
- static native void phdr_set_type_self(int elfclass, Pointer ehdr, Pointer phdr);
-
- static native void set_Shdr_sh_name(int elfclass, Pointer structPtr, int val);
-
- static native void set_Shdr_sh_type(int elfclass, Pointer structPtr, int val);
-
- static native void set_Shdr_sh_flags(int elfclass, Pointer structPtr, int val);
-
- static native void set_Shdr_sh_entsize(int elfclass, Pointer structPtr, int val);
-
- static native void set_Shdr_sh_link(int elfclass, Pointer structPtr, int val);
-
- static native void set_Shdr_sh_info(int elfclass, Pointer structPtr, int val);
-
- static native void set_Data_d_align(Pointer structPtr, int val);
-
- static native void set_Data_d_off(Pointer structPtr, int val);
-
- static native void set_Data_d_buf(Pointer structPtr, Pointer val);
-
- static native void set_Data_d_type(Pointer structPtr, int val);
-
- static native void set_Data_d_size(Pointer structPtr, int val);
-
- static native void set_Data_d_version(Pointer structPtr, int val);
-
- static native long create_sym_entry(int elfclass, int index, int type, int bind, int shndx, int size, int value);
-
- static native long create_reloc_entry(int elfclass, int roffset, int symtabIdx, int relocType, int raddend, int reloca);
-
- /**
- * File Operations.
- */
- static native int open_rw(String fileName);
-
- static native int open(String fileName, int flags);
-
- static native int open(String fileName, int flags, int mode);
-
- static native int close(int fd);
- // Checkstyle: resume method name check
-}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/Pointer.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/Pointer.java
deleted file mode 100644
index a569584a8ce..00000000000
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/Pointer.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.tools.jaotc.jnilibelf;
-
-import jdk.internal.misc.Unsafe;
-
-import static jdk.tools.jaotc.jnilibelf.UnsafeAccess.UNSAFE;
-
-public class Pointer {
-
- private final long address;
-
- public Pointer(long val) {
- address = val;
- }
-
- /**
- * Put (i.e., copy) content of byte array at consecutive addresses beginning at this Pointer.
- *
- * @param src source byte array
- */
- public void put(byte[] src) {
- UNSAFE.copyMemory(src, Unsafe.ARRAY_BYTE_BASE_OFFSET, null, address, src.length);
- }
-
- /**
- * Get (i.e., copy) content at this Pointer to the given byte array.
- *
- * @param dst destination byte array
- */
- public void get(byte[] dst) {
- UNSAFE.copyMemory(null, address, dst, Unsafe.ARRAY_BYTE_BASE_OFFSET, dst.length);
- }
-
- /**
- * Read {@code readSize} number of bytes to copy them starting at {@code startIndex} of
- * {@code byteArray}
- *
- * @param byteArray target array to copy bytes
- * @param readSize number of bytes to copy
- * @param startIndex index of the array to start copy at
- */
- public void copyBytesTo(byte[] byteArray, int readSize, int startIndex) {
- long end = (long)startIndex + (long)readSize;
- if (end > byteArray.length) {
- throw new IllegalArgumentException("writing beyond array bounds");
- }
- UNSAFE.copyMemory(null, address, byteArray, Unsafe.ARRAY_BYTE_BASE_OFFSET+startIndex, readSize);
- }
-
-}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/linux/Elf_Cmd.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/linux/Elf_Cmd.java
deleted file mode 100644
index a32d202486c..00000000000
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc.jnilibelf/src/jdk/tools/jaotc/jnilibelf/linux/Elf_Cmd.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.tools.jaotc.jnilibelf.linux;
-
-/**
- * Represent Elf_Cmd enums defined in libelf.h on Linux as they slightly different from libelf.h on
- * SunOS.
- */
-public enum Elf_Cmd {
- /** Nothing, terminate, or compute only. */
- ELF_C_NULL,
-
- /** Read. */
- ELF_C_READ,
-
- /** Read and write. */
- ELF_C_RDWR,
-
- /** Write. */
- ELF_C_WRITE,
-
- /** Clear flag. */
- ELF_C_CLR,
-
- /** Set flag. */
- ELF_C_SET,
-
- /**
- * Signal that file descriptor will not be used anymore.
- */
- ELF_C_FDDONE,
-
- /**
- * Read rest of data so that file descriptor is not used anymore.
- */
- ELF_C_FDREAD,
-
- /* The following are extensions. */
-
- /** Read, but mmap the file if possible. */
- ELF_C_READ_MMAP,
-
- /** Read and write, with mmap. */
- ELF_C_RDWR_MMAP,
-
- /** Write, with mmap. */
- ELF_C_WRITE_MMAP,
-
- /**
- * Read, but memory is writable, results are not written to the file.
- */
- ELF_C_READ_MMAP_PRIVATE,
-
- /** Copy basic file data but not the content. */
- ELF_C_EMPTY,
-
- /** Keep this the last entry. */
- ELF_C_NUM;
-}
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTBackend.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTBackend.java
index 3ba1067b959..9f71f1cad7e 100644
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTBackend.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTBackend.java
@@ -23,17 +23,11 @@
package jdk.tools.jaotc;
-import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
-import static org.graalvm.compiler.core.common.GraalOptions.ImmutableCode;
-
import java.util.ListIterator;
import org.graalvm.compiler.code.CompilationResult;
import org.graalvm.compiler.core.GraalCompiler;
-import org.graalvm.compiler.core.common.CompilationIdentifier;
-import org.graalvm.compiler.debug.Debug;
-import org.graalvm.compiler.debug.Debug.Scope;
-import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.hotspot.HotSpotBackend;
import org.graalvm.compiler.hotspot.HotSpotCompiledCodeBuilder;
import org.graalvm.compiler.hotspot.meta.HotSpotProviders;
@@ -43,8 +37,7 @@ import org.graalvm.compiler.lir.phases.LIRSuites;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration.Plugins;
-import org.graalvm.compiler.options.OptionValue;
-import org.graalvm.compiler.options.OptionValue.OverrideScope;
+import org.graalvm.compiler.options.OptionValues;
import org.graalvm.compiler.phases.BasePhase;
import org.graalvm.compiler.phases.OptimisticOptimizations;
import org.graalvm.compiler.phases.PhaseSuite;
@@ -60,19 +53,18 @@ import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.TriState;
public class AOTBackend {
-
private final Main main;
-
+ private final OptionValues graalOptions;
private final HotSpotBackend backend;
-
private final HotSpotProviders providers;
private final HotSpotCodeCacheProvider codeCache;
private final PhaseSuite graphBuilderSuite;
private final HighTierContext highTierContext;
private final GraalFilters filters;
- public AOTBackend(Main main, HotSpotBackend backend, GraalFilters filters) {
+ public AOTBackend(Main main, OptionValues graalOptions, HotSpotBackend backend, GraalFilters filters) {
this.main = main;
+ this.graalOptions = graalOptions;
this.backend = backend;
this.filters = filters;
providers = backend.getProviders();
@@ -85,37 +77,44 @@ public class AOTBackend {
return graphBuilderSuite;
}
+ public HotSpotBackend getBackend() {
+ return backend;
+ }
+
+ public HotSpotProviders getProviders() {
+ return providers;
+ }
+
private Suites getSuites() {
// create suites every time, as we modify options for the compiler
- return backend.getSuites().getDefaultSuites();
+ return backend.getSuites().getDefaultSuites(graalOptions);
}
private LIRSuites getLirSuites() {
// create suites every time, as we modify options for the compiler
- return backend.getSuites().getDefaultLIRSuites();
+ return backend.getSuites().getDefaultLIRSuites(graalOptions);
}
@SuppressWarnings("try")
- public CompilationResult compileMethod(ResolvedJavaMethod resolvedMethod) {
- try (OverrideScope s = OptionValue.override(ImmutableCode, true, GeneratePIC, true)) {
- StructuredGraph graph = buildStructuredGraph(resolvedMethod);
- if (graph != null) {
- return compileGraph(resolvedMethod, graph);
- }
- return null;
+ public CompilationResult compileMethod(ResolvedJavaMethod resolvedMethod, DebugContext debug) {
+ StructuredGraph graph = buildStructuredGraph(resolvedMethod, debug);
+ if (graph != null) {
+ return compileGraph(resolvedMethod, graph, debug);
}
+ return null;
}
/**
* Build a structured graph for the member.
*
* @param javaMethod method for whose code the graph is to be created
+ * @param debug
* @return structured graph
*/
@SuppressWarnings("try")
- private StructuredGraph buildStructuredGraph(ResolvedJavaMethod javaMethod) {
- try (Scope s = Debug.scope("AOTParseMethod")) {
- StructuredGraph graph = new StructuredGraph(javaMethod, StructuredGraph.AllowAssumptions.NO, false, CompilationIdentifier.INVALID_COMPILATION_ID);
+ private StructuredGraph buildStructuredGraph(ResolvedJavaMethod javaMethod, DebugContext debug) {
+ try (DebugContext.Scope s = debug.scope("AOTParseMethod")) {
+ StructuredGraph graph = new StructuredGraph.Builder(graalOptions, debug).method(javaMethod).useProfilingInfo(false).build();
graphBuilderSuite.apply(graph, highTierContext);
return graph;
} catch (Throwable e) {
@@ -125,8 +124,8 @@ public class AOTBackend {
}
@SuppressWarnings("try")
- private CompilationResult compileGraph(ResolvedJavaMethod resolvedMethod, StructuredGraph graph) {
- try (Scope s = Debug.scope("AOTCompileMethod")) {
+ private CompilationResult compileGraph(ResolvedJavaMethod resolvedMethod, StructuredGraph graph, DebugContext debug) {
+ try (DebugContext.Scope s = debug.scope("AOTCompileMethod")) {
ProfilingInfo profilingInfo = DefaultProfilingInfo.get(TriState.FALSE);
final boolean isImmutablePIC = true;
@@ -195,7 +194,7 @@ public class AOTBackend {
public void printCompiledMethod(HotSpotResolvedJavaMethod resolvedMethod, CompilationResult compResult) {
// This is really not installing the method.
- InstalledCode installedCode = codeCache.addCode(resolvedMethod, HotSpotCompiledCodeBuilder.createCompiledCode(null, null, compResult), null, null);
+ InstalledCode installedCode = codeCache.addCode(resolvedMethod, HotSpotCompiledCodeBuilder.createCompiledCode(codeCache, null, null, compResult), null, null);
String disassembly = codeCache.disassemble(installedCode);
if (disassembly != null) {
main.printlnDebug(disassembly);
diff --git a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTCompilationTask.java b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTCompilationTask.java
index f92ead4ba25..9a1aef5c2b5 100644
--- a/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTCompilationTask.java
+++ b/hotspot/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTCompilationTask.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,13 +26,15 @@ package jdk.tools.jaotc;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import org.graalvm.compiler.api.replacements.SnippetReflectionProvider;
import org.graalvm.compiler.code.CompilationResult;
import org.graalvm.compiler.core.GraalCompilerOptions;
-import org.graalvm.compiler.debug.Debug;
-import org.graalvm.compiler.debug.DebugEnvironment;
+import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.debug.Management;
import org.graalvm.compiler.debug.TTY;
-import org.graalvm.compiler.debug.internal.DebugScope;
+import org.graalvm.compiler.debug.DebugContext.Activation;
+import org.graalvm.compiler.options.OptionValues;
+import org.graalvm.compiler.printer.GraalDebugHandlersFactory;
import jdk.vm.ci.hotspot.HotSpotJVMCIRuntime;
import jdk.vm.ci.hotspot.HotSpotResolvedJavaMethod;
@@ -54,6 +56,8 @@ public class AOTCompilationTask implements Runnable, Comparable