Jesper Wilhelmsson 2017-06-22 00:51:07 +02:00
commit 3c874cfeb3
2109 changed files with 135597 additions and 48242 deletions

View File

@@ -47,11 +47,10 @@ ifeq ($(INCLUDE_GRAAL), true)
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
-          $(SRC_DIR)/org.graalvm.compiler.common/src \
+          $(SRC_DIR)/org.graalvm.word/src \
           $(SRC_DIR)/org.graalvm.compiler.core/src \
           $(SRC_DIR)/org.graalvm.compiler.core.common/src \
           $(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
-          $(SRC_DIR)/org.graalvm.compiler.api.collections/src \
           $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
           $(SRC_DIR)/org.graalvm.compiler.asm/src \
           $(SRC_DIR)/org.graalvm.compiler.bytecode/src \
@@ -68,6 +67,7 @@ ifeq ($(INCLUDE_GRAAL), true)
           $(SRC_DIR)/org.graalvm.compiler.phases.common/src \
           $(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
           $(SRC_DIR)/org.graalvm.compiler.virtual/src \
+          $(SRC_DIR)/org.graalvm.util/src \
           $(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
           $(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
           $(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \
@@ -102,6 +102,7 @@ ifeq ($(INCLUDE_GRAAL), true)
       SRC := \
           $(SRC_DIR)/org.graalvm.compiler.options/src \
           $(SRC_DIR)/org.graalvm.compiler.options.processor/src \
+          $(SRC_DIR)/org.graalvm.util/src \
       , \
      BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor, \
      JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar, \
@@ -114,9 +115,8 @@ ifeq ($(INCLUDE_GRAAL), true)
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
-          $(SRC_DIR)/org.graalvm.compiler.common/src \
+          $(SRC_DIR)/org.graalvm.word/src \
           $(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
-          $(SRC_DIR)/org.graalvm.compiler.api.collections/src \
           $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
           $(SRC_DIR)/org.graalvm.compiler.code/src \
           $(SRC_DIR)/org.graalvm.compiler.core.common/src \
@@ -125,6 +125,7 @@ ifeq ($(INCLUDE_GRAAL), true)
           $(SRC_DIR)/org.graalvm.compiler.nodeinfo/src \
           $(SRC_DIR)/org.graalvm.compiler.options/src \
           $(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
+          $(SRC_DIR)/org.graalvm.util/src \
           $(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
           $(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
           $(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \

View File

@@ -37,7 +37,6 @@ SRC_DIR := $(HOTSPOT_TOPDIR)/src/$(MODULE)/share/classes
 PROC_SRC_SUBDIRS := \
     org.graalvm.compiler.code \
-    org.graalvm.compiler.common \
     org.graalvm.compiler.core \
     org.graalvm.compiler.core.aarch64 \
     org.graalvm.compiler.core.amd64 \

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@ ifeq ($(call check-jvm-feature, dtrace), true)
       CXX := $(BUILD_CXX), \
       LDEXE := $(BUILD_CXX), \
       generateJvmOffsets.cpp_CXXFLAGS := $(JVM_CFLAGS) -mt -xnolib -norunpath, \
-      generateJvmOffsetsMain.c_CFLAGS := -library=%none -mt -m64 -norunpath -z nodefs, \
+      generateJvmOffsetsMain.c_CFLAGS := -mt -m64 -norunpath -z nodefs, \
       LDFLAGS := -m64, \
       LIBS := -lc, \
       OBJECT_DIR := $(JVM_VARIANT_OUTPUTDIR)/tools/dtrace-gen-offsets/objs, \

View File

@@ -1,53 +0,0 @@
-#
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-include $(SPEC)
-include NativeCompilation.gmk
-$(eval $(call IncludeCustomExtension, hotspot, lib/Lib-jdk.aot.gmk))
-##############################################################################
-# Build libjelfshim only when AOT is enabled.
-ifeq ($(ENABLE_AOT), true)
-  JELFSHIM_NAME := jelfshim
-  $(eval $(call SetupNativeCompilation, BUILD_LIBJELFSHIM, \
-      TOOLCHAIN := TOOLCHAIN_DEFAULT, \
-      OPTIMIZATION := LOW, \
-      LIBRARY := $(JELFSHIM_NAME), \
-      OUTPUT_DIR := $(call FindLibDirForModule, $(MODULE)), \
-      SRC := $(HOTSPOT_TOPDIR)/src/jdk.aot/unix/native/libjelfshim, \
-      CFLAGS := $(CFLAGS_JDKLIB) $(ELF_CFLAGS) \
-          -DAOT_VERSION_STRING='"$(VERSION_STRING)"' \
-          -I$(SUPPORT_OUTPUTDIR)/headers/$(MODULE), \
-      LDFLAGS := $(LDFLAGS_JDKLIB), \
-      OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/lib$(JELFSHIM_NAME), \
-      LIBS := $(ELF_LIBS) $(LIBS_JDKLIB), \
-  ))
-  TARGETS += $(BUILD_LIBJELFSHIM)
-endif
-##############################################################################

View File

@@ -35,12 +35,16 @@ include $(SPEC)
 include MakeBase.gmk
 include TestFilesCompilation.gmk
+$(eval $(call IncludeCustomExtension, hotspot, test/JtregNative.gmk))
 ################################################################################
 # Targets for building the native tests themselves.
 ################################################################################
 # Add more directories here when needed.
-BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
+BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
+    $(HOTSPOT_TOPDIR)/test/gc/g1/TestJNIWeakG1 \
+    $(HOTSPOT_TOPDIR)/test/gc/stress/gclocker \
     $(HOTSPOT_TOPDIR)/test/native_sanity \
     $(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
     $(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
@@ -53,6 +57,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
     $(HOTSPOT_TOPDIR)/test/runtime/modules/getModuleJNI \
     $(HOTSPOT_TOPDIR)/test/runtime/SameObject \
     $(HOTSPOT_TOPDIR)/test/runtime/BoolReturn \
+    $(HOTSPOT_TOPDIR)/test/runtime/noClassDefFoundMsg \
     $(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
     $(HOTSPOT_TOPDIR)/test/compiler/calls \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
@@ -65,6 +70,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ClassFileLoadHook \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ClassLoadPrepare \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ThreadStart \
+    $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/StartPhase/AllowedFunctions \
 #
 # Add conditional directories here when needed.
@@ -91,6 +97,7 @@ ifeq ($(TOOLCHAIN_TYPE), solstudio)
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassFileLoadHook := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassLoadPrepare := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAThreadStart := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
 endif
 ifeq ($(OPENJDK_TARGET_OS), linux)

View File

@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
@@ -3564,7 +3564,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
 }
 // Vector ideal reg.
-const int Matcher::vector_ideal_reg(int len) {
+const uint Matcher::vector_ideal_reg(int len) {
   switch(len) {
   case  8: return Op_VecD;
   case 16: return Op_VecX;
@@ -3573,7 +3573,7 @@ const int Matcher::vector_ideal_reg(int len) {
   return 0;
 }
-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
   return Op_VecX;
 }
@@ -5423,6 +5423,16 @@ operand immI_56()
   interface(CONST_INTER);
 %}
+operand immI_63()
+%{
+  predicate(n->get_int() == 63);
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
 operand immI_64()
 %{
   predicate(n->get_int() == 64);
@@ -5453,20 +5463,10 @@ operand immI_65535()
   interface(CONST_INTER);
 %}
-operand immL_63()
-%{
-  predicate(n->get_int() == 63);
-  match(ConI);
-  op_cost(0);
-  format %{ %}
-  interface(CONST_INTER);
-%}
 operand immL_255()
 %{
-  predicate(n->get_int() == 255);
-  match(ConI);
+  predicate(n->get_long() == 255L);
+  match(ConL);
   op_cost(0);
   format %{ %}
@@ -11146,7 +11146,7 @@ instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
   ins_pipe(ldiv_reg_reg);
 %}
-instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
+instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
   match(Set dst (URShiftL (RShiftL src1 div1) div2));
   ins_cost(INSN_COST);
   format %{ "lsr $dst, $src1, $div1" %}
@@ -11156,7 +11156,7 @@ instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
   ins_pipe(ialu_reg_shift);
 %}
-instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
+instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
   match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
   ins_cost(INSN_COST);
   format %{ "add $dst, $src, $div1" %}
@@ -15387,9 +15387,9 @@ instruct ShouldNotReachHere() %{
   format %{ "ShouldNotReachHere" %}
   ins_encode %{
-    // TODO
-    // implement proper trap call here
-    __ brk(999);
+    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
+    // return true
+    __ dpcs1(0xdead + 1);
   %}
   ins_pipe(pipe_class_default);
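The immediate matters here: per the new comment, the AArch64 signal handling treats the debug trap with immediate 0xdead as the marker planted at the entry of zombie (not-entrant) compiled methods, so ShouldNotReachHere must raise SIGILL with any other immediate. A minimal sketch of that distinction (the marker constant comes from the comment above; the rest is illustrative, not HotSpot's handler):

#include <cassert>
#include <cstdint>

// Illustrative only: the zombie marker immediate is 0xdead, so
// ShouldNotReachHere traps with 0xdead + 1 to stay distinguishable.
constexpr uint16_t kZombieNotEntrantImm = 0xdead;

bool is_sigill_zombie_not_entrant(uint16_t trap_imm) {
  return trap_imm == kZombieNotEntrantImm;
}

int main() {
  assert(is_sigill_zombie_not_entrant(0xdead));       // patched-out method entry
  assert(!is_sigill_zombie_not_entrant(0xdead + 1));  // ShouldNotReachHere trap
}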

View File

@@ -109,9 +109,15 @@ int AbstractInterpreter::size_activation(int max_stack,
   // for the callee's params we only need to account for the extra
   // locals.
   int size = overhead +
-             (callee_locals - callee_params)*Interpreter::stackElementWords +
+             (callee_locals - callee_params) +
              monitors * frame::interpreter_frame_monitor_size() +
-             temps* Interpreter::stackElementWords + extra_args;
+             // On the top frame, at all times SP <= ESP, and SP is
+             // 16-aligned. We ensure this by adjusting SP on method
+             // entry and re-entry to allow room for the maximum size of
+             // the expression stack. When we call another method we bump
+             // SP so that no stack space is wasted. So, only on the top
+             // frame do we need to allow max_stack words.
+             (is_top_frame ? max_stack : temps + extra_args);
   // On AArch64 we always keep the stack pointer 16-aligned, so we
   // must round up here.
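To make the top-frame rule concrete, here is a small self-contained sketch of the sizing with invented numbers (overhead, monitor size, and counts below are illustrative, not HotSpot's values), showing why only the top frame reserves the full max_stack:

#include <cstdio>

// Hypothetical stand-ins for the quantities used in size_activation().
static int frame_size_in_words(int overhead, int locals_minus_params,
                               int monitors, int monitor_size, int max_stack,
                               int temps_plus_extra, bool is_top_frame) {
  int size = overhead + locals_minus_params + monitors * monitor_size +
             (is_top_frame ? max_stack : temps_plus_extra);
  // 16-byte SP alignment on a 64-bit target = round up to an even word count.
  return (size + 1) & ~1;
}

int main() {
  // top frame reserves the whole expression stack (5 words here) ...
  printf("top:    %d\n", frame_size_in_words(12, 2, 1, 2, 5, 3, true));   // 21 -> 22
  // ... an inner frame only what is actually live at the call (3 words)
  printf("middle: %d\n", frame_size_in_words(12, 2, 1, 2, 5, 3, false));  // 19 -> 20
}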

View File

@@ -30,12 +30,6 @@
 class Bytes: AllStatic {
  public:
-  // Returns true if the byte ordering used by Java is different from the native byte ordering
-  // of the underlying machine. For example, this is true for Intel x86, but false for Solaris
-  // on Sparc.
-  static inline bool is_Java_byte_ordering_different(){ return true; }
   // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
   // (no special code is needed since x86 CPUs can access unaligned data)
   static inline u2 get_native_u2(address p) { return *(u2*)p; }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -23,12 +23,6 @@
  *
  */
-#include "precompiled.hpp"
-#include "c1/c1_FpuStackSim.hpp"
-#include "c1/c1_FrameMap.hpp"
-#include "utilities/array.hpp"
-#include "utilities/ostream.hpp"
 //--------------------------------------------------------
 //  FpuStackSim
 //--------------------------------------------------------

View File

@@ -2740,8 +2740,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
       // set already but no need to check.
       __ cbz(rscratch1, next);
-      __ andr(rscratch1, tmp, TypeEntries::type_unknown);
-      __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
+      __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
       if (TypeEntries::is_type_none(current_klass)) {
         __ cbz(rscratch2, none);
@@ -2761,8 +2760,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
         __ ldr(tmp, mdo_addr);
-        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
-        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
+        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
       }
       // different than before. Cannot keep accurate profile.
@@ -2812,8 +2810,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
       __ ldr(tmp, mdo_addr);
-      __ andr(rscratch1, tmp, TypeEntries::type_unknown);
-      __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
+      __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
       __ orr(tmp, tmp, TypeEntries::type_unknown);
       __ str(tmp, mdo_addr);
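The andr+cbnz pair can collapse into a single tbnz because TypeEntries::type_unknown is a one-bit mask (exact_log2 would assert otherwise), so "(tmp & mask) != 0" is the same as "bit log2(mask) of tmp is set"; the fused form also stops consuming rscratch1. A quick equivalence check with an illustrative mask:

#include <cassert>
#include <cstdint>

// (tmp & mask) != 0, as computed by andr followed by cbnz.
static bool andr_then_cbnz(uint64_t tmp, uint64_t mask) { return (tmp & mask) != 0; }
// bit test, as computed by tbnz.
static bool tbnz(uint64_t tmp, unsigned bit) { return (tmp >> bit) & 1u; }

int main() {
  const uint64_t mask = 1u << 2;  // any single-bit mask; exact_log2(mask) == 2
  for (uint64_t tmp = 0; tmp < 64; tmp++) {
    assert(andr_then_cbnz(tmp, mask) == tbnz(tmp, 2));
  }
}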

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -270,7 +270,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
   get_constant_pool(result);
   // load pointer for resolved_references[] objArray
-  ldr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
+  ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
+  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
   // JNIHandles::resolve(obj);
   ldr(result, Address(result, 0));
   // Add in the index
@@ -278,6 +279,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
   load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
 }
+void InterpreterMacroAssembler::load_resolved_klass_at_offset(
+                                             Register cpool, Register index, Register klass, Register temp) {
+  add(temp, cpool, index, LSL, LogBytesPerWord);
+  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
+  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
+  add(klass, klass, temp, LSL, LogBytesPerWord);
+  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
+}
 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 // subtype of super_klass.
 //
@@ -682,7 +692,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
     }
     // Load (object->mark() | 1) into swap_reg
-    ldr(rscratch1, Address(obj_reg, 0));
+    ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     orr(swap_reg, rscratch1, 1);
     // Save (object->mark() | 1) into BasicLock's displaced header
@@ -694,14 +704,14 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
     Label fail;
     if (PrintBiasedLockingStatistics) {
       Label fast;
-      cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
+      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
       bind(fast);
       atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
                   rscratch2, rscratch1, tmp);
       b(done);
       bind(fail);
     } else {
-      cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
+      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
     }
     // Test if the oopMark is an obvious stack pointer, i.e.,
@@ -791,7 +801,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
     cbz(header_reg, done);
     // Atomic swap back the old header
-    cmpxchgptr(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
+    cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
     // Call the runtime routine for slow case.
     str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
@@ -1744,8 +1754,7 @@ void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register t
     // Load the offset of the area within the MDO used for
     // parameters. If it's negative we're not profiling any parameters
     ldr(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
-    cmp(tmp1, 0u);
-    br(Assembler::LT, profile_continue);
+    tbnz(tmp1, 63, profile_continue);  // i.e. sign bit set
     // Compute a pointer to the area for parameters from the offset
     // and move the pointer to the slot for the last
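In C++ terms, the four instructions in the new load_resolved_klass_at_offset walk from a word-sized constant-pool slot (whose low 16 bits, read by ldrh on this little-endian target, hold the resolved-klass index) into the pool's _resolved_klasses array. A loose sketch under those assumptions; the stand-in types and the resolved_klasses_payload member are hypothetical, not HotSpot's real layout:

#include <cstdint>
#include <cstring>

struct Klass;                        // opaque stand-in
struct ConstantPool {                // stand-in; the real layout differs
  Klass** resolved_klasses_payload;  // hypothetical: element 0 of _resolved_klasses
};

// The pool slot's low halfword (ldrh) indexes _resolved_klasses (ldr + add + ldr).
Klass* load_resolved_klass(ConstantPool* cpool, intptr_t cp_index) {
  char* slots = reinterpret_cast<char*>(cpool) + sizeof(ConstantPool);
  uint16_t resolved_klass_index;
  std::memcpy(&resolved_klass_index,
              slots + cp_index * sizeof(void*),  // word-sized pool slots
              sizeof(resolved_klass_index));     // ldrh: low 16 bits only
  return cpool->resolved_klasses_payload[resolved_klass_index];
}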

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -54,9 +54,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
                int number_of_arguments,
                bool check_exceptions);
-  virtual void check_and_handle_popframe(Register java_thread);
-  virtual void check_and_handle_earlyret(Register java_thread);
   // base routine for all dispatches
   void dispatch_base(TosState state, address* table, bool verifyoop = true);
@@ -67,6 +64,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void jump_to_entry(address entry);
+  virtual void check_and_handle_popframe(Register java_thread);
+  virtual void check_and_handle_earlyret(Register java_thread);
   // Interpreter-specific registers
   void save_bcp() {
     str(rbcp, Address(rfp, frame::interpreter_frame_bcp_offset * wordSize));
@@ -123,6 +123,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
   // load cpool->resolved_references(index);
   void load_resolved_reference_at_index(Register result, Register index);
+  // load cpool->resolved_klass_at(index);
+  void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp);
   void pop_ptr(Register r = r0);
   void pop_i(Register r = r0);
   void pop_l(Register r = r0);

View File

@@ -76,8 +76,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
                  SafepointSynchronize::safepoint_counter_addr(), offset);
   Address safepoint_counter_addr(rcounter_addr, offset);
   __ ldrw(rcounter, safepoint_counter_addr);
-  __ andw(rscratch1, rcounter, 1);
-  __ cbnzw(rscratch1, slow);
+  __ tbnz(rcounter, 0, slow);
   __ eor(robj, c_rarg1, rcounter);
   __ eor(robj, robj, rcounter);               // obj, since
                                               // robj ^ rcounter ^ rcounter == robj

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
     }
   }
 #endif // ASSERT
-  Handle obj = HotSpotObjectConstantImpl::object(constant);
+  Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
   jobject value = JNIHandles::make_local(obj());
   MacroAssembler::patch_oop(pc, (address)obj());
   int oop_index = _oop_recorder->find_index(value);

View File

@@ -515,7 +515,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
     mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
     andr(swap_reg, swap_reg, rscratch1);
     orr(tmp_reg, swap_reg, rthread);
-    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
+    cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
     // If the biasing toward our thread failed, this means that
     // another thread succeeded in biasing it toward itself and we
     // need to revoke that bias. The revocation will occur in the
@@ -542,7 +542,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
     Label here;
     load_prototype_header(tmp_reg, obj_reg);
     orr(tmp_reg, rthread, tmp_reg);
-    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
+    cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
     // If the biasing toward our thread failed, then another thread
     // succeeded in biasing it toward itself and we need to revoke that
     // bias. The revocation will occur in the runtime in the slow case.
@@ -569,7 +569,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
   {
     Label here, nope;
     load_prototype_header(tmp_reg, obj_reg);
-    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
+    cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
     bind(here);
     // Fall through to the normal CAS-based lock, because no matter what
@@ -2141,6 +2141,12 @@ void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Reg
     b(*fail);
 }
+void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
+                                        Label &succeed, Label *fail) {
+  assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
+  cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
+}
 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
                         Label &succeed, Label *fail) {
   // oldv holds comparison value

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -77,12 +77,6 @@ class MacroAssembler: public Assembler {
     bool check_exceptions  // whether to check for pending exceptions after return
   );
-  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
-  // The implementation is only non-empty for the InterpreterMacroAssembler,
-  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
-  virtual void check_and_handle_popframe(Register java_thread);
-  virtual void check_and_handle_earlyret(Register java_thread);
   void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
   // Maximum size of class area in Metaspace when compressed
@@ -97,6 +91,12 @@ class MacroAssembler: public Assembler {
              > (1u << log2_intptr(CompressedClassSpaceSize))));
   }
+  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
+  // The implementation is only non-empty for the InterpreterMacroAssembler,
+  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
+  virtual void check_and_handle_popframe(Register java_thread);
+  virtual void check_and_handle_earlyret(Register java_thread);
   // Biased locking support
   // lock_reg and obj_reg must be loaded up with the appropriate values.
   // swap_reg is killed.
@@ -974,6 +974,8 @@ public:
   // Various forms of CAS
+  void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
+                          Label &suceed, Label *fail);
   void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                   Label &suceed, Label *fail);

View File

@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "memory/metaspaceShared.hpp"
-// Generate the self-patching vtable method:
-//
-// This method will be called (as any other Klass virtual method) with
-// the Klass itself as the first argument. Example:
-//
-//   oop obj;
-//   int size = obj->klass()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-//=====================================================================
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no relationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-#define __ masm->
-extern "C" {
-  void aarch64_prolog(void);
-}
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
-                                              void** vtable,
-                                              char** md_top,
-                                              char* md_end,
-                                              char** mc_top,
-                                              char* mc_end) {
-#ifdef BUILTIN_SIM
-  // Write a dummy word to the writable shared metaspace.
-  // MetaspaceShared::initialize_shared_spaces will fill it with the
-  // address of aarch64_prolog().
-  address *prolog_ptr = (address*)*md_top;
-  *(intptr_t *)(*md_top) = (intptr_t)0;
-  (*md_top) += sizeof(intptr_t);
-#endif
-  intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
-  *(intptr_t *)(*md_top) = vtable_bytes;
-  *md_top += sizeof(intptr_t);
-  void** dummy_vtable = (void**)*md_top;
-  *vtable = dummy_vtable;
-  *md_top += vtable_bytes;
-  // Get ready to generate dummy methods.
-  CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
-  MacroAssembler* masm = new MacroAssembler(&cb);
-  Label common_code;
-  for (int i = 0; i < vtbl_list_size; ++i) {
-    for (int j = 0; j < num_virtuals; ++j) {
-      dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
-      // We're called directly from C code.
-#ifdef BUILTIN_SIM
-      __ c_stub_prolog(8, 0, MacroAssembler::ret_type_integral, prolog_ptr);
-#endif
-      // Load rscratch1 with a value indicating vtable/offset pair.
-      // -- bits[ 7..0] (8 bits) which virtual method in table?
-      // -- bits[12..8] (5 bits) which virtual method table?
-      __ mov(rscratch1, (i << 8) + j);
-      __ b(common_code);
-    }
-  }
-  __ bind(common_code);
-  Register tmp0 = r10, tmp1 = r11;       // AAPCS64 temporary registers
-  __ enter();
-  __ lsr(tmp0, rscratch1, 8);            // isolate vtable identifier.
-  __ mov(tmp1, (address)vtbl_list);      // address of list of vtable pointers.
-  __ ldr(tmp1, Address(tmp1, tmp0, Address::lsl(LogBytesPerWord))); // get correct vtable pointer.
-  __ str(tmp1, Address(c_rarg0));        // update vtable pointer in obj.
-  __ add(rscratch1, tmp1, rscratch1, ext::uxtb, LogBytesPerWord); // address of real method pointer.
-  __ ldr(rscratch1, Address(rscratch1)); // get real method pointer.
-  __ blrt(rscratch1, 8, 0, 1);           // jump to the real method.
-  __ leave();
-  __ ret(lr);
-  *mc_top = (char*)__ pc();
-}
-#ifdef BUILTIN_SIM
-void MetaspaceShared::relocate_vtbl_list(char **buffer) {
-  void **sim_entry = (void**)*buffer;
-  *sim_entry = (void*)aarch64_prolog;
-  *buffer += sizeof(intptr_t);
-}
-#endif

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -63,7 +63,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
                                  Register obj, SystemDictionary::WKID klass_id,
                                  const char* error_message) {
   InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
-  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+  Klass* klass = SystemDictionary::well_known_klass(klass_id);
   Register temp = rscratch2;
   Register temp2 = rscratch1; // used by MacroAssembler::cmpptr
   Label L_ok, L_bad;
@@ -137,8 +137,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
   __ verify_oop(method_temp);
   __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
   __ verify_oop(method_temp);
-  // the following assumes that a Method* is normally compressed in the vmtarget field:
-  __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
+  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
+  __ verify_oop(method_temp);
+  __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
   if (VerifyMethodHandles && !for_compiler_entry) {
     // make sure recv is already on stack
@@ -282,7 +283,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
     Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
-    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
+    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
+    Address vmtarget_method( rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
     Register temp1_recv_klass = temp1;
     if (iid != vmIntrinsics::_linkToStatic) {
@@ -335,14 +337,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       if (VerifyMethodHandles) {
         verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
       }
-      __ ldr(rmethod, member_vmtarget);
+      __ load_heap_oop(rmethod, member_vmtarget);
+      __ ldr(rmethod, vmtarget_method);
       break;
     case vmIntrinsics::_linkToStatic:
       if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
       }
-      __ ldr(rmethod, member_vmtarget);
+      __ load_heap_oop(rmethod, member_vmtarget);
+      __ ldr(rmethod, vmtarget_method);
       break;
     case vmIntrinsics::_linkToVirtual:

View File

@@ -1842,7 +1842,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     }
     // Load (object->mark() | 1) into swap_reg %r0
-    __ ldr(rscratch1, Address(obj_reg, 0));
+    __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     __ orr(swap_reg, rscratch1, 1);
     // Save (object->mark() | 1) into BasicLock's displaced header
@@ -1850,7 +1850,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // src -> dest iff dest == r0 else r0 <- dest
     { Label here;
-      __ cmpxchgptr(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
+      __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
     }
     // Hmm should this move to the slow path code area???
@@ -2029,7 +2029,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
       // Atomic swap old header if oop still contains the stack lock
       Label succeed;
-      __ cmpxchgptr(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
+      __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
       __ bind(succeed);
       // slow path re-enters here

View File

@@ -402,14 +402,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
   return entry;
 }
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  // NULL last_sp until next java call
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ dispatch_next(state);
-  return entry;
-}
 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
@@ -444,6 +436,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
     __ notify(Assembler::method_reentry);
   }
 #endif
+  __ check_and_handle_popframe(rthread);
+  __ check_and_handle_earlyret(rthread);
   __ get_dispatch();
   __ dispatch_next(state, step);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -246,8 +246,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
       __ movw(bc_reg, bc);
-      __ cmpw(temp_reg, (unsigned) 0);
-      __ br(Assembler::EQ, L_patch_done);  // don't patch
+      __ cbzw(temp_reg, L_patch_done);  // don't patch
     }
     break;
   default:
@@ -3418,8 +3417,7 @@ void TemplateTable::_new() {
   __ br(Assembler::NE, slow_case);
   // get InstanceKlass
-  __ lea(r4, Address(r4, r3, Address::lsl(3)));
-  __ ldr(r4, Address(r4, sizeof(ConstantPool)));
+  __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);
   // make sure klass is initialized & doesn't have finalizer
   // make sure klass is fully initialized
@@ -3572,8 +3570,7 @@ void TemplateTable::checkcast()
   // Get superklass in r0 and subklass in r3
   __ bind(quicked);
   __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
-  __ lea(r0, Address(r2, r19, Address::lsl(3)));
-  __ ldr(r0, Address(r0, sizeof(ConstantPool)));
+  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
   __ bind(resolved);
   __ load_klass(r19, r3);
@@ -3629,8 +3626,7 @@ void TemplateTable::instanceof() {
   // Get superklass in r0 and subklass in r3
   __ bind(quicked);
   __ load_klass(r3, r0);
-  __ lea(r0, Address(r2, r19, Address::lsl(3)));
-  __ ldr(r0, Address(r0, sizeof(ConstantPool)));
+  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);
   __ bind(resolved);

View File

@@ -51,6 +51,11 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread,
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int aarch64_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(aarch64_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), aarch64_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -234,8 +234,15 @@ void AbstractInterpreter::layout_activation(Method* method,
 #ifdef AARCH64
   interpreter_frame->interpreter_frame_set_stack_top(stack_top);
+  // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
+  // none of which are at the same time, so we just need to make sure there is enough room
+  // for the biggest user:
+  //   -reserved slot for exception handler
+  //   -reserved slots for JSR292. Method::extra_stack_entries() is the size.
+  //   -3 reserved slots so get_method_counters() can save some registers before call_VM().
+  int max_stack = method->constMethod()->max_stack() + MAX2(3, Method::extra_stack_entries());
   intptr_t* extended_sp = (intptr_t*) monbot -
-    (method->max_stack() + 1) * Interpreter::stackElementWords - // +1 is reserved slot for exception handler
+    (max_stack * Interpreter::stackElementWords) -
     popframe_extra_args;
   extended_sp = (intptr_t*)round_down((intptr_t)extended_sp, StackAlignmentInBytes);
   interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);

View File

@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -1122,7 +1122,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
 }
 // Vector ideal reg corresponding to specified size in bytes
-const int Matcher::vector_ideal_reg(int size) {
+const uint Matcher::vector_ideal_reg(int size) {
   assert(MaxVectorSize >= size, "");
   switch(size) {
     case  8: return Op_VecD;
@@ -1132,7 +1132,7 @@ const int Matcher::vector_ideal_reg(int size) {
   return 0;
 }
-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
   return vector_ideal_reg(size);
 }
@@ -11752,9 +11752,13 @@ instruct ShouldNotReachHere( )
   size(4);
   // Use the following format syntax
-  format %{ "breakpoint ; ShouldNotReachHere" %}
+  format %{ "ShouldNotReachHere" %}
   ins_encode %{
-    __ breakpoint();
+#ifdef AARCH64
+    __ dpcs1(0xdead);
+#else
+    __ udf(0xdead);
+#endif
   %}
   ins_pipe(tail_call);
 %}

View File

@@ -578,6 +578,11 @@ class Assembler : public AbstractAssembler {
   F(bl, 0xb)
 #undef F
+  void udf(int imm_16) {
+    assert((imm_16 >> 16) == 0, "encoding constraint");
+    emit_int32(0xe7f000f0 | (imm_16 & 0xfff0) << 4 | (imm_16 & 0xf));
+  }
   // ARMv7 instructions
 #define F(mnemonic, wt) \
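For reference, the A32 permanently-undefined instruction UDF #imm16 encodes as cond | 0111 1111 | imm12 | 1111 | imm4, with imm16 split as imm12:imm4; that split is exactly what the shift-and-mask above packs (imm_16 & 0xfff0 is imm12 already shifted left by four, so a further shift of four places it in bits [19:8]). A small self-check of that packing (a sketch, not the JDK's test code):

#include <cassert>
#include <cstdint>

// Mirror of the packing used by udf() above.
static uint32_t encode_udf(uint32_t imm_16) {
  assert((imm_16 >> 16) == 0);                            // encoding constraint
  return 0xe7f000f0 | ((imm_16 & 0xfff0) << 4)            // imm12 -> bits [19:8]
                    | (imm_16 & 0xf);                     // imm4  -> bits [3:0]
}

int main() {
  assert(encode_udf(0xdead) == 0xe7fdeafd);  // worked example: UDF #0xdead
  assert(encode_udf(0x0000) == 0xe7f000f0);  // all-zero immediate = base opcode
}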

View File

@@ -1083,6 +1083,7 @@ class Assembler : public AbstractAssembler {
     F(brk, 0b001, 0b000, 0b00)
     F(hlt, 0b010, 0b000, 0b00)
+    F(dpcs1, 0b101, 0b000, 0b01)
 #undef F
   enum SystemRegister { // o0<1> op1<3> CRn<4> CRm<4> op2<3>

View File

@@ -35,12 +35,6 @@
 class Bytes: AllStatic {
  public:
-  // Returns true if the byte ordering used by Java is different from the native byte ordering
-  // of the underlying machine.
-  static inline bool is_Java_byte_ordering_different() {
-    return VM_LITTLE_ENDIAN != 0;
-  }
   static inline u2 get_Java_u2(address p) {
     return (u2(p[0]) << 8) | u2(p[1]);
   }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,10 +22,4 @@
  *
  */
-#include "precompiled.hpp"
-#include "c1/c1_FpuStackSim.hpp"
-#include "c1/c1_FrameMap.hpp"
-#include "utilities/array.hpp"
-#include "utilities/ostream.hpp"
 // Nothing needed here

View File

@ -298,7 +298,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register cache = result; Register cache = result;
// load pointer for resolved_references[] objArray // load pointer for resolved_references[] objArray
ldr(cache, Address(result, ConstantPool::resolved_references_offset_in_bytes())); ldr(cache, Address(result, ConstantPool::cache_offset_in_bytes()));
ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
// JNIHandles::resolve(result) // JNIHandles::resolve(result)
ldr(cache, Address(cache, 0)); ldr(cache, Address(cache, 0));
// Add in the index // Add in the index
@ -308,6 +309,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
load_heap_oop(result, Address(cache, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); load_heap_oop(result, Address(cache, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
} }
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
Register Rcpool, Register Rindex, Register Rklass) {
add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
ldrh(Rtemp, Address(Rtemp, sizeof(ConstantPool))); // Rtemp = resolved_klass_index
ldr(Rklass, Address(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes())); // Rklass = cpool->_resolved_klasses
add(Rklass, Rklass, AsmOperand(Rtemp, lsl, LogBytesPerWord));
ldr(Rklass, Address(Rklass, Array<Klass*>::base_offset_in_bytes()));
}
// Generate a subtype check: branch to not_subtype if sub_klass is // Generate a subtype check: branch to not_subtype if sub_klass is
// not a subtype of super_klass. // not a subtype of super_klass.
// Profiling code for the subtype check failure (profile_typecheck_failed) // Profiling code for the subtype check failure (profile_typecheck_failed)
@ -2016,75 +2026,42 @@ void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
 void InterpreterMacroAssembler::get_method_counters(Register method,
                                                     Register Rcounters,
-                                                    Label& skip) {
+                                                    Label& skip,
+                                                    bool saveRegs,
+                                                    Register reg1,
+                                                    Register reg2,
+                                                    Register reg3) {
   const Address method_counters(method, Method::method_counters_offset());
   Label has_counters;
   ldr(Rcounters, method_counters);
   cbnz(Rcounters, has_counters);
+  if (saveRegs) {
+    // Save and restore in use caller-saved registers since they will be trashed by call_VM
+    assert(reg1 != noreg, "must specify reg1");
+    assert(reg2 != noreg, "must specify reg2");
 #ifdef AARCH64
-  const Register tmp = Rcounters;
-  const int saved_regs_size = 20*wordSize;
-  // Note: call_VM will cut SP according to Rstack_top value before call, and restore SP to
-  // extended_sp value from frame after the call.
-  // So make sure there is enough stack space to save registers and adjust Rstack_top accordingly.
-  {
-    Label enough_stack_space;
-    check_extended_sp(tmp);
-    sub(Rstack_top, Rstack_top, saved_regs_size);
-    cmp(SP, Rstack_top);
-    b(enough_stack_space, ls);
-    align_reg(tmp, Rstack_top, StackAlignmentInBytes);
-    mov(SP, tmp);
-    str(tmp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
-    bind(enough_stack_space);
-    check_stack_top();
-    int offset = 0;
-    stp(R0, R1, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R2, R3, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R4, R5, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R6, R7, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R8, R9, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R10, R11, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R12, R13, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R14, R15, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R16, R17, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R18, LR, Address(Rstack_top, offset)); offset += 2*wordSize;
-    assert (offset == saved_regs_size, "should be");
-  }
+    assert(reg3 != noreg, "must specify reg3");
+    stp(reg1, reg2, Address(Rstack_top, -2*wordSize, pre_indexed));
+    stp(reg3, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
 #else
-  push(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(R14));
-#endif // AARCH64
+    assert(reg3 == noreg, "must not specify reg3");
+    push(RegisterSet(reg1) | RegisterSet(reg2));
+#endif
+  }
   mov(R1, method);
-  call_VM(noreg, CAST_FROM_FN_PTR(address,
-          InterpreterRuntime::build_method_counters), R1);
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);
+  if (saveRegs) {
 #ifdef AARCH64
-  {
-    int offset = 0;
-    ldp(R0, R1, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R2, R3, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R4, R5, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R6, R7, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R8, R9, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R10, R11, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R12, R13, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R14, R15, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R16, R17, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R18, LR, Address(Rstack_top, offset)); offset += 2*wordSize;
-    assert (offset == saved_regs_size, "should be");
-    add(Rstack_top, Rstack_top, saved_regs_size);
-  }
+    ldp(reg3, ZR, Address(Rstack_top, 2*wordSize, post_indexed));
+    ldp(reg1, reg2, Address(Rstack_top, 2*wordSize, post_indexed));
 #else
-  pop(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(R14));
-#endif // AARCH64
+    pop(RegisterSet(reg1) | RegisterSet(reg2));
+#endif
+  }
   ldr(Rcounters, method_counters);
   cbz(Rcounters, skip); // No MethodCounters created, OutOfMemory
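A minimal C-level model of the two AArch64 addressing modes used in the new save/restore path (a sketch, not HotSpot code): pairs are pushed with pre-indexed stp and popped in reverse order with post-indexed ldp.

    #include <cstdint>
    void push_pair(uint64_t*& sp, uint64_t a, uint64_t b) {
      sp -= 2; sp[0] = a; sp[1] = b;   // stp a, b, [sp, -16]!  (pre-indexed)
    }
    void pop_pair(uint64_t*& sp, uint64_t& a, uint64_t& b) {
      a = sp[0]; b = sp[1]; sp += 2;   // ldp a, b, [sp], 16    (post-indexed)
    }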
@@ -53,9 +53,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
   // Template interpreter specific version of call_VM_helper
   virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
-  virtual void check_and_handle_popframe();
-  virtual void check_and_handle_earlyret();
   // base routine for all dispatches
   typedef enum { DispatchDefault, DispatchNormal } DispatchTableMode;
   void dispatch_base(TosState state, DispatchTableMode table_mode, bool verifyoop = true);
@@ -63,6 +60,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
  public:
   InterpreterMacroAssembler(CodeBuffer* code);
+  virtual void check_and_handle_popframe();
+  virtual void check_and_handle_earlyret();
   // Interpreter-specific registers
 #if defined(AARCH64) && defined(ASSERT)
@@ -141,6 +141,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
   // Load object from cpool->resolved_references(*bcp+1)
   void load_resolved_reference_at_index(Register result, Register tmp);
+  // load cpool->resolved_klass_at(index); Rtemp is corrupted upon return
+  void load_resolved_klass_at_offset(Register Rcpool, Register Rindex, Register Rklass);
   void store_check_part1(Register card_table_base); // Sets card_table_base register.
   void store_check_part2(Register obj, Register card_table_base, Register tmp);
@@ -328,7 +331,13 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void trace_state(const char* msg) PRODUCT_RETURN;
-  void get_method_counters(Register method, Register Rcounters, Label& skip);
+  void get_method_counters(Register method,
+                           Register Rcounters,
+                           Label& skip,
+                           bool saveRegs = false,
+                           Register reg1 = noreg,
+                           Register reg2 = noreg,
+                           Register reg3 = noreg);
 };

 #endif // CPU_ARM_VM_INTERP_MASM_ARM_HPP
@@ -206,6 +206,9 @@ protected:
   // may customize this version by overriding it for its purposes (e.g., to save/restore
   // additional registers when doing a VM call).
   virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
+ public:
+  MacroAssembler(CodeBuffer* code) : Assembler(code) {}
   // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
   // The implementation is only non-empty for the InterpreterMacroAssembler,
@@ -213,10 +216,6 @@ protected:
   virtual void check_and_handle_popframe() {}
   virtual void check_and_handle_earlyret() {}
- public:
-  MacroAssembler(CodeBuffer* code) : Assembler(code) {}
   // By default, we do not need relocation information for non
   // patchable absolute addresses. However, when needed by some
   // extensions, ignore_non_patchable_relocations can be modified,
@@ -1,99 +0,0 @@
/*
* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "assembler_arm.inline.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
for (int i = 0; i < vtbl_list_size; ++i) {
Label common_code;
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*) __ pc();
__ mov(Rtemp, j); // Rtemp contains an index of a virtual method in the table
__ b(common_code);
}
InlinedAddress vtable_address((address)&vtbl_list[i]);
__ bind(common_code);
const Register tmp2 = AARCH64_ONLY(Rtemp2) NOT_AARCH64(R4);
assert_different_registers(Rtemp, tmp2);
#ifndef AARCH64
__ push(tmp2);
#endif // !AARCH64
// Do not use ldr_global since the code must be portable across all ARM architectures
__ ldr_literal(tmp2, vtable_address);
__ ldr(tmp2, Address(tmp2)); // get correct vtable address
__ ldr(Rtemp, Address::indexed_ptr(tmp2, Rtemp)); // get real method pointer
__ str(tmp2, Address(R0)); // update vtable. R0 = "this"
#ifndef AARCH64
__ pop(tmp2);
#endif // !AARCH64
__ jump(Rtemp);
__ bind_literal(vtable_address);
}
__ flush();
*mc_top = (char*) __ pc();
}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
                                  Register obj, Register temp1, Register temp2, SystemDictionary::WKID klass_id,
                                  const char* error_message) {
   InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
-  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+  Klass* klass = SystemDictionary::well_known_klass(klass_id);
   Label L_ok, L_bad;
   BLOCK_COMMENT("verify_klass {");
   __ verify_oop(obj);
@@ -157,8 +157,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
   __ load_heap_oop(tmp, Address(tmp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
   __ verify_oop(tmp);
-  // the following assumes that a Method* is normally compressed in the vmtarget field:
-  __ ldr(Rmethod, Address(tmp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
+  __ load_heap_oop(Rmethod, Address(tmp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
+  __ verify_oop(Rmethod);
+  __ ldr(Rmethod, Address(Rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));

   if (VerifyMethodHandles && !for_compiler_entry) {
     // make sure recv is already on stack
@@ -320,7 +321,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
   Address member_clazz(   member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
   Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
-  Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
+  Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
+  Address vmtarget_method(Rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));

   Register temp1_recv_klass = temp1;
   if (iid != vmIntrinsics::_linkToStatic) {
@@ -375,14 +377,17 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     if (VerifyMethodHandles) {
       verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
     }
-    __ ldr(Rmethod, member_vmtarget);
+    __ load_heap_oop(Rmethod, member_vmtarget);
+    __ ldr(Rmethod, vmtarget_method);
     break;

   case vmIntrinsics::_linkToStatic:
     if (VerifyMethodHandles) {
       verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
     }
-    __ ldr(Rmethod, member_vmtarget);
+    __ load_heap_oop(Rmethod, member_vmtarget);
+    __ ldr(Rmethod, vmtarget_method);
     break;

   case vmIntrinsics::_linkToVirtual:
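The two-step load mirrors the reworked object graph: MemberName.method now refers to a ResolvedMethodName object whose vmtarget field carries the Method*. A sketch with hypothetical accessor names (the real offsets come from the java_lang_invoke_* classes):

    oop     rmn = member_name->obj_field(method_offset);         // load_heap_oop: an ordinary oop
    Method* m   = (Method*)rmn->address_field(vmtarget_offset);  // plain ldr: a metadata pointer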
@@ -270,12 +270,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
   return entry;
 }

-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  // Not used.
-  STOP("generate_continuation_for");
-  return NULL;
-}

 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
@@ -310,6 +304,9 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
   __ convert_retval_to_tos(state);
 #endif // !AARCH64

+  __ check_and_handle_popframe();
+  __ check_and_handle_earlyret();

   __ dispatch_next(state, step);

   return entry;
@@ -1401,7 +1398,13 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
 #ifdef AARCH64
   // setup RmaxStack
   __ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
-  __ add(RmaxStack, RmaxStack, MAX2(1, Method::extra_stack_entries())); // reserve slots for exception handler and JSR292 appendix argument
+  // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
+  // none of which are at the same time, so we just need to make sure there is enough room
+  // for the biggest user:
+  //   - reserved slot for exception handler
+  //   - reserved slots for JSR292. Method::extra_stack_entries() is the size.
+  //   - 3 reserved slots so get_method_counters() can save some registers before call_VM().
+  __ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries()));
 #endif // AARCH64

   // see if we've got enough room on the stack for locals plus overhead.
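A toy evaluation of the changed reservation, using an assumed value for Method::extra_stack_entries():

    const int extra_stack_entries = 2;               // assumption for illustration only
    int reserved_old = MAX2(1, extra_stack_entries); // == 2: cannot hold 3 saved registers
    int reserved_new = MAX2(3, extra_stack_entries); // == 3: covers the biggest user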
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2286,13 +2286,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       }
       __ bind(no_mdo);
       // Increment backedge counter in MethodCounters*
-      __ get_method_counters(Rmethod, Rcounters, dispatch);
+      // Note: Rbumped_taken_count is a callee-saved register for ARM32, but caller-saved for ARM64
+      __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
+                             Rdisp, R3_bytecode,
+                             AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
     } else {
-      // increment counter
-      __ get_method_counters(Rmethod, Rcounters, dispatch);
+      // Increment backedge counter in MethodCounters*
+      __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
+                             Rdisp, R3_bytecode,
+                             AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));         // load backedge counter
       __ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter
       __ str_32(Rtemp, Address(Rcounters, be_offset));          // store counter
@@ -4367,10 +4372,9 @@ void TemplateTable::_new() {
 #endif // AARCH64

   // get InstanceKlass
-  __ add(Rklass, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
-  __ ldr(Rklass, Address(Rklass, sizeof(ConstantPool)));
   __ cmp(Rtemp, JVM_CONSTANT_Class);
   __ b(slow_case, ne);
+  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);

   // make sure klass is initialized & doesn't have finalizer
   // make sure klass is fully initialized
@@ -4642,8 +4646,7 @@ void TemplateTable::checkcast() {
   // Get superklass in Rsuper and subklass in Rsub
   __ bind(quicked);
-  __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
-  __ ldr(Rsuper, Address(Rtemp, sizeof(ConstantPool)));
+  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);

   __ bind(resolved);
   __ load_klass(Rsub, Robj);
@@ -4716,8 +4719,7 @@ void TemplateTable::instanceof() {
   // Get superklass in Rsuper and subklass in Rsub
   __ bind(quicked);
-  __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
-  __ ldr(Rsuper, Address(Rtemp, sizeof(ConstantPool)));
+  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);

   __ bind(resolved);
   __ load_klass(Rsub, Robj);
@@ -256,7 +256,9 @@ void VM_Version::initialize() {
     }
   }

-  AllocatePrefetchDistance = 128;
+  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+    FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128);
+  }

 #ifdef COMPILER2
   FLAG_SET_DEFAULT(UseFPUForSpilling, true);
@@ -201,7 +201,9 @@ void VM_Version::initialize() {
     }
   }

-  AllocatePrefetchDistance = 128;
+  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+    FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128);
+  }

 #ifdef COMPILER2
   FLAG_SET_DEFAULT(UseFPUForSpilling, true);
@@ -376,10 +376,12 @@ class Assembler : public AbstractAssembler {
     STWX_OPCODE   = (31u << OPCODE_SHIFT | 151u << 1),
     STWU_OPCODE   = (37u << OPCODE_SHIFT),
     STWUX_OPCODE  = (31u << OPCODE_SHIFT | 183u << 1),
+    STWBRX_OPCODE = (31u << OPCODE_SHIFT | 662u << 1),
     STH_OPCODE    = (44u << OPCODE_SHIFT),
     STHX_OPCODE   = (31u << OPCODE_SHIFT | 407u << 1),
     STHU_OPCODE   = (45u << OPCODE_SHIFT),
+    STHBRX_OPCODE = (31u << OPCODE_SHIFT | 918u << 1),
     STB_OPCODE    = (38u << OPCODE_SHIFT),
     STBX_OPCODE   = (31u << OPCODE_SHIFT | 215u << 1),
@@ -401,11 +403,13 @@ class Assembler : public AbstractAssembler {
     LD_OPCODE     = (58u << OPCODE_SHIFT | 0u << XO_30_31_SHIFT), // DS-FORM
     LDU_OPCODE    = (58u << OPCODE_SHIFT | 1u << XO_30_31_SHIFT), // DS-FORM
     LDX_OPCODE    = (31u << OPCODE_SHIFT | 21u << XO_21_30_SHIFT), // X-FORM
+    LDBRX_OPCODE  = (31u << OPCODE_SHIFT | 532u << 1), // X-FORM
     STD_OPCODE    = (62u << OPCODE_SHIFT | 0u << XO_30_31_SHIFT), // DS-FORM
     STDU_OPCODE   = (62u << OPCODE_SHIFT | 1u << XO_30_31_SHIFT), // DS-FORM
     STDUX_OPCODE  = (31u << OPCODE_SHIFT | 181u << 1), // X-FORM
     STDX_OPCODE   = (31u << OPCODE_SHIFT | 149u << XO_21_30_SHIFT), // X-FORM
+    STDBRX_OPCODE = (31u << OPCODE_SHIFT | 660u << 1), // X-FORM
     RLDICR_OPCODE = (30u << OPCODE_SHIFT | 1u << XO_27_29_SHIFT), // MD-FORM
     RLDICL_OPCODE = (30u << OPCODE_SHIFT | 0u << XO_27_29_SHIFT), // MD-FORM
@@ -1552,6 +1556,9 @@ class Assembler : public AbstractAssembler {
   inline void ld(  Register d, int si16, Register s1);
   inline void ldu( Register d, int si16, Register s1);
+  // 8 bytes reversed
+  inline void ldbrx( Register d, Register s1, Register s2);
   // For convenience. Load pointer into d from b+s1.
   inline void ld_ptr(Register d, int b, Register s1);
   DEBUG_ONLY(inline void ld_ptr(Register d, ByteSize b, Register s1);)
@@ -1560,10 +1567,12 @@ class Assembler : public AbstractAssembler {
   inline void stwx( Register d, Register s1, Register s2);
   inline void stw(  Register d, int si16, Register s1);
   inline void stwu( Register d, int si16, Register s1);
+  inline void stwbrx( Register d, Register s1, Register s2);
   inline void sthx( Register d, Register s1, Register s2);
   inline void sth(  Register d, int si16, Register s1);
   inline void sthu( Register d, int si16, Register s1);
+  inline void sthbrx( Register d, Register s1, Register s2);
   inline void stbx( Register d, Register s1, Register s2);
   inline void stb(  Register d, int si16, Register s1);
@@ -1573,6 +1582,7 @@ class Assembler : public AbstractAssembler {
   inline void std(  Register d, int si16, Register s1);
   inline void stdu( Register d, int si16, Register s1);
   inline void stdux(Register s, Register a, Register b);
+  inline void stdbrx( Register d, Register s1, Register s2);
   inline void st_ptr(Register d, int si16, Register s1);
   DEBUG_ONLY(inline void st_ptr(Register d, ByteSize b, Register s1);)
@@ -2182,14 +2192,18 @@ class Assembler : public AbstractAssembler {
   inline void lbz( Register d, int si16);
   inline void ldx( Register d, Register s2);
   inline void ld(  Register d, int si16);
+  inline void ldbrx(Register d, Register s2);
   inline void stwx( Register d, Register s2);
   inline void stw(  Register d, int si16);
+  inline void stwbrx( Register d, Register s2);
   inline void sthx( Register d, Register s2);
   inline void sth(  Register d, int si16);
+  inline void sthbrx( Register d, Register s2);
   inline void stbx( Register d, Register s2);
   inline void stb(  Register d, int si16);
   inline void stdx( Register d, Register s2);
   inline void std(  Register d, int si16);
+  inline void stdbrx( Register d, Register s2);

   // PPC 2, section 3.2.1 Instruction Cache Instructions
   inline void icbi( Register s2);
@@ -327,6 +327,7 @@ inline void Assembler::lbzu( Register d, int si16, Register s1) { assert(d !=
 inline void Assembler::ld(  Register d, int si16,   Register s1) { emit_int32(LD_OPCODE  | rt(d) | ds(si16)   | ra0mem(s1));}
 inline void Assembler::ldx( Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::ldu( Register d, int si16,   Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
+inline void Assembler::ldbrx( Register d, Register s1, Register s2) { emit_int32(LDBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::ld_ptr(Register d, int b, Register s1) { ld(d, b, s1); }
 DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) { ld(d, in_bytes(b), s1); })
@@ -335,10 +336,12 @@ DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) {
 inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::stw(  Register d, int si16,   Register s1) { emit_int32(STW_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
 inline void Assembler::stwu( Register d, int si16,   Register s1) { emit_int32(STWU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}
+inline void Assembler::stwbrx( Register d, Register s1, Register s2) { emit_int32(STWBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::sthx( Register d, Register s1, Register s2) { emit_int32(STHX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::sth(  Register d, int si16,   Register s1) { emit_int32(STH_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
 inline void Assembler::sthu( Register d, int si16,   Register s1) { emit_int32(STHU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}
+inline void Assembler::sthbrx( Register d, Register s1, Register s2) { emit_int32(STHBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::stbx( Register d, Register s1, Register s2) { emit_int32(STBX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::stb(  Register d, int si16,   Register s1) { emit_int32(STB_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
@@ -348,6 +351,7 @@ inline void Assembler::std( Register d, int si16, Register s1) { emit_int32(
 inline void Assembler::stdx( Register d, Register s1, Register s2) { emit_int32(STDX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::stdu( Register d, int si16,   Register s1) { emit_int32(STDU_OPCODE | rs(d) | ds(si16)   | rta0mem(s1));}
 inline void Assembler::stdux(Register s, Register a,  Register b)  { emit_int32(STDUX_OPCODE| rs(s) | rta0mem(a) | rb(b));}
+inline void Assembler::stdbrx( Register d, Register s1, Register s2) { emit_int32(STDBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::st_ptr(Register d, int b, Register s1) { std(d, b, s1); }
 DEBUG_ONLY(inline void Assembler::st_ptr(Register d, ByteSize b, Register s1) { std(d, in_bytes(b), s1); })
@@ -944,14 +948,18 @@ inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE
 inline void Assembler::lbz(  Register d, int si16   ) { emit_int32( LBZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::ld(   Register d, int si16   ) { emit_int32( LD_OPCODE   | rt(d) | ds(si16));}
 inline void Assembler::ldx(  Register d, Register s2) { emit_int32( LDX_OPCODE  | rt(d) | rb(s2));}
+inline void Assembler::ldbrx(Register d, Register s2) { emit_int32( LDBRX_OPCODE| rt(d) | rb(s2));}
 inline void Assembler::stwx( Register d, Register s2) { emit_int32( STWX_OPCODE | rs(d) | rb(s2));}
 inline void Assembler::stw(  Register d, int si16   ) { emit_int32( STW_OPCODE  | rs(d) | d1(si16));}
+inline void Assembler::stwbrx(Register d, Register s2){ emit_int32(STWBRX_OPCODE| rs(d) | rb(s2));}
 inline void Assembler::sthx( Register d, Register s2) { emit_int32( STHX_OPCODE | rs(d) | rb(s2));}
 inline void Assembler::sth(  Register d, int si16   ) { emit_int32( STH_OPCODE  | rs(d) | d1(si16));}
+inline void Assembler::sthbrx(Register d, Register s2){ emit_int32(STHBRX_OPCODE| rs(d) | rb(s2));}
 inline void Assembler::stbx( Register d, Register s2) { emit_int32( STBX_OPCODE | rs(d) | rb(s2));}
 inline void Assembler::stb(  Register d, int si16   ) { emit_int32( STB_OPCODE  | rs(d) | d1(si16));}
 inline void Assembler::std(  Register d, int si16   ) { emit_int32( STD_OPCODE  | rs(d) | ds(si16));}
 inline void Assembler::stdx( Register d, Register s2) { emit_int32( STDX_OPCODE | rs(d) | rb(s2));}
+inline void Assembler::stdbrx(Register d, Register s2){ emit_int32(STDBRX_OPCODE| rs(d) | rb(s2));}

 // ra0 version
 inline void Assembler::icbi( Register s2) { emit_int32( ICBI_OPCODE | rb(s2) ); }
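Semantically, each *brx form accesses memory with the opposite byte order. A C++ model of what ldbrx computes, assuming GCC/Clang builtins:

    #include <cstdint>
    #include <cstring>
    uint64_t ldbrx_model(const void* ea) {   // ea = (RA|0) + RB
      uint64_t x;
      std::memcpy(&x, ea, sizeof(x));        // doubleword at the effective address
      return __builtin_bswap64(x);           // delivered byte-reversed
    }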
@@ -37,10 +37,6 @@ class Bytes: AllStatic {
 #if defined(VM_LITTLE_ENDIAN)

-  // Returns true, if the byte ordering used by Java is different from the native byte ordering
-  // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
-  static inline bool is_Java_byte_ordering_different() { return true; }

   // Forward declarations of the compiler-dependent implementation
   static inline u2 swap_u2(u2 x);
   static inline u4 swap_u4(u4 x);
@@ -155,10 +151,6 @@ class Bytes: AllStatic {
 #else // !defined(VM_LITTLE_ENDIAN)

-  // Returns true, if the byte ordering used by Java is different from the nativ byte ordering
-  // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
-  static inline bool is_Java_byte_ordering_different() { return false; }

   // Thus, a swap between native and Java ordering is always a no-op:
   static inline u2 swap_u2(u2 x) { return x; }
   static inline u4 swap_u4(u4 x) { return x; }
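The deleted predicate encoded a fixed fact: Java data is big-endian, so only little-endian hosts ever need a swap. A sketch of the contract the remaining helpers keep:

    // swap_u2 converts between Java (big-endian) and native order:
    // a byte swap on little-endian hosts, a no-op on big-endian ones.
    u2 swap_u2_model(u2 x) {
    #if defined(VM_LITTLE_ENDIAN)
      return (u2)((x << 8) | (x >> 8));
    #else
      return x;
    #endif
    }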
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3177,9 +3177,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
   assert_different_registers(val, crc, res);
   __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);

-  __ nand(crc, crc, crc); // ~crc
-  __ update_byte_crc32(crc, val, res);
-  __ nand(res, crc, crc); // ~crc
+  __ kernel_crc32_singleByteReg(crc, val, res, true);
+  __ mr(res, crc);
 }

 #undef __
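The replaced open-coded sequence shows the convention that kernel_crc32_singleByteReg(..., true) now handles internally: java.util.zip CRCs live in a bit-inverted register. A reference sketch of one table-driven step (standard reflected CRC-32, not HotSpot code):

    #include <cstdint>
    uint32_t crc32_update_byte(uint32_t crc, uint8_t b, const uint32_t table[256]) {
      crc = ~crc;                                  // enter inverted domain (was: nand)
      crc = (crc >> 8) ^ table[(crc ^ b) & 0xff];  // one byte step (was: update_byte_crc32)
      return ~crc;                                 // leave inverted domain (was: nand)
    }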
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,18 +63,6 @@ void LIRItem::load_nonconstant() {
 }

-inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
-  LIR_Opr r = li.value()->operand();
-  if (r->is_register()) {
-    LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
-    ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
-  } else {
-    // Constants or memory get loaded with sign extend on this platform.
-    ll->move(li.result(), dst);
-  }
-}

 //--------------------------------------------------------------
 //               LIRGenerator
 //--------------------------------------------------------------
@@ -1426,10 +1414,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
             arg2 = cc->at(1),
             arg3 = cc->at(2);

-      // CCallingConventionRequiresIntsAsLongs
       crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
       __ leal(LIR_OprFact::address(a), arg2);
-      load_int_as_long(gen()->lir(), len, arg3);
+      len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.

       __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
       __ move(result_reg, result);
@@ -1441,6 +1428,76 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
   }
 }
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
assert(UseCRC32CIntrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);
switch (x->id()) {
case vmIntrinsics::_updateBytesCRC32C:
case vmIntrinsics::_updateDirectByteBufferCRC32C: {
bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
LIRItem crc(x->argument_at(0), this);
LIRItem buf(x->argument_at(1), this);
LIRItem off(x->argument_at(2), this);
LIRItem end(x->argument_at(3), this);
buf.load_item();
off.load_nonconstant();
end.load_nonconstant();
// len = end - off
LIR_Opr len = end.result();
LIR_Opr tmpA = new_register(T_INT);
LIR_Opr tmpB = new_register(T_INT);
__ move(end.result(), tmpA);
__ move(off.result(), tmpB);
__ sub(tmpA, tmpB, tmpA);
len = tmpA;
LIR_Opr index = off.result();
int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
if (off.result()->is_constant()) {
index = LIR_OprFact::illegalOpr;
offset += off.result()->as_jint();
}
LIR_Opr base_op = buf.result();
LIR_Address* a = NULL;
if (index->is_valid()) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index, tmp);
index = tmp;
__ add(index, LIR_OprFact::intptrConst(offset), index);
a = new LIR_Address(base_op, index, T_BYTE);
} else {
a = new LIR_Address(base_op, offset, T_BYTE);
}
BasicTypeList signature(3);
signature.append(T_INT);
signature.append(T_ADDRESS);
signature.append(T_INT);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
const LIR_Opr result_reg = result_register_for(x->type());
LIR_Opr arg1 = cc->at(0),
arg2 = cc->at(1),
arg3 = cc->at(2);
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
__ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
break;
}
default: {
ShouldNotReachHere();
}
}
}
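Note the intrinsic's fourth argument is an end index, hence the len = end - off arithmetic above. CRC32C itself differs from CRC32 only in the polynomial; a bitwise reference for one byte in the inverted domain (sketch; 0x82F63B78 is the reflected Castagnoli polynomial):

    #include <cstdint>
    uint32_t crc32c_byte(uint32_t crc, uint8_t b) {  // caller applies the ~crc convention
      crc ^= b;
      for (int i = 0; i < 8; i++)
        crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));
      return crc;
    }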
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
@@ -1467,7 +1524,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
 void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
   fatal("vectorizedMismatch intrinsic is not implemented on this platform");
 }
-
-void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
-  Unimplemented();
-}
@@ -164,7 +164,7 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
   product(bool, ZapMemory, false, "Write 0x0101... to empty memory." \
           " Use this to ease debugging.") \
   \
-  /* Use Restricted Transactional Memory for lock eliding */ \
+  /* Use Restricted Transactional Memory for lock elision */ \
   product(bool, UseRTMLocking, false, \
           "Enable RTM lock eliding for inflated locks in compiled code") \
   \
@@ -174,24 +174,30 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
   product(bool, UseRTMDeopt, false, \
           "Perform deopt and recompilation based on RTM abort ratio") \
   \
-  product(uintx, RTMRetryCount, 5, \
+  product(int, RTMRetryCount, 5, \
           "Number of RTM retries on lock abort or busy") \
+          range(0, max_jint) \
   \
-  experimental(intx, RTMSpinLoopCount, 100, \
+  experimental(int, RTMSpinLoopCount, 100, \
           "Spin count for lock to become free before RTM retry") \
+          range(0, 32767) /* immediate operand limit on ppc */ \
   \
-  experimental(intx, RTMAbortThreshold, 1000, \
+  experimental(int, RTMAbortThreshold, 1000, \
          "Calculate abort ratio after this number of aborts") \
+          range(0, max_jint) \
   \
-  experimental(intx, RTMLockingThreshold, 10000, \
+  experimental(int, RTMLockingThreshold, 10000, \
          "Lock count at which to do RTM lock eliding without " \
          "abort ratio calculation") \
+          range(0, max_jint) \
   \
-  experimental(intx, RTMAbortRatio, 50, \
+  experimental(int, RTMAbortRatio, 50, \
          "Lock abort ratio at which to stop use RTM lock eliding") \
+          range(0, 100) /* natural range, checked in vm_version_ppc.cpp */ \
   \
-  experimental(intx, RTMTotalCountIncrRate, 64, \
+  experimental(int, RTMTotalCountIncrRate, 64, \
          "Increment total RTM attempted lock count once every n times") \
+          range(1, 32767) /* immediate operand limit on ppc */ \
   \
   experimental(intx, RTMLockingCalculationDelay, 0, \
          "Number of milliseconds to wait before start calculating aborts " \
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,8 +45,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
 #define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
 #define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method

-  virtual void check_and_handle_popframe(Register java_thread);
-  virtual void check_and_handle_earlyret(Register java_thread);
+  virtual void check_and_handle_popframe(Register scratch_reg);
+  virtual void check_and_handle_earlyret(Register scratch_reg);

   // Base routine for all dispatches.
   void dispatch_base(TosState state, address* table);
@@ -79,6 +79,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
   // Load object from cpool->resolved_references(index).
   void load_resolved_reference_at_index(Register result, Register index, Label *is_null = NULL);

+  // load cpool->resolved_klass_at(index)
+  void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);

   void load_receiver(Register Rparam_count, Register Rrecv_dst);

   // helpers for expression stack
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -454,7 +454,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
   Register tmp = index;  // reuse
   sldi(tmp, index, LogBytesPerHeapOop);
   // Load pointer for resolved_references[] objArray.
-  ld(result, ConstantPool::resolved_references_offset_in_bytes(), result);
+  ld(result, ConstantPool::cache_offset_in_bytes(), result);
+  ld(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
   // JNIHandles::resolve(result)
   ld(result, 0, result);
 #ifdef ASSERT
@@ -471,6 +472,25 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
   load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result, is_null);
 }
// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass) {
// int value = *(Rcpool->int_at_addr(which));
// int resolved_klass_index = extract_low_short_from_int(value);
add(Roffset, Rcpool, Roffset);
#if defined(VM_LITTLE_ENDIAN)
lhz(Roffset, sizeof(ConstantPool), Roffset); // Roffset = resolved_klass_index
#else
lhz(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
#endif
ld(Rklass, ConstantPool::resolved_klasses_offset_in_bytes(), Rcpool); // Rklass = Rcpool->_resolved_klasses
sldi(Roffset, Roffset, LogBytesPerWord);
addi(Roffset, Roffset, Array<Klass*>::base_offset_in_bytes());
isync(); // Order load of instance Klass wrt. tags.
ldx(Rklass, Rklass, Roffset);
}
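The #if above exists because the 16-bit resolved_klass_index occupies the low half of a 32-bit constant-pool value, and where those low bytes sit in memory depends on host byte order. The extraction, spelled out as a sketch:

    #include <cstdint>
    uint16_t extract_low_short_model(const int* value_addr) {
    #if defined(VM_LITTLE_ENDIAN)
      return *(const uint16_t*)value_addr;                     // low half comes first
    #else
      return *(const uint16_t*)((const char*)value_addr + 2);  // low half comes last
    #endif
    }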
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2498,14 +2498,20 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
   //   All transactions = total_count * RTMTotalCountIncrRate
   //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
   ld(R0, RTMLockingCounters::abort_count_offset(), rtm_counters_Reg);
-  cmpdi(CCR0, R0, RTMAbortThreshold);
-  blt(CCR0, L_check_always_rtm2);
+  if (is_simm(RTMAbortThreshold, 16)) {   // cmpdi can handle 16bit immediate only.
+    cmpdi(CCR0, R0, RTMAbortThreshold);
+    blt(CCR0, L_check_always_rtm2);  // reload of rtm_counters_Reg not necessary
+  } else {
+    load_const_optimized(rtm_counters_Reg, RTMAbortThreshold);
+    cmpd(CCR0, R0, rtm_counters_Reg);
+    blt(CCR0, L_check_always_rtm1);  // reload of rtm_counters_Reg required
+  }
   mulli(R0, R0, 100);

   const Register tmpReg = rtm_counters_Reg;
   ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
-  mulli(tmpReg, tmpReg, RTMTotalCountIncrRate);
-  mulli(tmpReg, tmpReg, RTMAbortRatio);
+  mulli(tmpReg, tmpReg, RTMTotalCountIncrRate); // allowable range: int16
+  mulli(tmpReg, tmpReg, RTMAbortRatio);         // allowable range: int16
   cmpd(CCR0, R0, tmpReg);
   blt(CCR0, L_check_always_rtm1); // jump to reload
   if (method_data != NULL) {
@@ -2521,7 +2527,13 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
     load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload
   bind(L_check_always_rtm2);
   ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
-  cmpdi(CCR0, tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
+  int64_t thresholdValue = RTMLockingThreshold / RTMTotalCountIncrRate;
+  if (is_simm(thresholdValue, 16)) {   // cmpdi can handle 16bit immediate only.
+    cmpdi(CCR0, tmpReg, thresholdValue);
+  } else {
+    load_const_optimized(R0, thresholdValue);
+    cmpd(CCR0, tmpReg, R0);
+  }
   blt(CCR0, L_done);
   if (method_data != NULL) {
     // Set rtm_state to "always rtm" in MDO.
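Both guards exist because cmpdi encodes a 16-bit signed immediate; larger values must be materialized into a register and compared with cmpd. The range check, spelled out (HotSpot's is_simm is the real predicate):

    inline bool fits_simm16(int64_t x) {
      return -32768 <= x && x <= 32767;   // [-2^15, 2^15 - 1]
    }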
@@ -2620,7 +2632,7 @@ void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
     Label L_noincrement;
     if (RTMTotalCountIncrRate > 1) {
-      branch_on_random_using_tb(tmp, (int)RTMTotalCountIncrRate, L_noincrement);
+      branch_on_random_using_tb(tmp, RTMTotalCountIncrRate, L_noincrement);
     }
     assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
     load_const_optimized(tmp, (address)stack_rtm_counters->total_count_addr(), R0);
@@ -2687,7 +2699,7 @@ void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
     Label L_noincrement;
     if (RTMTotalCountIncrRate > 1) {
-      branch_on_random_using_tb(R0, (int)RTMTotalCountIncrRate, L_noincrement);
+      branch_on_random_using_tb(R0, RTMTotalCountIncrRate, L_noincrement);
     }
     assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
     load_const(R0, (address)rtm_counters->total_count_addr(), tmpReg);
@@ -4120,7 +4132,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
  * @param table register pointing to CRC table
  */
 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
-                                           Register data, bool loopAlignment, bool invertCRC) {
+                                           Register data, bool loopAlignment) {
   assert_different_registers(crc, buf, len, table, data);

   Label L_mainLoop, L_done;
@@ -4131,10 +4143,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
   clrldi_(len, len, 32);  // Enforce 32 bit. Anything to do?
   beq(CCR0, L_done);

-  if (invertCRC) {
-    nand(crc, crc, crc); // ~c
-  }

   mtctr(len);
   align(mainLoop_alignment);
   BIND(L_mainLoop);
@@ -4143,10 +4151,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
     update_byte_crc32(crc, data, table);
     bdnz(L_mainLoop);  // Iterate.

-  if (invertCRC) {
-    nand(crc, crc, crc); // ~c
-  }

   bind(L_done);
 }
@@ -4203,7 +4207,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
  */
 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
                                         Register t0, Register t1, Register t2, Register t3,
-                                        Register tc0, Register tc1, Register tc2, Register tc3) {
+                                        Register tc0, Register tc1, Register tc2, Register tc3,
+                                        bool invertCRC) {
   assert_different_registers(crc, buf, len, table);

   Label L_mainLoop, L_tail;
@@ -4217,14 +4222,16 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
   const int complexThreshold = 2*mainLoop_stepping;

   // Don't test for len <= 0 here. This pathological case should not occur anyway.
-  // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
-  // The situation itself is detected and handled correctly by the conditional branches
-  // following aghi(len, -stepping) and aghi(len, +stepping).
+  // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
+  // for all well-behaved cases. The situation itself is detected and handled correctly
+  // within update_byteLoop_crc32.
   assert(tailLoop_stepping == 1, "check tailLoop_stepping!");

   BLOCK_COMMENT("kernel_crc32_2word {");

-  nand(crc, crc, crc); // ~c
+  if (invertCRC) {
+    nand(crc, crc, crc); // 1s complement of crc
+  }

   // Check for short (<mainLoop_stepping) buffer.
   cmpdi(CCR0, len, complexThreshold);
@@ -4245,7 +4252,7 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
       blt(CCR0, L_tail);  // For less than one mainloop_stepping left, do only tail processing
       mr(len, tmp);       // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
     }
-    update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
+    update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
   }

   srdi(tmp2, len, log_stepping);  // #iterations for mainLoop
@ -4281,9 +4288,11 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
// Process last few (<complexThreshold) bytes of buffer. // Process last few (<complexThreshold) bytes of buffer.
BIND(L_tail); BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false, false); update_byteLoop_crc32(crc, buf, len, table, data, false);
nand(crc, crc, crc); // ~c if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_2word"); BLOCK_COMMENT("} kernel_crc32_2word");
} }
@ -4297,7 +4306,8 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
*/ */
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table, void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3, Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3) { Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table); assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail; Label L_mainLoop, L_tail;
@ -4311,14 +4321,16 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
const int complexThreshold = 2*mainLoop_stepping; const int complexThreshold = 2*mainLoop_stepping;
// Don't test for len <= 0 here. This pathological case should not occur anyway. // Don't test for len <= 0 here. This pathological case should not occur anyway.
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles. // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
// The situation itself is detected and handled correctly by the conditional branches // for all well-behaved cases. The situation itself is detected and handled correctly
// following aghi(len, -stepping) and aghi(len, +stepping). // within update_byteLoop_crc32.
assert(tailLoop_stepping == 1, "check tailLoop_stepping!"); assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
BLOCK_COMMENT("kernel_crc32_1word {"); BLOCK_COMMENT("kernel_crc32_1word {");
nand(crc, crc, crc); // ~c if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
// Check for short (<mainLoop_stepping) buffer. // Check for short (<mainLoop_stepping) buffer.
cmpdi(CCR0, len, complexThreshold); cmpdi(CCR0, len, complexThreshold);
@ -4339,7 +4351,7 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed). mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
} }
update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false); update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
} }
srdi(tmp2, len, log_stepping); // #iterations for mainLoop srdi(tmp2, len, log_stepping); // #iterations for mainLoop
@ -4374,9 +4386,11 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
// Process last few (<complexThreshold) bytes of buffer. // Process last few (<complexThreshold) bytes of buffer.
BIND(L_tail); BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false, false); update_byteLoop_crc32(crc, buf, len, table, data, false);
nand(crc, crc, crc); // ~c if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_1word"); BLOCK_COMMENT("} kernel_crc32_1word");
} }
@ -4389,16 +4403,24 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
* Uses R7_ARG5, R8_ARG6 as work registers. * Uses R7_ARG5, R8_ARG6 as work registers.
*/ */
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table, void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) { Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table); assert_different_registers(crc, buf, len, table);
Register data = t0; // Holds the current byte to be folded into crc. Register data = t0; // Holds the current byte to be folded into crc.
BLOCK_COMMENT("kernel_crc32_1byte {"); BLOCK_COMMENT("kernel_crc32_1byte {");
// Process all bytes in a single-byte loop. if (invertCRC) {
update_byteLoop_crc32(crc, buf, len, table, data, true, true); nand(crc, crc, crc); // 1s complement of crc
}
// Process all bytes in a single-byte loop.
update_byteLoop_crc32(crc, buf, len, table, data, true);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_1byte"); BLOCK_COMMENT("} kernel_crc32_1byte");
} }
@ -4416,7 +4438,8 @@ void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len
*/ */
void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table, void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants, Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4) { Register t0, Register t1, Register t2, Register t3, Register t4,
bool invertCRC) {
assert_different_registers(crc, buf, len, table); assert_different_registers(crc, buf, len, table);
Label L_alignedHead, L_tail, L_alignTail, L_start, L_end; Label L_alignedHead, L_tail, L_alignTail, L_start, L_end;
@ -4434,13 +4457,15 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
Register tc0 = t4; Register tc0 = t4;
Register tc1 = constants; Register tc1 = constants;
Register tc2 = barretConstants; Register tc2 = barretConstants;
kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table); kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
b(L_end); b(L_end);
BIND(L_start); BIND(L_start);
// 2. ~c // 2. ~c
nand(crc, crc, crc); if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
// 3. calculate from 0 to first 128bit-aligned address // 3. calculate from 0 to first 128bit-aligned address
clrldi_(prealign, buf, 57); clrldi_(prealign, buf, 57);
@ -4449,7 +4474,7 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
subfic(prealign, prealign, 128); subfic(prealign, prealign, 128);
subf(len, prealign, len); subf(len, prealign, len);
update_byteLoop_crc32(crc, buf, prealign, table, t2, false, false); update_byteLoop_crc32(crc, buf, prealign, table, t2, false);
// 4. calculate from first 128bit-aligned address to last 128bit-aligned address // 4. calculate from first 128bit-aligned address to last 128bit-aligned address
BIND(L_alignedHead); BIND(L_alignedHead);
@ -4464,12 +4489,14 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
cmpdi(CCR0, postalign, 0); cmpdi(CCR0, postalign, 0);
beq(CCR0, L_tail); beq(CCR0, L_tail);
update_byteLoop_crc32(crc, buf, postalign, table, t2, false, false); update_byteLoop_crc32(crc, buf, postalign, table, t2, false);
BIND(L_tail); BIND(L_tail);
// 6. ~c // 6. ~c
nand(crc, crc, crc); if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BIND(L_end); BIND(L_end);
@ -4961,16 +4988,35 @@ void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Regi
offsetInt -= 8; ld(R31, offsetInt, R1_SP); offsetInt -= 8; ld(R31, offsetInt, R1_SP);
} }
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) { void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp, bool invertCRC) {
assert_different_registers(crc, buf, /* len, not used!! */ table, tmp); assert_different_registers(crc, buf, /* len, not used!! */ table, tmp);
BLOCK_COMMENT("kernel_crc32_singleByte:"); BLOCK_COMMENT("kernel_crc32_singleByte:");
nand(crc, crc, crc); // ~c if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
lbz(tmp, 0, buf); // Byte from buffer, zero-extended. lbz(tmp, 0, buf); // Byte from buffer, zero-extended.
update_byte_crc32(crc, tmp, table); update_byte_crc32(crc, tmp, table);
nand(crc, crc, crc); // ~c if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
}
void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table, bool invertCRC) {
assert_different_registers(crc, val, table);
BLOCK_COMMENT("kernel_crc32_singleByteReg:");
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
update_byte_crc32(crc, val, table);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
} }
// dest_lo += src1 + src2 // dest_lo += src1 + src2
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -819,33 +819,47 @@ class MacroAssembler: public Assembler {
                            Register tmp6, Register tmp7, Register tmp8, Register tmp9, Register tmp10,
                            Register tmp11, Register tmp12, Register tmp13);

-  // CRC32 Intrinsics.
+  // Emitters for CRC32 calculation.
+  // A note on invertCRC:
+  //   Unfortunately, the internal representation of the crc differs between CRC32 and CRC32C.
+  //   CRC32 holds its current crc value in the externally visible representation.
+  //   CRC32C holds its current crc value in internal format, ready for updating.
+  //   Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
+  //   In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
+  //   The bool invertCRC parameter indicates whether bit-flipping is required before updates.
+  //   (A short, plain C++ model of this convention follows these declarations.)
   void load_reverse_32(Register dst, Register src);
   int  crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3);
   void fold_byte_crc32(Register crc, Register val, Register table, Register tmp);
   void fold_8bit_crc32(Register crc, Register table, Register tmp);
   void update_byte_crc32(Register crc, Register val, Register table);
   void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
-                             Register data, bool loopAlignment, bool invertCRC);
+                             Register data, bool loopAlignment);
   void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
                           Register t0,  Register t1,  Register t2,  Register t3,
                           Register tc0, Register tc1, Register tc2, Register tc3);
   void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
                           Register t0,  Register t1,  Register t2,  Register t3,
-                          Register tc0, Register tc1, Register tc2, Register tc3);
+                          Register tc0, Register tc1, Register tc2, Register tc3,
+                          bool invertCRC);
   void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                           Register t0,  Register t1,  Register t2,  Register t3,
-                          Register tc0, Register tc1, Register tc2, Register tc3);
+                          Register tc0, Register tc1, Register tc2, Register tc3,
+                          bool invertCRC);
   void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
-                          Register t0,  Register t1,  Register t2,  Register t3);
+                          Register t0,  Register t1,  Register t2,  Register t3,
+                          bool invertCRC);
   void kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
                                   Register constants, Register barretConstants,
-                                  Register t0, Register t1, Register t2, Register t3, Register t4);
+                                  Register t0, Register t1, Register t2, Register t3, Register t4,
+                                  bool invertCRC);
   void kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
                                   Register constants, Register barretConstants,
                                   Register t0, Register t1, Register t2);
-  void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
+  void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
+                               bool invertCRC);
+  void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
+                                  bool invertCRC);

   //
   // Debugging
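The invertCRC contract described in the note above can be modeled in a few lines of host-side C++. This is an illustrative sketch only, not HotSpot code: the bit-at-a-time update, the helper names, and the use of the CRC-32 polynomial 0xEDB88320 for both calling conventions are assumptions made for brevity. It shows why a CRC32-style caller passes invertCRC == true (its state is externally visible), while a CRC32C-style caller keeps its state in internal, pre-inverted form and passes false:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// One table-less update step (reflected CRC-32 polynomial 0xEDB88320).
static uint32_t update_byte_crc32(uint32_t crc, uint8_t val) {
  crc ^= val;
  for (int i = 0; i < 8; i++) {
    crc = (crc >> 1) ^ ((crc & 1u) ? 0xEDB88320u : 0u);
  }
  return crc;
}

// Counterpart of the kernel_crc32_* emitters: optional bit-flip around the update.
static uint32_t kernel_crc32(uint32_t crc, const uint8_t* buf, size_t len, bool invertCRC) {
  if (invertCRC) crc = ~crc;            // externally visible -> internal format
  for (size_t i = 0; i < len; i++) {
    crc = update_byte_crc32(crc, buf[i]);
  }
  if (invertCRC) crc = ~crc;            // internal format -> externally visible
  return crc;
}

int main() {
  const uint8_t data[9] = {'1','2','3','4','5','6','7','8','9'};
  // CRC32 convention: state stays externally visible, so flip around each update.
  printf("%08x\n", (unsigned)kernel_crc32(0, data, sizeof(data), true));
  // CRC32C-style convention (same polynomial here, for simplicity): the caller
  // keeps the state pre-inverted between updates and flips only in getValue().
  uint32_t internal = ~0u;
  internal = kernel_crc32(internal, data, sizeof(data), false);
  printf("%08x\n", (unsigned)~internal);
  return 0;
}

Both printed values are cbf43926, the CRC-32 check value of "123456789"; the two conventions differ only in where the bit-flip happens.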
@@ -1,78 +0,0 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->klass_part()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
// There are more general problems with CDS on ppc, so I cannot
// really test this. But having this instead of Unimplemented() allows
// us to pass TestOptionsWithRanges.java.
__ unimplemented();
}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -71,7 +71,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
                                  Register temp_reg, Register temp2_reg,
                                  const char* error_message) {
   InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
-  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+  Klass* klass = SystemDictionary::well_known_klass(klass_id);
   Label L_ok, L_bad;
   BLOCK_COMMENT("verify_klass {");
   __ verify_oop(obj_reg);
@@ -174,8 +174,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
     __ verify_oop(method_temp);
     __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp, temp2);
     __ verify_oop(method_temp);
-    // The following assumes that a Method* is normally compressed in the vmtarget field:
-    __ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);
+    __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), method_temp);
+    __ verify_oop(method_temp);
+    __ ld(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), method_temp);

   if (VerifyMethodHandles && !for_compiler_entry) {
     // Make sure recv is already on stack.
@@ -361,14 +362,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
       if (VerifyMethodHandles) {
         verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
       }
-      __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
+      __ load_heap_oop(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), member_reg);
+      __ ld(R19_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), member_reg);
       break;

     case vmIntrinsics::_linkToStatic:
       if (VerifyMethodHandles) {
         verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
       }
-      __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
+      __ load_heap_oop(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), member_reg);
+      __ ld(R19_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), member_reg);
       break;

     case vmIntrinsics::_linkToVirtual:
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
 // Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
@@ -2053,12 +2053,12 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
 }

 // Vector ideal reg.
-const int Matcher::vector_ideal_reg(int size) {
+const uint Matcher::vector_ideal_reg(int size) {
   assert(MaxVectorSize == 8 && size == 8, "");
   return Op_RegL;
 }

-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
   fatal("vector shift is not supported");
   return Node::NotAMachineReg;
 }
@@ -5842,6 +5842,16 @@ instruct loadConN_lo(iRegNdst dst, iRegNsrc src1, immN src2) %{
   ins_pipe(pipe_class_default);
 %}

+instruct rldicl(iRegLdst dst, iRegLsrc src, immI16 shift, immI16 mask_begin) %{
+  effect(DEF dst, USE src, USE shift, USE mask_begin);
+
+  size(4);
+  ins_encode %{
+    __ rldicl($dst$$Register, $src$$Register, $shift$$constant, $mask_begin$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // Needed to postalloc expand loadConN: ConN is loaded as ConI
 // leaving the upper 32 bits with sign-extension bits.
 // This clears these bits: dst = src & 0xFFFFFFFF.
@@ -10519,6 +10529,16 @@ instruct convB2I_reg(iRegIdst dst, iRegIsrc src, immI_24 amount) %{
   ins_pipe(pipe_class_default);
 %}

+instruct extsh(iRegIdst dst, iRegIsrc src) %{
+  effect(DEF dst, USE src);
+
+  size(4);
+  ins_encode %{
+    __ extsh($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // LShiftI 16 + RShiftI 16 converts short to int.
 instruct convS2I_reg(iRegIdst dst, iRegIsrc src, immI_16 amount) %{
   match(Set dst (RShiftI (LShiftI src amount) amount));
@@ -12682,8 +12702,7 @@ instruct insrwi(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
 // Just slightly faster than java implementation.
 instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
   match(Set dst (ReverseBytesI src));
-  predicate(UseCountLeadingZerosInstructionsPPC64);
-  ins_cost(DEFAULT_COST);
+  ins_cost(7*DEFAULT_COST);

   expand %{
     immI16 imm24 %{ (int) 24 %}
@@ -12705,6 +12724,172 @@ instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
   %}
 %}
instruct bytes_reverse_long_Ex(iRegLdst dst, iRegLsrc src) %{
match(Set dst (ReverseBytesL src));
ins_cost(15*DEFAULT_COST);
expand %{
immI16 imm56 %{ (int) 56 %}
immI16 imm48 %{ (int) 48 %}
immI16 imm40 %{ (int) 40 %}
immI16 imm32 %{ (int) 32 %}
immI16 imm24 %{ (int) 24 %}
immI16 imm16 %{ (int) 16 %}
immI16 imm8 %{ (int) 8 %}
immI16 imm0 %{ (int) 0 %}
iRegLdst tmpL1;
iRegLdst tmpL2;
iRegLdst tmpL3;
iRegLdst tmpL4;
iRegLdst tmpL5;
iRegLdst tmpL6;
// src : |a|b|c|d|e|f|g|h|
rldicl(tmpL1, src, imm8, imm24); // tmpL1 : | | | |e|f|g|h|a|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |a| | | |e|
rldicl(tmpL3, tmpL2, imm32, imm0); // tmpL3 : | | | |e| | | |a|
rldicl(tmpL1, src, imm16, imm24); // tmpL1 : | | | |f|g|h|a|b|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |b| | | |f|
rldicl(tmpL4, tmpL2, imm40, imm0); // tmpL4 : | | |f| | | |b| |
orL_reg_reg(tmpL5, tmpL3, tmpL4); // tmpL5 : | | |f|e| | |b|a|
rldicl(tmpL1, src, imm24, imm24); // tmpL1 : | | | |g|h|a|b|c|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |c| | | |g|
rldicl(tmpL3, tmpL2, imm48, imm0); // tmpL3 : | |g| | | |c| | |
rldicl(tmpL1, src, imm32, imm24); // tmpL1 : | | | |h|a|b|c|d|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |d| | | |h|
rldicl(tmpL4, tmpL2, imm56, imm0); // tmpL4 : |h| | | |d| | | |
orL_reg_reg(tmpL6, tmpL3, tmpL4); // tmpL6 : |h|g| | |d|c| | |
orL_reg_reg(dst, tmpL5, tmpL6); // dst : |h|g|f|e|d|c|b|a|
%}
%}
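The lane diagrams in bytes_reverse_long_Ex above can be cross-checked with a small host-side C++ model. This sketch is illustrative only (the rotl64/rldicl helper names are assumptions; rldicl is modeled as rotate-left-then-clear-the-mb-most-significant-bits, which is the effect the PPC instruction has for these operand combinations):

#include <cstdint>
#include <cassert>

// Rotate a 64-bit value left by sh bits (sh in 1..63 for the calls below).
static uint64_t rotl64(uint64_t x, unsigned sh) {
  return (x << sh) | (x >> (64 - sh));
}

// Model of PPC rldicl: rotate left by sh, then clear the mb most significant bits.
static uint64_t rldicl(uint64_t x, unsigned sh, unsigned mb) {
  uint64_t r = rotl64(x, sh);
  return mb ? (r & (~0ULL >> mb)) : r;
}

int main() {
  const uint64_t src = 0x0102030405060708ULL;        // bytes |a|b|c|d|e|f|g|h|
  uint64_t t1, t2, t3, t4, t5, t6;
  t1 = rldicl(src, 8, 24);  t2 = rldicl(t1, 32, 24);  t3 = rldicl(t2, 32, 0);
  t1 = rldicl(src, 16, 24); t2 = rldicl(t1, 32, 24);  t4 = rldicl(t2, 40, 0);
  t5 = t3 | t4;                                      // | | |f|e| | |b|a|
  t1 = rldicl(src, 24, 24); t2 = rldicl(t1, 32, 24);  t3 = rldicl(t2, 48, 0);
  t1 = rldicl(src, 32, 24); t2 = rldicl(t1, 32, 24);  t4 = rldicl(t2, 56, 0);
  t6 = t3 | t4;                                      // |h|g| | |d|c| | |
  assert((t5 | t6) == 0x0807060504030201ULL);        // |h|g|f|e|d|c|b|a|
  return 0;
}

Each of the four rotate/mask chains deposits two of the eight source bytes; the three ORs then assemble the fully reversed doubleword, matching the |h|g|f|e|d|c|b|a| comment in the expand block.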
instruct bytes_reverse_ushort_Ex(iRegIdst dst, iRegIsrc src) %{
match(Set dst (ReverseBytesUS src));
ins_cost(2*DEFAULT_COST);
expand %{
immI16 imm16 %{ (int) 16 %}
immI16 imm8 %{ (int) 8 %}
urShiftI_reg_imm(dst, src, imm8);
insrwi(dst, src, imm16, imm8);
%}
%}
instruct bytes_reverse_short_Ex(iRegIdst dst, iRegIsrc src) %{
match(Set dst (ReverseBytesS src));
ins_cost(3*DEFAULT_COST);
expand %{
immI16 imm16 %{ (int) 16 %}
immI16 imm8 %{ (int) 8 %}
iRegLdst tmpI1;
urShiftI_reg_imm(tmpI1, src, imm8);
insrwi(tmpI1, src, imm16, imm8);
extsh(dst, tmpI1);
%}
%}
// Load Integer reversed byte order
instruct loadI_reversed(iRegIdst dst, indirect mem) %{
match(Set dst (ReverseBytesI (LoadI mem)));
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ lwbrx($dst$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Load Long - aligned and reversed
instruct loadL_reversed(iRegLdst dst, indirect mem) %{
match(Set dst (ReverseBytesL (LoadL mem)));
predicate(VM_Version::has_ldbrx());
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ ldbrx($dst$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Load unsigned short / char reversed byte order
instruct loadUS_reversed(iRegIdst dst, indirect mem) %{
match(Set dst (ReverseBytesUS (LoadUS mem)));
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ lhbrx($dst$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Load short reversed byte order
instruct loadS_reversed(iRegIdst dst, indirect mem) %{
match(Set dst (ReverseBytesS (LoadS mem)));
ins_cost(MEMORY_REF_COST + DEFAULT_COST);
size(8);
ins_encode %{
__ lhbrx($dst$$Register, $mem$$Register);
__ extsh($dst$$Register, $dst$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Store Integer reversed byte order
instruct storeI_reversed(iRegIsrc src, indirect mem) %{
match(Set mem (StoreI mem (ReverseBytesI src)));
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ stwbrx($src$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Store Long reversed byte order
instruct storeL_reversed(iRegLsrc src, indirect mem) %{
match(Set mem (StoreL mem (ReverseBytesL src)));
predicate(VM_Version::has_stdbrx());
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ stdbrx($src$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Store unsigned short / char reversed byte order
instruct storeUS_reversed(iRegIsrc src, indirect mem) %{
match(Set mem (StoreC mem (ReverseBytesUS src)));
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ sthbrx($src$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
// Store short reversed byte order
instruct storeS_reversed(iRegIsrc src, indirect mem) %{
match(Set mem (StoreC mem (ReverseBytesS src)));
ins_cost(MEMORY_REF_COST);
size(4);
ins_encode %{
__ sthbrx($src$$Register, $mem$$Register);
%}
ins_pipe(pipe_class_default);
%}
 //---------- Replicate Vector Instructions ------------------------------------

 // Insrdi does replicate if src == dst.
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3276,6 +3276,36 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }

+  // Compute CRC32/CRC32C function.
+  void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
+
+    // arguments to kernel_crc32:
+    const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
+    const Register data    = R4_ARG2;  // source byte array
+    const Register dataLen = R5_ARG3;  // #bytes to process
+
+    const Register t0      = R2;
+    const Register t1      = R7;
+    const Register t2      = R8;
+    const Register t3      = R9;
+    const Register tc0     = R10;
+    const Register tc1     = R11;
+    const Register tc2     = R12;
+
+    BLOCK_COMMENT("Stub body {");
+    assert_different_registers(crc, data, dataLen, table);
+
+    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
+
+    BLOCK_COMMENT("return");
+    __ mr_if_needed(R3_RET, crc);  // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
+    __ blr();
+
+    BLOCK_COMMENT("} Stub body");
+  }
+
   /**
    * Arguments:
    *
@@ -3296,14 +3326,14 @@ class StubGenerator: public StubCodeGenerator {
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ function_entry();  // Remember stub start address (is rtn value).

+    const Register table   = R6;       // crc table address
+
+#ifdef VM_LITTLE_ENDIAN
     // arguments to kernel_crc32:
     const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
     const Register data    = R4_ARG2;  // source byte array
     const Register dataLen = R5_ARG3;  // #bytes to process
-    const Register table   = R6;       // crc table address
-
-#ifdef VM_LITTLE_ENDIAN
     if (VM_Version::has_vpmsumb()) {
       const Register constants  = R2;  // constants address
       const Register bconstants = R8;  // barret table address
@@ -3321,7 +3351,7 @@ class StubGenerator: public StubCodeGenerator {
       StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
       StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);

-      __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4);
+      __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);

       BLOCK_COMMENT("return");
       __ mr_if_needed(R3_RET, crc);  // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
@@ -3331,31 +3361,79 @@ class StubGenerator: public StubCodeGenerator {
     } else
 #endif
     {
-      const Register t0  = R2;
-      const Register t1  = R7;
-      const Register t2  = R8;
-      const Register t3  = R9;
-      const Register tc0 = R10;
-      const Register tc1 = R11;
-      const Register tc2 = R12;
-
-      BLOCK_COMMENT("Stub body {");
-      assert_different_registers(crc, data, dataLen, table);
-
-      StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
-
-      __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table);
-
-      BLOCK_COMMENT("return");
-      __ mr_if_needed(R3_RET, crc);  // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
-      __ blr();
-
-      BLOCK_COMMENT("} Stub body");
+      StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
+      generate_CRC_updateBytes(name, table, true);
+    }
+
+    return start;
+  }
+
+  /**
+   * Arguments:
+   *
+   * Inputs:
+   *   R3_ARG1 - int   crc
+   *   R4_ARG2 - byte* buf
+   *   R5_ARG3 - int   length (of buffer)
+   *
+   * scratch:
+   *   R2, R6-R12
+   *
+   * Output:
+   *   R3_RET  - int   crc result
+   */
+  // Compute CRC32C function.
+  address generate_CRC32C_updateBytes(const char* name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ function_entry();  // Remember stub start address (is rtn value).
+
+    const Register table = R6;  // crc table address
+
+#if 0   // no vector support yet for CRC32C
+#ifdef VM_LITTLE_ENDIAN
+    // arguments to kernel_crc32:
+    const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
+    const Register data    = R4_ARG2;  // source byte array
+    const Register dataLen = R5_ARG3;  // #bytes to process
+
+    if (VM_Version::has_vpmsumb()) {
+      const Register constants  = R2;  // constants address
+      const Register bconstants = R8;  // barret table address
+
+      const Register t0 = R9;
+      const Register t1 = R10;
+      const Register t2 = R11;
+      const Register t3 = R12;
+      const Register t4 = R7;

       BLOCK_COMMENT("Stub body {");
       assert_different_registers(crc, data, dataLen, table);

+      StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
+      StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
+      StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);
+
+      __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);

       BLOCK_COMMENT("return");
       __ mr_if_needed(R3_RET, crc);  // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
       __ blr();

       BLOCK_COMMENT("} Stub body");
+    } else
+#endif
+#endif
+    {
+      StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
+      generate_CRC_updateBytes(name, table, false);
     }

     return start;
   }

   // Initialization
   void generate_initial() {
     // Generates all stubs and initializes the entry points
@@ -3383,6 +3461,12 @@ class StubGenerator: public StubCodeGenerator {
       StubRoutines::_crc_table_adr = (address)StubRoutines::ppc64::_crc_table;
       StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
     }

+    // CRC32C Intrinsics.
+    if (UseCRC32CIntrinsics) {
+      StubRoutines::_crc32c_table_addr = (address)StubRoutines::ppc64::_crc32c_table;
+      StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
+    }
   }

   void generate_all() {
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,13 +55,16 @@ class ppc64 {

   // CRC32 Intrinsics.
   static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
+  static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
   static juint* _constants;
   static juint* _barret_constants;

  public:

   // CRC32 Intrinsics.
+  static void generate_load_table_addr(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
   static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
+  static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
   static void generate_load_crc_constants_addr(MacroAssembler* masm, Register table);
   static void generate_load_crc_barret_constants_addr(MacroAssembler* masm, Register table);
   static juint* generate_crc_constants();
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2015, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 2015, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -643,12 +643,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
   return entry;
 }

-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  __ unimplemented("generate_continuation_for");
-  return entry;
-}
-
 // This entry is returned to when a call returns to the interpreter.
 // When we arrive here, we expect that the callee stack frame is already popped.
 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
@@ -692,6 +686,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
 #endif
   __ sldi(size, size, Interpreter::logStackElementSize);
   __ add(R15_esp, R15_esp, size);
+
+  __ check_and_handle_popframe(R11_scratch1);
+  __ check_and_handle_earlyret(R11_scratch1);
+
   __ dispatch_next(state, step);
   return entry;
 }
@@ -1894,7 +1892,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
     __ lwz(crc, 2*wordSize, argP);  // Current crc state, zero extend to 64 bit to have a clean register.

     StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
-    __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);
+    __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp, true);

     // Restore caller sp for c2i case and return.
     __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@@ -1910,7 +1908,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
   return NULL;
 }

-// CRC32 Intrinsics.
 /**
  * Method entry for static native methods:
  *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
@@ -1986,7 +1984,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
     // Performance measurements show the 1word and 2word variants to be almost equivalent,
     // with very light advantages for the 1word variant. We chose the 1word variant for
     // code compactness.
-    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);
+    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, true);

     // Restore caller sp for c2i case and return.
     __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@@ -2002,8 +2000,88 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
   return NULL;
 }

-// Not supported
+/**
+ * Method entry for intrinsic-candidate (non-native) methods:
+ *   int java.util.zip.CRC32C.updateBytes(           int crc, byte[] b,  int off, int end)
+ *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
+ * Unlike CRC32, CRC32C does not have any methods marked as native.
+ * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
+ * (A short, plain C++ model of this end/length convention follows this method.)
+ **/
 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32CIntrinsics) {
+    address start = __ pc();  // Remember stub start address (is rtn value).
+
+    // We don't generate a local frame and don't align the stack, because
+    // we do not even call the stub code (we generate the code inline)
+    // and there is no safepoint on this path.
+
+    // Load parameters.
+    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
+    const Register argP    = R15_esp;
+    const Register crc     = R3_ARG1;  // crc value
+    const Register data    = R4_ARG2;  // address of java byte array
+    const Register dataLen = R5_ARG3;  // source data len
+    const Register table   = R6_ARG4;  // address of crc32c table
+
+    const Register t0      = R9;       // scratch registers for crc calculation
+    const Register t1      = R10;
+    const Register t2      = R11;
+    const Register t3      = R12;
+
+    const Register tc0     = R2;       // registers to hold pre-calculated column addresses
+    const Register tc1     = R7;
+    const Register tc2     = R8;
+    const Register tc3     = table;    // table address is reconstructed at the end of kernel_crc32_* emitters
+
+    const Register tmp     = t0;       // Only used very locally to calculate byte buffer address.
+
+    // Arguments are reversed on java expression stack.
+    // Calculate address of start element.
+    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateDirectByteBuffer".
+      BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
+      // crc     @ (SP + 5W) (32bit)
+      // buf     @ (SP + 3W) (64bit ptr to long array)
+      // off     @ (SP + 2W) (32bit)
+      // dataLen @ (SP + 1W) (32bit)
+      // data = buf + off
+      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
+      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
+      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
+      __ lwz( crc,     5*wordSize, argP);  // current crc state
+      __ add( data, data, tmp);            // Add byte buffer offset.
+      __ sub( dataLen, dataLen, tmp);      // (end_index - offset)
+    } else {                                                         // Used for "updateBytes update".
+      BLOCK_COMMENT("CRC32C_updateBytes {");
+      // crc     @ (SP + 4W) (32bit)
+      // buf     @ (SP + 3W) (64bit ptr to byte array)
+      // off     @ (SP + 2W) (32bit)
+      // dataLen @ (SP + 1W) (32bit)
+      // data = buf + off + base_offset
+      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
+      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
+      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
+      __ add( data, data, tmp);            // add byte buffer offset
+      __ sub( dataLen, dataLen, tmp);      // (end_index - offset)
+      __ lwz( crc,     4*wordSize, argP);  // current crc state
+      __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
+    }
+
+    StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
+
+    // Performance measurements show the 1word and 2word variants to be almost equivalent,
+    // with very light advantages for the 1word variant. We chose the 1word variant for
+    // code compactness.
+    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, false);
+
+    // Restore caller sp for c2i case and return.
+    __ mr(R1_SP, R21_sender_SP);  // Cut the stack back to where the caller started.
+    __ blr();
+
+    BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
+
+    return start;
+  }

   return NULL;
 }
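As the comment block on this entry notes, CRC32C hands the intrinsic an exclusive end index rather than a length, and the incoming crc is already in internal (pre-inverted) form, which is why kernel_crc32_1word is called with invertCRC == false. Below is a minimal host-side C++ model of that contract; the function name is an assumption, the polynomial is CRC32C's reflected 0x82F63B78 applied bit by bit, and the VM of course uses the table-driven kernel instead:

#include <cstdint>

// Bitwise CRC32C over buf[off, end); crc is in "internal" (pre-inverted) form,
// matching the invertCRC == false call in the entry above. Illustrative only.
static uint32_t crc32c_update_bytes(uint32_t crc, const uint8_t* buf, int off, int end) {
  int dataLen = end - off;              // end is exclusive: length = end - off
  const uint8_t* data = buf + off;      // data = buf + off (+ array base offset in the VM)
  for (int i = 0; i < dataLen; i++) {
    crc ^= data[i];
    for (int b = 0; b < 8; b++) {
      crc = (crc >> 1) ^ ((crc & 1u) ? 0x82F63B78u : 0u);
    }
  }
  return crc;
}

A java.util.zip.CRC32C-style caller would seed the state with ~0u, chain such calls, and bit-flip only when the checksum is finally read out.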
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3660,11 +3660,9 @@ void TemplateTable::_new() {
   __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
   __ bne(CCR0, Lslow_case);

-  // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
+  // Get instanceKlass
   __ sldi(Roffset, Rindex, LogBytesPerWord);
-  __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
-  __ isync(); // Order load of instance Klass wrt. tags.
-  __ ldx(RinstanceKlass, Roffset, Rscratch);
+  __ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);

   // Make sure klass is fully initialized and get instance_size.
   __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
@@ -3722,7 +3720,7 @@ void TemplateTable::_new() {
     __ bge(CCR0, Lslow_case);

     // Increment waste limit to prevent getting stuck on this slow path.
-    __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
+    __ add_const_optimized(RtlabWasteLimitValue, RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment());
     __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
   }
   // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
@@ -3875,9 +3873,7 @@ void TemplateTable::checkcast() {
   // Extract target class from constant pool.
   __ bind(Lquicked);
   __ sldi(Roffset, Roffset, LogBytesPerWord);
-  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
-  __ isync(); // Order load of specified Klass wrt. tags.
-  __ ldx(RspecifiedKlass, Rcpool, Roffset);
+  __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);

   // Do the checkcast.
   __ bind(Lresolved);
@@ -3939,9 +3935,7 @@ void TemplateTable::instanceof() {
   // Extract target class from constant pool.
   __ bind(Lquicked);
   __ sldi(Roffset, Roffset, LogBytesPerWord);
-  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
-  __ isync(); // Order load of specified Klass wrt. tags.
-  __ ldx(RspecifiedKlass, Rcpool, Roffset);
+  __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);

   // Do the checkcast.
   __ bind(Lresolved);
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,7 +111,7 @@ void VM_Version::initialize() {
   // Create and print feature-string.
   char buf[(num_features+1) * 16]; // Max 16 chars per feature.
   jio_snprintf(buf, sizeof(buf),
-               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                (has_fsqrt()   ? " fsqrt"   : ""),
                (has_isel()    ? " isel"    : ""),
                (has_lxarxeh() ? " lxarxeh" : ""),
@@ -126,7 +126,9 @@ void VM_Version::initialize() {
                (has_vpmsumb() ? " vpmsumb" : ""),
                (has_tcheck()  ? " tcheck"  : ""),
                (has_mfdscr()  ? " mfdscr"  : ""),
-               (has_vsx()     ? " vsx"     : "")
+               (has_vsx()     ? " vsx"     : ""),
+               (has_ldbrx()   ? " ldbrx"   : ""),
+               (has_stdbrx()  ? " stdbrx"  : "")
                // Make sure number of %s matches num_features!
               );
   _features_string = os::strdup(buf);
@@ -172,18 +174,27 @@ void VM_Version::initialize() {
   assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");

-  // Implementation does not use any of the vector instructions
-  // available with Power8. Their exploitation is still pending.
+  // If defined(VM_LITTLE_ENDIAN) and running on Power8 or newer hardware,
+  // the implementation uses the vector instructions available with Power8.
+  // In all other cases, the implementation uses only generally available instructions.
   if (!UseCRC32Intrinsics) {
     if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
       FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
     }
   }

-  if (UseCRC32CIntrinsics) {
-    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
-      warning("CRC32C intrinsics are not available on this CPU");
-    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
+  // Implementation does not use any of the vector instructions available with Power8.
+  // Their exploitation is still pending (aka "work in progress").
+  if (!UseCRC32CIntrinsics) {
+    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
+      FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
+    }
+  }
+
+  // TODO: Provide implementation.
+  if (UseAdler32Intrinsics) {
+    warning("Adler32Intrinsics not available on this CPU.");
+    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
   }

   // The AES intrinsic stubs require AES instruction support.
@@ -245,11 +256,6 @@ void VM_Version::initialize() {
     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
   }

-  if (UseAdler32Intrinsics) {
-    warning("Adler32Intrinsics not available on this CPU.");
-    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
-  }
-
   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
     UseMultiplyToLenIntrinsic = true;
   }
@@ -659,6 +665,8 @@ void VM_Version::determine_features() {
   a->tcheck(0);                    // code[12] -> tcheck
   a->mfdscr(R0);                   // code[13] -> mfdscr
   a->lxvd2x(VSR0, R3_ARG1);        // code[14] -> vsx
+  a->ldbrx(R7, R3_ARG1, R4_ARG2);  // code[15] -> ldbrx
+  a->stdbrx(R7, R3_ARG1, R4_ARG2); // code[16] -> stdbrx
   a->blr();

   // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@@ -708,6 +716,8 @@ void VM_Version::determine_features() {
   if (code[feature_cntr++]) features |= tcheck_m;
   if (code[feature_cntr++]) features |= mfdscr_m;
   if (code[feature_cntr++]) features |= vsx_m;
+  if (code[feature_cntr++]) features |= ldbrx_m;
+  if (code[feature_cntr++]) features |= stdbrx_m;

   // Print the detection code.
   if (PrintAssembly) {
@@ -47,6 +47,8 @@ protected:
     tcheck,
     mfdscr,
     vsx,
+    ldbrx,
+    stdbrx,
     num_features // last entry to count features
   };
   enum Feature_Flag_Set {
@@ -66,6 +68,8 @@ protected:
     tcheck_m              = (1 << tcheck ),
     mfdscr_m              = (1 << mfdscr ),
     vsx_m                 = (1 << vsx    ),
+    ldbrx_m               = (1 << ldbrx  ),
+    stdbrx_m              = (1 << stdbrx ),
     all_features_m        = (unsigned long)-1
   };
@@ -100,6 +104,8 @@ public:
   static bool has_tcheck()  { return (_features & tcheck_m)  != 0; }
   static bool has_mfdscr()  { return (_features & mfdscr_m)  != 0; }
   static bool has_vsx()     { return (_features & vsx_m)     != 0; }
+  static bool has_ldbrx()   { return (_features & ldbrx_m)   != 0; }
+  static bool has_stdbrx()  { return (_features & stdbrx_m)  != 0; }

   // Assembler testing
   static void allow_all();
@@ -28,8 +28,6 @@
 #undef LUCY_DBG

-#define NearLabel Label
-
 // Immediate is an abstraction to represent the various immediate
 // operands which exist on z/Architecture. Neither this class nor
 // instances hereof have an own state. It consists of methods only.
@@ -42,12 +42,6 @@ class Bytes: AllStatic {
   //
   // In short, it makes no sense on z/Architecture to piecemeal get or put unaligned data.

-  // Returns true if the byte ordering used by Java is different from
-  // the native byte ordering of the underlying machine.
-  // z/Arch is big endian, thus, a swap between native and Java ordering
-  // is always a no-op.
-  static inline bool is_Java_byte_ordering_different() { return false; }
-
   // Only swap on little endian machines => suffix `_le'.
   static inline u2 swap_u2_le(u2 x) { return x; }
   static inline u4 swap_u4_le(u4 x) { return x; }
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3048,9 +3048,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
   assert_different_registers(val, crc, res);

   __ load_const_optimized(res, StubRoutines::crc_table_addr());
-  __ not_(crc, noreg, false); // ~crc
-  __ update_byte_crc32(crc, val, res);
-  __ not_(res, crc, false);   // ~crc
+  __ kernel_crc32_singleByteReg(crc, val, res, true);
+  __ z_lgfr(res, crc);
 }

 #undef __
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,20 +61,6 @@ void LIRItem::load_nonconstant(int bits) {
   }
 }

-inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
-  LIR_Opr r = li.value()->operand();
-  if (r->is_constant()) {
-    // Constants get loaded with sign extend on this platform.
-    ll->move(li.result(), dst);
-  } else {
-    if (!r->is_register()) {
-      li.load_item_force(dst);
-    }
-    LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
-    ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
-  }
-}
-
 //--------------------------------------------------------------
 //               LIRGenerator
 //--------------------------------------------------------------
@@ -1224,10 +1210,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
       LIR_Opr arg2 = cc->at(1);
       LIR_Opr arg3 = cc->at(2);

-      // CCallingConventionRequiresIntsAsLongs
       crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
       __ leal(LIR_OprFact::address(a), arg2);
-      load_int_as_long(gen()->lir(), len, arg3);
+      len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.

       __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
       __ move(result_reg, result);
@@ -1240,7 +1225,70 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
 }

 void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
-  Unimplemented();
+  assert(UseCRC32CIntrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);
switch (x->id()) {
case vmIntrinsics::_updateBytesCRC32C:
case vmIntrinsics::_updateDirectByteBufferCRC32C: {
bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
LIRItem crc(x->argument_at(0), this);
LIRItem buf(x->argument_at(1), this);
LIRItem off(x->argument_at(2), this);
LIRItem end(x->argument_at(3), this);
buf.load_item();
off.load_nonconstant();
end.load_nonconstant();
// len = end - off
LIR_Opr len = end.result();
LIR_Opr tmpA = new_register(T_INT);
LIR_Opr tmpB = new_register(T_INT);
__ move(end.result(), tmpA);
__ move(off.result(), tmpB);
__ sub(tmpA, tmpB, tmpA);
len = tmpA;
LIR_Opr index = off.result();
int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
if (off.result()->is_constant()) {
index = LIR_OprFact::illegalOpr;
offset += off.result()->as_jint();
}
LIR_Opr base_op = buf.result();
if (index->is_valid()) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index, tmp);
index = tmp;
}
LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE);
BasicTypeList signature(3);
signature.append(T_INT);
signature.append(T_ADDRESS);
signature.append(T_INT);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
const LIR_Opr result_reg = result_register_for (x->type());
LIR_Opr arg1 = cc->at(0);
LIR_Opr arg2 = cc->at(1);
LIR_Opr arg3 = cc->at(2);
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
__ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
break;
}
default: {
ShouldNotReachHere();
}
}
} }
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) { void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
@ -1271,4 +1319,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) { void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform"); fatal("vectorizedMismatch intrinsic is not implemented on this platform");
} }
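Unlike CRC32.updateBytes, the CRC32C variants receive an end index rather than a byte count, which is why the new LIR sequence materializes len = end - off before the runtime call. A sketch of that arithmetic in plain C++ (not the LIR API):

#include <cassert>

// CRC32C.updateBytes(crc, b, off, end) passes an end index; the stub
// expects a byte count, so the generator computes len = end - off
// (mirrored here in plain C++).
static int crc32c_len(int off, int end) { return end - off; }

int main() {
  assert(crc32c_len(16, 80) == 64);  // 64 bytes starting at offset 16
  return 0;
}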


@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -107,24 +107,15 @@ void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
   // TODO: Maybe implement +VerifyActivationFrameSize here.
   // verify_thread(); // Too slow. We will just verify on method entry & exit.
   verify_oop(Z_tos, state);
-#ifdef FAST_DISPATCH
-  if (table == Interpreter::dispatch_table(state)) {
-    // Use IdispatchTables.
-    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
-    // Add offset to correct dispatch table.
-    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);    // Multiply by wordSize.
-    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // Get entry addr.
-  } else
-#endif
-  {
-    // Dispatch table to use.
-    load_absolute_address(Z_tmp_1, (address) table); // Z_tmp_1 = table;
-    // 0 <= Z_bytecode < 256 => Use a 32 bit shift, because it is shorter than sllg.
-    // Z_bytecode must have been loaded zero-extended for this approach to be correct.
-    z_sll(Z_bytecode, LogBytesPerWord, Z_R0);        // Multiply by wordSize.
-    z_lg(Z_tmp_1, 0, Z_bytecode, Z_tmp_1);           // Get entry addr.
-  }
+
+  // Dispatch table to use.
+  load_absolute_address(Z_tmp_1, (address) table); // Z_tmp_1 = table;
+
+  // 0 <= Z_bytecode < 256 => Use a 32 bit shift, because it is shorter than sllg.
+  // Z_bytecode must have been loaded zero-extended for this approach to be correct.
+  z_sll(Z_bytecode, LogBytesPerWord, Z_R0);        // Multiply by wordSize.
+  z_lg(Z_tmp_1, 0, Z_bytecode, Z_tmp_1);           // Get entry addr.

   z_br(Z_tmp_1);
 }
@@ -371,7 +362,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
   Register tmp = index;  // reuse
   z_sllg(index, index, LogBytesPerHeapOop); // Offset into resolved references array.
   // Load pointer for resolved_references[] objArray.
-  z_lg(result, ConstantPool::resolved_references_offset_in_bytes(), result);
+  z_lg(result, ConstantPool::cache_offset_in_bytes(), result);
+  z_lg(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
   // JNIHandles::resolve(result)
   z_lg(result, 0, result); // Load resolved references array itself.
 #ifdef ASSERT
@@ -386,6 +378,16 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
   load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
 }

+// load cpool->resolved_klass_at(index)
+void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register cpool, Register offset, Register iklass) {
+  // int value = *(Rcpool->int_at_addr(which));
+  // int resolved_klass_index = extract_low_short_from_int(value);
+  z_llgh(offset, Address(cpool, offset, sizeof(ConstantPool) + 2)); // offset = resolved_klass_index (s390 is big-endian)
+  z_sllg(offset, offset, LogBytesPerWord);  // Convert 'index' to 'offset'
+  z_lg(iklass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // iklass = cpool->_resolved_klasses
+  z_lg(iklass, Address(iklass, offset, Array<Klass*>::base_offset_in_bytes()));
+}
+
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                                Register tmp,
                                                                int bcp_offset,
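The new load_resolved_klass_at_offset mirrors, in assembly, the C++ lookup its comments describe: fetch the CONSTANT_Class entry, take its low 16 bits as an index (hence the big-endian z_llgh at displacement +2), and index the _resolved_klasses array. A hedged sketch of that logic with simplified stand-in types (not the real ConstantPool API):

#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-ins for ConstantPool/Klass, illustrating the lookup
// the new assembler sequence performs.
struct Klass { int id; };

struct ConstantPool {
  std::vector<int32_t> entries;           // tag data; low 16 bits = resolved klass index
  std::vector<Klass*>  resolved_klasses;  // the _resolved_klasses array

  Klass* resolved_klass_at(int which) const {
    int32_t value = entries[which];
    int resolved_klass_index = value & 0xffff;  // on big-endian, z_llgh reads these 2 bytes directly
    return resolved_klasses[resolved_klass_index];
  }
};

int main() {
  Klass k{42};
  ConstantPool cp{{0x00010003}, {nullptr, nullptr, nullptr, &k}};
  assert(cp.resolved_klass_at(0)->id == 42);  // low short of 0x00010003 is 3
  return 0;
}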


@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,9 +48,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
                             bool allow_relocation,
                             bool check_exceptions);

-  virtual void check_and_handle_popframe(Register java_thread);
-  virtual void check_and_handle_earlyret(Register java_thread);
-
   // Base routine for all dispatches.
   void dispatch_base(TosState state, address* table);

@@ -58,6 +55,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
   InterpreterMacroAssembler(CodeBuffer* c)
     : MacroAssembler(c) {}

+  virtual void check_and_handle_popframe(Register java_thread);
+  virtual void check_and_handle_earlyret(Register java_thread);
+
   void jump_to_entry(address entry, Register Rscratch);

   virtual void load_earlyret_value(TosState state);
@@ -115,6 +115,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
   void load_resolved_reference_at_index(Register result, Register index);
+  // load cpool->resolved_klass_at(index)
+  void load_resolved_klass_at_offset(Register cpool, Register offset, Register iklass);

   // Pop topmost element from stack. It just disappears. Useful if
   // consumed previously by access via stackTop().


@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1616,6 +1616,8 @@ void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& b
   if (branch_target.is_bound()) {
     address branch_addr = target(branch_target);
     branch_optimized(cond, branch_addr);
+  } else if (branch_target.is_near()) {
+    z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
   } else {
     z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
   }
@@ -1674,7 +1676,8 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                   bool    has_sign) {
   address branch_origin = pc();
   bool x2_imm8 = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
-  bool is_RelAddr16 = (branch_target.is_bound() &&
+  bool is_RelAddr16 = branch_target.is_near() ||
+                      (branch_target.is_bound() &&
                        RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
   unsigned int casenum = (len64?2:0)+(has_sign?0:1);

@@ -1744,13 +1747,21 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                   Label&  branch_target,
                                                   bool    len64,
                                                   bool    has_sign) {
-  unsigned int casenum = (len64?2:0)+(has_sign?0:1);
+  unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);

   if (branch_target.is_bound()) {
     address branch_addr = target(branch_target);
     compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
   } else {
-    {
+    if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
+      switch (casenum) {
+        case 0: z_crj(  r1, r2, cond, branch_target); break;
+        case 1: z_clrj( r1, r2, cond, branch_target); break;
+        case 2: z_cgrj( r1, r2, cond, branch_target); break;
+        case 3: z_clgrj(r1, r2, cond, branch_target); break;
+        default: ShouldNotReachHere(); break;
+      }
+    } else {
       switch (casenum) {
         case 0: z_cr( r1, r2); break;
         case 1: z_clr(r1, r2); break;
@@ -2741,11 +2752,11 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
   BLOCK_COMMENT("lookup_interface_method {");

   // Load start of itable entries into itable_entry_addr.
-  z_llgf(vtable_len, Address(recv_klass, InstanceKlass::vtable_length_offset()));
+  z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
   z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));

   // Loop over all itable entries until desired interfaceOop(Rinterface) found.
-  const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset());
+  const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());

   add2reg_with_index(itable_entry_addr,
                      vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
@@ -5927,8 +5938,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
  * @param len     register containing number of bytes
  * @param table   register pointing to CRC table
  */
-void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
-                                           Register data, bool invertCRC) {
+void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
   assert_different_registers(crc, buf, len, table, data);

   Label L_mainLoop, L_done;
@@ -5938,20 +5948,12 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
   z_ltr(len, len);
   z_brnh(L_done);

-  if (invertCRC) {
-    not_(crc, noreg, false); // ~c
-  }
-
   bind(L_mainLoop);
     z_llgc(data, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
     add2reg(buf, mainLoop_stepping);         // Advance buffer position.
     update_byte_crc32(crc, data, table);
     z_brct(len, L_mainLoop);                 // Iterate.

-  if (invertCRC) {
-    not_(crc, noreg, false); // ~c
-  }
-
   bind(L_done);
 }

@@ -5968,6 +5970,7 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
   // c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
   //     crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
   // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
+  // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
   const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
   const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
   const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
@@ -5986,17 +5989,12 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
   rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
   rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2

-  // Load pre-calculated table values.
-  // Use columns 4..7 for big-endian.
-  z_ly(t3, Address(table, t3, (intptr_t)ix0));
+  // XOR indexed table values to calculate updated crc.
   z_ly(t2, Address(table, t2, (intptr_t)ix1));
-  z_ly(t1, Address(table, t1, (intptr_t)ix2));
   z_ly(t0, Address(table, t0, (intptr_t)ix3));
-
-  // Calculate new crc from table values.
-  z_xr(t2, t3);
-  z_xr(t0, t1);
-  z_xr(t0, t2); // Now crc contains the final checksum value.
+  z_xy(t2, Address(table, t3, (intptr_t)ix0));
+  z_xy(t0, Address(table, t1, (intptr_t)ix2));
+  z_xr(t0, t2); // Now t0 contains the updated CRC value.
   lgr_if_needed(crc, t0);
 }

@@ -6009,7 +6007,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
  *        uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!
  */
 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
-                                        Register t0,  Register t1,  Register t2,  Register t3) {
+                                        Register t0,  Register t1,  Register t2,  Register t3,
+                                        bool invertCRC) {
   assert_different_registers(crc, buf, len, table);

   Label L_mainLoop, L_tail;
@@ -6024,7 +6023,9 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
   // The situation itself is detected and handled correctly by the conditional branches
   // following aghi(len, -stepping) and aghi(len, +stepping).

-  not_(crc, noreg, false); // 1s complement of crc
+  if (invertCRC) {
+    not_(crc, noreg, false); // 1s complement of crc
+  }

 #if 0
   {
@@ -6039,7 +6040,7 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
     rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc
     z_sgfr(len, ctr);                              // Remaining len after alignment.

-    update_byteLoop_crc32(crc, buf, ctr, table, data, false);
+    update_byteLoop_crc32(crc, buf, ctr, table, data);
   }
 #endif

@@ -6047,21 +6048,23 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
   z_srag(ctr, len, log_stepping);
   z_brnh(L_tail);

   z_lrvr(crc, crc);          // Revert byte order because we are dealing with big-endian data.
   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop

   BIND(L_mainLoop);
     update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
     update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
     z_brct(ctr, L_mainLoop); // Iterate.

   z_lrvr(crc, crc);          // Revert byte order back to original.

   // Process last few (<8) bytes of buffer.
   BIND(L_tail);
-  update_byteLoop_crc32(crc, buf, len, table, data, false);
+  update_byteLoop_crc32(crc, buf, len, table, data);

-  not_(crc, noreg, false); // 1s complement of crc
+  if (invertCRC) {
+    not_(crc, noreg, false); // 1s complement of crc
+  }
 }

 /**
@@ -6073,7 +6076,8 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
  *        uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!
  */
 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
-                                        Register t0,  Register t1,  Register t2,  Register t3) {
+                                        Register t0,  Register t1,  Register t2,  Register t3,
+                                        bool invertCRC) {
   assert_different_registers(crc, buf, len, table);

   Label L_mainLoop, L_tail;
@@ -6087,7 +6091,9 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
   // The situation itself is detected and handled correctly by the conditional branches
   // following aghi(len, -stepping) and aghi(len, +stepping).

-  not_(crc, noreg, false); // 1s complement of crc
+  if (invertCRC) {
+    not_(crc, noreg, false); // 1s complement of crc
+  }

   // Check for short (<4 bytes) buffer.
   z_srag(ctr, len, log_stepping);
@@ -6099,13 +6105,16 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
   BIND(L_mainLoop);
     update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
     z_brct(ctr, L_mainLoop); // Iterate.

   z_lrvr(crc, crc);          // Revert byte order back to original.

   // Process last few (<8) bytes of buffer.
   BIND(L_tail);
-  update_byteLoop_crc32(crc, buf, len, table, data, false);
+  update_byteLoop_crc32(crc, buf, len, table, data);

-  not_(crc, noreg, false); // 1s complement of crc
+  if (invertCRC) {
+    not_(crc, noreg, false); // 1s complement of crc
+  }
 }

 /**
@@ -6115,22 +6124,51 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
  * @param table   register pointing to CRC table
  */
 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
-                                        Register t0,  Register t1,  Register t2,  Register t3) {
+                                        Register t0,  Register t1,  Register t2,  Register t3,
+                                        bool invertCRC) {
   assert_different_registers(crc, buf, len, table);
   Register data = t0;

-  update_byteLoop_crc32(crc, buf, len, table, data, true);
+  if (invertCRC) {
+    not_(crc, noreg, false); // 1s complement of crc
+  }
+
+  update_byteLoop_crc32(crc, buf, len, table, data);
+
+  if (invertCRC) {
+    not_(crc, noreg, false); // 1s complement of crc
+  }
 }

-void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
+void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
+                                             bool invertCRC) {
   assert_different_registers(crc, buf, len, table, tmp);

-  not_(crc, noreg, false); // ~c
+  if (invertCRC) {
+    not_(crc, noreg, false); // 1s complement of crc
+  }

   z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
   update_byte_crc32(crc, tmp, table);

-  not_(crc, noreg, false); // ~c
+  if (invertCRC) {
+    not_(crc, noreg, false); // 1s complement of crc
+  }
+}
+
+void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
+                                                bool invertCRC) {
+  assert_different_registers(crc, val, table);
+
+  if (invertCRC) {
+    not_(crc, noreg, false); // 1s complement of crc
+  }
+
+  update_byte_crc32(crc, val, table);
+
+  if (invertCRC) {
+    not_(crc, noreg, false); // 1s complement of crc
+  }
 }

 //
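The reworked update_1word_crc32 interleaves the table loads (z_ly) with the XORs (z_xy), so it needs fewer instructions and registers; the value it computes is the classic zlib "DOBIG4" step quoted in the surrounding comments. A plain C++ rendering of that step, assuming the table layout the code implies (8 columns of 256 entries, columns 4..7 used on big-endian; table contents not shown):

#include <cstdint>

// One big-endian slice-by-4 step: c already holds the CRC folded with
// 4 message bytes; combine four table lookups, exactly as the quoted
// "DOBIG4" macro describes. (Illustrative sketch only.)
static uint32_t crc32_dobig4(uint32_t c, const uint32_t table[8][256]) {
  return table[4][ c        & 0xff] ^
         table[5][(c >>  8) & 0xff] ^
         table[6][(c >> 16) & 0xff] ^
         table[7][ c >> 24        ];
}

int main() {
  static uint32_t table[8][256] = {};           // real code uses precomputed constants
  return (int)crc32_dobig4(0x12345678u, table); // returns 0 with an all-zero table
}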


@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1011,22 +1011,35 @@ class MacroAssembler: public Assembler {
                  int before = 0, int after = 0) PRODUCT_RETURN;

   // Emitters for CRC32 calculation.
+  // A note on invertCRC:
+  //   Unfortunately, internal representation of crc differs between CRC32 and CRC32C.
+  //   CRC32 holds its current crc value in the externally visible representation.
+  //   CRC32C holds its current crc value in internal format, ready for updating.
+  //   Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
+  //   In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
+  //   The bool invertCRC parameter indicates whether bit-flipping is required before updates.
  private:
   void fold_byte_crc32(Register crc, Register table, Register val, Register tmp);
   void fold_8bit_crc32(Register crc, Register table, Register tmp);
+  void update_byte_crc32( Register crc, Register val, Register table);
   void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
-                             Register data, bool invertCRC);
+                             Register data);
   void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
                           Register t0,  Register t1,  Register t2,  Register t3);
  public:
-  void update_byte_crc32( Register crc, Register val, Register table);
-  void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
+  void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
+                                  bool invertCRC);
+  void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
+                               bool invertCRC);
   void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
-                          Register t0,  Register t1,  Register t2,  Register t3);
+                          Register t0,  Register t1,  Register t2,  Register t3,
+                          bool invertCRC);
   void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
-                          Register t0,  Register t1,  Register t2,  Register t3);
+                          Register t0,  Register t1,  Register t2,  Register t3,
+                          bool invertCRC);
   void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
-                          Register t0,  Register t1,  Register t2,  Register t3);
+                          Register t0,  Register t1,  Register t2,  Register t3,
+                          bool invertCRC);

   // Emitters for BigInteger.multiplyToLen intrinsic
   // note: length of result array (zlen) is passed on the stack
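The invertCRC note above is the key to the whole refactor: CRC32 keeps its running value in the externally visible (bit-flipped) form, CRC32C keeps it in the internal form, so only the CRC32 kernels need the ~crc pre/post step. A minimal sketch of that contract with a hypothetical byte-at-a-time kernel_update helper (not the HotSpot emitters; the polynomial shown is reflected CRC-32, for illustration):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical byte-at-a-time kernel; stands in for the table-driven emitters.
static uint32_t kernel_update(uint32_t crc, const uint8_t* buf, size_t len) {
  for (size_t i = 0; i < len; i++) {
    crc ^= buf[i];
    for (int b = 0; b < 8; b++)
      crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
  }
  return crc;
}

// The invertCRC contract: CRC32 callers hold the external (flipped) value,
// so flip on entry and exit; CRC32C callers already hold the internal
// value, so no flips happen per update.
static uint32_t update(uint32_t crc, const uint8_t* buf, size_t len, bool invertCRC) {
  if (invertCRC) crc = ~crc;            // to internal representation
  crc = kernel_update(crc, buf, len);
  if (invertCRC) crc = ~crc;            // back to external representation
  return crc;
}

int main() {
  const char msg[] = "123456789";
  // CRC32 path: external value starts at 0; the kernel sees ~0 internally.
  assert(update(0, reinterpret_cast<const uint8_t*>(msg), 9, true) == 0xCBF43926u); // well-known CRC-32 check value
  return 0;
}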


@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/codeBuffer.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "memory/metaspaceShared.hpp"
-
-// Generate the self-patching vtable method:
-//
-// This method will be called (as any other Klass virtual method) with
-// the Klass itself as the first argument. Example:
-//
-//   oop obj;
-//   int size = obj->klass()->klass_part()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();.
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-//=====================================================================
-
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no releationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-
-#undef __
-#define __ masm->
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
-                                              void** vtable,
-                                              char** md_top,
-                                              char*  md_end,
-                                              char** mc_top,
-                                              char*  mc_end) {
-  intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
-  *(intptr_t *)(*md_top) = vtable_bytes;
-  *md_top += sizeof(intptr_t);
-  void** dummy_vtable = (void**)*md_top;
-  *vtable = dummy_vtable;
-  *md_top += vtable_bytes;
-
-  // Get ready to generate dummy methods.
-  CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
-  MacroAssembler* masm = new MacroAssembler(&cb);
-
-  __ unimplemented();
-}


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -73,7 +73,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
                                  const char* error_message) {
   InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
-  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+  Klass* klass = SystemDictionary::well_known_klass(klass_id);
   assert(temp_reg != Z_R0 && // Is used as base register!
          temp_reg != noreg && temp2_reg != noreg, "need valid registers!");
@@ -200,10 +200,13 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                      Address(method_temp,
                              NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
   __ verify_oop(method_temp);
-  // The following assumes that a method is normally compressed in the vmtarget field.
+  __ load_heap_oop(method_temp,
+                   Address(method_temp,
+                           NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
+  __ verify_oop(method_temp);
   __ z_lg(method_temp,
           Address(method_temp,
-                  NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
+                  NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));

   if (VerifyMethodHandles && !for_compiler_entry) {
     // Make sure recv is already on stack.
@@ -371,7 +374,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     Address member_clazz(   member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
     Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
-    Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
+    Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
+    Address vmtarget_method(Z_method,   NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));

     Register temp1_recv_klass = temp1;
     if (iid != vmIntrinsics::_linkToStatic) {
@@ -424,7 +428,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
         if (VerifyMethodHandles) {
           verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
         }
-        __ z_lg(Z_method, member_vmtarget);
+        __ load_heap_oop(Z_method, member_vmtarget);
+        __ z_lg(Z_method, vmtarget_method);
         method_is_live = true;
         break;

@@ -432,7 +437,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
         if (VerifyMethodHandles) {
           verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
         }
-        __ z_lg(Z_method, member_vmtarget);
+        __ load_heap_oop(Z_method, member_vmtarget);
+        __ z_lg(Z_method, vmtarget_method);
         method_is_live = true;
         break;
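The pattern repeated in these hunks is the new double indirection: MemberName no longer stores the Method* directly in vmtarget; it now references a ResolvedMethodName oop whose vmtarget holds the Method*. Hence every former single z_lg becomes a load_heap_oop followed by a z_lg. Sketched with simplified stand-in structs (not the real oop layouts):

#include <cassert>

// Simplified stand-ins: the real objects are Java heap oops with
// injected fields; only the pointer chain matters here.
struct Method             { const char* name; };
struct ResolvedMethodName { Method* vmtarget; };
struct MemberName         { ResolvedMethodName* method; };

// Old shape: one load (member->vmtarget). New shape: two loads, as in
// the load_heap_oop + z_lg pairs above.
static Method* resolve(const MemberName* mn) {
  ResolvedMethodName* rmn = mn->method;  // first load (heap oop)
  return rmn->vmtarget;                  // second load (raw Method*)
}

int main() {
  Method m{"invokeStatic target"};
  ResolvedMethodName rmn{&m};
  MemberName mn{&rmn};
  assert(resolve(&mn) == &m);
  return 0;
}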


@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 // Copyright (c) 2016 SAP SE. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
@@ -1562,7 +1562,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
 }

 // Vector ideal reg.
-const int Matcher::vector_ideal_reg(int size) {
+const uint Matcher::vector_ideal_reg(int size) {
   assert(MaxVectorSize == 8 && size == 8, "");
   return Op_RegL;
 }
@@ -1577,7 +1577,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
   return max_vector_size(bt); // Same as max.
 }

-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
   fatal("vector shift is not supported");
   return Node::NotAMachineReg;
 }
@@ -6768,6 +6768,7 @@ instruct sllI_reg_imm(iRegI dst, iRegI src, immI nbits) %{
   format %{ "SLL     $dst,$src,$nbits\t# use RISC-like SLLG also for int" %}
   ins_encode %{
     int Nbit = $nbits$$constant;
+    assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
     __ z_sllg($dst$$Register, $src$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
   %}
   ins_pipe(pipe_class_dummy);
@@ -6841,6 +6842,7 @@ instruct sraI_reg_imm(iRegI dst, immI src, flagsReg cr) %{
   format %{ "SRA     $dst,$src" %}
   ins_encode %{
     int Nbit = $src$$constant;
+    assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
     __ z_sra($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
   %}
   ins_pipe(pipe_class_dummy);
@@ -6893,6 +6895,7 @@ instruct srlI_reg_imm(iRegI dst, immI src) %{
   format %{ "SRL     $dst,$src" %}
   ins_encode %{
     int Nbit = $src$$constant;
+    assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
     __ z_srl($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
   %}
   ins_pipe(pipe_class_dummy);
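The three new asserts all check the same invariant: Java masks an int shift count to its low five bits, and the ideal graph is expected to hand the matcher a count already in [0, 31], so the masking in the emitter must be a no-op. A small sketch of the masking rule (plain C++):

#include <cassert>

// Java semantics: (x << n) on int uses only the low 5 bits of n,
// so x << 35 behaves as x << 3. BitsPerJavaInteger is 32.
static int java_shl_int(int x, int nbits) {
  return x << (nbits & (32 - 1));  // mirrors Nbit & (BitsPerJavaInteger - 1)
}

int main() {
  assert(java_shl_int(1, 35) == java_shl_int(1, 3));  // both are 8
  int Nbit = 3;                       // a normalized count...
  assert((Nbit & (32 - 1)) == Nbit);  // ...passes the new assert
  return 0;
}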


@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -623,26 +623,6 @@ class StubGenerator: public StubCodeGenerator {
 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
 #endif

-  //----------------------------------------------------------------------
-  // The following routine generates a subroutine to throw an asynchronous
-  // UnknownError when an unsafe access gets a fault that could not be
-  // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
-  //
-  // Arguments:
-  //    trapping PC: ??
-  //
-  // Results:
-  //    Posts an asynchronous exception, skips the trapping instruction.
-  //
-  address generate_handler_for_unsafe_access() {
-    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
-    {
-      address start = __ pc();
-      __ unimplemented("StubRoutines::handler_for_unsafe_access", 86);
-      return start;
-    }
-  }
-
   // Support for uint StubRoutine::zarch::partial_subtype_check(Klass
   // sub, Klass super);
   //
@@ -2330,26 +2310,25 @@ class StubGenerator: public StubCodeGenerator {
   }

-  // Arguments:
-  //   Z_ARG1    - int   crc
-  //   Z_ARG2    - byte* buf
-  //   Z_ARG3    - int   length (of buffer)
-  //
-  // Result:
-  //   Z_RET     - int   crc result
-  //
-  // Compute CRC32 function.
-  address generate_CRC32_updateBytes(const char* name) {
-    __ align(CodeEntryAlignment);
-    StubCodeMark mark(this, "StubRoutines", name);
-    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).
-
+  /**
+   *  Arguments:
+   *
+   *  Inputs:
+   *   Z_ARG1    - int   crc
+   *   Z_ARG2    - byte* buf
+   *   Z_ARG3    - int   length (of buffer)
+   *
+   *  Result:
+   *   Z_RET     - int   crc result
+   **/
+  // Compute CRC function (generic, for all polynomials).
+  void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
     // arguments to kernel_crc32:
     Register       crc     = Z_ARG1;  // Current checksum, preset by caller or result from previous call, int.
     Register       data    = Z_ARG2;  // source byte array
     Register       dataLen = Z_ARG3;  // #bytes to process, int
-    Register       table   = Z_ARG4;  // crc table address
+//    Register       table   = Z_ARG4;  // crc table address. Preloaded and passed in by caller.
     const Register t0      = Z_R10;   // work reg for kernel* emitters
     const Register t1      = Z_R11;   // work reg for kernel* emitters
     const Register t2      = Z_R12;   // work reg for kernel* emitters
@@ -2361,16 +2340,50 @@ class StubGenerator: public StubCodeGenerator {
     // Crc used as int.
     __ z_llgfr(dataLen, dataLen);

-    StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
-
     __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
     __ z_stmg(Z_R10, Z_R13, 1*8, Z_SP);  // Spill regs 10..11 to make them available as work registers.
-    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3);
+    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, invertCRC);
    __ z_lmg(Z_R10, Z_R13, 1*8, Z_SP);   // Spill regs 10..11 back from stack.
     __ resize_frame(+(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.

     __ z_llgfr(Z_RET, crc);  // Updated crc is function result. No copying required, just zero upper 32 bits.
     __ z_br(Z_R14);          // Result already in Z_RET == Z_ARG1.
+  }
+
+  // Compute CRC32 function.
+  address generate_CRC32_updateBytes(const char* name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).
+
+    assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", name);
+
+    BLOCK_COMMENT("CRC32_updateBytes {");
+    Register table = Z_ARG4;  // crc32 table address.
+    StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
+
+    generate_CRC_updateBytes(name, table, true);
+    BLOCK_COMMENT("} CRC32_updateBytes");
+
+    return __ addr_at(start_off);
+  }
+
+  // Compute CRC32C function.
+  address generate_CRC32C_updateBytes(const char* name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).
+
+    assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", name);
+
+    BLOCK_COMMENT("CRC32C_updateBytes {");
+    Register table = Z_ARG4;  // crc32c table address.
+    StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);
+
+    generate_CRC_updateBytes(name, table, false);
+    BLOCK_COMMENT("} CRC32C_updateBytes");

     return __ addr_at(start_off);
   }
@@ -2441,9 +2454,13 @@ class StubGenerator: public StubCodeGenerator {

     // Entry points that are platform specific.
     if (UseCRC32Intrinsics) {
-      // We have no CRC32 table on z/Architecture.
-      StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
-      StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
+      StubRoutines::_crc_table_adr    = (address)StubRoutines::zarch::_crc_table;
+      StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
+    }
+
+    if (UseCRC32CIntrinsics) {
+      StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table;
+      StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
     }

     // Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
@@ -2461,8 +2478,6 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);

-    StubRoutines::zarch::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();
-
     // Support for verify_oop (must happen after universe_init).
     StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();
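The stub-side refactor has the same shape as the macro-assembler one: a shared generate_CRC_updateBytes body, parameterized only by the preloaded table register and the invertCRC flag, with thin CRC32/CRC32C wrappers choosing table and flag. A compact sketch of that structure (hypothetical helpers, not the StubGenerator API):

#include <cstdio>

// Hypothetical helpers standing in for table loading and code emission.
static void load_table(const char* which)   { std::printf("load %s table\n", which); }
static void emit_crc_kernel(bool invertCRC) { std::printf("kernel, invertCRC=%d\n", invertCRC); }

// Shared body: the table is preloaded by the caller, so only the
// bit-flip behavior differs between the two stubs.
static void generate_CRC_updateBytes(bool invertCRC) { emit_crc_kernel(invertCRC); }

static void generate_CRC32_updateBytes()  { load_table("crc32");  generate_CRC_updateBytes(true);  }
static void generate_CRC32C_updateBytes() { load_table("crc32c"); generate_CRC_updateBytes(false); }

int main() {
  generate_CRC32_updateBytes();
  generate_CRC32C_updateBytes();
  return 0;
}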

File diff suppressed because it is too large


@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,11 @@ class zarch {
   };

  private:
-  static address _handler_for_unsafe_access_entry;
-
   static int _atomic_memory_operation_lock;

   static address _partial_subtype_check;
   static juint   _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
+  static juint   _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];

   // Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
   static address _trot_table_addr;
@@ -91,11 +90,11 @@ class zarch {
   static int  atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
   static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }

-  static address handler_for_unsafe_access_entry() { return _handler_for_unsafe_access_entry; }
-
   static address partial_subtype_check() { return _partial_subtype_check; }

+  static void generate_load_absolute_address(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
   static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
+  static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);

   // Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
   static void generate_load_trot_table_addr(MacroAssembler* masm, Register table);


@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -642,13 +642,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry; return entry;
} }
// Unused, should never pass by.
address TemplateInterpreterGenerator::generate_continuation_for (TosState state) {
address entry = __ pc();
__ should_not_reach_here();
return entry;
}
address TemplateInterpreterGenerator::generate_return_entry_for (TosState state, int step, size_t index_size) { address TemplateInterpreterGenerator::generate_return_entry_for (TosState state, int step, size_t index_size) {
address entry = __ pc(); address entry = __ pc();
@ -683,6 +676,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for (TosState state,
__ z_llgc(size, Address(cache, offset, flags_offset+(sizeof(size_t)-1))); __ z_llgc(size, Address(cache, offset, flags_offset+(sizeof(size_t)-1)));
__ z_sllg(size, size, Interpreter::logStackElementSize); // Each argument size in bytes. __ z_sllg(size, size, Interpreter::logStackElementSize); // Each argument size in bytes.
__ z_agr(Z_esp, size); // Pop arguments. __ z_agr(Z_esp, size); // Pop arguments.
__ check_and_handle_popframe(Z_thread);
__ check_and_handle_earlyret(Z_thread);
__ dispatch_next(state, step); __ dispatch_next(state, step);
BLOCK_COMMENT("} return_entry"); BLOCK_COMMENT("} return_entry");
@ -1186,11 +1183,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// native_call: assert that mdo == NULL // native_call: assert that mdo == NULL
const bool check_for_mdo = !native_call DEBUG_ONLY(|| native_call); const bool check_for_mdo = !native_call DEBUG_ONLY(|| native_call);
if (ProfileInterpreter && check_for_mdo) { if (ProfileInterpreter && check_for_mdo) {
#ifdef FAST_DISPATCH
// FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
// they both use I2.
assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
Label get_continue; Label get_continue;
__ load_and_test_long(Rmdp, method_(method_data)); __ load_and_test_long(Rmdp, method_(method_data));
@ -1933,8 +1925,11 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point; return entry_point;
} }
// Method entry for static native methods:
// int java.util.zip.CRC32.update(int crc, int b) /**
* Method entry for static native methods:
* int java.util.zip.CRC32.update(int crc, int b)
*/
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
if (UseCRC32Intrinsics) { if (UseCRC32Intrinsics) {
@ -1964,7 +1959,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ z_llgf(crc, 2 * wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register. __ z_llgf(crc, 2 * wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.
StubRoutines::zarch::generate_load_crc_table_addr(_masm, table); StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
__ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1); __ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1, true);
// Restore caller sp for c2i case. // Restore caller sp for c2i case.
__ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started. __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.
@ -1983,9 +1978,11 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
} }
// Method entry for static native methods: /**
// int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) * Method entry for static native methods:
// int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) * int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
* int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
*/
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32Intrinsics) { if (UseCRC32Intrinsics) {
@ -2020,10 +2017,10 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// data = buf + off // data = buf + off
BLOCK_COMMENT("CRC32_updateByteBuffer {"); BLOCK_COMMENT("CRC32_updateByteBuffer {");
__ z_llgf(crc, 5*wordSize, argP); // current crc state __ z_llgf(crc, 5*wordSize, argP); // current crc state
__ z_lg(data, 3*wordSize, argP); // start of byte buffer __ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset. __ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process __ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process
} else { // Used for "updateBytes update". } else { // Used for "updateBytes update".
// crc @ (SP + 4W) (32bit) // crc @ (SP + 4W) (32bit)
// buf @ (SP + 3W) (64bit ptr to byte array) // buf @ (SP + 3W) (64bit ptr to byte array)
// off @ (SP + 2W) (32bit) // off @ (SP + 2W) (32bit)
@ -2031,7 +2028,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// data = buf + off + base_offset // data = buf + off + base_offset
BLOCK_COMMENT("CRC32_updateBytes {"); BLOCK_COMMENT("CRC32_updateBytes {");
__ z_llgf(crc, 4*wordSize, argP); // current crc state __ z_llgf(crc, 4*wordSize, argP); // current crc state
__ z_lg(data, 3*wordSize, argP); // start of byte buffer __ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset. __ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process __ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process
__ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE)); __ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
@ -2041,7 +2038,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
__ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers. __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_stmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers. __ z_stmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3); __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, true);
__ z_lmg(t0, t3, 1*8, Z_SP); // Restore regs 10..13 from stack. __ z_lmg(t0, t3, 1*8, Z_SP); // Restore regs 10..13 from stack.
// Restore caller sp for c2i case. // Restore caller sp for c2i case.
@ -2060,8 +2057,79 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
return NULL; return NULL;
} }
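
A minimal sketch (illustrative, not HotSpot code) of the operand-stack layout the entries above decode. Java pushes arguments left-to-right, so the last argument ends up nearest the top (SP + 1W), and a Java long takes two expression-stack slots; that is why crc sits at SP + 5W for the ByteBuffer variant but SP + 4W for the byte-array variant.

#include <cstdio>

int main() {
  // int updateByteBuffer(int crc, long buf, int off, int len)
  // slots from top: len@1W, off@2W, buf@3W+4W (long takes two), crc@5W
  int crc_slot_bytebuffer = 1 + 1 + 2 + 1;   // == 5, matches SP + 5W above
  // int updateBytes(int crc, byte[] b, int off, int len)
  // slots from top: len@1W, off@2W, b@3W (one reference slot), crc@4W
  int crc_slot_bytes      = 1 + 1 + 1 + 1;   // == 4, matches SP + 4W above
  printf("crc @ SP+%dW (ByteBuffer), SP+%dW (byte[])\n",
         crc_slot_bytebuffer, crc_slot_bytes);
  return 0;
}
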
// Not supported
/**
* Method entry for intrinsic-candidate (non-native) methods:
* int java.util.zip.CRC32C.updateBytes( int crc, byte[] b, int off, int end)
* int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
* Unlike CRC32, CRC32C does not have any methods marked as native
* CRC32C also uses an "end" variable instead of the length variable CRC32 uses
*/
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32CIntrinsics) {
uint64_t entry_off = __ offset();
// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.
// Load parameters.
// Z_esp is the caller's operand stack pointer, i.e. it points to the parameters.
const Register argP = Z_esp;
const Register crc = Z_ARG1; // crc value
const Register data = Z_ARG2; // address of java byte array
const Register dataLen = Z_ARG3; // source data len
const Register table = Z_ARG4; // address of crc32 table
const Register t0 = Z_R10; // work reg for kernel* emitters
const Register t1 = Z_R11; // work reg for kernel* emitters
const Register t2 = Z_R12; // work reg for kernel* emitters
const Register t3 = Z_R13; // work reg for kernel* emitters
// Arguments are reversed on java expression stack.
// Calculate address of start element.
if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateByteBuffer direct".
// crc @ (SP + 5W) (32bit)
// buf @ (SP + 3W) (64bit ptr to long array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off
BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
__ z_llgf(crc, 5*wordSize, argP); // current crc state
__ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process, calculated as
__ z_sgf(dataLen, Address(argP, 2*wordSize)); // (end_index - offset)
} else { // Used for "updateBytes update".
// crc @ (SP + 4W) (32bit)
// buf @ (SP + 3W) (64bit ptr to byte array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off + base_offset
BLOCK_COMMENT("CRC32C_updateBytes {");
__ z_llgf(crc, 4*wordSize, argP); // current crc state
__ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process, calculated as
__ z_sgf(dataLen, Address(argP, 2*wordSize)); // (end_index - offset)
__ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}
StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);
__ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_stmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, false);
__ z_lmg(t0, t3, 1*8, Z_SP); // Restore regs 10..13 from stack.
// Restore caller sp for c2i case.
__ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.
__ z_br(Z_R14);
BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
return __ addr_at(entry_off);
}
return NULL; return NULL;
} }
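
A standalone sketch (not HotSpot code) of the dataLen computation emitted above: CRC32C passes an exclusive end index where CRC32 passes a length, so the stub must derive the byte count itself, mirroring the z_lgf(end) followed by z_sgf(off) pair.

#include <cassert>

static long crc32c_byte_count(int off, int end) {
  return (long)end - (long)off;   // dataLen = end_index - offset
}

int main() {
  assert(crc32c_byte_count(16, 80) == 64);   // 64 bytes to process
  return 0;
}
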

View File

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -3466,7 +3466,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes())); __ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes()));
__ mem2reg_opt(method, __ mem2reg_opt(method,
Address(Z_tmp_2, index, Address(Z_tmp_2, index,
InstanceKlass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes()))); Klass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
__ profile_arguments_type(Z_ARG4, method, Z_ARG5, true); __ profile_arguments_type(Z_ARG4, method, Z_ARG5, true);
__ jump_from_interpreted(method, Z_ARG4); __ jump_from_interpreted(method, Z_ARG4);
BLOCK_COMMENT("} invokevirtual_helper"); BLOCK_COMMENT("} invokevirtual_helper");
@ -3708,7 +3708,7 @@ void TemplateTable::_new() {
__ z_sllg(offset, offset, LogBytesPerWord); // Convert to offset. __ z_sllg(offset, offset, LogBytesPerWord); // Convert to offset.
// Get InstanceKlass. // Get InstanceKlass.
Register iklass = cpool; Register iklass = cpool;
__ z_lg(iklass, Address(cpool, offset, sizeof(ConstantPool))); __ load_resolved_klass_at_offset(cpool, offset, iklass);
// Make sure klass is initialized & doesn't have finalizer. // Make sure klass is initialized & doesn't have finalizer.
// Make sure klass is fully initialized. // Make sure klass is fully initialized.
@ -3895,7 +3895,7 @@ void TemplateTable::checkcast() {
__ z_lgr(Z_ARG4, Z_tos); // Save receiver. __ z_lgr(Z_ARG4, Z_tos); // Save receiver.
__ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing __ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
__ mem2reg_opt(klass, Address(cpool, index, sizeof(ConstantPool))); __ load_resolved_klass_at_offset(cpool, index, klass);
__ bind(resolved); __ bind(resolved);
@ -3969,8 +3969,7 @@ void TemplateTable::instanceof() {
__ load_klass(subklass, Z_tos); __ load_klass(subklass, Z_tos);
__ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing __ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
__ mem2reg_opt(klass, __ load_resolved_klass_at_offset(cpool, index, klass);
Address(cpool, index, sizeof(ConstantPool)));
__ bind(resolved); __ bind(resolved);

View File

@ -111,13 +111,23 @@ void VM_Version::initialize() {
ContendedPaddingWidth = cache_line_size; ContendedPaddingWidth = cache_line_size;
} }
// On z/Architecture, the CRC32 intrinsics had to be implemented "by hand". // On z/Architecture, the CRC32/CRC32C intrinsics are implemented "by hand".
// They cannot be based on the CHECKSUM instruction which has been there // TODO: Provide implementation based on the vector instructions available from z13.
// since the very beginning (of z/Architecture). It computes "some kind of" a checksum // Note: The CHECKSUM instruction, which has been there since the very beginning
// which has nothing to do with the CRC32 algorithm. // (of z/Architecture), computes "some kind of" a checksum.
// It has nothing to do with the CRC32 algorithm.
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true); FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
} }
if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
}
// TODO: Provide implementation.
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
// On z/Architecture, we take UseAES as the general switch to enable/disable the AES intrinsics. // On z/Architecture, we take UseAES as the general switch to enable/disable the AES intrinsics.
// The specific, and yet to be defined, switches UseAESxxxIntrinsics will then be set // The specific, and yet to be defined, switches UseAESxxxIntrinsics will then be set
@ -195,11 +205,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
} }
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true); FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
} }
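
A sketch of the flag policy applied above, with hypothetical stand-ins for the real FLAG_IS_DEFAULT/FLAG_SET_DEFAULT macros: a supported intrinsic is enabled only when the user left the flag at its default, while an unsupported one is forced off with a warning even if explicitly requested.

#include <cstdio>

struct Flag { bool value; bool set_by_user; };   // hypothetical stand-in

static void apply_policy(Flag& crc32c, Flag& adler32) {
  if (!crc32c.set_by_user) crc32c.value = true;  // like FLAG_SET_DEFAULT(..., true)
  if (adler32.value) {                           // not implemented on this CPU
    fprintf(stderr, "warning: Adler32Intrinsics not available on this CPU.\n");
    adler32.value = false;
  }
}

int main() {
  Flag crc32c  = { false, false };   // left at default -> switched on
  Flag adler32 = { true,  true  };   // user-requested  -> forced off anyway
  apply_policy(crc32c, adler32);
  printf("UseCRC32CIntrinsics=%d UseAdler32Intrinsics=%d\n",
         crc32c.value, adler32.value);
  return 0;
}
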

View File

@ -83,7 +83,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ load_klass(rcvr_klass, Z_ARG1); __ load_klass(rcvr_klass, Z_ARG1);
// Set method (in case of interpreted method), and destination address. // Set method (in case of interpreted method), and destination address.
int entry_offset = in_bytes(InstanceKlass::vtable_start_offset()) + int entry_offset = in_bytes(Klass::vtable_start_offset()) +
vtable_index * vtableEntry::size_in_bytes(); vtable_index * vtableEntry::size_in_bytes();
#ifndef PRODUCT #ifndef PRODUCT
@ -96,8 +96,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// worst case actual size // worst case actual size
padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true); padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true);
assert(Immediate::is_uimm12(in_bytes(InstanceKlass::vtable_length_offset())), "disp too large"); assert(Immediate::is_uimm12(in_bytes(Klass::vtable_length_offset())), "disp too large");
__ z_cl(vtable_idx, in_bytes(InstanceKlass::vtable_length_offset()), rcvr_klass); __ z_cl(vtable_idx, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
__ z_brl(L); __ z_brl(L);
__ z_lghi(Z_ARG3, vtable_index); // Debug code, don't optimize. __ z_lghi(Z_ARG3, vtable_index); // Debug code, don't optimize.
__ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false); __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false);
@ -187,11 +187,11 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ load_klass(rcvr_klass, Z_ARG1); __ load_klass(rcvr_klass, Z_ARG1);
// Load start of itable entries into itable_entry. // Load start of itable entries into itable_entry.
__ z_llgf(vtable_len, Address(rcvr_klass, InstanceKlass::vtable_length_offset())); __ z_llgf(vtable_len, Address(rcvr_klass, Klass::vtable_length_offset()));
__ z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes())); __ z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
// Loop over all itable entries until desired interfaceOop(Rinterface) found. // Loop over all itable entries until desired interfaceOop(Rinterface) found.
const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset()); const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
// Count unused bytes. // Count unused bytes.
start_pc = __ pc(); start_pc = __ pc();
__ add2reg_with_index(itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), rcvr_klass, vtable_len); __ add2reg_with_index(itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), rcvr_klass, vtable_len);
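
A standalone sketch (made-up constants, not the real Klass layout) of the address arithmetic the stubs above perform after the vtable accessors moved from InstanceKlass to Klass: the method slot lives at klass + vtable_start_offset + index * entry_size.

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t klass               = 0x10000;  // hypothetical Klass* for the demo
  const int      vtable_start_offset = 0x1B8;    // made-up value, illustration only
  const int      vtable_entry_size   = 8;        // one pointer-sized entry
  int vtable_index = 5;
  intptr_t entry = klass + vtable_start_offset
                 + (intptr_t)vtable_index * vtable_entry_size;
  printf("vtable entry %d at %#lx\n", vtable_index, (long)entry);
  return 0;
}
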

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -270,9 +270,7 @@ void AbstractInterpreter::layout_activation(Method* method,
assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area"); assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area"); assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
} }
#ifdef _LP64
assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd"); assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
#endif
*interpreter_frame->register_addr(Lmethod) = (intptr_t) method; *interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
*interpreter_frame->register_addr(Llocals) = (intptr_t) locals; *interpreter_frame->register_addr(Llocals) = (intptr_t) locals;
@ -283,9 +281,6 @@ void AbstractInterpreter::layout_activation(Method* method,
*interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache(); *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
// save the mirror in the interpreter frame // save the mirror in the interpreter frame
*interpreter_frame->interpreter_frame_mirror_addr() = method->method_holder()->java_mirror(); *interpreter_frame->interpreter_frame_mirror_addr() = method->method_holder()->java_mirror();
#ifdef FAST_DISPATCH
*interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
#endif
#ifdef ASSERT #ifdef ASSERT
BasicObjectLock* mp = (BasicObjectLock*)monitors; BasicObjectLock* mp = (BasicObjectLock*)monitors;

View File

@ -34,10 +34,6 @@ class Bytes: AllStatic {
// can I count on address always being a pointer to an unsigned char? Yes // can I count on address always being a pointer to an unsigned char? Yes
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine. For example, true for Intel x86, false for Solaris on Sparc.
static inline bool is_Java_byte_ordering_different() { return false; }
// Thus, a swap between native and Java ordering is always a no-op: // Thus, a swap between native and Java ordering is always a no-op:
static inline u2 swap_u2(u2 x) { return x; } static inline u2 swap_u2(u2 x) { return x; }
static inline u4 swap_u4(u4 x) { return x; } static inline u4 swap_u4(u4 x) { return x; }
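
A sketch (not the HotSpot header) of why the SPARC Bytes class can define every swap as the identity: SPARC is big-endian, which is also Java's class-file byte order, so no byte reversal is ever needed; on a little-endian machine the same functions would have to byte-swap.

#include <cstdint>
#include <cassert>

static inline uint16_t swap_u2(uint16_t x) { return x; }  // big-endian: no-op
static inline uint32_t swap_u4(uint32_t x) { return x; }  // big-endian: no-op

int main() {
  assert(swap_u2(0x1234)     == 0x1234);
  assert(swap_u4(0xDEADBEEF) == 0xDEADBEEFu);
  return 0;
}
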

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -22,10 +22,4 @@
* *
*/ */
#include "precompiled.hpp"
#include "c1/c1_FpuStackSim.hpp"
#include "c1/c1_FrameMap.hpp"
#include "utilities/array.hpp"
#include "utilities/ostream.hpp"
// No FPU stack on SPARC // No FPU stack on SPARC

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -159,21 +159,12 @@
public: public:
#ifdef _LP64
static LIR_Opr as_long_opr(Register r) { static LIR_Opr as_long_opr(Register r) {
return as_long_single_opr(r); return as_long_single_opr(r);
} }
static LIR_Opr as_pointer_opr(Register r) { static LIR_Opr as_pointer_opr(Register r) {
return as_long_single_opr(r); return as_long_single_opr(r);
} }
#else
static LIR_Opr as_long_opr(Register r) {
return as_long_pair_opr(r);
}
static LIR_Opr as_pointer_opr(Register r) {
return as_opr(r);
}
#endif
static LIR_Opr as_float_opr(FloatRegister r) { static LIR_Opr as_float_opr(FloatRegister r) {
return LIR_OprFact::single_fpu(r->encoding()); return LIR_OprFact::single_fpu(r->encoding());
} }

View File

@ -556,11 +556,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
// guarantee that 32-bit loads always sign extended but that isn't // guarantee that 32-bit loads always sign extended but that isn't
// true and since sign extension isn't free, it would impose a // true and since sign extension isn't free, it would impose a
// slight cost. // slight cost.
#ifdef _LP64
if (op->type() == T_INT) { if (op->type() == T_INT) {
__ br(acond, false, Assembler::pn, *(op->label())); __ br(acond, false, Assembler::pn, *(op->label()));
} else } else
#endif
__ brx(acond, false, Assembler::pn, *(op->label())); __ brx(acond, false, Assembler::pn, *(op->label()));
} }
// The peephole pass fills the delay slot // The peephole pass fills the delay slot
@ -576,12 +574,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
Register rlo = dst->as_register_lo(); Register rlo = dst->as_register_lo();
Register rhi = dst->as_register_hi(); Register rhi = dst->as_register_hi();
Register rval = op->in_opr()->as_register(); Register rval = op->in_opr()->as_register();
#ifdef _LP64
__ sra(rval, 0, rlo); __ sra(rval, 0, rlo);
#else
__ mov(rval, rlo);
__ sra(rval, BitsPerInt-1, rhi);
#endif
break; break;
} }
case Bytecodes::_i2d: case Bytecodes::_i2d:
@ -614,11 +607,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
Register rlo = op->in_opr()->as_register_lo(); Register rlo = op->in_opr()->as_register_lo();
Register rhi = op->in_opr()->as_register_hi(); Register rhi = op->in_opr()->as_register_hi();
Register rdst = dst->as_register(); Register rdst = dst->as_register();
#ifdef _LP64
__ sra(rlo, 0, rdst); __ sra(rlo, 0, rdst);
#else
__ mov(rlo, rdst);
#endif
break; break;
} }
case Bytecodes::_d2f: case Bytecodes::_d2f:
@ -711,7 +700,6 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
case T_SHORT : __ sth(from_reg->as_register(), base, offset); break; case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
case T_INT : __ stw(from_reg->as_register(), base, offset); break; case T_INT : __ stw(from_reg->as_register(), base, offset); break;
case T_LONG : case T_LONG :
#ifdef _LP64
if (unaligned || PatchALot) { if (unaligned || PatchALot) {
// Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem) // Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
assert(G3_scratch != base, "can't handle this"); assert(G3_scratch != base, "can't handle this");
@ -722,11 +710,6 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
} else { } else {
__ stx(from_reg->as_register_lo(), base, offset); __ stx(from_reg->as_register_lo(), base, offset);
} }
#else
assert(Assembler::is_simm13(offset + 4), "must be");
__ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
__ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
break; break;
case T_ADDRESS: case T_ADDRESS:
case T_METADATA: case T_METADATA:
@ -778,12 +761,7 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
case T_SHORT : __ sth(from_reg->as_register(), base, disp); break; case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
case T_INT : __ stw(from_reg->as_register(), base, disp); break; case T_INT : __ stw(from_reg->as_register(), base, disp); break;
case T_LONG : case T_LONG :
#ifdef _LP64
__ stx(from_reg->as_register_lo(), base, disp); __ stx(from_reg->as_register_lo(), base, disp);
#else
assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
__ std(from_reg->as_register_hi(), base, disp);
#endif
break; break;
case T_ADDRESS: case T_ADDRESS:
__ st_ptr(from_reg->as_register(), base, disp); __ st_ptr(from_reg->as_register(), base, disp);
@ -826,40 +804,22 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
case T_INT : __ ld(base, offset, to_reg->as_register()); break; case T_INT : __ ld(base, offset, to_reg->as_register()); break;
case T_LONG : case T_LONG :
if (!unaligned && !PatchALot) { if (!unaligned && !PatchALot) {
#ifdef _LP64
__ ldx(base, offset, to_reg->as_register_lo()); __ ldx(base, offset, to_reg->as_register_lo());
#else
assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
"must be sequential");
__ ldd(base, offset, to_reg->as_register_hi());
#endif
} else { } else {
#ifdef _LP64
assert(base != to_reg->as_register_lo(), "can't handle this"); assert(base != to_reg->as_register_lo(), "can't handle this");
assert(O7 != to_reg->as_register_lo(), "can't handle this"); assert(O7 != to_reg->as_register_lo(), "can't handle this");
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo()); __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
__ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
__ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo()); __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
__ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo()); __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
if (base == to_reg->as_register_lo()) {
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
__ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
} else {
__ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
}
#endif
} }
break; break;
case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break; case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
case T_ADDRESS: case T_ADDRESS:
#ifdef _LP64
if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) { if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
__ lduw(base, offset, to_reg->as_register()); __ lduw(base, offset, to_reg->as_register());
__ decode_klass_not_null(to_reg->as_register()); __ decode_klass_not_null(to_reg->as_register());
} else } else
#endif
{ {
__ ld_ptr(base, offset, to_reg->as_register()); __ ld_ptr(base, offset, to_reg->as_register());
} }
@ -921,13 +881,7 @@ int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType
case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break; case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break; case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
case T_LONG : case T_LONG :
#ifdef _LP64
__ ldx(base, disp, to_reg->as_register_lo()); __ ldx(base, disp, to_reg->as_register_lo());
#else
assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
"must be sequential");
__ ldd(base, disp, to_reg->as_register_hi());
#endif
break; break;
default : ShouldNotReachHere(); default : ShouldNotReachHere();
} }
@ -1107,16 +1061,9 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
jlong con = c->as_jlong(); jlong con = c->as_jlong();
if (to_reg->is_double_cpu()) { if (to_reg->is_double_cpu()) {
#ifdef _LP64
__ set(con, to_reg->as_register_lo()); __ set(con, to_reg->as_register_lo());
#else
__ set(low(con), to_reg->as_register_lo());
__ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
} else if (to_reg->is_single_cpu()) { } else if (to_reg->is_single_cpu()) {
__ set(con, to_reg->as_register()); __ set(con, to_reg->as_register());
#endif
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
assert(to_reg->is_double_fpu(), "wrong register kind"); assert(to_reg->is_double_fpu(), "wrong register kind");
@ -1190,12 +1137,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
__ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg()); __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
} else { } else {
assert(to_reg->is_double_cpu(), "Must be a long register."); assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
__ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo()); __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
__ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
__ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
} }
} }
@ -1366,22 +1308,10 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
} }
} else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) { } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
if (from_reg->is_double_cpu()) { if (from_reg->is_double_cpu()) {
#ifdef _LP64
__ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register()); __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
#else
assert(to_reg->is_double_cpu() &&
from_reg->as_register_hi() != to_reg->as_register_lo() &&
from_reg->as_register_lo() != to_reg->as_register_hi(),
"should both be long and not overlap");
// long to long moves
__ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
__ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
#endif
#ifdef _LP64
} else if (to_reg->is_double_cpu()) { } else if (to_reg->is_double_cpu()) {
// int to int moves // int to int moves
__ mov(from_reg->as_register(), to_reg->as_register_lo()); __ mov(from_reg->as_register(), to_reg->as_register_lo());
#endif
} else { } else {
// int to int moves // int to int moves
__ mov(from_reg->as_register(), to_reg->as_register()); __ mov(from_reg->as_register(), to_reg->as_register());
@ -1460,21 +1390,6 @@ void LIR_Assembler::return_op(LIR_Opr result) {
if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
__ reserved_stack_check(); __ reserved_stack_check();
} }
// the poll may need a register so just pick one that isn't the return register
#if defined(TIERED) && !defined(_LP64)
if (result->type_field() == LIR_OprDesc::long_type) {
// Must move the result to G1
// Must leave proper result in O0,O1 and G1 (TIERED only)
__ sllx(I0, 32, G1); // Shift bits into high G1
__ srl (I1, 0, I1); // Zero extend O1 (harmless?)
__ or3 (I1, G1, G1); // OR 64 bits into G1
#ifdef ASSERT
// mangle it so any problems will show up
__ set(0xdeadbeef, I0);
__ set(0xdeadbeef, I1);
#endif
}
#endif // TIERED
__ set((intptr_t)os::get_polling_page(), L0); __ set((intptr_t)os::get_polling_page(), L0);
__ relocate(relocInfo::poll_return_type); __ relocate(relocInfo::poll_return_type);
__ ld_ptr(L0, 0, G0); __ ld_ptr(L0, 0, G0);
@ -1568,23 +1483,11 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
Register xhi = opr1->as_register_hi(); Register xhi = opr1->as_register_hi();
if (opr2->is_constant() && opr2->as_jlong() == 0) { if (opr2->is_constant() && opr2->as_jlong() == 0) {
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases"); assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
#ifdef _LP64
__ orcc(xhi, G0, G0); __ orcc(xhi, G0, G0);
#else
__ orcc(xhi, xlo, G0);
#endif
} else if (opr2->is_register()) { } else if (opr2->is_register()) {
Register ylo = opr2->as_register_lo(); Register ylo = opr2->as_register_lo();
Register yhi = opr2->as_register_hi(); Register yhi = opr2->as_register_hi();
#ifdef _LP64
__ cmp(xlo, ylo); __ cmp(xlo, ylo);
#else
__ subcc(xlo, ylo, xlo);
__ subccc(xhi, yhi, xhi);
if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
__ orcc(xhi, xlo, G0);
}
#endif
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
} }
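
A plain-C++ sketch (not generated code) of what the deleted 32-bit compare path computed: a 64-bit equality test built from two register halves via a subtract of the low words (subcc), a subtract-with-borrow of the high words (subccc), and an OR of both results, which is zero iff the longs were equal. The 64-bit path kept above replaces all of this with a single cmp.

#include <cstdint>
#include <cassert>

static bool long_equal_via_halves(uint32_t xhi, uint32_t xlo,
                                  uint32_t yhi, uint32_t ylo) {
  uint32_t lo     = xlo - ylo;                 // subcc(xlo, ylo, xlo)
  uint32_t borrow = (xlo < ylo) ? 1 : 0;
  uint32_t hi     = xhi - yhi - borrow;        // subccc(xhi, yhi, xhi)
  return (hi | lo) == 0;                       // orcc(xhi, xlo, G0)
}

int main() {
  assert( long_equal_via_halves(0x1, 0x80000000u, 0x1, 0x80000000u));
  assert(!long_equal_via_halves(0x1, 0x00000000u, 0x0, 0xFFFFFFFFu));
  return 0;
}
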
@ -1612,13 +1515,7 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
ShouldNotReachHere(); ShouldNotReachHere();
} }
} else if (code == lir_cmp_l2i) { } else if (code == lir_cmp_l2i) {
#ifdef _LP64
__ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register()); __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
#else
__ lcmp(left->as_register_hi(), left->as_register_lo(),
right->as_register_hi(), right->as_register_lo(),
dst->as_register());
#endif
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
} }
@ -1656,12 +1553,11 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
ShouldNotReachHere(); ShouldNotReachHere();
} }
Label skip; Label skip;
#ifdef _LP64
if (type == T_INT) { if (type == T_INT) {
__ br(acond, false, Assembler::pt, skip); __ br(acond, false, Assembler::pt, skip);
} else } else {
#endif
__ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
}
if (opr1->is_constant() && opr1->type() == T_INT) { if (opr1->is_constant() && opr1->type() == T_INT) {
Register dest = result->as_register(); Register dest = result->as_register();
if (Assembler::is_simm13(opr1->as_jint())) { if (Assembler::is_simm13(opr1->as_jint())) {
@ -1720,7 +1616,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
} }
} else if (dest->is_double_cpu()) { } else if (dest->is_double_cpu()) {
#ifdef _LP64
Register dst_lo = dest->as_register_lo(); Register dst_lo = dest->as_register_lo();
Register op1_lo = left->as_pointer_register(); Register op1_lo = left->as_pointer_register();
Register op2_lo = right->as_pointer_register(); Register op2_lo = right->as_pointer_register();
@ -1736,28 +1631,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
#else
Register op1_lo = left->as_register_lo();
Register op1_hi = left->as_register_hi();
Register op2_lo = right->as_register_lo();
Register op2_hi = right->as_register_hi();
Register dst_lo = dest->as_register_lo();
Register dst_hi = dest->as_register_hi();
switch (code) {
case lir_add:
__ addcc(op1_lo, op2_lo, dst_lo);
__ addc (op1_hi, op2_hi, dst_hi);
break;
case lir_sub:
__ subcc(op1_lo, op2_lo, dst_lo);
__ subc (op1_hi, op2_hi, dst_hi);
break;
default: ShouldNotReachHere();
}
#endif
} else { } else {
assert (right->is_single_cpu(), "Just Checking"); assert (right->is_single_cpu(), "Just Checking");
@ -1852,23 +1725,14 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
int simm13 = (int)c; int simm13 = (int)c;
switch (code) { switch (code) {
case lir_logic_and: case lir_logic_and:
#ifndef _LP64
__ and3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
__ and3 (left->as_register_lo(), simm13, dest->as_register_lo()); __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
break; break;
case lir_logic_or: case lir_logic_or:
#ifndef _LP64
__ or3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
__ or3 (left->as_register_lo(), simm13, dest->as_register_lo()); __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
break; break;
case lir_logic_xor: case lir_logic_xor:
#ifndef _LP64
__ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
__ xor3 (left->as_register_lo(), simm13, dest->as_register_lo()); __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
break; break;
@ -1886,7 +1750,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
} else { } else {
#ifdef _LP64
Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
left->as_register_lo(); left->as_register_lo();
Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() : Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
@ -1898,26 +1761,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break; case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
#else
switch (code) {
case lir_logic_and:
__ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
__ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
break;
case lir_logic_or:
__ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
__ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
break;
case lir_logic_xor:
__ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
__ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
break;
default: ShouldNotReachHere();
}
#endif
} }
} }
} }
@ -1975,12 +1818,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (basic_type == T_ARRAY) basic_type = T_OBJECT; if (basic_type == T_ARRAY) basic_type = T_OBJECT;
#ifdef _LP64
// higher 32bits must be zero // higher 32bits must be zero
__ sra(dst_pos, 0, dst_pos); __ sra(dst_pos, 0, dst_pos);
__ sra(src_pos, 0, src_pos); __ sra(src_pos, 0, src_pos);
__ sra(length, 0, length); __ sra(length, 0, length);
#endif
// set up the arraycopy stub information // set up the arraycopy stub information
ArrayCopyStub* stub = op->stub(); ArrayCopyStub* stub = op->stub();
@ -2316,7 +2157,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
if (dest->is_single_cpu()) { if (dest->is_single_cpu()) {
#ifdef _LP64
if (left->type() == T_OBJECT) { if (left->type() == T_OBJECT) {
switch (code) { switch (code) {
case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break; case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
@ -2325,7 +2165,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
} else } else
#endif
switch (code) { switch (code) {
case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break; case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break; case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
@ -2333,27 +2172,17 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
} else { } else {
#ifdef _LP64
switch (code) { switch (code) {
case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
#else
switch (code) {
case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
default: ShouldNotReachHere();
}
#endif
} }
} }
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
if (left->type() == T_OBJECT) { if (left->type() == T_OBJECT) {
count = count & 63; // shouldn't shift by more than sizeof(intptr_t) count = count & 63; // shouldn't shift by more than sizeof(intptr_t)
Register l = left->as_register(); Register l = left->as_register();
@ -2366,7 +2195,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr de
} }
return; return;
} }
#endif
if (dest->is_single_cpu()) { if (dest->is_single_cpu()) {
count = count & 0x1F; // Java spec count = count & 0x1F; // Java spec
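
A sketch of the count masking applied above: per the Java spec a 32-bit shift uses only the low 5 bits of the count (& 0x1F) and a 64-bit or pointer-sized shift only the low 6 bits (& 63), which is why the constant count is masked before the sll/sllx is emitted.

#include <cstdint>
#include <cassert>

static int32_t java_ishl(int32_t x, int count) { return x << (count & 0x1F); }
static int64_t java_lshl(int64_t x, int count) { return x << (count & 63);   }

int main() {
  assert(java_ishl(1, 33) == 2);   // 33 & 31 == 1, so shift by 1
  assert(java_lshl(1, 65) == 2);   // 65 & 63 == 1, so shift by 1
  return 0;
}
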
@ -2425,7 +2253,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
op->tmp4()->as_register() == O1 && op->tmp4()->as_register() == O1 &&
op->klass()->as_register() == G5, "must be"); op->klass()->as_register() == G5, "must be");
LP64_ONLY( __ signx(op->len()->as_register()); ) __ signx(op->len()->as_register());
if (UseSlowPath || if (UseSlowPath ||
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
@ -2748,7 +2576,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
Register new_value_hi = op->new_value()->as_register_hi(); Register new_value_hi = op->new_value()->as_register_hi();
Register t1 = op->tmp1()->as_register(); Register t1 = op->tmp1()->as_register();
Register t2 = op->tmp2()->as_register(); Register t2 = op->tmp2()->as_register();
#ifdef _LP64
__ mov(cmp_value_lo, t1); __ mov(cmp_value_lo, t1);
__ mov(new_value_lo, t2); __ mov(new_value_lo, t2);
// perform the compare and swap operation // perform the compare and swap operation
@ -2756,23 +2583,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
// generate condition code - if the swap succeeded, t2 ("new value" reg) was // generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1. // overwritten with the original value in "addr" and will be equal to t1.
__ cmp(t1, t2); __ cmp(t1, t2);
#else
// move high and low halves of long values into single registers
__ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
__ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
__ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
__ sllx(new_value_hi, 32, t2);
__ srl(new_value_lo, 0, new_value_lo);
__ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
// perform the compare and swap operation
__ casx(addr, t1, t2);
// generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1.
// Produce icc flag for 32bit.
__ sub(t1, t2, t2);
__ srlx(t2, 32, t1);
__ orcc(t2, t1, G0);
#endif
} else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
Register addr = op->addr()->as_pointer_register(); Register addr = op->addr()->as_pointer_register();
Register cmp_value = op->cmp_value()->as_register(); Register cmp_value = op->cmp_value()->as_register();
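
A plain-C++ sketch of the register gymnastics the deleted 32-bit block performed before casx: each 64-bit operand was assembled from two 32-bit halves with sllx/srl/or3, i.e. (hi << 32) | zero_extend(lo). The 64-bit path kept above holds the value in one register and skips the merge entirely.

#include <cstdint>
#include <cassert>

static uint64_t merge_halves(uint32_t hi, uint32_t lo) {
  return ((uint64_t)hi << 32)      // sllx(hi, 32, t)
       | (uint64_t)lo;             // srl(lo, 0, lo); or3(t, lo, t)
}

int main() {
  assert(merge_halves(0xDEADBEEFu, 0xCAFEBABEu) == 0xDEADBEEFCAFEBABEull);
  return 0;
}
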
@ -2914,13 +2724,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
assert(data->is_CounterData(), "need CounterData for calls"); assert(data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register(); Register mdo = op->mdo()->as_register();
#ifdef _LP64
assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
Register tmp1 = op->tmp1()->as_register_lo(); Register tmp1 = op->tmp1()->as_register_lo();
#else
assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
Register tmp1 = op->tmp1()->as_register();
#endif
metadata2reg(md->constant_encoding(), mdo); metadata2reg(md->constant_encoding(), mdo);
int mdo_offset_bias = 0; int mdo_offset_bias = 0;
if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) + if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
@ -3200,12 +3005,7 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
assert (left->is_double_cpu(), "Must be a long"); assert (left->is_double_cpu(), "Must be a long");
Register Rlow = left->as_register_lo(); Register Rlow = left->as_register_lo();
Register Rhi = left->as_register_hi(); Register Rhi = left->as_register_hi();
#ifdef _LP64
__ sub(G0, Rlow, dest->as_register_lo()); __ sub(G0, Rlow, dest->as_register_lo());
#else
__ subcc(G0, Rlow, dest->as_register_lo());
__ subc (G0, Rhi, dest->as_register_hi());
#endif
} }
} }
@ -3245,9 +3045,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
ShouldNotReachHere(); ShouldNotReachHere();
#endif
NEEDS_CLEANUP; NEEDS_CLEANUP;
if (type == T_LONG) { if (type == T_LONG) {
@ -3491,31 +3289,6 @@ void LIR_Assembler::peephole(LIR_List* lir) {
inst->insert_before(i + 1, delay_op); inst->insert_before(i + 1, delay_op);
i++; i++;
} }
#if defined(TIERED) && !defined(_LP64)
// fixup the return value from G1 to O0/O1 for long returns.
// It's done here instead of in LIRGenerator because there's
// such a mismatch between the single reg and double reg
// calling convention.
LIR_OpJavaCall* callop = op->as_OpJavaCall();
if (callop->result_opr() == FrameMap::out_long_opr) {
LIR_OpJavaCall* call;
LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
for (int a = 0; a < arguments->length(); a++) {
arguments[a] = callop->arguments()[a];
}
if (op->code() == lir_virtual_call) {
call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
callop->vtable_offset(), arguments, callop->info());
} else {
call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
callop->addr(), arguments, callop->info());
}
inst->at_put(i - 1, call);
inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
T_LONG, lir_patch_none, NULL));
}
#endif
break; break;
} }
} }
@ -3533,14 +3306,10 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
} else if (data->is_oop()) { } else if (data->is_oop()) {
Register obj = data->as_register(); Register obj = data->as_register();
Register narrow = tmp->as_register(); Register narrow = tmp->as_register();
#ifdef _LP64
assert(UseCompressedOops, "swap is 32bit only"); assert(UseCompressedOops, "swap is 32bit only");
__ encode_heap_oop(obj, narrow); __ encode_heap_oop(obj, narrow);
__ swap(as_Address(addr), narrow); __ swap(as_Address(addr), narrow);
__ decode_heap_oop(narrow, obj); __ decode_heap_oop(narrow, obj);
#else
__ swap(as_Address(addr), obj);
#endif
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -61,11 +61,7 @@
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias); ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);
enum { enum {
#ifdef _LP64
_call_stub_size = 68, _call_stub_size = 68,
#else
_call_stub_size = 20,
#endif // _LP64
_call_aot_stub_size = 0, _call_aot_stub_size = 0,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128), _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
_deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64) _deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,7 @@ LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::Oexcepti
LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::Oissuing_pc_opr; } LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::Oissuing_pc_opr; }
LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); } LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr() { return new_register(T_OBJECT); } LIR_Opr LIRGenerator::syncTempOpr() { return new_register(T_OBJECT); }
LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); } LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(T_LONG); }
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) { LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
LIR_Opr opr; LIR_Opr opr;
@ -215,13 +215,11 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
} }
} }
} else { } else {
#ifdef _LP64
if (index_opr->type() == T_INT) { if (index_opr->type() == T_INT) {
LIR_Opr tmp = new_register(T_LONG); LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index_opr, tmp); __ convert(Bytecodes::_i2l, index_opr, tmp);
index_opr = tmp; index_opr = tmp;
} }
#endif
base_opr = new_pointer_register(); base_opr = new_pointer_register();
assert (index_opr->is_register(), "Must be register"); assert (index_opr->is_register(), "Must be register");
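
A sketch of why emit_array_address converts a T_INT index to T_LONG first: in a 64-bit address computation the 32-bit index must be sign-extended before it is scaled and added to the base, otherwise a negative index (caught later by the range check) would wrap into a huge unsigned offset.

#include <cstdint>
#include <cassert>

static intptr_t element_address(intptr_t base, int32_t index, int elem_size) {
  int64_t widened = (int64_t)index;            // Bytecodes::_i2l: sign extend
  return base + (intptr_t)(widened * elem_size);
}

int main() {
  assert(element_address(0x1000,  4, 8) == 0x1020);
  assert(element_address(0x1000, -1, 8) == 0x0FF8);  // stays a small negative offset
  return 0;
}
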
@ -1317,20 +1315,12 @@ void LIRGenerator::trace_block_entry(BlockBegin* block) {
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address, void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
CodeEmitInfo* info) { CodeEmitInfo* info) {
#ifdef _LP64
__ store(value, address, info); __ store(value, address, info);
#else
__ volatile_store_mem_reg(value, address, info);
#endif
} }
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result, void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) { CodeEmitInfo* info) {
#ifdef _LP64
__ load(address, result, info); __ load(address, result, info);
#else
__ volatile_load_mem_reg(address, result, info);
#endif
} }
@ -1340,11 +1330,6 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
LIR_Opr index_op = offset; LIR_Opr index_op = offset;
bool is_obj = (type == T_ARRAY || type == T_OBJECT); bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#ifndef _LP64
if (is_volatile && type == T_LONG) {
__ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
} else
#endif
{ {
if (type == T_BOOLEAN) { if (type == T_BOOLEAN) {
type = T_BYTE; type = T_BYTE;
@ -1374,11 +1359,6 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset, void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) { BasicType type, bool is_volatile) {
#ifndef _LP64
if (is_volatile && type == T_LONG) {
__ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
} else
#endif
{ {
LIR_Address* addr = new LIR_Address(src, offset, type); LIR_Address* addr = new LIR_Address(src, offset, type);
__ load(addr, dst); __ load(addr, dst);
@ -1403,17 +1383,13 @@ void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
// Because we want a 2-arg form of xchg // Because we want a 2-arg form of xchg
__ move(data, dst); __ move(data, dst);
assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type"); assert (!x->is_add() && (type == T_INT || (is_obj && UseCompressedOops)), "unexpected type");
LIR_Address* addr; LIR_Address* addr;
if (offset->is_constant()) { if (offset->is_constant()) {
#ifdef _LP64
jlong l = offset->as_jlong(); jlong l = offset->as_jlong();
assert((jlong)((jint)l) == l, "offset too large for constant"); assert((jlong)((jint)l) == l, "offset too large for constant");
jint c = (jint)l; jint c = (jint)l;
#else
jint c = offset->as_jint();
#endif
addr = new LIR_Address(src.result(), c, type); addr = new LIR_Address(src.result(), c, type);
} else { } else {
addr = new LIR_Address(src.result(), offset, type); addr = new LIR_Address(src.result(), offset, type);

View File

@ -48,16 +48,9 @@ LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
void LIR_Address::verify() const { void LIR_Address::verify() const {
assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used"); assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used");
assert(disp() == 0 || index()->is_illegal(), "can't have both"); assert(disp() == 0 || index()->is_illegal(), "can't have both");
#ifdef _LP64
assert(base()->is_cpu_register(), "wrong base operand"); assert(base()->is_cpu_register(), "wrong base operand");
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand"); assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA, assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
"wrong type for addresses"); "wrong type for addresses");
#else
assert(base()->is_single_cpu(), "wrong base operand");
assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
"wrong type for addresses");
#endif
} }
#endif // PRODUCT #endif // PRODUCT

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -32,11 +32,7 @@ inline bool LinearScan::is_processed_reg_num(int reg_num) {
inline int LinearScan::num_physical_regs(BasicType type) { inline int LinearScan::num_physical_regs(BasicType type) {
// Sparc requires two cpu registers for long // Sparc requires two cpu registers for long
// and two cpu registers for double // and two cpu registers for double
#ifdef _LP64
if (type == T_DOUBLE) { if (type == T_DOUBLE) {
#else
if (type == T_DOUBLE || type == T_LONG) {
#endif
return 2; return 2;
} }
return 1; return 1;
@ -44,11 +40,7 @@ inline int LinearScan::num_physical_regs(BasicType type) {
inline bool LinearScan::requires_adjacent_regs(BasicType type) { inline bool LinearScan::requires_adjacent_regs(BasicType type) {
#ifdef _LP64
return type == T_DOUBLE; return type == T_DOUBLE;
#else
return type == T_DOUBLE || type == T_LONG;
#endif
} }
inline bool LinearScan::is_caller_save(int assigned_reg) { inline bool LinearScan::is_caller_save(int assigned_reg) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -273,13 +273,6 @@ void C1_MacroAssembler::initialize_object(
add(obj, hdr_size_in_bytes, t1); // compute address of first element add(obj, hdr_size_in_bytes, t1); // compute address of first element
sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
initialize_body(t1, t2); initialize_body(t1, t2);
#ifndef _LP64
} else if (con_size_in_bytes < threshold * 2) {
// on v9 we can do double word stores to fill twice as much space.
assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
assert(con_size_in_bytes % 8 == 0, "double word aligned");
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += 2 * HeapWordSize) stx(G0, obj, i);
#endif
} else if (con_size_in_bytes <= threshold) { } else if (con_size_in_bytes <= threshold) {
// use explicit NULL stores // use explicit NULL stores
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += HeapWordSize) st_ptr(G0, obj, i); for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += HeapWordSize) st_ptr(G0, obj, i);

View File

@ -930,11 +930,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Label not_already_dirty, restart, refill, young_card; Label not_already_dirty, restart, refill, young_card;
#ifdef _LP64
__ srlx(addr, CardTableModRefBS::card_shift, addr); __ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
__ srl(addr, CardTableModRefBS::card_shift, addr);
#endif
AddressLiteral rs(byte_map_base); AddressLiteral rs(byte_map_base);
__ set(rs, cardtable); // cardtable := <card table base> __ set(rs, cardtable); // cardtable := <card table base>

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,6 @@ define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
define_pd_global(bool, IdealizeClearArrayNode, true);
-#ifdef _LP64
// We need to make sure that all generated code is within
// 2 gigs of the libjvm.so runtime routines so we can use
// the faster "call" instruction rather than the expensive
@@ -82,17 +81,6 @@ define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
-#else
-// InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize, 32*M);
-define_pd_global(intx, NonProfiledCodeHeapSize, 13*M);
-define_pd_global(intx, ProfiledCodeHeapSize, 14*M);
-define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
-define_pd_global(intx, CodeCacheExpansionSize, 32*K);
-// Ergonomics related flags
-define_pd_global(uint64_t, MaxRAM, 4ULL*G);
-#endif
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
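The "2 gigs" in the retained comment falls out of the SPARC call encoding, which holds a signed 30-bit word displacement. A quick self-check of that arithmetic (mine, not the patch's):

    #include <cassert>
    #include <cstdint>

    int main() {
      // SPARC "call": signed 30-bit word displacement, 4-byte words,
      // so the reachable range is +/- 2^29 words == +/- 2 GB.
      const int64_t reach_bytes = (int64_t(1) << 29) * 4;
      assert(reach_bytes == (int64_t(1) << 31));   // exactly 2 GB each way
      return 0;
    }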

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -114,14 +114,8 @@ static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
-#ifdef _LP64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
-#else
-// Guarantee use of ldd/std via some asm code, because compiler won't.
-// See solaris_sparc.il.
-_Copy_conjoint_jlongs_atomic(from, to, count);
-#endif
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
@@ -162,7 +156,6 @@ static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count)
}
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
-#ifdef _LP64
guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0,
"unaligned fill words");
julong* to = (julong*)tohw;
@@ -170,12 +163,6 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
while (count-- > 0) {
*to++ = v;
}
-#else // _LP64
-juint* to = (juint*)tohw;
-while (count-- > 0) {
-*to++ = value;
-}
-#endif // _LP64
}
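The hunk elides the statement that builds v. In HotSpot's 64-bit path it replicates the 32-bit fill value into both halves of a julong; a hedged reconstruction using the surrounding HotSpot typedefs:

    // Assumed shape of the elided setup in pd_fill_to_words() (not shown above):
    static void fill_words_lp64(julong* to, size_t count, juint value) {
      julong v = ((julong) value << 32) | value;   // replicate the 32-bit pattern
      while (count-- > 0) {
        *to++ = v;                                 // one 8-byte store per word
      }
    }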
typedef void (*_zero_Fn)(HeapWord* to, size_t count);

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -114,11 +114,7 @@ address RegisterMap::pd_location(VMReg regname) const {
// register locations. When that is fixed we'd will return NULL
// (or assert here).
reg = regname->prev()->as_Register();
-#ifdef _LP64
second_word = sizeof(jint);
-#else
-return NULL;
-#endif // _LP64
} else {
reg = regname->as_Register();
}
@@ -332,9 +328,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// Construct an unpatchable, deficient frame
void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
-#ifdef _LP64
assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
-#endif
_sp = sp;
_younger_sp = NULL;
_pc = pc;
@@ -693,11 +687,9 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
address l_addr = (address)l_scratch;
-#ifdef _LP64
// On 64-bit the result for 1/8/16/32-bit result types is in the other
// word half
l_addr += wordSize/2;
-#endif
switch (type) {
case T_OBJECT:
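The half-word bump survives unconditionally because SPARC is big-endian: a narrow result written through a 64-bit slot lands in the high-address half. A byte-layout sketch (my illustration):

    // 64-bit stack slot holding a 32-bit (or narrower) result, big-endian:
    //   byte offsets 0..3 : upper half (zero/sign extension)
    //   byte offsets 4..7 : the actual value
    // Hence l_addr += wordSize/2 (== 4) points straight at the value.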

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -100,11 +100,7 @@
// size of each block, in order of increasing address:
register_save_words = 16,
-#ifdef _LP64
callee_aggregate_return_pointer_words = 0,
-#else
-callee_aggregate_return_pointer_words = 1,
-#endif
callee_register_argument_save_area_words = 6,
// memory_parameter_words = <arbitrary>,

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,24 +38,14 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
// The expected size in bytes of a cache line, used to pad data structures.
#if defined(TIERED)
-#ifdef _LP64
// tiered, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128
-#else
-// tiered, 32-bit, medium machine
-#define DEFAULT_CACHE_LINE_SIZE 64
-#endif
#elif defined(COMPILER1)
// pure C1, 32-bit, small machine
#define DEFAULT_CACHE_LINE_SIZE 16
#elif defined(COMPILER2) || defined(SHARK)
-#ifdef _LP64
// pure C2, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128
-#else
-// pure C2, 32-bit, medium machine
-#define DEFAULT_CACHE_LINE_SIZE 64
-#endif
#endif
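For readability, the cache-line selection as it stands once the removals above are applied (reassembled from the hunk, nothing new):

    // Post-change state of the DEFAULT_CACHE_LINE_SIZE selection:
    #if defined(TIERED)
      #define DEFAULT_CACHE_LINE_SIZE 128   // tiered, 64-bit, large machine
    #elif defined(COMPILER1)
      #define DEFAULT_CACHE_LINE_SIZE 16    // pure C1, 32-bit, small machine
    #elif defined(COMPILER2) || defined(SHARK)
      #define DEFAULT_CACHE_LINE_SIZE 128   // pure C2, 64-bit, large machine
    #endif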
#if defined(SOLARIS)

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,18 +56,10 @@ define_pd_global(intx, InlineSmallCode, 1500);
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0))
-#ifdef _LP64
-// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
#define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
-#else
-define_pd_global(intx, CompilerThreadStackSize, 512);
-define_pd_global(intx, ThreadStackSize, 512);
-define_pd_global(intx, VMThreadStackSize, 512);
-#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
-#endif // _LP64
#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,13 +32,9 @@
#include "oops/oop.inline.hpp"
int InlineCacheBuffer::ic_stub_code_size() {
-#ifdef _LP64
return (NativeMovConstReg::instruction_size + // sethi;add
NativeJump::instruction_size + // sethi; jmp; delay slot
(1*BytesPerInstWord) + 1); // flush + 1 extra byte
-#else
-return (2+2+ 1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
-#endif
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,11 +39,6 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
-#ifndef FAST_DISPATCH
-#define FAST_DISPATCH 1
-#endif
-#undef FAST_DISPATCH
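Worth noting: the removed preprocessor sequence defined FAST_DISPATCH only to #undef it immediately, so every #ifdef FAST_DISPATCH block below was already dead code; the patch deletes it for real. The removed idiom, isolated:

    // The removed idiom unconditionally disabled the feature:
    #ifndef FAST_DISPATCH
    #define FAST_DISPATCH 1
    #endif
    #undef FAST_DISPATCH   // always undefined from here on, so each
                           // "#ifdef FAST_DISPATCH" block never compiled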
// Implementation of InterpreterMacroAssembler
// This file specializes the assember with interpreter-specific macros
@@ -78,23 +73,12 @@ void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args
// own dispatch. The dispatch address is computed and placed in IdispatchAddress
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
assert_not_delayed();
-#ifdef FAST_DISPATCH
-// FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
-// they both use I2.
-assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
-ldub(Lbcp, bcp_incr, Lbyte_code); // load next bytecode
-add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
-// add offset to correct dispatch table
-sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
-ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
-#else
ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
// dispatch table to use
AddressLiteral tbl(Interpreter::dispatch_table(state));
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
set(tbl, G3_scratch); // compute addr of table
ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr
-#endif
}
@@ -281,23 +265,11 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* tab
// %%%%% maybe implement +VerifyActivationFrameSize here
//verify_thread(); //too slow; we will just verify on method entry & exit
if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
-#ifdef FAST_DISPATCH
-if (table == Interpreter::dispatch_table(state)) {
-// use IdispatchTables
-add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
-// add offset to correct dispatch table
-sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
-ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // get entry addr
-} else {
-#endif
-// dispatch table to use
-AddressLiteral tbl(table);
-sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
-set(tbl, G3_scratch); // compute addr of table
-ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
-#ifdef FAST_DISPATCH
-}
-#endif
+// dispatch table to use
+AddressLiteral tbl(table);
+sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
+set(tbl, G3_scratch); // compute addr of table
+ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
jmp( G3_scratch, 0 );
if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
else delayed()->nop();
@@ -318,52 +290,32 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* tab
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
assert_not_delayed();
-#ifdef _LP64
ldf(FloatRegisterImpl::D, r1, offset, d);
-#else
-ldf(FloatRegisterImpl::S, r1, offset, d);
-ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
-#endif
}
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
assert_not_delayed();
-#ifdef _LP64
stf(FloatRegisterImpl::D, d, r1, offset);
// store something more useful here
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
-#else
-stf(FloatRegisterImpl::S, d, r1, offset);
-stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
-#endif
}
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
assert_not_delayed();
-#ifdef _LP64
ldx(r1, offset, rd);
-#else
-ld(r1, offset, rd);
-ld(r1, offset + Interpreter::stackElementSize, rd->successor());
-#endif
}
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
assert_not_delayed();
-#ifdef _LP64
stx(l, r1, offset);
// store something more useful here
stx(G0, r1, offset+Interpreter::stackElementSize);
-#else
-st(l, r1, offset);
-st(l->successor(), r1, offset + Interpreter::stackElementSize);
-#endif
}
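These accessors now always take the 8-byte forms, leaving the retained "Known good alignment in _LP64 but unknown otherwise" comments vestigial. A sketch of why a single access suffices (assuming the 8-byte slot alignment the code relies on):

    #include <cstdint>

    // With 8-byte interpreter stack slots, a jlong that logically spans two
    // Java expression-stack words is still 8-byte aligned, so one 64-bit
    // access (the ldx/stx above) replaces the removed pair of 32-bit ones.
    inline int64_t load_two_slot_long(const int64_t* slot) {
      return *slot;   // single aligned 8-byte load
    }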
void InterpreterMacroAssembler::pop_i(Register r) {
@@ -527,9 +479,7 @@ void InterpreterMacroAssembler::empty_expression_stack() {
sub( Lesp, Gframe_size, Gframe_size );
and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
debug_only(verify_sp(Gframe_size, G4_scratch));
-#ifdef _LP64
sub(Gframe_size, STACK_BIAS, Gframe_size );
-#endif
mov(Gframe_size, SP);
bind(done);
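STACK_BIAS adjustments like the one above are now unconditional because the 64-bit SPARC ABI bakes the bias in: %sp and %fp hold the real address minus a constant. A small helper to illustrate (the helper is mine; 2047 is the V9 ABI constant):

    #include <cstdint>

    // V9 ABI: the hardware %sp/%fp hold (real address - STACK_BIAS), so code
    // subtracts the bias before installing a computed SP (as above) and adds
    // it back whenever it needs the real address.
    inline intptr_t real_stack_address(intptr_t biased_sp) {
      const intptr_t STACK_BIAS = 2047;   // 0x7ff, fixed by the 64-bit SPARC ABI
      return biased_sp + STACK_BIAS;
    }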
@@ -541,28 +491,20 @@ void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
Label Bad, OK;
// Saved SP must be aligned.
-#ifdef _LP64
btst(2*BytesPerWord-1, Rsp);
-#else
-btst(LongAlignmentMask, Rsp);
-#endif
br(Assembler::notZero, false, Assembler::pn, Bad);
delayed()->nop();
// Saved SP, plus register window size, must not be above FP.
add(Rsp, frame::register_save_words * wordSize, Rtemp);
-#ifdef _LP64
sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP
-#endif
cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);
// Saved SP must not be ridiculously below current SP.
size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
set(maxstack, Rtemp);
sub(SP, Rtemp, Rtemp);
-#ifdef _LP64
add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp
-#endif
cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);
ba_short(OK);
@@ -584,9 +526,7 @@ void InterpreterMacroAssembler::verify_esp(Register Resp) {
delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
stop("too many pops: Lesp points into monitor area");
bind(OK1);
-#ifdef _LP64
sub(Resp, STACK_BIAS, Resp);
-#endif
cmp(Resp, SP);
brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
@@ -696,21 +636,12 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
}
br(Assembler::zero, true, Assembler::pn, aligned);
-#ifdef _LP64
delayed()->ldsw(Rtmp, 0, Rdst);
-#else
-delayed()->ld(Rtmp, 0, Rdst);
-#endif
ldub(Lbcp, bcp_offset + 3, Rdst);
ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst);
ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst);
-#ifdef _LP64
ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
-#else
-// Unsigned load is faster than signed on some implementations
-ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
-#endif
or3(Rtmp, Rdst, Rdst );
bind(aligned);
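The unaligned path assembles the operand big-endian, with a signed load of the top byte so the register ends up sign-extended. The same computation in C++ (a sketch; the name is mine):

    #include <cstdint>

    // Assemble a 4-byte big-endian operand from an unaligned bytecode pointer;
    // the ldsb of byte 0 in the assembly additionally sign-extends the result
    // to 64 bits in the destination register.
    inline int32_t get_4_byte_integer_at(const uint8_t* bcp) {
      uint32_t u = (uint32_t(bcp[0]) << 24) |
                   (uint32_t(bcp[1]) << 16) |
                   (uint32_t(bcp[2]) << 8)  |
                    uint32_t(bcp[3]);
      return int32_t(u);   // signed value, as the bytecode format requires
    }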
@@ -796,7 +727,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
sll(index, LogBytesPerHeapOop, tmp);
get_constant_pool(result);
// load pointer for resolved_references[] objArray
-ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
+ld_ptr(result, ConstantPool::cache_offset_in_bytes(), result);
+ld_ptr(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
ld_ptr(result, 0, result);
// Add in the index
@@ -805,6 +737,24 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
}
+// load cpool->resolved_klass_at(index)
+void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool,
+Register Roffset, Register Rklass) {
+// int value = *this_cp->int_at_addr(which);
+// int resolved_klass_index = extract_low_short_from_int(value);
+//
+// Because SPARC is big-endian, the low_short is at (cpool->int_at_addr(which) + 2 bytes)
+add(Roffset, Rcpool, Roffset);
+lduh(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
+Register Rresolved_klasses = Rklass;
+ld_ptr(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes(), Rresolved_klasses);
+sll(Roffset, LogBytesPerWord, Roffset);
+add(Roffset, Array<Klass*>::base_offset_in_bytes(), Roffset);
+ld_ptr(Rresolved_klasses, Roffset, Rklass);
+}
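The new helper decodes a packed constant-pool entry: two 16-bit indices in one 32-bit int, and on a big-endian machine the wanted low short sits at byte offset 2. A value-level sketch of the decode (name and types are mine):

    #include <cstdint>

    // Extract resolved_klass_index, the low 16 bits of the packed entry; on
    // big-endian SPARC that halfword also lives at byte offset 2, which is
    // what lduh(..., sizeof(ConstantPool) + 2, ...) reads above.
    inline uint16_t resolved_klass_index(int32_t packed_entry) {
      return uint16_t(packed_entry & 0xffff);
    }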
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
@@ -910,10 +860,8 @@ void InterpreterMacroAssembler::index_check_without_pop(Register array, Register
assert_not_delayed();
verify_oop(array);
-#ifdef _LP64
// sign extend since tos (index) can be a 32bit value
sra(index, G0, index);
-#endif // _LP64
// check array
Label ptr_ok;
@@ -1191,11 +1139,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
// return tos
assert(Otos_l1 == Otos_i, "adjust code below");
switch (state) {
-#ifdef _LP64
case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
-#else
-case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I1
-#endif
case btos: // fall through
case ztos: // fall through
case ctos:
@@ -1207,20 +1151,6 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
-#if defined(COMPILER2) && !defined(_LP64)
-if (state == ltos) {
-// C2 expects long results in G1 we can't tell if we're returning to interpreted
-// or compiled so just be safe use G1 and O0/O1
-// Shift bits into high (msb) of G1
-sllx(Otos_l1->after_save(), 32, G1);
-// Zero extend low bits
-srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
-or3 (Otos_l2->after_save(), G1, G1);
-}
-#endif /* COMPILER2 */
}
// Lock object
@@ -1270,9 +1200,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
// Check if owner is self by comparing the value in the markOop of object
// with the stack pointer
sub(temp_reg, SP, temp_reg);
-#ifdef _LP64
sub(temp_reg, STACK_BIAS, temp_reg);
-#endif
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
// Composite "andcc" test:
@@ -2711,11 +2639,7 @@ void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
if (is_native_call) {
stf(FloatRegisterImpl::D, F0, d_tmp);
-#ifdef _LP64
stx(O0, l_tmp);
-#else
-std(O0, l_tmp);
-#endif
} else {
push(state);
}
@@ -2724,11 +2648,7 @@ void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native
void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
if (is_native_call) {
ldf(FloatRegisterImpl::D, d_tmp, F0);
-#ifdef _LP64
ldx(l_tmp, O0);
-#else
-ldd(l_tmp, O0);
-#endif
} else {
pop(state);
}

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,9 +70,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool check_exception=true
);
-virtual void check_and_handle_popframe(Register java_thread);
-virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches
void dispatch_base(TosState state, address* table);
@@ -80,6 +77,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {}
+virtual void check_and_handle_popframe(Register scratch_reg);
+virtual void check_and_handle_earlyret(Register scratch_reg);
void jump_to_entry(address entry);
virtual void load_earlyret_value(TosState state);
@@ -196,6 +196,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
+// load cpool->resolved_klass_at(index)
+void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);
// common code
void field_offset_at(int n, Register tmp, Register dest, Register base);

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,47 +53,24 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
Argument jni_arg(jni_offset(), false);
Register Rtmp = O0;
-#ifdef _LP64
__ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_long_argument(Rtmp, jni_arg);
-#else
-__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
-__ store_argument(Rtmp, jni_arg);
-__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 0), Rtmp);
-Argument successor(jni_arg.successor());
-__ store_argument(Rtmp, successor);
-#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
Argument jni_arg(jni_offset(), false);
-#ifdef _LP64
FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
__ store_float_argument(Rtmp, jni_arg);
-#else
-Register Rtmp = O0;
-__ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
-__ store_argument(Rtmp, jni_arg);
-#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
Argument jni_arg(jni_offset(), false);
-#ifdef _LP64
FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::D, Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_double_argument(Rtmp, jni_arg);
-#else
-Register Rtmp = O0;
-__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
-__ store_argument(Rtmp, jni_arg);
-__ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
-Argument successor(jni_arg.successor());
-__ store_argument(Rtmp, successor);
-#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
@@ -171,7 +148,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
add_signature( non_float );
}
-#ifdef _LP64
virtual void pass_float() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
_from -= Interpreter::stackElementSize;
@@ -190,23 +166,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
_from -= 2*Interpreter::stackElementSize;
add_signature( long_sig );
}
-#else
-// pass_double() is pass_long() and pass_float() only _LP64
-virtual void pass_long() {
-_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
-_to += 2;
-_from -= 2*Interpreter::stackElementSize;
-add_signature( non_float );
-}
-virtual void pass_float() {
-*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-_from -= Interpreter::stackElementSize;
-add_signature( non_float );
-}
-#endif // _LP64
virtual void add_signature( intptr_t sig_type ) {
if ( _argcount < (sizeof (intptr_t))*4 ) {

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -88,9 +88,7 @@ private:
// _last_Java_sp will always be a an unbiased stack pointer
// if is is biased then some setter screwed up. This is
// deadly.
-#ifdef _LP64
assert(((intptr_t)_last_Java_sp & 0xF) == 0, "Biased last_Java_sp");
-#endif
return _last_Java_sp;
}

View File

@@ -152,39 +152,19 @@ address JNI_FastGetField::generate_fast_get_long_field() {
__ ld_ptr (O1, 0, O5);
__ add (O5, O4, O5);
-#ifndef _LP64
-assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small");
-speculative_load_pclist[count++] = __ pc();
-__ ld (O5, 0, G2);
-speculative_load_pclist[count] = __ pc();
-__ ld (O5, 4, O3);
-#else
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
__ ldx (O5, 0, O3);
-#endif
__ ld (cnt_addr, G1);
__ cmp (G1, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1);
-#ifndef _LP64
-__ mov (G2, O0);
-__ retl ();
-__ delayed()->mov (O3, O1);
-#else
__ retl ();
__ delayed()->mov (O3, O0);
-#endif
-#ifndef _LP64
-slowcase_entry_pclist[count-1] = __ pc();
-slowcase_entry_pclist[count++] = __ pc() ;
-#else
slowcase_entry_pclist[count++] = __ pc();
-#endif
__ bind (label1);
__ mov (O7, G1);

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,18 +55,10 @@ public:
static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; }
static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }
-#ifdef _LP64
// Longs are stored in native format in one JavaCallArgument slot at *(to+1).
static inline void put_long(jlong from, intptr_t *to) { *(jlong *)(to + 1 + 0) = from; }
static inline void put_long(jlong from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = from; pos += 2; }
static inline void put_long(jlong *from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = *from; pos += 2; }
-#else
-// Longs are stored in reversed native word format in two JavaCallArgument slots at *to.
-// The high half is in *(to+1) and the low half in *to.
-static inline void put_long(jlong from, intptr_t *to) { put_int2r((jint *)&from, (jint *)to); }
-static inline void put_long(jlong from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
-static inline void put_long(jlong *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
-#endif
// Oops are stored in native format in one JavaCallArgument slot at *to.
static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; }
@@ -78,39 +70,21 @@ public:
static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; }
static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }
-#ifdef _LP64
// Doubles are stored in native word format in one JavaCallArgument slot at *(to+1).
static inline void put_double(jdouble from, intptr_t *to) { *(jdouble *)(to + 1 + 0) = from; }
static inline void put_double(jdouble from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = from; pos += 2; }
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = *from; pos += 2; }
-#else
-// Doubles are stored in reversed native word format in two JavaCallArgument slots at *to.
-static inline void put_double(jdouble from, intptr_t *to) { put_int2r((jint *)&from, (jint *)to); }
-static inline void put_double(jdouble from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
-static inline void put_double(jdouble *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
-#endif
// The get_xxx routines, on the other hand, actually _do_ fetch
// java primitive types from the interpreter stack.
static inline jint get_int(intptr_t *from) { return *(jint *)from; }
-#ifdef _LP64
static inline jlong get_long(intptr_t *from) { return *(jlong *)from; }
-#else
-static inline jlong get_long(intptr_t *from) { return ((jlong)(*( signed int *)((jint *)from )) << 32) |
-((jlong)(*(unsigned int *)((jint *)from + 1)) << 0); }
-#endif
static inline oop get_obj(intptr_t *from) { return *(oop *)from; }
static inline jfloat get_float(intptr_t *from) { return *(jfloat *)from; }
-#ifdef _LP64
static inline jdouble get_double(intptr_t *from) { return *(jdouble *)from; }
-#else
-static inline jdouble get_double(intptr_t *from) { jlong jl = ((jlong)(*( signed int *)((jint *)from )) << 32) |
-((jlong)(*(unsigned int *)((jint *)from + 1)) << 0);
-return *(jdouble *)&jl; }
-#endif
};
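A long or double still occupies two JavaCallArgument slots on 64-bit, but the value now lives whole in the second slot rather than split across both. A standalone sketch of the surviving convention (HotSpot types swapped for <cstdint> ones):

    #include <cstdint>

    // put_long on LP64: two slots reserved, value stored in the second.
    static inline void put_long_lp64(int64_t from, intptr_t* to, int& pos) {
      *reinterpret_cast<int64_t*>(to + 1 + pos) = from;  // slot pos stays unused
      pos += 2;                                          // still two slots wide
    }

    // get_long reads straight from an interpreter-stack slot.
    static inline int64_t get_long_lp64(intptr_t* from) {
      return *reinterpret_cast<int64_t*>(from);
    }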

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,10 +39,6 @@
typedef int jint;
-#ifdef _LP64
typedef long jlong;
-#else
-typedef long long jlong;
-#endif
typedef signed char jbyte;

Some files were not shown because too many files have changed in this diff.