Merge
commit a41f617837
@@ -47,11 +47,10 @@ ifeq ($(INCLUDE_GRAAL), true)
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
-          $(SRC_DIR)/org.graalvm.compiler.common/src \
           $(SRC_DIR)/org.graalvm.api.word/src \
           $(SRC_DIR)/org.graalvm.compiler.core/src \
           $(SRC_DIR)/org.graalvm.compiler.core.common/src \
           $(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
           $(SRC_DIR)/org.graalvm.compiler.api.collections/src \
           $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
           $(SRC_DIR)/org.graalvm.compiler.asm/src \
           $(SRC_DIR)/org.graalvm.compiler.bytecode/src \
@@ -68,6 +67,7 @@ ifeq ($(INCLUDE_GRAAL), true)
          $(SRC_DIR)/org.graalvm.compiler.phases.common/src \
          $(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
          $(SRC_DIR)/org.graalvm.compiler.virtual/src \
+         $(SRC_DIR)/org.graalvm.util/src \
          $(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
          $(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
          $(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \
@@ -102,6 +102,7 @@ ifeq ($(INCLUDE_GRAAL), true)
      SRC := \
          $(SRC_DIR)/org.graalvm.compiler.options/src \
          $(SRC_DIR)/org.graalvm.compiler.options.processor/src \
+         $(SRC_DIR)/org.graalvm.util/src \
      , \
      BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor, \
      JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar, \
@@ -114,9 +115,8 @@ ifeq ($(INCLUDE_GRAAL), true)
  $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
      SETUP := GENERATE_OLDBYTECODE, \
      SRC := \
-         $(SRC_DIR)/org.graalvm.compiler.common/src \
          $(SRC_DIR)/org.graalvm.api.word/src \
          $(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
          $(SRC_DIR)/org.graalvm.compiler.api.collections/src \
          $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
          $(SRC_DIR)/org.graalvm.compiler.code/src \
          $(SRC_DIR)/org.graalvm.compiler.core.common/src \
@@ -125,6 +125,7 @@ ifeq ($(INCLUDE_GRAAL), true)
          $(SRC_DIR)/org.graalvm.compiler.nodeinfo/src \
          $(SRC_DIR)/org.graalvm.compiler.options/src \
          $(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
+         $(SRC_DIR)/org.graalvm.util/src \
          $(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
          $(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
          $(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \
@@ -37,7 +37,6 @@ SRC_DIR := $(HOTSPOT_TOPDIR)/src/$(MODULE)/share/classes

PROC_SRC_SUBDIRS := \
    org.graalvm.compiler.code \
-   org.graalvm.compiler.common \
    org.graalvm.compiler.core \
    org.graalvm.compiler.core.aarch64 \
    org.graalvm.compiler.core.amd64 \
@@ -1,53 +0,0 @@
-#
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.  Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-include $(SPEC)
-include NativeCompilation.gmk
-
-$(eval $(call IncludeCustomExtension, hotspot, lib/Lib-jdk.aot.gmk))
-
-##############################################################################
-# Build libjelfshim only when AOT is enabled.
-ifeq ($(ENABLE_AOT), true)
-  JELFSHIM_NAME := jelfshim
-
-  $(eval $(call SetupNativeCompilation, BUILD_LIBJELFSHIM, \
-      TOOLCHAIN := TOOLCHAIN_DEFAULT, \
-      OPTIMIZATION := LOW, \
-      LIBRARY := $(JELFSHIM_NAME), \
-      OUTPUT_DIR := $(call FindLibDirForModule, $(MODULE)), \
-      SRC := $(HOTSPOT_TOPDIR)/src/jdk.aot/unix/native/libjelfshim, \
-      CFLAGS := $(CFLAGS_JDKLIB) $(ELF_CFLAGS) \
-          -DAOT_VERSION_STRING='"$(VERSION_STRING)"' \
-          -I$(SUPPORT_OUTPUTDIR)/headers/$(MODULE), \
-      LDFLAGS := $(LDFLAGS_JDKLIB), \
-      OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/lib$(JELFSHIM_NAME), \
-      LIBS := $(ELF_LIBS) $(LIBS_JDKLIB), \
-  ))
-
-  TARGETS += $(BUILD_LIBJELFSHIM)
-endif
-
-##############################################################################
@@ -35,12 +35,16 @@ include $(SPEC)
include MakeBase.gmk
include TestFilesCompilation.gmk

+$(eval $(call IncludeCustomExtension, hotspot, test/JtregNative.gmk))
+
################################################################################
# Targets for building the native tests themselves.
################################################################################

# Add more directories here when needed.
-BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
+BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
+    $(HOTSPOT_TOPDIR)/test/gc/g1/TestJNIWeakG1 \
+    $(HOTSPOT_TOPDIR)/test/gc/stress/gclocker \
    $(HOTSPOT_TOPDIR)/test/native_sanity \
    $(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
    $(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
@@ -53,6 +57,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
    $(HOTSPOT_TOPDIR)/test/runtime/modules/getModuleJNI \
    $(HOTSPOT_TOPDIR)/test/runtime/SameObject \
    $(HOTSPOT_TOPDIR)/test/runtime/BoolReturn \
+    $(HOTSPOT_TOPDIR)/test/runtime/noClassDefFoundMsg \
    $(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
    $(HOTSPOT_TOPDIR)/test/compiler/calls \
    $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
@@ -65,6 +70,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
    $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ClassFileLoadHook \
    $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ClassLoadPrepare \
    $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ThreadStart \
+    $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/StartPhase/AllowedFunctions \
    #

# Add conditional directories here when needed.
@@ -91,6 +97,7 @@ ifeq ($(TOOLCHAIN_TYPE), solstudio)
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassFileLoadHook := -lc
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassLoadPrepare := -lc
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAThreadStart := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
endif

ifeq ($(OPENJDK_TARGET_OS), linux)
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@@ -3564,7 +3564,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
}

// Vector ideal reg.
-const int Matcher::vector_ideal_reg(int len) {
+const uint Matcher::vector_ideal_reg(int len) {
  switch(len) {
    case  8: return Op_VecD;
    case 16: return Op_VecX;
@@ -3573,7 +3573,7 @@ const int Matcher::vector_ideal_reg(int len) {
  return 0;
}

-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return Op_VecX;
}
@@ -15387,9 +15387,9 @@ instruct ShouldNotReachHere() %{
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
-    // TODO
-    // implement proper trap call here
-    __ brk(999);
+    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
+    // return true
+    __ dpcs1(0xdead + 1);
  %}

  ins_pipe(pipe_class_default);
@@ -30,12 +30,6 @@

class Bytes: AllStatic {
 public:
-  // Returns true if the byte ordering used by Java is different from the native byte ordering
-  // of the underlying machine. For example, this is true for Intel x86, but false for Solaris
-  // on Sparc.
-  static inline bool is_Java_byte_ordering_different(){ return true; }
-

  // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
  // (no special code is needed since x86 CPUs can access unaligned data)
  static inline u2 get_native_u2(address p) { return *(u2*)p; }
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -23,12 +23,6 @@
 *
 */

-#include "precompiled.hpp"
-#include "c1/c1_FpuStackSim.hpp"
-#include "c1/c1_FrameMap.hpp"
-#include "utilities/array.hpp"
-#include "utilities/ostream.hpp"
-
//--------------------------------------------------------
//  FpuStackSim
//--------------------------------------------------------
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -270,7 +270,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
-  ldr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
+  ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
+  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  // JNIHandles::resolve(obj);
  ldr(result, Address(result, 0));
  // Add in the index
@@ -278,6 +279,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
  load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

+void InterpreterMacroAssembler::load_resolved_klass_at_offset(
+                                             Register cpool, Register index, Register klass, Register temp) {
+  add(temp, cpool, index, LSL, LogBytesPerWord);
+  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
+  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
+  add(klass, klass, temp, LSL, LogBytesPerWord);
+  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
+}
+
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
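Review note: the new load_resolved_klass_at_offset appears to be the AArch64 side of the constant-pool change that moved Klass pointers out of the CP entries (JDK-8171392): a klass entry now carries a 16-bit index into a side array of resolved klasses instead of a raw Klass*. A rough C++ sketch of what the five instructions compute, using simplified types that are illustrative assumptions rather than HotSpot's real declarations:

    // Sketch only; 'entry', 'list' and the casts are simplified assumptions.
    Klass* load_resolved_klass_at_offset(ConstantPool* cpool, int index) {
      // CP entries are word-sized and begin right after the ConstantPool header;
      // for a klass entry the low 16 bits hold the resolved_klass_index.
      address entry = (address)cpool + sizeof(ConstantPool) + index * wordSize;
      u2 resolved_klass_index = *(u2*)entry;             // the ldrh above
      Array<Klass*>* list = cpool->resolved_klasses();   // cpool->_resolved_klasses
      return list->at(resolved_klass_index);             // base_offset + idx * wordSize
    }

The ARM port adds the same helper further down in this commit, with Rtemp as an implicit scratch register.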
@@ -682,7 +692,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
    }

    // Load (object->mark() | 1) into swap_reg
-    ldr(rscratch1, Address(obj_reg, 0));
+    ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    orr(swap_reg, rscratch1, 1);

    // Save (object->mark() | 1) into BasicLock's displaced header
@@ -694,14 +704,14 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
    Label fail;
    if (PrintBiasedLockingStatistics) {
      Label fast;
-      cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
+      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
      bind(fast);
      atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
                  rscratch2, rscratch1, tmp);
      b(done);
      bind(fail);
    } else {
-      cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
+      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
    }

    // Test if the oopMark is an obvious stack pointer, i.e.,
@@ -791,7 +801,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
    cbz(header_reg, done);

    // Atomic swap back the old header
-    cmpxchgptr(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
+    cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);

    // Call the runtime routine for slow case.
    str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -54,9 +54,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
                          int number_of_arguments,
                          bool check_exceptions);

-  virtual void check_and_handle_popframe(Register java_thread);
-  virtual void check_and_handle_earlyret(Register java_thread);
-
  // base routine for all dispatches
  void dispatch_base(TosState state, address* table, bool verifyoop = true);

@@ -67,6 +64,9 @@ class InterpreterMacroAssembler: public MacroAssembler {

  void jump_to_entry(address entry);

+  virtual void check_and_handle_popframe(Register java_thread);
+  virtual void check_and_handle_earlyret(Register java_thread);
+
  // Interpreter-specific registers
  void save_bcp() {
    str(rbcp, Address(rfp, frame::interpreter_frame_bcp_offset * wordSize));
@@ -123,6 +123,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
  // load cpool->resolved_references(index);
  void load_resolved_reference_at_index(Register result, Register index);

+  // load cpool->resolved_klass_at(index);
+  void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp);
+
  void pop_ptr(Register r = r0);
  void pop_i(Register r = r0);
  void pop_l(Register r = r0);
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
    }
  }
#endif // ASSERT
-  Handle obj = HotSpotObjectConstantImpl::object(constant);
+  Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
  jobject value = JNIHandles::make_local(obj());
  MacroAssembler::patch_oop(pc, (address)obj());
  int oop_index = _oop_recorder->find_index(value);
@@ -515,7 +515,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
    mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
    andr(swap_reg, swap_reg, rscratch1);
    orr(tmp_reg, swap_reg, rthread);
-    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
+    cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, this means that
    // another thread succeeded in biasing it toward itself and we
    // need to revoke that bias. The revocation will occur in the
@@ -542,7 +542,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
    Label here;
    load_prototype_header(tmp_reg, obj_reg);
    orr(tmp_reg, rthread, tmp_reg);
-    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
+    cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, then another thread
    // succeeded in biasing it toward itself and we need to revoke that
    // bias. The revocation will occur in the runtime in the slow case.
@@ -569,7 +569,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
    {
      Label here, nope;
      load_prototype_header(tmp_reg, obj_reg);
-      cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
+      cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
      bind(here);

      // Fall through to the normal CAS-based lock, because no matter what
@@ -2141,6 +2141,12 @@ void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Reg
  b(*fail);
}

+void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
+                                        Label &succeed, Label *fail) {
+  assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
+  cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
+}
+
void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
                              Label &succeed, Label *fail) {
  // oldv holds comparison value
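Review note: the converted call sites previously passed the raw oop as the CAS address, silently hard-coding that the mark word sits at offset 0 in the object. The new cmpxchg_obj_header wrapper pins that layout assumption to a single assert. The succeed/fail label contract is inherited from cmpxchgptr; as a usage sketch drawn from the call sites in this commit (register names as used above):

    // On success control transfers to 'done'; on failure it branches to *fail,
    // or simply falls through when fail == NULL.
    Label done;
    __ cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
    // ... failure path runs here when the CAS loses the race ...
    __ bind(done);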
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -77,12 +77,6 @@ class MacroAssembler: public Assembler {
      bool check_exceptions        // whether to check for pending exceptions after return
  );

-  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
-  // The implementation is only non-empty for the InterpreterMacroAssembler,
-  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
-  virtual void check_and_handle_popframe(Register java_thread);
-  virtual void check_and_handle_earlyret(Register java_thread);
-
  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // Maximum size of class area in Metaspace when compressed
@@ -97,6 +91,12 @@ class MacroAssembler: public Assembler {
            > (1u << log2_intptr(CompressedClassSpaceSize))));
  }

+  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
+  // The implementation is only non-empty for the InterpreterMacroAssembler,
+  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
+  virtual void check_and_handle_popframe(Register java_thread);
+  virtual void check_and_handle_earlyret(Register java_thread);
+
  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg is killed.
@@ -974,6 +974,8 @@ public:

  // Various forms of CAS

+  void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
+                          Label &suceed, Label *fail);
  void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                  Label &suceed, Label *fail);
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "memory/metaspaceShared.hpp"
-
-// Generate the self-patching vtable method:
-//
-// This method will be called (as any other Klass virtual method) with
-// the Klass itself as the first argument.  Example:
-//
-//      oop obj;
-//      int size = obj->klass()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-
-//=====================================================================
-
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no relationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-
-#define __ masm->
-
-extern "C" {
-  void aarch64_prolog(void);
-}
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
-                                              void** vtable,
-                                              char** md_top,
-                                              char* md_end,
-                                              char** mc_top,
-                                              char* mc_end) {
-
-#ifdef BUILTIN_SIM
-  // Write a dummy word to the writable shared metaspace.
-  // MetaspaceShared::initialize_shared_spaces will fill it with the
-  // address of aarch64_prolog().
-  address *prolog_ptr = (address*)*md_top;
-  *(intptr_t *)(*md_top) = (intptr_t)0;
-  (*md_top) += sizeof(intptr_t);
-#endif
-
-  intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
-  *(intptr_t *)(*md_top) = vtable_bytes;
-  *md_top += sizeof(intptr_t);
-  void** dummy_vtable = (void**)*md_top;
-  *vtable = dummy_vtable;
-  *md_top += vtable_bytes;
-
-  // Get ready to generate dummy methods.
-
-  CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
-  MacroAssembler* masm = new MacroAssembler(&cb);
-
-  Label common_code;
-  for (int i = 0; i < vtbl_list_size; ++i) {
-    for (int j = 0; j < num_virtuals; ++j) {
-      dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
-
-      // We're called directly from C code.
-#ifdef BUILTIN_SIM
-      __ c_stub_prolog(8, 0, MacroAssembler::ret_type_integral, prolog_ptr);
-#endif
-      // Load rscratch1 with a value indicating vtable/offset pair.
-      // -- bits[ 7..0] (8 bits) which virtual method in table?
-      // -- bits[12..8] (5 bits) which virtual method table?
-      __ mov(rscratch1, (i << 8) + j);
-      __ b(common_code);
-    }
-  }
-
-  __ bind(common_code);
-
-  Register tmp0 = r10, tmp1 = r11;       // AAPCS64 temporary registers
-  __ enter();
-  __ lsr(tmp0, rscratch1, 8);            // isolate vtable identifier.
-  __ mov(tmp1, (address)vtbl_list);      // address of list of vtable pointers.
-  __ ldr(tmp1, Address(tmp1, tmp0, Address::lsl(LogBytesPerWord))); // get correct vtable pointer.
-  __ str(tmp1, Address(c_rarg0));        // update vtable pointer in obj.
-  __ add(rscratch1, tmp1, rscratch1, ext::uxtb, LogBytesPerWord); // address of real method pointer.
-  __ ldr(rscratch1, Address(rscratch1)); // get real method pointer.
-  __ blrt(rscratch1, 8, 0, 1);           // jump to the real method.
-  __ leave();
-  __ ret(lr);
-
-  *mc_top = (char*)__ pc();
-}
-
-#ifdef BUILTIN_SIM
-void MetaspaceShared::relocate_vtbl_list(char **buffer) {
-  void **sim_entry = (void**)*buffer;
-  *sim_entry = (void*)aarch64_prolog;
-  *buffer += sizeof(intptr_t);
-}
-#endif
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -63,7 +63,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
-  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+  Klass* klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = rscratch2;
  Register temp2 = rscratch1; // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
@@ -137,8 +137,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
-  // the following assumes that a Method* is normally compressed in the vmtarget field:
-  __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
+  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
+  __ verify_oop(method_temp);
+  __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
@@ -282,7 +283,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,

  Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
  Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
-  Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
+  Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
+  Address vmtarget_method( rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));

  Register temp1_recv_klass = temp1;
  if (iid != vmIntrinsics::_linkToStatic) {
@@ -335,14 +337,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
-      __ ldr(rmethod, member_vmtarget);
+      __ load_heap_oop(rmethod, member_vmtarget);
+      __ ldr(rmethod, vmtarget_method);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
-      __ ldr(rmethod, member_vmtarget);
+      __ load_heap_oop(rmethod, member_vmtarget);
+      __ ldr(rmethod, vmtarget_method);
      break;

    case vmIntrinsics::_linkToVirtual:
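Review note: both method-handle files in this commit implement the same schema change (it matches JDK-8174749): MemberName no longer holds a Method* directly in its vmtarget field; it now references, through the new method field, a java.lang.invoke.ResolvedMethodName whose vmtarget carries the Method*. That is why every former single ldr becomes an oop load plus an ldr. In plain C++ terms (illustrative struct shapes, not HotSpot's real oop layouts):

    struct ResolvedMethodName { void* vmtarget; };            // holds the Method*
    struct MemberName         { ResolvedMethodName* method; };// was: void* vmtarget

    void* method_from(MemberName* mn) {
      // Before: return mn->vmtarget;                  (one ldr)
      // After:  one extra hop through ResolvedMethodName
      return mn->method->vmtarget;                     // load_heap_oop + ldr
    }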
@@ -1842,7 +1842,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    }

    // Load (object->mark() | 1) into swap_reg %r0
-    __ ldr(rscratch1, Address(obj_reg, 0));
+    __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ orr(swap_reg, rscratch1, 1);

    // Save (object->mark() | 1) into BasicLock's displaced header
@@ -1850,7 +1850,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

    // src -> dest iff dest == r0 else r0 <- dest
    { Label here;
-      __ cmpxchgptr(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
+      __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
    }

    // Hmm should this move to the slow path code area???
@@ -2029,7 +2029,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

    // Atomic swap old header if oop still contains the stack lock
    Label succeed;
-    __ cmpxchgptr(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
+    __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
    __ bind(succeed);

    // slow path re-enters here
@@ -402,14 +402,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
  return entry;
}

-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  // NULL last_sp until next java call
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ dispatch_next(state);
-  return entry;
-}
-
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

@@ -444,6 +436,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
    __ notify(Assembler::method_reentry);
  }
#endif
+
+  __ check_and_handle_popframe(rthread);
+  __ check_and_handle_earlyret(rthread);
+
  __ get_dispatch();
  __ dispatch_next(state, step);
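Review note: generate_continuation_for disappears because the PopFrame/ForceEarlyReturn checks now run at the ordinary return entry, via the check_and_handle_popframe/earlyret calls added above. The design leans on virtual dispatch in the assembler hierarchy, which is also why this commit moves those declarations around in the headers: MacroAssembler supplies do-nothing base versions (compiled-code assemblers emit no check), and InterpreterMacroAssembler overrides them with real code. A schematic of the shape, simplified from the headers in this diff (the ARM port declares them without the Register parameter; AArch64 defines the base versions out of line rather than inline):

    // Schematic only; the real classes carry many more members.
    class MacroAssembler /* : public Assembler */ {
    public:
      // Only the interpreter handles PopFrame/ForceEarlyReturn, so the
      // base versions emit nothing.
      virtual void check_and_handle_popframe(Register java_thread) {}
      virtual void check_and_handle_earlyret(Register java_thread) {}
    };

    class InterpreterMacroAssembler : public MacroAssembler {
    public:
      // Overrides emit code that tests the JavaThread state and jumps to
      // the PopFrame / early-return stubs when a request is pending.
      virtual void check_and_handle_popframe(Register java_thread);
      virtual void check_and_handle_earlyret(Register java_thread);
    };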
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -3418,8 +3418,7 @@ void TemplateTable::_new() {
  __ br(Assembler::NE, slow_case);

  // get InstanceKlass
-  __ lea(r4, Address(r4, r3, Address::lsl(3)));
-  __ ldr(r4, Address(r4, sizeof(ConstantPool)));
+  __ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);

-  // make sure klass is initialized & doesn't have finalizer
+  // make sure klass is fully initialized
@@ -3572,8 +3571,7 @@ void TemplateTable::checkcast()
  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
-  __ lea(r0, Address(r2, r19, Address::lsl(3)));
-  __ ldr(r0, Address(r0, sizeof(ConstantPool)));
+  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass

  __ bind(resolved);
  __ load_klass(r19, r3);
@@ -3629,8 +3627,7 @@ void TemplateTable::instanceof() {
  // Get superklass in r0 and subklass in r3
  __ bind(quicked);
  __ load_klass(r3, r0);
-  __ lea(r0, Address(r2, r19, Address::lsl(3)));
-  __ ldr(r0, Address(r0, sizeof(ConstantPool)));
+  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);

  __ bind(resolved);
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -234,8 +234,15 @@ void AbstractInterpreter::layout_activation(Method* method,
#ifdef AARCH64
  interpreter_frame->interpreter_frame_set_stack_top(stack_top);

+  // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
+  // none of which are at the same time, so we just need to make sure there is enough room
+  // for the biggest user:
+  //   -reserved slot for exception handler
+  //   -reserved slots for JSR292. Method::extra_stack_entries() is the size.
+  //   -3 reserved slots so get_method_counters() can save some registers before call_VM().
+  int max_stack = method->constMethod()->max_stack() + MAX2(3, Method::extra_stack_entries());
  intptr_t* extended_sp = (intptr_t*) monbot -
-    (method->max_stack() + 1) * Interpreter::stackElementWords - // +1 is reserved slot for exception handler
+    (max_stack * Interpreter::stackElementWords) -
    popframe_extra_args;
  extended_sp = (intptr_t*)round_down((intptr_t)extended_sp, StackAlignmentInBytes);
  interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);
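Review note: the old code reserved exactly one slot beyond max_stack (for the exception handler). Because get_method_counters() now spills up to three registers before call_VM(), the reservation becomes the maximum over the three independent users, which never need headroom at the same time. A worked example of the arithmetic (the concrete numbers are illustrative assumptions, not values from this build):

    // Suppose, purely for illustration:
    //   method->constMethod()->max_stack() == 10  (from the class file)
    //   Method::extra_stack_entries()      == 2   (JSR292 slots; build-dependent)
    // Old reservation: 10 + 1          = 11 slots -> covers the exception handler
    //                                     but not the 3 spilled registers.
    // New reservation: 10 + MAX2(3, 2) = 13 slots -> covers the largest single
    //                                     user, which is all that is required.
    int max_stack = 10 + MAX2(3, 2); // == 13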
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -1122,7 +1122,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}

// Vector ideal reg corresponding to specified size in bytes
-const int Matcher::vector_ideal_reg(int size) {
+const uint Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize >= size, "");
  switch(size) {
    case 8: return Op_VecD;
@@ -1132,7 +1132,7 @@ const int Matcher::vector_ideal_reg(int size) {
  return 0;
}

-const int Matcher::vector_shift_count_ideal_reg(int size) {
+const uint Matcher::vector_shift_count_ideal_reg(int size) {
  return vector_ideal_reg(size);
}
@@ -11752,9 +11752,13 @@ instruct ShouldNotReachHere( )

  size(4);
-  // Use the following format syntax
-  format %{ "breakpoint ; ShouldNotReachHere" %}
+  format %{ "ShouldNotReachHere" %}
  ins_encode %{
-    __ breakpoint();
+#ifdef AARCH64
+    __ dpcs1(0xdead);
+#else
+    __ udf(0xdead);
+#endif
  %}
  ins_pipe(tail_call);
%}
@@ -578,6 +578,11 @@ class Assembler : public AbstractAssembler {
  F(bl,  0xb)
#undef F

+  void udf(int imm_16) {
+    assert((imm_16 >> 16) == 0, "encoding constraint");
+    emit_int32(0xe7f000f0 | (imm_16 & 0xfff0) << 8 | (imm_16 & 0xf));
+  }
+
  // ARMv7 instructions

#define F(mnemonic, wt) \
@@ -1083,6 +1083,7 @@ class Assembler : public AbstractAssembler {

  F(brk,   0b001, 0b000, 0b00)
  F(hlt,   0b010, 0b000, 0b00)
+  F(dpcs1, 0b101, 0b000, 0b01)
#undef F

  enum SystemRegister { // o0<1> op1<3> CRn<4> CRm<4> op2<3>
@@ -35,12 +35,6 @@
class Bytes: AllStatic {

 public:
-  // Returns true if the byte ordering used by Java is different from the native byte ordering
-  // of the underlying machine.
-  static inline bool is_Java_byte_ordering_different() {
-    return VM_LITTLE_ENDIAN != 0;
-  }
-
  static inline u2 get_Java_u2(address p) {
    return (u2(p[0]) << 8) | u2(p[1]);
  }
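Review note: with is_Java_byte_ordering_different() gone (here and in the AArch64 Bytes class above), callers rely directly on accessors such as get_Java_u2, which compose the big-endian value byte by byte and therefore behave identically on any host endianness. A minimal illustration in plain C++:

    // Two bytes as they appear in a class file (big-endian order): 0x12 0x34.
    unsigned char buf[2] = { 0x12, 0x34 };
    // get_Java_u2 computes (buf[0] << 8) | buf[1], so the result is 0x1234 on
    // a little-endian ARM host and on a big-endian host alike; no conditional
    // byte swap is needed.
    unsigned short v = (unsigned short)((buf[0] << 8) | buf[1]); // == 0x1234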
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -22,10 +22,4 @@
 *
 */

-#include "precompiled.hpp"
-#include "c1/c1_FpuStackSim.hpp"
-#include "c1/c1_FrameMap.hpp"
-#include "utilities/array.hpp"
-#include "utilities/ostream.hpp"
-
// Nothing needed here
@@ -298,7 +298,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(

  Register cache = result;
  // load pointer for resolved_references[] objArray
-  ldr(cache, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
+  ldr(cache, Address(result, ConstantPool::cache_offset_in_bytes()));
+  ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  // JNIHandles::resolve(result)
  ldr(cache, Address(cache, 0));
  // Add in the index
@@ -308,6 +309,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
  load_heap_oop(result, Address(cache, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

+void InterpreterMacroAssembler::load_resolved_klass_at_offset(
+                                             Register Rcpool, Register Rindex, Register Rklass) {
+  add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
+  ldrh(Rtemp, Address(Rtemp, sizeof(ConstantPool))); // Rtemp = resolved_klass_index
+  ldr(Rklass, Address(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes())); // Rklass = cpool->_resolved_klasses
+  add(Rklass, Rklass, AsmOperand(Rtemp, lsl, LogBytesPerWord));
+  ldr(Rklass, Address(Rklass, Array<Klass*>::base_offset_in_bytes()));
+}
+
// Generate a subtype check: branch to not_subtype if sub_klass is
// not a subtype of super_klass.
// Profiling code for the subtype check failure (profile_typecheck_failed)
@@ -2016,75 +2026,42 @@ void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
-                                                   Label& skip) {
+                                                   Label& skip,
+                                                   bool saveRegs,
+                                                   Register reg1,
+                                                   Register reg2,
+                                                   Register reg3) {
  const Address method_counters(method, Method::method_counters_offset());
  Label has_counters;

  ldr(Rcounters, method_counters);
  cbnz(Rcounters, has_counters);

+  if (saveRegs) {
+    // Save and restore in use caller-saved registers since they will be trashed by call_VM
+    assert(reg1 != noreg, "must specify reg1");
+    assert(reg2 != noreg, "must specify reg2");
#ifdef AARCH64
-  const Register tmp = Rcounters;
-  const int saved_regs_size = 20*wordSize;
-
-  // Note: call_VM will cut SP according to Rstack_top value before call, and restore SP to
-  // extended_sp value from frame after the call.
-  // So make sure there is enough stack space to save registers and adjust Rstack_top accordingly.
-  {
-    Label enough_stack_space;
-    check_extended_sp(tmp);
-    sub(Rstack_top, Rstack_top, saved_regs_size);
-    cmp(SP, Rstack_top);
-    b(enough_stack_space, ls);
-
-    align_reg(tmp, Rstack_top, StackAlignmentInBytes);
-    mov(SP, tmp);
-    str(tmp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
-
-    bind(enough_stack_space);
-    check_stack_top();
-
-    int offset = 0;
-    stp(R0,  R1,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R2,  R3,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R4,  R5,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R6,  R7,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R8,  R9,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R10, R11, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R12, R13, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R14, R15, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R16, R17, Address(Rstack_top, offset)); offset += 2*wordSize;
-    stp(R18, LR,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    assert (offset == saved_regs_size, "should be");
-  }
+    assert(reg3 != noreg, "must specify reg3");
+    stp(reg1, reg2, Address(Rstack_top, -2*wordSize, pre_indexed));
+    stp(reg3, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
#else
-  push(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(R14));
-#endif // AARCH64
+    assert(reg3 == noreg, "must not specify reg3");
+    push(RegisterSet(reg1) | RegisterSet(reg2));
+#endif
+  }

  mov(R1, method);
-  call_VM(noreg, CAST_FROM_FN_PTR(address,
-          InterpreterRuntime::build_method_counters), R1);
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);

+  if (saveRegs) {
#ifdef AARCH64
-  {
-    int offset = 0;
-    ldp(R0,  R1,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R2,  R3,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R4,  R5,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R6,  R7,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R8,  R9,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R10, R11, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R12, R13, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R14, R15, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R16, R17, Address(Rstack_top, offset)); offset += 2*wordSize;
-    ldp(R18, LR,  Address(Rstack_top, offset)); offset += 2*wordSize;
-    assert (offset == saved_regs_size, "should be");
-
-    add(Rstack_top, Rstack_top, saved_regs_size);
-  }
+    ldp(reg3, ZR, Address(Rstack_top, 2*wordSize, post_indexed));
+    ldp(reg1, reg2, Address(Rstack_top, 2*wordSize, post_indexed));
#else
-  pop(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(R14));
-#endif // AARCH64
+    pop(RegisterSet(reg1) | RegisterSet(reg2));
+#endif
+  }

  ldr(Rcounters, method_counters);
  cbz(Rcounters, skip); // No MethodCounters created, OutOfMemory
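Review note: the rewritten get_method_counters no longer blanket-saves R0..R18 around call_VM; instead the caller names the registers it needs to stay live. On AArch64 the two pre-indexed stp pushes are mirrored by the two post-indexed ldp pops in reverse order (reg3/ZR pushed last, popped first), and on ARM32 only reg1/reg2 are pushed, so reg3 must be noreg there. The TemplateTable::branch hunk later in this commit is the call-site pattern; schematically:

    // Call-site contract (registers are whatever the caller keeps live).
    Label dispatch;
    __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
                           Rdisp, R3_bytecode,   // reg1, reg2: live on both ports
                           AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
    // reg3 is AArch64-only; the 32-bit path asserts reg3 == noreg.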
@@ -53,9 +53,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
  // Template interpreter specific version of call_VM_helper
  virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);

-  virtual void check_and_handle_popframe();
-  virtual void check_and_handle_earlyret();
-
  // base routine for all dispatches
  typedef enum { DispatchDefault, DispatchNormal } DispatchTableMode;
  void dispatch_base(TosState state, DispatchTableMode table_mode, bool verifyoop = true);
@@ -63,6 +60,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
 public:
  InterpreterMacroAssembler(CodeBuffer* code);

+  virtual void check_and_handle_popframe();
+  virtual void check_and_handle_earlyret();
+
  // Interpreter-specific registers
#if defined(AARCH64) && defined(ASSERT)

@@ -141,6 +141,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
  // Load object from cpool->resolved_references(*bcp+1)
  void load_resolved_reference_at_index(Register result, Register tmp);

+  // load cpool->resolved_klass_at(index); Rtemp is corrupted upon return
+  void load_resolved_klass_at_offset(Register Rcpool, Register Rindex, Register Rklass);
+
  void store_check_part1(Register card_table_base); // Sets card_table_base register.
  void store_check_part2(Register obj, Register card_table_base, Register tmp);

@@ -328,7 +331,13 @@ class InterpreterMacroAssembler: public MacroAssembler {

  void trace_state(const char* msg) PRODUCT_RETURN;

-  void get_method_counters(Register method, Register Rcounters, Label& skip);
+  void get_method_counters(Register method,
+                           Register Rcounters,
+                           Label& skip,
+                           bool saveRegs = false,
+                           Register reg1 = noreg,
+                           Register reg2 = noreg,
+                           Register reg3 = noreg);
};

#endif // CPU_ARM_VM_INTERP_MASM_ARM_HPP
@@ -206,6 +206,9 @@ protected:
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
+public:
+
+  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
@@ -213,10 +216,6 @@ protected:
  virtual void check_and_handle_popframe() {}
  virtual void check_and_handle_earlyret() {}

-public:
-
-  MacroAssembler(CodeBuffer* code) : Assembler(code) {}
-
  // By default, we do not need relocation information for non
  // patchable absolute addresses. However, when needed by some
  // extensions, ignore_non_patchable_relocations can be modified,
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "assembler_arm.inline.hpp"
-#include "memory/metaspaceShared.hpp"
-
-// Generate the self-patching vtable method:
-//
-// This method will be called (as any other Klass virtual method) with
-// the Klass itself as the first argument.  Example:
-//
-//      oop obj;
-//      int size = obj->klass()->oop_size(this);
-//
-// for which the virtual method call is Klass::oop_size();
-//
-// The dummy method is called with the Klass object as the first
-// operand, and an object as the second argument.
-//
-
-//=====================================================================
-
-// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no relationship
-// to the original method which the caller intended. Also, there needs
-// to be 'vtbl_list_size' instances of the vtable in order to
-// differentiate between the 'vtable_list_size' original Klass objects.
-
-#define __ masm->
-
-void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
-                                              void** vtable,
-                                              char** md_top,
-                                              char* md_end,
-                                              char** mc_top,
-                                              char* mc_end) {
-  intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
-  *(intptr_t *)(*md_top) = vtable_bytes;
-  *md_top += sizeof(intptr_t);
-  void** dummy_vtable = (void**)*md_top;
-  *vtable = dummy_vtable;
-  *md_top += vtable_bytes;
-
-  CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
-  MacroAssembler* masm = new MacroAssembler(&cb);
-
-  for (int i = 0; i < vtbl_list_size; ++i) {
-    Label common_code;
-    for (int j = 0; j < num_virtuals; ++j) {
-      dummy_vtable[num_virtuals * i + j] = (void*) __ pc();
-      __ mov(Rtemp, j);  // Rtemp contains an index of a virtual method in the table
-      __ b(common_code);
-    }
-
-    InlinedAddress vtable_address((address)&vtbl_list[i]);
-    __ bind(common_code);
-    const Register tmp2 = AARCH64_ONLY(Rtemp2) NOT_AARCH64(R4);
-    assert_different_registers(Rtemp, tmp2);
-#ifndef AARCH64
-    __ push(tmp2);
-#endif // !AARCH64
-    // Do not use ldr_global since the code must be portable across all ARM architectures
-    __ ldr_literal(tmp2, vtable_address);
-    __ ldr(tmp2, Address(tmp2)); // get correct vtable address
-    __ ldr(Rtemp, Address::indexed_ptr(tmp2, Rtemp)); // get real method pointer
-    __ str(tmp2, Address(R0)); // update vtable. R0 = "this"
-#ifndef AARCH64
-    __ pop(tmp2);
-#endif // !AARCH64
-    __ jump(Rtemp);
-    __ bind_literal(vtable_address);
-  }
-
-  __ flush();
-  *mc_top = (char*) __ pc();
-}
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
|
||||
Register obj, Register temp1, Register temp2, SystemDictionary::WKID klass_id,
|
||||
const char* error_message) {
|
||||
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
|
||||
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
|
||||
Klass* klass = SystemDictionary::well_known_klass(klass_id);
|
||||
Label L_ok, L_bad;
|
||||
BLOCK_COMMENT("verify_klass {");
|
||||
__ verify_oop(obj);
|
||||
@ -157,8 +157,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
|
||||
__ load_heap_oop(tmp, Address(tmp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
|
||||
__ verify_oop(tmp);
|
||||
|
||||
// the following assumes that a Method* is normally compressed in the vmtarget field:
|
||||
__ ldr(Rmethod, Address(tmp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
|
||||
__ load_heap_oop(Rmethod, Address(tmp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
|
||||
__ verify_oop(Rmethod);
|
||||
__ ldr(Rmethod, Address(Rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
|
||||
|
||||
if (VerifyMethodHandles && !for_compiler_entry) {
|
||||
// make sure recv is already on stack
|
||||
@ -320,7 +321,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
|
||||
|
||||
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
|
||||
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
|
||||
Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
|
||||
Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
|
||||
Address vmtarget_method(Rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
|
||||
|
||||
Register temp1_recv_klass = temp1;
|
||||
if (iid != vmIntrinsics::_linkToStatic) {
|
||||
@ -375,14 +377,17 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
|
||||
if (VerifyMethodHandles) {
|
||||
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
|
||||
}
|
||||
__ ldr(Rmethod, member_vmtarget);
|
||||
__ load_heap_oop(Rmethod, member_vmtarget);
|
||||
__ ldr(Rmethod, vmtarget_method);
|
||||
break;
|
||||
|
||||
case vmIntrinsics::_linkToStatic:
|
||||
if (VerifyMethodHandles) {
|
||||
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
|
||||
}
|
||||
__ ldr(Rmethod, member_vmtarget);
|
||||
__ load_heap_oop(Rmethod, member_vmtarget);
|
||||
__ ldr(Rmethod, vmtarget_method);
|
||||
break;
|
||||
break;
|
||||
|
||||
case vmIntrinsics::_linkToVirtual:
|
||||
|
@@ -270,12 +270,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
  return entry;
}

-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  // Not used.
-  STOP("generate_continuation_for");
-  return NULL;
-}
-
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

@@ -310,6 +304,9 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
  __ convert_retval_to_tos(state);
#endif // !AARCH64

+  __ check_and_handle_popframe();
+  __ check_and_handle_earlyret();
+
  __ dispatch_next(state, step);

  return entry;
@@ -1401,7 +1398,13 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
#ifdef AARCH64
  // setup RmaxStack
  __ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
-  __ add(RmaxStack, RmaxStack, MAX2(1, Method::extra_stack_entries())); // reserve slots for exception handler and JSR292 appendix argument
+  // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
+  // none of which are at the same time, so we just need to make sure there is enough room
+  // for the biggest user:
+  //   -reserved slot for exception handler
+  //   -reserved slots for JSR292. Method::extra_stack_entries() is the size.
+  //   -3 reserved slots so get_method_counters() can save some registers before call_VM().
+  __ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries()));
#endif // AARCH64

  // see if we've got enough room on the stack for locals plus overhead.
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -2286,13 +2286,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
      }
      __ bind(no_mdo);
      // Increment backedge counter in MethodCounters*
-      __ get_method_counters(Rmethod, Rcounters, dispatch);
+      // Note Rbumped_taken_count is a callee saved registers for ARM32, but caller saved for ARM64
+      __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
+                             Rdisp, R3_bytecode,
+                             AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
      const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
                                 Rcnt, R4_tmp, eq, &backedge_counter_overflow);
    } else {
-      // increment counter
-      __ get_method_counters(Rmethod, Rcounters, dispatch);
+      // Increment backedge counter in MethodCounters*
+      __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
+                             Rdisp, R3_bytecode,
+                             AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
      __ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter
      __ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter
      __ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter
@@ -4367,10 +4372,9 @@ void TemplateTable::_new() {
#endif // AARCH64

  // get InstanceKlass
-  __ add(Rklass, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
-  __ ldr(Rklass, Address(Rklass, sizeof(ConstantPool)));
  __ cmp(Rtemp, JVM_CONSTANT_Class);
  __ b(slow_case, ne);
+  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);

-  // make sure klass is initialized & doesn't have finalizer
+  // make sure klass is fully initialized
@@ -4642,8 +4646,7 @@ void TemplateTable::checkcast() {

  // Get superklass in Rsuper and subklass in Rsub
  __ bind(quicked);
-  __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
-  __ ldr(Rsuper, Address(Rtemp, sizeof(ConstantPool)));
+  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);

  __ bind(resolved);
  __ load_klass(Rsub, Robj);
@@ -4716,8 +4719,7 @@ void TemplateTable::instanceof() {

  // Get superklass in Rsuper and subklass in Rsub
  __ bind(quicked);
-  __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
-  __ ldr(Rsuper, Address(Rtemp, sizeof(ConstantPool)));
+  __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);

  __ bind(resolved);
  __ load_klass(Rsub, Robj);
@ -376,10 +376,12 @@ class Assembler : public AbstractAssembler {
|
||||
STWX_OPCODE = (31u << OPCODE_SHIFT | 151u << 1),
|
||||
STWU_OPCODE = (37u << OPCODE_SHIFT),
|
||||
STWUX_OPCODE = (31u << OPCODE_SHIFT | 183u << 1),
|
||||
STWBRX_OPCODE = (31u << OPCODE_SHIFT | 662u << 1),
|
||||
|
||||
STH_OPCODE = (44u << OPCODE_SHIFT),
|
||||
STHX_OPCODE = (31u << OPCODE_SHIFT | 407u << 1),
|
||||
STHU_OPCODE = (45u << OPCODE_SHIFT),
|
||||
STHBRX_OPCODE = (31u << OPCODE_SHIFT | 918u << 1),
|
||||
|
||||
STB_OPCODE = (38u << OPCODE_SHIFT),
|
||||
STBX_OPCODE = (31u << OPCODE_SHIFT | 215u << 1),
|
||||
@ -401,11 +403,13 @@ class Assembler : public AbstractAssembler {
|
||||
LD_OPCODE = (58u << OPCODE_SHIFT | 0u << XO_30_31_SHIFT), // DS-FORM
|
||||
LDU_OPCODE = (58u << OPCODE_SHIFT | 1u << XO_30_31_SHIFT), // DS-FORM
|
||||
LDX_OPCODE = (31u << OPCODE_SHIFT | 21u << XO_21_30_SHIFT), // X-FORM
|
||||
LDBRX_OPCODE = (31u << OPCODE_SHIFT | 532u << 1), // X-FORM
|
||||
|
||||
STD_OPCODE = (62u << OPCODE_SHIFT | 0u << XO_30_31_SHIFT), // DS-FORM
|
||||
STDU_OPCODE = (62u << OPCODE_SHIFT | 1u << XO_30_31_SHIFT), // DS-FORM
|
||||
STDUX_OPCODE = (31u << OPCODE_SHIFT | 181u << 1), // X-FORM
|
||||
STDUX_OPCODE = (31u << OPCODE_SHIFT | 181u << 1), // X-FORM
|
||||
STDX_OPCODE = (31u << OPCODE_SHIFT | 149u << XO_21_30_SHIFT), // X-FORM
|
||||
STDBRX_OPCODE = (31u << OPCODE_SHIFT | 660u << 1), // X-FORM
|
||||
|
||||
RLDICR_OPCODE = (30u << OPCODE_SHIFT | 1u << XO_27_29_SHIFT), // MD-FORM
|
||||
RLDICL_OPCODE = (30u << OPCODE_SHIFT | 0u << XO_27_29_SHIFT), // MD-FORM
|
||||
@ -1552,6 +1556,9 @@ class Assembler : public AbstractAssembler {
|
||||
inline void ld( Register d, int si16, Register s1);
|
||||
inline void ldu( Register d, int si16, Register s1);
|
||||
|
||||
// 8 bytes reversed
|
||||
inline void ldbrx( Register d, Register s1, Register s2);
|
||||
|
||||
// For convenience. Load pointer into d from b+s1.
|
||||
inline void ld_ptr(Register d, int b, Register s1);
|
||||
DEBUG_ONLY(inline void ld_ptr(Register d, ByteSize b, Register s1);)
|
||||
@ -1560,10 +1567,12 @@ class Assembler : public AbstractAssembler {
|
||||
inline void stwx( Register d, Register s1, Register s2);
|
||||
inline void stw( Register d, int si16, Register s1);
|
||||
inline void stwu( Register d, int si16, Register s1);
|
||||
inline void stwbrx( Register d, Register s1, Register s2);
|
||||
|
||||
inline void sthx( Register d, Register s1, Register s2);
|
||||
inline void sth( Register d, int si16, Register s1);
|
||||
inline void sthu( Register d, int si16, Register s1);
|
||||
inline void sthbrx( Register d, Register s1, Register s2);
|
||||
|
||||
inline void stbx( Register d, Register s1, Register s2);
|
||||
inline void stb( Register d, int si16, Register s1);
|
||||
@ -1573,6 +1582,7 @@ class Assembler : public AbstractAssembler {
|
||||
inline void std( Register d, int si16, Register s1);
|
||||
inline void stdu( Register d, int si16, Register s1);
|
||||
inline void stdux(Register s, Register a, Register b);
|
||||
inline void stdbrx( Register d, Register s1, Register s2);
|
||||
|
||||
inline void st_ptr(Register d, int si16, Register s1);
|
||||
DEBUG_ONLY(inline void st_ptr(Register d, ByteSize b, Register s1);)
|
||||
@ -2182,14 +2192,18 @@ class Assembler : public AbstractAssembler {
|
||||
inline void lbz( Register d, int si16);
|
||||
inline void ldx( Register d, Register s2);
|
||||
inline void ld( Register d, int si16);
|
||||
inline void ldbrx(Register d, Register s2);
|
||||
inline void stwx( Register d, Register s2);
|
||||
inline void stw( Register d, int si16);
|
||||
inline void stwbrx( Register d, Register s2);
|
||||
inline void sthx( Register d, Register s2);
|
||||
inline void sth( Register d, int si16);
|
||||
inline void sthbrx( Register d, Register s2);
|
||||
inline void stbx( Register d, Register s2);
|
||||
inline void stb( Register d, int si16);
|
||||
inline void stdx( Register d, Register s2);
|
||||
inline void std( Register d, int si16);
|
||||
inline void stdbrx( Register d, Register s2);
|
||||
|
||||
// PPC 2, section 3.2.1 Instruction Cache Instructions
|
||||
inline void icbi( Register s2);
|
||||
|
@ -327,6 +327,7 @@ inline void Assembler::lbzu( Register d, int si16, Register s1) { assert(d !=
|
||||
inline void Assembler::ld( Register d, int si16, Register s1) { emit_int32(LD_OPCODE | rt(d) | ds(si16) | ra0mem(s1));}
|
||||
inline void Assembler::ldx( Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
|
||||
inline void Assembler::ldu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
|
||||
inline void Assembler::ldbrx( Register d, Register s1, Register s2) { emit_int32(LDBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
|
||||
|
||||
inline void Assembler::ld_ptr(Register d, int b, Register s1) { ld(d, b, s1); }
|
||||
DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) { ld(d, in_bytes(b), s1); })
|
||||
@ -335,10 +336,12 @@ DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) {
|
||||
inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
|
||||
inline void Assembler::stw( Register d, int si16, Register s1) { emit_int32(STW_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
|
||||
inline void Assembler::stwu( Register d, int si16, Register s1) { emit_int32(STWU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
|
||||
inline void Assembler::stwbrx( Register d, Register s1, Register s2) { emit_int32(STWBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
|
||||
|
||||
inline void Assembler::sthx( Register d, Register s1, Register s2) { emit_int32(STHX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
|
||||
inline void Assembler::sth( Register d, int si16, Register s1) { emit_int32(STH_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
|
||||
inline void Assembler::sthu( Register d, int si16, Register s1) { emit_int32(STHU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
|
||||
inline void Assembler::sthbrx( Register d, Register s1, Register s2) { emit_int32(STHBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
|
||||
|
||||
inline void Assembler::stbx( Register d, Register s1, Register s2) { emit_int32(STBX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
|
||||
inline void Assembler::stb( Register d, int si16, Register s1) { emit_int32(STB_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
|
||||
@ -348,6 +351,7 @@ inline void Assembler::std( Register d, int si16, Register s1) { emit_int32(
|
||||
inline void Assembler::stdx( Register d, Register s1, Register s2) { emit_int32(STDX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
|
||||
inline void Assembler::stdu( Register d, int si16, Register s1) { emit_int32(STDU_OPCODE | rs(d) | ds(si16) | rta0mem(s1));}
|
||||
inline void Assembler::stdux(Register s, Register a, Register b) { emit_int32(STDUX_OPCODE| rs(s) | rta0mem(a) | rb(b));}
|
||||
inline void Assembler::stdbrx( Register d, Register s1, Register s2) { emit_int32(STDBRX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
|
||||
|
||||
inline void Assembler::st_ptr(Register d, int b, Register s1) { std(d, b, s1); }
|
||||
DEBUG_ONLY(inline void Assembler::st_ptr(Register d, ByteSize b, Register s1) { std(d, in_bytes(b), s1); })
|
||||
@ -944,14 +948,18 @@ inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE
|
||||
inline void Assembler::lbz( Register d, int si16 ) { emit_int32( LBZ_OPCODE | rt(d) | d1(si16));}
|
||||
inline void Assembler::ld( Register d, int si16 ) { emit_int32( LD_OPCODE | rt(d) | ds(si16));}
|
||||
inline void Assembler::ldx( Register d, Register s2) { emit_int32( LDX_OPCODE | rt(d) | rb(s2));}
|
||||
inline void Assembler::ldbrx(Register d, Register s2) { emit_int32( LDBRX_OPCODE| rt(d) | rb(s2));}
|
||||
inline void Assembler::stwx( Register d, Register s2) { emit_int32( STWX_OPCODE | rs(d) | rb(s2));}
|
||||
inline void Assembler::stw( Register d, int si16 ) { emit_int32( STW_OPCODE | rs(d) | d1(si16));}
|
||||
inline void Assembler::stwbrx(Register d, Register s2){ emit_int32(STWBRX_OPCODE| rs(d) | rb(s2));}
|
||||
inline void Assembler::sthx( Register d, Register s2) { emit_int32( STHX_OPCODE | rs(d) | rb(s2));}
|
||||
inline void Assembler::sth( Register d, int si16 ) { emit_int32( STH_OPCODE | rs(d) | d1(si16));}
|
||||
inline void Assembler::sthbrx(Register d, Register s2){ emit_int32(STHBRX_OPCODE| rs(d) | rb(s2));}
|
||||
inline void Assembler::stbx( Register d, Register s2) { emit_int32( STBX_OPCODE | rs(d) | rb(s2));}
|
||||
inline void Assembler::stb( Register d, int si16 ) { emit_int32( STB_OPCODE | rs(d) | d1(si16));}
|
||||
inline void Assembler::std( Register d, int si16 ) { emit_int32( STD_OPCODE | rs(d) | ds(si16));}
|
||||
inline void Assembler::stdx( Register d, Register s2) { emit_int32( STDX_OPCODE | rs(d) | rb(s2));}
|
||||
inline void Assembler::stdbrx(Register d, Register s2){ emit_int32(STDBRX_OPCODE| rs(d) | rb(s2));}
|
||||
|
||||
// ra0 version
|
||||
inline void Assembler::icbi( Register s2) { emit_int32( ICBI_OPCODE | rb(s2) ); }
|
||||
|
@ -37,10 +37,6 @@ class Bytes: AllStatic {
|
||||
|
||||
#if defined(VM_LITTLE_ENDIAN)
|
||||
|
||||
// Returns true, if the byte ordering used by Java is different from the native byte ordering
|
||||
// of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
|
||||
static inline bool is_Java_byte_ordering_different() { return true; }
|
||||
|
||||
// Forward declarations of the compiler-dependent implementation
|
||||
static inline u2 swap_u2(u2 x);
|
||||
static inline u4 swap_u4(u4 x);
|
||||
@ -155,10 +151,6 @@ class Bytes: AllStatic {
|
||||
|
||||
#else // !defined(VM_LITTLE_ENDIAN)
|
||||
|
||||
// Returns true, if the byte ordering used by Java is different from the nativ byte ordering
|
||||
// of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
|
||||
static inline bool is_Java_byte_ordering_different() { return false; }
|
||||
|
||||
// Thus, a swap between native and Java ordering is always a no-op:
|
||||
static inline u2 swap_u2(u2 x) { return x; }
|
||||
static inline u4 swap_u4(u4 x) { return x; }
|
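Both halves of this Bytes hunk keep the same contract: swaps are real on little-endian machines and no-ops on big-endian ones. A standalone C++ sketch of that contract (hypothetical helpers, not the HotSpot class):

#include <cstdint>

// Sketch of the Bytes contract above: Java uses big-endian ordering, so on
// little-endian hardware the swaps do work; on big-endian they are no-ops.
#if defined(VM_LITTLE_ENDIAN)
static inline uint16_t swap_u2(uint16_t x) { return uint16_t((x >> 8) | (x << 8)); }
static inline uint32_t swap_u4(uint32_t x) {
  return (x >> 24) | ((x >> 8) & 0xff00u) | ((x << 8) & 0xff0000u) | (x << 24);
}
#else
static inline uint16_t swap_u2(uint16_t x) { return x; }  // no-op, as above
static inline uint32_t swap_u4(uint32_t x) { return x; }
#endif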

@ -1,6 +1,6 @@
/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -3177,9 +3177,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
assert_different_registers(val, crc, res);

__ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
__ nand(crc, crc, crc); // ~crc
__ update_byte_crc32(crc, val, res);
__ nand(res, crc, crc); // ~crc
__ kernel_crc32_singleByteReg(crc, val, res, true);
__ mr(res, crc);
}

#undef __

@ -1,6 +1,6 @@
/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -63,18 +63,6 @@ void LIRItem::load_nonconstant() {
}


inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
LIR_Opr r = li.value()->operand();
if (r->is_register()) {
LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
} else {
// Constants or memory get loaded with sign extend on this platform.
ll->move(li.result(), dst);
}
}


//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------

@ -1426,10 +1414,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
arg2 = cc->at(1),
arg3 = cc->at(2);

// CCallingConventionRequiresIntsAsLongs
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
load_int_as_long(gen()->lir(), len, arg3);
len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.

__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);

@ -1441,6 +1428,76 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
assert(UseCRC32CIntrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);

switch (x->id()) {
case vmIntrinsics::_updateBytesCRC32C:
case vmIntrinsics::_updateDirectByteBufferCRC32C: {
bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);

LIRItem crc(x->argument_at(0), this);
LIRItem buf(x->argument_at(1), this);
LIRItem off(x->argument_at(2), this);
LIRItem end(x->argument_at(3), this);
buf.load_item();
off.load_nonconstant();
end.load_nonconstant();

// len = end - off
LIR_Opr len = end.result();
LIR_Opr tmpA = new_register(T_INT);
LIR_Opr tmpB = new_register(T_INT);
__ move(end.result(), tmpA);
__ move(off.result(), tmpB);
__ sub(tmpA, tmpB, tmpA);
len = tmpA;

LIR_Opr index = off.result();
int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
if (off.result()->is_constant()) {
index = LIR_OprFact::illegalOpr;
offset += off.result()->as_jint();
}
LIR_Opr base_op = buf.result();
LIR_Address* a = NULL;

if (index->is_valid()) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index, tmp);
index = tmp;
__ add(index, LIR_OprFact::intptrConst(offset), index);
a = new LIR_Address(base_op, index, T_BYTE);
} else {
a = new LIR_Address(base_op, offset, T_BYTE);
}

BasicTypeList signature(3);
signature.append(T_INT);
signature.append(T_ADDRESS);
signature.append(T_INT);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
const LIR_Opr result_reg = result_register_for(x->type());

LIR_Opr arg1 = cc->at(0),
arg2 = cc->at(1),
arg3 = cc->at(2);

crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
__ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.

__ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
break;
}
default: {
ShouldNotReachHere();
}
}
}
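For reference, the stub arguments the new do_update_CRC32C code assembles can be written out in plain C++. A sketch under assumed names (base_offset stands for arrayOopDesc::base_offset_in_bytes(T_BYTE); illustrative only, not C1 code):

#include <cstdint>
#include <cstddef>

// Sketch: how (crc, addr, len) handed to the CRC32C stub are derived
// from the Java-level (crc, buf, off, end) arguments above.
struct CrcArgs { uint32_t crc; const uint8_t* addr; int32_t len; };

static CrcArgs make_crc32c_args(uint32_t crc, const uint8_t* buf, int32_t off,
                                int32_t end, size_t base_offset /* array header */) {
  CrcArgs a;
  a.crc  = crc;
  a.len  = end - off;                 // len = end - off, as in the LIR above
  a.addr = buf + base_offset + off;   // leal of the LIR_Address(base, index/offset)
  return a;
}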

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
assert(x->number_of_arguments() == 3, "wrong type");
assert(UseFMA, "Needs FMA instructions support.");

@ -1467,7 +1524,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
Unimplemented();
}

@ -164,7 +164,7 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
product(bool, ZapMemory, false, "Write 0x0101... to empty memory." \
" Use this to ease debugging.") \
\
/* Use Restricted Transactional Memory for lock eliding */ \
/* Use Restricted Transactional Memory for lock elision */ \
product(bool, UseRTMLocking, false, \
"Enable RTM lock eliding for inflated locks in compiled code") \
\

@ -174,24 +174,30 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
product(bool, UseRTMDeopt, false, \
"Perform deopt and recompilation based on RTM abort ratio") \
\
product(uintx, RTMRetryCount, 5, \
product(int, RTMRetryCount, 5, \
"Number of RTM retries on lock abort or busy") \
range(0, max_jint) \
\
experimental(intx, RTMSpinLoopCount, 100, \
experimental(int, RTMSpinLoopCount, 100, \
"Spin count for lock to become free before RTM retry") \
range(0, 32767) /* immediate operand limit on ppc */ \
\
experimental(intx, RTMAbortThreshold, 1000, \
experimental(int, RTMAbortThreshold, 1000, \
"Calculate abort ratio after this number of aborts") \
range(0, max_jint) \
\
experimental(intx, RTMLockingThreshold, 10000, \
experimental(int, RTMLockingThreshold, 10000, \
"Lock count at which to do RTM lock eliding without " \
"abort ratio calculation") \
range(0, max_jint) \
\
experimental(intx, RTMAbortRatio, 50, \
experimental(int, RTMAbortRatio, 50, \
"Lock abort ratio at which to stop use RTM lock eliding") \
range(0, 100) /* natural range, checked in vm_version_ppc.cpp */ \
\
experimental(intx, RTMTotalCountIncrRate, 64, \
experimental(int, RTMTotalCountIncrRate, 64, \
"Increment total RTM attempted lock count once every n times") \
range(1, 32767) /* immediate operand limit on ppc */ \
\
experimental(intx, RTMLockingCalculationDelay, 0, \
"Number of milliseconds to wait before start calculating aborts " \

@ -1,6 +1,6 @@
/*
 * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -45,8 +45,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method

virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg);

// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);

@ -79,6 +79,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Load object from cpool->resolved_references(index).
void load_resolved_reference_at_index(Register result, Register index, Label *is_null = NULL);

// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);

void load_receiver(Register Rparam_count, Register Rrecv_dst);

// helpers for expression stack

@ -1,6 +1,6 @@
/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -454,7 +454,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
Register tmp = index; // reuse
sldi(tmp, index, LogBytesPerHeapOop);
// Load pointer for resolved_references[] objArray.
ld(result, ConstantPool::resolved_references_offset_in_bytes(), result);
ld(result, ConstantPool::cache_offset_in_bytes(), result);
ld(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
ld(result, 0, result);
#ifdef ASSERT

@ -471,6 +472,25 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result, is_null);
}

// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass) {
// int value = *(Rcpool->int_at_addr(which));
// int resolved_klass_index = extract_low_short_from_int(value);
add(Roffset, Rcpool, Roffset);
#if defined(VM_LITTLE_ENDIAN)
lhz(Roffset, sizeof(ConstantPool), Roffset);     // Roffset = resolved_klass_index
#else
lhz(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
#endif

ld(Rklass, ConstantPool::resolved_klasses_offset_in_bytes(), Rcpool); // Rklass = Rcpool->_resolved_klasses

sldi(Roffset, Roffset, LogBytesPerWord);
addi(Roffset, Roffset, Array<Klass*>::base_offset_in_bytes());
isync(); // Order load of instance Klass wrt. tags.
ldx(Rklass, Rklass, Roffset);
}
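Read back in C terms, the emitter walks the constant pool exactly as its leading comment says. A standalone sketch under assumed field layout (mirroring the comments and offsets above; illustrative, not the VM sources):

#include <cstdint>

// Hypothetical layout, not HotSpot's: what load_resolved_klass_at_offset
// computes for a constant-pool entry given a byte offset into the entries.
struct Klass;
struct ConstantPool {
  const int32_t* entry_base;   // tag words, addressed past sizeof(ConstantPool)
  Klass** resolved_klasses;    // Rcpool->_resolved_klasses backing array
};

static Klass* resolved_klass_at_offset(const ConstantPool* cp, int byte_offset) {
  int32_t value = cp->entry_base[byte_offset / 4];
  int resolved_klass_index = value & 0xffff;   // extract_low_short_from_int; the
  // lhz displacement differs by 2 on big-endian so the same low short is read.
  return cp->resolved_klasses[resolved_klass_index];
}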

// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,

@ -1,6 +1,6 @@
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -2498,14 +2498,20 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
// All transactions = total_count * RTMTotalCountIncrRate
// Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
ld(R0, RTMLockingCounters::abort_count_offset(), rtm_counters_Reg);
cmpdi(CCR0, R0, RTMAbortThreshold);
blt(CCR0, L_check_always_rtm2);
if (is_simm(RTMAbortThreshold, 16)) {   // cmpdi can handle 16bit immediate only.
cmpdi(CCR0, R0, RTMAbortThreshold);
blt(CCR0, L_check_always_rtm2);  // reload of rtm_counters_Reg not necessary
} else {
load_const_optimized(rtm_counters_Reg, RTMAbortThreshold);
cmpd(CCR0, R0, rtm_counters_Reg);
blt(CCR0, L_check_always_rtm1);  // reload of rtm_counters_Reg required
}
mulli(R0, R0, 100);

const Register tmpReg = rtm_counters_Reg;
ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
mulli(tmpReg, tmpReg, RTMTotalCountIncrRate);
mulli(tmpReg, tmpReg, RTMAbortRatio);
mulli(tmpReg, tmpReg, RTMTotalCountIncrRate); // allowable range: int16
mulli(tmpReg, tmpReg, RTMAbortRatio);         // allowable range: int16
cmpd(CCR0, R0, tmpReg);
blt(CCR0, L_check_always_rtm1); // jump to reload
if (method_data != NULL) {
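The comparison the generated code performs avoids any division. A hedged C++ sketch of the same decision (names mirror the flags above; is_simm models the 16-bit immediate range of cmpdi):

#include <cstdint>

// Sketch only: the abort-ratio test encoded above, in plain integer math.
static bool is_simm(int64_t v, int bits) {   // cmpdi takes a signed 16-bit immediate
  const int64_t lim = int64_t(1) << (bits - 1);
  return v >= -lim && v < lim;
}

static bool should_disable_rtm(int64_t abort_count, int64_t total_count,
                               int RTMTotalCountIncrRate, int RTMAbortRatio) {
  // Aborted transactions >= All transactions * RTMAbortRatio, where
  // All transactions = total_count * RTMTotalCountIncrRate (a sampled counter)
  // and RTMAbortRatio is a percentage, hence the factor of 100 on the left.
  return abort_count * 100 >= total_count * RTMTotalCountIncrRate * RTMAbortRatio;
}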

@ -2521,7 +2527,13 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload
bind(L_check_always_rtm2);
ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
cmpdi(CCR0, tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
int64_t thresholdValue = RTMLockingThreshold / RTMTotalCountIncrRate;
if (is_simm(thresholdValue, 16)) {   // cmpdi can handle 16bit immediate only.
cmpdi(CCR0, tmpReg, thresholdValue);
} else {
load_const_optimized(R0, thresholdValue);
cmpd(CCR0, tmpReg, R0);
}
blt(CCR0, L_done);
if (method_data != NULL) {
// Set rtm_state to "always rtm" in MDO.

@ -2620,7 +2632,7 @@ void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
Label L_noincrement;
if (RTMTotalCountIncrRate > 1) {
branch_on_random_using_tb(tmp, (int)RTMTotalCountIncrRate, L_noincrement);
branch_on_random_using_tb(tmp, RTMTotalCountIncrRate, L_noincrement);
}
assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
load_const_optimized(tmp, (address)stack_rtm_counters->total_count_addr(), R0);

@ -2687,7 +2699,7 @@ void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
Label L_noincrement;
if (RTMTotalCountIncrRate > 1) {
branch_on_random_using_tb(R0, (int)RTMTotalCountIncrRate, L_noincrement);
branch_on_random_using_tb(R0, RTMTotalCountIncrRate, L_noincrement);
}
assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
load_const(R0, (address)rtm_counters->total_count_addr(), tmpReg);

@ -4120,7 +4132,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
 * @param table register pointing to CRC table
 */
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool loopAlignment, bool invertCRC) {
Register data, bool loopAlignment) {
assert_different_registers(crc, buf, len, table, data);

Label L_mainLoop, L_done;

@ -4131,10 +4143,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
clrldi_(len, len, 32);                         // Enforce 32 bit. Anything to do?
beq(CCR0, L_done);

if (invertCRC) {
nand(crc, crc, crc);                           // ~c
}

mtctr(len);
align(mainLoop_alignment);
BIND(L_mainLoop);

@ -4143,10 +4151,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
update_byte_crc32(crc, data, table);
bdnz(L_mainLoop);                              // Iterate.

if (invertCRC) {
nand(crc, crc, crc);                           // ~c
}

bind(L_done);
}

@ -4203,7 +4207,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
 */
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0,  Register t1,  Register t2,  Register t3,
Register tc0, Register tc1, Register tc2, Register tc3) {
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);

Label L_mainLoop, L_tail;

@ -4217,14 +4222,16 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
const int complexThreshold = 2*mainLoop_stepping;

// Don't test for len <= 0 here. This pathological case should not occur anyway.
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
// for all well-behaved cases. The situation itself is detected and handled correctly
// within update_byteLoop_crc32.
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");

BLOCK_COMMENT("kernel_crc32_2word {");

nand(crc, crc, crc);                           // ~c
if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}

// Check for short (<mainLoop_stepping) buffer.
cmpdi(CCR0, len, complexThreshold);

@ -4245,7 +4252,7 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
mr(len, tmp);      // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
}
update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
}

srdi(tmp2, len, log_stepping);                 // #iterations for mainLoop

@ -4281,9 +4288,11 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len

// Process last few (<complexThreshold) bytes of buffer.
BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false, false);
update_byteLoop_crc32(crc, buf, len, table, data, false);

nand(crc, crc, crc);                           // ~c
if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_2word");
}

@ -4297,7 +4306,8 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
 */
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0,  Register t1,  Register t2,  Register t3,
Register tc0, Register tc1, Register tc2, Register tc3) {
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);

Label L_mainLoop, L_tail;

@ -4311,14 +4321,16 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
const int complexThreshold = 2*mainLoop_stepping;

// Don't test for len <= 0 here. This pathological case should not occur anyway.
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
// for all well-behaved cases. The situation itself is detected and handled correctly
// within update_byteLoop_crc32.
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");

BLOCK_COMMENT("kernel_crc32_1word {");

nand(crc, crc, crc);                           // ~c
if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}

// Check for short (<mainLoop_stepping) buffer.
cmpdi(CCR0, len, complexThreshold);

@ -4339,7 +4351,7 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
mr(len, tmp);      // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
}
update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
}

srdi(tmp2, len, log_stepping);                 // #iterations for mainLoop

@ -4374,9 +4386,11 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len

// Process last few (<complexThreshold) bytes of buffer.
BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false, false);
update_byteLoop_crc32(crc, buf, len, table, data, false);

nand(crc, crc, crc);                           // ~c
if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_1word");
}

@ -4389,16 +4403,24 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
 * Uses R7_ARG5, R8_ARG6 as work registers.
 */
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0,  Register t1,  Register t2,  Register t3) {
Register t0,  Register t1,  Register t2,  Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);

Register data = t0;                   // Holds the current byte to be folded into crc.

BLOCK_COMMENT("kernel_crc32_1byte {");

// Process all bytes in a single-byte loop.
update_byteLoop_crc32(crc, buf, len, table, data, true, true);
if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}

// Process all bytes in a single-byte loop.
update_byteLoop_crc32(crc, buf, len, table, data, true);

if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_1byte");
}

@ -4416,7 +4438,8 @@ void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len
 */
void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4) {
Register t0, Register t1, Register t2, Register t3, Register t4,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);

Label L_alignedHead, L_tail, L_alignTail, L_start, L_end;

@ -4434,13 +4457,15 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
Register tc0 = t4;
Register tc1 = constants;
Register tc2 = barretConstants;
kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table);
kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
b(L_end);

BIND(L_start);

// 2. ~c
nand(crc, crc, crc);
if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}

// 3. calculate from 0 to first 128bit-aligned address
clrldi_(prealign, buf, 57);

@ -4449,7 +4474,7 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
subfic(prealign, prealign, 128);

subf(len, prealign, len);
update_byteLoop_crc32(crc, buf, prealign, table, t2, false, false);
update_byteLoop_crc32(crc, buf, prealign, table, t2, false);

// 4. calculate from first 128bit-aligned address to last 128bit-aligned address
BIND(L_alignedHead);

@ -4464,12 +4489,14 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
cmpdi(CCR0, postalign, 0);
beq(CCR0, L_tail);

update_byteLoop_crc32(crc, buf, postalign, table, t2, false, false);
update_byteLoop_crc32(crc, buf, postalign, table, t2, false);

BIND(L_tail);

// 6. ~c
nand(crc, crc, crc);
if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}

BIND(L_end);

@ -4961,16 +4988,35 @@ void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Regi
offsetInt -= 8; ld(R31, offsetInt, R1_SP);
}

void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp, bool invertCRC) {
assert_different_registers(crc, buf, /* len,  not used!! */ table, tmp);

BLOCK_COMMENT("kernel_crc32_singleByte:");
nand(crc, crc, crc);                // ~c
if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}

lbz(tmp, 0, buf);                   // Byte from buffer, zero-extended.
update_byte_crc32(crc, tmp, table);

nand(crc, crc, crc);                // ~c
if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}
}

void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table, bool invertCRC) {
assert_different_registers(crc, val, table);

BLOCK_COMMENT("kernel_crc32_singleByteReg:");
if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}

update_byte_crc32(crc, val, table);

if (invertCRC) {
nand(crc, crc, crc);                      // 1s complement of crc
}
}

// dest_lo += src1 + src2

@ -1,6 +1,6 @@
/*
 * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -819,33 +819,47 @@ class MacroAssembler: public Assembler {
Register tmp6, Register tmp7, Register tmp8, Register tmp9, Register tmp10,
Register tmp11, Register tmp12, Register tmp13);

// CRC32 Intrinsics.
// Emitters for CRC32 calculation.
// A note on invertCRC:
// Unfortunately, internal representation of crc differs between CRC32 and CRC32C.
// CRC32 holds its current crc value in the externally visible representation.
// CRC32C holds its current crc value in internal format, ready for updating.
// Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
// In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
// The bool invertCRC parameter indicates whether bit-flipping is required before updates.
void load_reverse_32(Register dst, Register src);
int  crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3);
void fold_byte_crc32(Register crc, Register val, Register table, Register tmp);
void fold_8bit_crc32(Register crc, Register table, Register tmp);
void update_byte_crc32(Register crc, Register val, Register table);
void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool loopAlignment, bool invertCRC);
Register data, bool loopAlignment);
void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3);
void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3);
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC);
void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3);
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC);
void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3);
Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
void kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4);
Register t0, Register t1, Register t2, Register t3, Register t4,
bool invertCRC);
void kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2);

void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC);
void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
bool invertCRC);

//
// Debugging
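The invertCRC contract documented in the note above can be summarized in scalar form. A minimal sketch (table-driven byte loop; standalone names assumed, not the emitter itself):

#include <cstdint>
#include <cstddef>

// Sketch of the convention described above: CRC32 stores its value in
// external (already flipped) form, CRC32C in internal form, so
// invertCRC == true brackets the update with two bit-flips.
static uint32_t update_crc(uint32_t crc, const uint8_t* buf, size_t len,
                           const uint32_t table[256], bool invertCRC) {
  if (invertCRC) crc = ~crc;                         // external -> internal
  for (size_t i = 0; i < len; ++i)
    crc = (crc >> 8) ^ table[(crc ^ buf[i]) & 0xff]; // fold in one byte
  if (invertCRC) crc = ~crc;                         // internal -> external
  return crc;
}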

@ -1,78 +0,0 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"

// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument.  Example:
//
//      oop obj;
//      int size = obj->klass()->klass_part()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//

//=====================================================================

// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.

#define __ masm->

void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;

// Get ready to generate dummy methods.

CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);

// There are more general problems with CDS on ppc, so I can not
// really test this. But having this instead of Unimplemented() allows
// us to pass TestOptionsWithRanges.java.
__ unimplemented();
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *

@ -71,7 +71,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register temp_reg, Register temp2_reg,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Klass* klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj_reg);

@ -174,8 +174,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp, temp2);
__ verify_oop(method_temp);
// The following assumes that a Method* is normally compressed in the vmtarget field:
__ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);
__ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), method_temp);
__ verify_oop(method_temp);
__ ld(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), method_temp);

if (VerifyMethodHandles && !for_compiler_entry) {
// Make sure recv is already on stack.

@ -361,14 +362,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
}
__ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
__ load_heap_oop(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), member_reg);
__ ld(R19_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), member_reg);
break;

case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
}
__ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
__ load_heap_oop(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), member_reg);
__ ld(R19_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), member_reg);
break;
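The linkers above switch from reading a Method* straight out of MemberName.vmtarget to an extra hop through a ResolvedMethodName oop. A hedged sketch of the new chain (hypothetical types standing in for the field offsets used above; not VM code):

// Standalone sketch: the two-step lookup now emitted for
// _linkToSpecial/_linkToStatic (load_heap_oop, then ld).
struct Method;
struct ResolvedMethodName { Method* vmtarget; };          // ResolvedMethodName.vmtarget
struct MemberName { ResolvedMethodName* method; };        // MemberName.method

static Method* resolve_member_name_target(const MemberName* mn) {
  return mn->method->vmtarget;   // two dependent loads, as in the assembly
}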

case vmIntrinsics::_linkToVirtual:

@ -1,5 +1,5 @@
//
// Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2012, 2016 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//

@ -2053,12 +2053,12 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}

// Vector ideal reg.
const int Matcher::vector_ideal_reg(int size) {
const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8 && size == 8, "");
return Op_RegL;
}

const int Matcher::vector_shift_count_ideal_reg(int size) {
const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported");
return Node::NotAMachineReg;
}

@ -5842,6 +5842,16 @@ instruct loadConN_lo(iRegNdst dst, iRegNsrc src1, immN src2) %{
ins_pipe(pipe_class_default);
%}

instruct rldicl(iRegLdst dst, iRegLsrc src, immI16 shift, immI16 mask_begin) %{
effect(DEF dst, USE src, USE shift, USE mask_begin);

size(4);
ins_encode %{
__ rldicl($dst$$Register, $src$$Register, $shift$$constant, $mask_begin$$constant);
%}
ins_pipe(pipe_class_default);
%}

// Needed to postalloc expand loadConN: ConN is loaded as ConI
// leaving the upper 32 bits with sign-extension bits.
// This clears these bits: dst = src & 0xFFFFFFFF.

@ -10519,6 +10529,16 @@ instruct convB2I_reg(iRegIdst dst, iRegIsrc src, immI_24 amount) %{
ins_pipe(pipe_class_default);
%}

instruct extsh(iRegIdst dst, iRegIsrc src) %{
effect(DEF dst, USE src);

size(4);
ins_encode %{
__ extsh($dst$$Register, $src$$Register);
%}
ins_pipe(pipe_class_default);
%}

// LShiftI 16 + RShiftI 16 converts short to int.
instruct convS2I_reg(iRegIdst dst, iRegIsrc src, immI_16 amount) %{
match(Set dst (RShiftI (LShiftI src amount) amount));

@ -12682,8 +12702,7 @@ instruct insrwi(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
// Just slightly faster than java implementation.
instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
match(Set dst (ReverseBytesI src));
predicate(UseCountLeadingZerosInstructionsPPC64);
ins_cost(DEFAULT_COST);
ins_cost(7*DEFAULT_COST);

expand %{
immI16 imm24 %{ (int) 24 %}

@ -12705,6 +12724,172 @@ instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
%}
%}

instruct bytes_reverse_long_Ex(iRegLdst dst, iRegLsrc src) %{
match(Set dst (ReverseBytesL src));
ins_cost(15*DEFAULT_COST);

expand %{
immI16 imm56 %{ (int) 56 %}
immI16 imm48 %{ (int) 48 %}
immI16 imm40 %{ (int) 40 %}
immI16 imm32 %{ (int) 32 %}
immI16 imm24 %{ (int) 24 %}
immI16 imm16 %{ (int) 16 %}
immI16 imm8 %{ (int) 8 %}
immI16 imm0 %{ (int) 0 %}
iRegLdst tmpL1;
iRegLdst tmpL2;
iRegLdst tmpL3;
iRegLdst tmpL4;
iRegLdst tmpL5;
iRegLdst tmpL6;

// src   : |a|b|c|d|e|f|g|h|
rldicl(tmpL1, src, imm8, imm24);    // tmpL1 : | | | |e|f|g|h|a|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |a| | | |e|
rldicl(tmpL3, tmpL2, imm32, imm0);  // tmpL3 : | | | |e| | | |a|
rldicl(tmpL1, src, imm16, imm24);   // tmpL1 : | | | |f|g|h|a|b|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |b| | | |f|
rldicl(tmpL4, tmpL2, imm40, imm0);  // tmpL4 : | | |f| | | |b| |
orL_reg_reg(tmpL5, tmpL3, tmpL4);   // tmpL5 : | | |f|e| | |b|a|
rldicl(tmpL1, src, imm24, imm24);   // tmpL1 : | | | |g|h|a|b|c|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |c| | | |g|
rldicl(tmpL3, tmpL2, imm48, imm0);  // tmpL3 : | |g| | | |c| | |
rldicl(tmpL1, src, imm32, imm24);   // tmpL1 : | | | |h|a|b|c|d|
rldicl(tmpL2, tmpL1, imm32, imm24); // tmpL2 : | | | |d| | | |h|
rldicl(tmpL4, tmpL2, imm56, imm0);  // tmpL4 : |h| | | |d| | | |
orL_reg_reg(tmpL6, tmpL3, tmpL4);   // tmpL6 : |h|g| | |d|c| | |
orL_reg_reg(dst, tmpL5, tmpL6);     // dst   : |h|g|f|e|d|c|b|a|
%}
%}
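As a cross-check on the rotate-and-mask table in the expansion above, the whole sequence is just a 64-bit byte swap. A small reference sketch (illustrative C++, not ADL):

#include <cstdint>

// Reference semantics of ReverseBytesL, which the rldicl/or sequence
// implements: byte i of the result is byte (7 - i) of the source.
static uint64_t reverse_bytes_long(uint64_t x) {
  uint64_t r = 0;
  for (int i = 0; i < 8; ++i) {
    r = (r << 8) | (x & 0xff);  // pull off the low byte, push into the result
    x >>= 8;
  }
  return r;
}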
||||
|
||||
instruct bytes_reverse_ushort_Ex(iRegIdst dst, iRegIsrc src) %{
|
||||
match(Set dst (ReverseBytesUS src));
|
||||
ins_cost(2*DEFAULT_COST);
|
||||
|
||||
expand %{
|
||||
immI16 imm16 %{ (int) 16 %}
|
||||
immI16 imm8 %{ (int) 8 %}
|
||||
|
||||
urShiftI_reg_imm(dst, src, imm8);
|
||||
insrwi(dst, src, imm16, imm8);
|
||||
%}
|
||||
%}
|
||||
|
||||
instruct bytes_reverse_short_Ex(iRegIdst dst, iRegIsrc src) %{
|
||||
match(Set dst (ReverseBytesS src));
|
||||
ins_cost(3*DEFAULT_COST);
|
||||
|
||||
expand %{
|
||||
immI16 imm16 %{ (int) 16 %}
|
||||
immI16 imm8 %{ (int) 8 %}
|
||||
iRegLdst tmpI1;
|
||||
|
||||
urShiftI_reg_imm(tmpI1, src, imm8);
|
||||
insrwi(tmpI1, src, imm16, imm8);
|
||||
extsh(dst, tmpI1);
|
||||
%}
|
||||
%}
|
||||
|
||||
// Load Integer reversed byte order
|
||||
instruct loadI_reversed(iRegIdst dst, indirect mem) %{
|
||||
match(Set dst (ReverseBytesI (LoadI mem)));
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
size(4);
|
||||
ins_encode %{
|
||||
__ lwbrx($dst$$Register, $mem$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
// Load Long - aligned and reversed
|
||||
instruct loadL_reversed(iRegLdst dst, indirect mem) %{
|
||||
match(Set dst (ReverseBytesL (LoadL mem)));
|
||||
predicate(VM_Version::has_ldbrx());
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
size(4);
|
||||
ins_encode %{
|
||||
__ ldbrx($dst$$Register, $mem$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
// Load unsigned short / char reversed byte order
|
||||
instruct loadUS_reversed(iRegIdst dst, indirect mem) %{
|
||||
match(Set dst (ReverseBytesUS (LoadUS mem)));
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
size(4);
|
||||
ins_encode %{
|
||||
__ lhbrx($dst$$Register, $mem$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
// Load short reversed byte order
|
||||
instruct loadS_reversed(iRegIdst dst, indirect mem) %{
|
||||
match(Set dst (ReverseBytesS (LoadS mem)));
|
||||
ins_cost(MEMORY_REF_COST + DEFAULT_COST);
|
||||
|
||||
size(8);
|
||||
ins_encode %{
|
||||
__ lhbrx($dst$$Register, $mem$$Register);
|
||||
__ extsh($dst$$Register, $dst$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
// Store Integer reversed byte order
|
||||
instruct storeI_reversed(iRegIsrc src, indirect mem) %{
|
||||
match(Set mem (StoreI mem (ReverseBytesI src)));
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
size(4);
|
||||
ins_encode %{
|
||||
__ stwbrx($src$$Register, $mem$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
// Store Long reversed byte order
|
||||
instruct storeL_reversed(iRegLsrc src, indirect mem) %{
|
||||
match(Set mem (StoreL mem (ReverseBytesL src)));
|
||||
predicate(VM_Version::has_stdbrx());
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
size(4);
|
||||
ins_encode %{
|
||||
__ stdbrx($src$$Register, $mem$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
// Store unsigned short / char reversed byte order
|
||||
instruct storeUS_reversed(iRegIsrc src, indirect mem) %{
|
||||
match(Set mem (StoreC mem (ReverseBytesUS src)));
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
size(4);
|
||||
ins_encode %{
|
||||
__ sthbrx($src$$Register, $mem$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
// Store short reversed byte order
|
||||
instruct storeS_reversed(iRegIsrc src, indirect mem) %{
|
||||
match(Set mem (StoreC mem (ReverseBytesS src)));
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
size(4);
|
||||
ins_encode %{
|
||||
__ sthbrx($src$$Register, $mem$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
//---------- Replicate Vector Instructions ------------------------------------
|
||||
|
||||
// Insrdi does replicate if src == dst.
|
||||
|
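// Illustrative sketch (an assumption added for clarity, not part of this change):
// with src == dst,
//   insrdi(dst, src, 32, 0)
// inserts the low 32 bits of src into the high 32 bits of dst, e.g.
//   dst = 0x00000000_12345678  ==>  dst = 0x12345678_12345678,
// so a single insrdi replicates a word across the doubleword.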
@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3276,6 +3276,36 @@ class StubGenerator: public StubCodeGenerator {
return start;
}


// Compute CRC32/CRC32C function.
void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {

// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process

const Register t0 = R2;
const Register t1 = R7;
const Register t2 = R8;
const Register t3 = R9;
const Register tc0 = R10;
const Register tc1 = R11;
const Register tc2 = R12;

BLOCK_COMMENT("Stub body {");
assert_different_registers(crc, data, dataLen, table);

__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);

BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr();

BLOCK_COMMENT("} Stub body");
}
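// Usage sketch (assumption, not part of this change): both CRC stubs funnel
// into this one emitter and differ only in the loaded table and the bit-flip
// convention, roughly
//   generate_CRC_updateBytes(name, table, true);   // CRC32:  crc arrives in external (bit-flipped) form
//   generate_CRC_updateBytes(name, table, false);  // CRC32C: crc already in internal form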


/**
* Arguments:
*
@ -3296,14 +3326,14 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); // Remember stub start address (is rtn value).

const Register table = R6; // crc table address

#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process

const Register table = R6; // crc table address

#ifdef VM_LITTLE_ENDIAN
if (VM_Version::has_vpmsumb()) {
const Register constants = R2; // constants address
const Register bconstants = R8; // barret table address
@ -3321,7 +3351,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);

__ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4);
__ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);

BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
@ -3331,31 +3361,79 @@ class StubGenerator: public StubCodeGenerator {
} else
#endif
{
const Register t0 = R2;
const Register t1 = R7;
const Register t2 = R8;
const Register t3 = R9;
const Register tc0 = R10;
const Register tc1 = R11;
const Register tc2 = R12;
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, true);
}

return start;
}


/**
* Arguments:
*
* Inputs:
* R3_ARG1 - int crc
* R4_ARG2 - byte* buf
* R5_ARG3 - int length (of buffer)
*
* scratch:
* R2, R6-R12
*
* Output:
* R3_RET - int crc result
*/
// Compute CRC32C function.
address generate_CRC32C_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); // Remember stub start address (is rtn value).

const Register table = R6; // crc table address

#if 0 // no vector support yet for CRC32C
#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process

if (VM_Version::has_vpmsumb()) {
const Register constants = R2; // constants address
const Register bconstants = R8; // barret table address

const Register t0 = R9;
const Register t1 = R10;
const Register t2 = R11;
const Register t3 = R12;
const Register t4 = R7;

BLOCK_COMMENT("Stub body {");
assert_different_registers(crc, data, dataLen, table);

StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);

__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table);
__ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);

BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr();

BLOCK_COMMENT("} Stub body");
} else
#endif
#endif
{
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, false);
}

return start;
}


// Initialization
void generate_initial() {
// Generates all stubs and initializes the entry points
@ -3383,6 +3461,12 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_crc_table_adr = (address)StubRoutines::ppc64::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
}

// CRC32C Intrinsics.
if (UseCRC32CIntrinsics) {
StubRoutines::_crc32c_table_addr = (address)StubRoutines::ppc64::_crc32c_table;
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
}
}

void generate_all() {

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,13 +55,16 @@ class ppc64 {

// CRC32 Intrinsics.
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint* _constants;
static juint* _barret_constants;

public:

// CRC32 Intrinsics.
static void generate_load_table_addr(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_constants_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_barret_constants_addr(MacroAssembler* masm, Register table);
static juint* generate_crc_constants();

File diff suppressed because it is too large
@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017 SAP SE. All rights reserved.
* Copyright (c) 2015, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -643,12 +643,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
__ unimplemented("generate_continuation_for");
return entry;
}

// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
@ -692,6 +686,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
#endif
__ sldi(size, size, Interpreter::logStackElementSize);
__ add(R15_esp, R15_esp, size);

__ check_and_handle_popframe(R11_scratch1);
__ check_and_handle_earlyret(R11_scratch1);

__ dispatch_next(state, step);
return entry;
}
@ -1894,7 +1892,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ lwz(crc, 2*wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.

StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
__ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);
__ kernel_crc32_singleByte(crc, data, dataLen, table, tmp, true);

// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@ -1910,7 +1908,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
return NULL;
}

// CRC32 Intrinsics.

/**
* Method entry for static native methods:
* int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
@ -1986,7 +1984,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// Performance measurements show the 1word and 2word variants to be almost equivalent,
// with very light advantages for the 1word variant. We chose the 1word variant for
// code compactness.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, true);

// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@ -2002,8 +2000,88 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
return NULL;
}

// Not supported

/**
* Method entry for intrinsic-candidate (non-native) methods:
* int java.util.zip.CRC32C.updateBytes( int crc, byte[] b, int off, int end)
* int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
* Unlike CRC32, CRC32C does not have any methods marked as native.
* CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
**/
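// Reference sketch (assumption, not part of this change): because CRC32C passes
// an exclusive end index rather than a length, the entry below derives the byte
// count itself, conceptually
//   dataLen = end - off;                       // #bytes to process
//   crc = kernel(crc, &buf[off], dataLen);     // shared table-driven CRC kernel
// before handing off to the 1word emitter.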
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32CIntrinsics) {
address start = __ pc(); // Remember stub start address (is rtn value).

// We don't generate a local frame and don't align the stack, because
// we don't even call stub code (we generate the code inline)
// and there is no safepoint on this path.

// Load parameters.
// R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
const Register argP = R15_esp;
const Register crc = R3_ARG1; // crc value
const Register data = R4_ARG2; // address of java byte array
const Register dataLen = R5_ARG3; // source data len
const Register table = R6_ARG4; // address of crc32c table

const Register t0 = R9; // scratch registers for crc calculation
const Register t1 = R10;
const Register t2 = R11;
const Register t3 = R12;

const Register tc0 = R2; // registers to hold pre-calculated column addresses
const Register tc1 = R7;
const Register tc2 = R8;
const Register tc3 = table; // table address is reconstructed at the end of kernel_crc32_* emitters

const Register tmp = t0; // Only used very locally to calculate byte buffer address.

// Arguments are reversed on java expression stack.
// Calculate address of start element.
if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateDirectByteBuffer".
BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
// crc @ (SP + 5W) (32bit)
// buf @ (SP + 3W) (64bit ptr to long array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off
__ ld( data, 3*wordSize, argP); // start of byte buffer
__ lwa( tmp, 2*wordSize, argP); // byte buffer offset
__ lwa( dataLen, 1*wordSize, argP); // #bytes to process
__ lwz( crc, 5*wordSize, argP); // current crc state
__ add( data, data, tmp); // Add byte buffer offset.
__ sub( dataLen, dataLen, tmp); // (end_index - offset)
} else { // Used for "updateBytes update".
BLOCK_COMMENT("CRC32C_updateBytes {");
// crc @ (SP + 4W) (32bit)
// buf @ (SP + 3W) (64bit ptr to byte array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off + base_offset
__ ld( data, 3*wordSize, argP); // start of byte buffer
__ lwa( tmp, 2*wordSize, argP); // byte buffer offset
__ lwa( dataLen, 1*wordSize, argP); // #bytes to process
__ add( data, data, tmp); // add byte buffer offset
__ sub( dataLen, dataLen, tmp); // (end_index - offset)
__ lwz( crc, 4*wordSize, argP); // current crc state
__ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}

StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);

// Performance measurements show the 1word and 2word variants to be almost equivalent,
// with very light advantages for the 1word variant. We chose the 1word variant for
// code compactness.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, false);

// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
__ blr();

BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
return start;
}

return NULL;
}


@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 SAP SE. All rights reserved.
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3660,11 +3660,9 @@ void TemplateTable::_new() {
__ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
__ bne(CCR0, Lslow_case);

// Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
// Get instanceKlass
__ sldi(Roffset, Rindex, LogBytesPerWord);
__ addi(Rscratch, Rcpool, sizeof(ConstantPool));
__ isync(); // Order load of instance Klass wrt. tags.
__ ldx(RinstanceKlass, Roffset, Rscratch);
__ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);

// Make sure klass is fully initialized and get instance_size.
__ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
@ -3722,7 +3720,7 @@ void TemplateTable::_new() {
__ bge(CCR0, Lslow_case);

// Increment waste limit to prevent getting stuck on this slow path.
__ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
__ add_const_optimized(RtlabWasteLimitValue, RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment());
__ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
}
// else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
@ -3875,9 +3873,7 @@ void TemplateTable::checkcast() {
// Extract target class from constant pool.
__ bind(Lquicked);
__ sldi(Roffset, Roffset, LogBytesPerWord);
__ addi(Rcpool, Rcpool, sizeof(ConstantPool));
__ isync(); // Order load of specified Klass wrt. tags.
__ ldx(RspecifiedKlass, Rcpool, Roffset);
__ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);

// Do the checkcast.
__ bind(Lresolved);
@ -3939,9 +3935,7 @@ void TemplateTable::instanceof() {
// Extract target class from constant pool.
__ bind(Lquicked);
__ sldi(Roffset, Roffset, LogBytesPerWord);
__ addi(Rcpool, Rcpool, sizeof(ConstantPool));
__ isync(); // Order load of specified Klass wrt. tags.
__ ldx(RspecifiedKlass, Rcpool, Roffset);
__ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);

// Do the checkcast.
__ bind(Lresolved);

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -111,7 +111,7 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
"ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
"ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_fsqrt() ? " fsqrt" : ""),
(has_isel() ? " isel" : ""),
(has_lxarxeh() ? " lxarxeh" : ""),
@ -126,7 +126,9 @@ void VM_Version::initialize() {
(has_vpmsumb() ? " vpmsumb" : ""),
(has_tcheck() ? " tcheck" : ""),
(has_mfdscr() ? " mfdscr" : ""),
(has_vsx() ? " vsx" : "")
(has_vsx() ? " vsx" : ""),
(has_ldbrx() ? " ldbrx" : ""),
(has_stdbrx() ? " stdbrx" : "")
// Make sure number of %s matches num_features!
);
_features_string = os::strdup(buf);
@ -172,18 +174,27 @@ void VM_Version::initialize() {

assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");

// Implementation does not use any of the vector instructions
// available with Power8. Their exploitation is still pending.
// If defined(VM_LITTLE_ENDIAN) and running on Power8 or newer hardware,
// the implementation uses the vector instructions available with Power8.
// In all other cases, the implementation uses only generally available instructions.
if (!UseCRC32Intrinsics) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
}
}

if (UseCRC32CIntrinsics) {
if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
warning("CRC32C intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
// Implementation does not use any of the vector instructions available with Power8.
// Their exploitation is still pending (aka "work in progress").
if (!UseCRC32CIntrinsics) {
if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
}
}

// TODO: Provide implementation.
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}

// The AES intrinsic stubs require AES instruction support.
@ -245,11 +256,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}

if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}

if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true;
}
@ -659,6 +665,8 @@ void VM_Version::determine_features() {
a->tcheck(0); // code[12] -> tcheck
a->mfdscr(R0); // code[13] -> mfdscr
a->lxvd2x(VSR0, R3_ARG1); // code[14] -> vsx
a->ldbrx(R7, R3_ARG1, R4_ARG2); // code[15] -> ldbrx
a->stdbrx(R7, R3_ARG1, R4_ARG2); // code[16] -> stdbrx
a->blr();

// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@ -708,6 +716,8 @@ void VM_Version::determine_features() {
if (code[feature_cntr++]) features |= tcheck_m;
if (code[feature_cntr++]) features |= mfdscr_m;
if (code[feature_cntr++]) features |= vsx_m;
if (code[feature_cntr++]) features |= ldbrx_m;
if (code[feature_cntr++]) features |= stdbrx_m;

// Print the detection code.
if (PrintAssembly) {

@ -47,6 +47,8 @@ protected:
tcheck,
mfdscr,
vsx,
ldbrx,
stdbrx,
num_features // last entry to count features
};
enum Feature_Flag_Set {
@ -66,6 +68,8 @@ protected:
tcheck_m = (1 << tcheck ),
mfdscr_m = (1 << mfdscr ),
vsx_m = (1 << vsx ),
ldbrx_m = (1 << ldbrx ),
stdbrx_m = (1 << stdbrx ),
all_features_m = (unsigned long)-1
};

@ -100,6 +104,8 @@ public:
static bool has_tcheck() { return (_features & tcheck_m) != 0; }
static bool has_mfdscr() { return (_features & mfdscr_m) != 0; }
static bool has_vsx() { return (_features & vsx_m) != 0; }
static bool has_ldbrx() { return (_features & ldbrx_m) != 0; }
static bool has_stdbrx() { return (_features & stdbrx_m) != 0; }

// Assembler testing
static void allow_all();

@ -28,8 +28,6 @@

#undef LUCY_DBG

#define NearLabel Label

// Immediate is an abstraction to represent the various immediate
// operands which exist on z/Architecture. Neither this class nor
// instances hereof have state of their own. It consists of methods only.

@ -42,12 +42,6 @@ class Bytes: AllStatic {
//
// In short, it makes no sense on z/Architecture to piecemeal get or put unaligned data.

// Returns true if the byte ordering used by Java is different from
// the native byte ordering of the underlying machine.
// z/Arch is big endian, thus, a swap between native and Java ordering
// is always a no-op.
static inline bool is_Java_byte_ordering_different() { return false; }

// Only swap on little endian machines => suffix `_le'.
static inline u2 swap_u2_le(u2 x) { return x; }
static inline u4 swap_u4_le(u4 x) { return x; }

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3048,9 +3048,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
assert_different_registers(val, crc, res);

__ load_const_optimized(res, StubRoutines::crc_table_addr());
__ not_(crc, noreg, false); // ~crc
__ update_byte_crc32(crc, val, res);
__ not_(res, crc, false); // ~crc
__ kernel_crc32_singleByteReg(crc, val, res, true);
__ z_lgfr(res, crc);
}

#undef __

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,20 +61,6 @@ void LIRItem::load_nonconstant(int bits) {
}
}

inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
LIR_Opr r = li.value()->operand();
if (r->is_constant()) {
// Constants get loaded with sign extend on this platform.
ll->move(li.result(), dst);
} else {
if (!r->is_register()) {
li.load_item_force(dst);
}
LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
}
}

//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------
@ -1224,10 +1210,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
LIR_Opr arg2 = cc->at(1);
LIR_Opr arg3 = cc->at(2);

// CCallingConventionRequiresIntsAsLongs
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
load_int_as_long(gen()->lir(), len, arg3);
len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.

__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
@ -1240,7 +1225,70 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
Unimplemented();
assert(UseCRC32CIntrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);

switch (x->id()) {
case vmIntrinsics::_updateBytesCRC32C:
case vmIntrinsics::_updateDirectByteBufferCRC32C: {
bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);

LIRItem crc(x->argument_at(0), this);
LIRItem buf(x->argument_at(1), this);
LIRItem off(x->argument_at(2), this);
LIRItem end(x->argument_at(3), this);
buf.load_item();
off.load_nonconstant();
end.load_nonconstant();

// len = end - off
LIR_Opr len = end.result();
LIR_Opr tmpA = new_register(T_INT);
LIR_Opr tmpB = new_register(T_INT);
__ move(end.result(), tmpA);
__ move(off.result(), tmpB);
__ sub(tmpA, tmpB, tmpA);
len = tmpA;

LIR_Opr index = off.result();
int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
if (off.result()->is_constant()) {
index = LIR_OprFact::illegalOpr;
offset += off.result()->as_jint();
}
LIR_Opr base_op = buf.result();

if (index->is_valid()) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index, tmp);
index = tmp;
}

LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE);

BasicTypeList signature(3);
signature.append(T_INT);
signature.append(T_ADDRESS);
signature.append(T_INT);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
const LIR_Opr result_reg = result_register_for (x->type());

LIR_Opr arg1 = cc->at(0);
LIR_Opr arg2 = cc->at(1);
LIR_Opr arg3 = cc->at(2);

crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
__ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.

__ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
break;
}
default: {
ShouldNotReachHere();
}
}
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
@ -1271,4 +1319,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}


@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -107,24 +107,15 @@ void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
// TODO: Maybe implement +VerifyActivationFrameSize here.
// verify_thread(); // Too slow. We will just verify on method entry & exit.
verify_oop(Z_tos, state);
#ifdef FAST_DISPATCH
if (table == Interpreter::dispatch_table(state)) {
// Use IdispatchTables.
add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
// Add offset to correct dispatch table.
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // Multiply by wordSize.
ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // Get entry addr.
} else
#endif
{
// Dispatch table to use.
load_absolute_address(Z_tmp_1, (address) table); // Z_tmp_1 = table;

// 0 <= Z_bytecode < 256 => Use a 32 bit shift, because it is shorter than sllg.
// Z_bytecode must have been loaded zero-extended for this approach to be correct.
z_sll(Z_bytecode, LogBytesPerWord, Z_R0); // Multiply by wordSize.
z_lg(Z_tmp_1, 0, Z_bytecode, Z_tmp_1); // Get entry addr.
}
// Dispatch table to use.
load_absolute_address(Z_tmp_1, (address) table); // Z_tmp_1 = table;

// 0 <= Z_bytecode < 256 => Use a 32 bit shift, because it is shorter than sllg.
// Z_bytecode must have been loaded zero-extended for this approach to be correct.
z_sll(Z_bytecode, LogBytesPerWord, Z_R0); // Multiply by wordSize.
z_lg(Z_tmp_1, 0, Z_bytecode, Z_tmp_1); // Get entry addr.

z_br(Z_tmp_1);
}

@ -371,7 +362,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
Register tmp = index; // reuse
z_sllg(index, index, LogBytesPerHeapOop); // Offset into resolved references array.
// Load pointer for resolved_references[] objArray.
z_lg(result, ConstantPool::resolved_references_offset_in_bytes(), result);
z_lg(result, ConstantPool::cache_offset_in_bytes(), result);
z_lg(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
z_lg(result, 0, result); // Load resolved references array itself.
#ifdef ASSERT
@ -386,6 +378,16 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}

// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register cpool, Register offset, Register iklass) {
// int value = *(Rcpool->int_at_addr(which));
// int resolved_klass_index = extract_low_short_from_int(value);
z_llgh(offset, Address(cpool, offset, sizeof(ConstantPool) + 2)); // offset = resolved_klass_index (s390 is big-endian)
z_sllg(offset, offset, LogBytesPerWord); // Convert 'index' to 'offset'
z_lg(iklass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // iklass = cpool->_resolved_klasses
z_lg(iklass, Address(iklass, offset, Array<Klass*>::base_offset_in_bytes()));
}
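
// Usage sketch (assumption, not part of this change; register names are
// hypothetical): template-table code that used to expand the constant-pool
// klass load by hand, roughly
//   z_sllg(Roffset, Rindex, LogBytesPerWord);
//   z_lg(Riklass, Address(Rcpool, Roffset, sizeof(ConstantPool)));
// can now resolve through the resolved-klasses array instead:
//   load_resolved_klass_at_offset(Rcpool, Roffset, Riklass);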

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,9 +48,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool allow_relocation,
bool check_exceptions);

virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);

// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);

@ -58,6 +55,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {}

virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);

void jump_to_entry(address entry, Register Rscratch);

virtual void load_earlyret_value(TosState state);
@ -115,6 +115,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void load_resolved_reference_at_index(Register result, Register index);
// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_offset(Register cpool, Register offset, Register iklass);

// Pop topmost element from stack. It just disappears. Useful if
// consumed previously by access via stackTop().

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1616,6 +1616,8 @@ void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& b
if (branch_target.is_bound()) {
address branch_addr = target(branch_target);
branch_optimized(cond, branch_addr);
} else if (branch_target.is_near()) {
z_brc(cond, branch_target); // Caller assures that the target will be in range for z_brc.
} else {
z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
}
@ -1674,7 +1676,8 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
bool has_sign) {
address branch_origin = pc();
bool x2_imm8 = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
bool is_RelAddr16 = (branch_target.is_bound() &&
bool is_RelAddr16 = branch_target.is_near() ||
(branch_target.is_bound() &&
RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
unsigned int casenum = (len64?2:0)+(has_sign?0:1);

@ -1744,13 +1747,21 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
Label& branch_target,
bool len64,
bool has_sign) {
unsigned int casenum = (len64?2:0)+(has_sign?0:1);
unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);

if (branch_target.is_bound()) {
address branch_addr = target(branch_target);
compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
} else {
{
if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
switch (casenum) {
case 0: z_crj( r1, r2, cond, branch_target); break;
case 1: z_clrj( r1, r2, cond, branch_target); break;
case 2: z_cgrj( r1, r2, cond, branch_target); break;
case 3: z_clgrj(r1, r2, cond, branch_target); break;
default: ShouldNotReachHere(); break;
}
} else {
switch (casenum) {
case 0: z_cr( r1, r2); break;
case 1: z_clr(r1, r2); break;
@ -2741,11 +2752,11 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
BLOCK_COMMENT("lookup_interface_method {");

// Load start of itable entries into itable_entry_addr.
z_llgf(vtable_len, Address(recv_klass, InstanceKlass::vtable_length_offset()));
z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));

// Loop over all itable entries until desired interfaceOop(Rinterface) found.
const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset());
const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());

add2reg_with_index(itable_entry_addr,
vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
@ -5927,8 +5938,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
* @param len register containing number of bytes
* @param table register pointing to CRC table
*/
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool invertCRC) {
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
assert_different_registers(crc, buf, len, table, data);

Label L_mainLoop, L_done;
@ -5938,20 +5948,12 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
z_ltr(len, len);
z_brnh(L_done);

if (invertCRC) {
not_(crc, noreg, false); // ~c
}

bind(L_mainLoop);
z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
add2reg(buf, mainLoop_stepping); // Advance buffer position.
update_byte_crc32(crc, data, table);
z_brct(len, L_mainLoop); // Iterate.

if (invertCRC) {
not_(crc, noreg, false); // ~c
}

bind(L_done);
}

@ -5968,6 +5970,7 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
// c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
// crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
// #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
// Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
@ -5986,17 +5989,12 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true); // ((c >> 16) & 0xff) << 2
rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true); // ((c >> 24) & 0xff) << 2

// Load pre-calculated table values.
// Use columns 4..7 for big-endian.
z_ly(t3, Address(table, t3, (intptr_t)ix0));
// XOR indexed table values to calculate updated crc.
z_ly(t2, Address(table, t2, (intptr_t)ix1));
z_ly(t1, Address(table, t1, (intptr_t)ix2));
z_ly(t0, Address(table, t0, (intptr_t)ix3));

// Calculate new crc from table values.
z_xr(t2, t3);
z_xr(t0, t1);
z_xr(t0, t2); // Now crc contains the final checksum value.
z_xy(t2, Address(table, t3, (intptr_t)ix0));
z_xy(t0, Address(table, t1, (intptr_t)ix2));
z_xr(t0, t2); // Now t0 contains the updated CRC value.
lgr_if_needed(crc, t0);
}

@ -6009,7 +6007,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
* uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!
*/
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) {
Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);

Label L_mainLoop, L_tail;
@ -6024,7 +6023,9 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).

not_(crc, noreg, false); // 1s complement of crc
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}

#if 0
{
@ -6039,7 +6040,7 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc
z_sgfr(len, ctr); // Remaining len after alignment.

update_byteLoop_crc32(crc, buf, ctr, table, data, false);
update_byteLoop_crc32(crc, buf, ctr, table, data);
}
#endif

@ -6047,21 +6048,23 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
z_srag(ctr, len, log_stepping);
z_brnh(L_tail);

z_lrvr(crc, crc); // Revert byte order because we are dealing with big-endian data.
z_lrvr(crc, crc); // Revert byte order because we are dealing with big-endian data.
rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop

BIND(L_mainLoop);
update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
z_brct(ctr, L_mainLoop); // Iterate.
z_brct(ctr, L_mainLoop); // Iterate.

z_lrvr(crc, crc); // Revert byte order back to original.
z_lrvr(crc, crc); // Revert byte order back to original.

// Process last few (<8) bytes of buffer.
BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false);
update_byteLoop_crc32(crc, buf, len, table, data);

not_(crc, noreg, false); // 1s complement of crc
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}

/**
@ -6073,7 +6076,8 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
* uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!
*/
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) {
Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);

Label L_mainLoop, L_tail;
@ -6087,7 +6091,9 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).

not_(crc, noreg, false); // 1s complement of crc
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}

// Check for short (<4 bytes) buffer.
z_srag(ctr, len, log_stepping);
@ -6099,13 +6105,16 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
BIND(L_mainLoop);
update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
z_brct(ctr, L_mainLoop); // Iterate.

z_lrvr(crc, crc); // Revert byte order back to original.

// Process last few (<8) bytes of buffer.
BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false);
update_byteLoop_crc32(crc, buf, len, table, data);

not_(crc, noreg, false); // 1s complement of crc
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}

/**
@ -6115,22 +6124,51 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
* @param table register pointing to CRC table
*/
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) {
Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Register data = t0;

update_byteLoop_crc32(crc, buf, len, table, data, true);
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}

update_byteLoop_crc32(crc, buf, len, table, data);

if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}

void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC) {
assert_different_registers(crc, buf, len, table, tmp);

not_(crc, noreg, false); // ~c
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}

z_llgc(tmp, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
update_byte_crc32(crc, tmp, table);

not_(crc, noreg, false); // ~c
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}

void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
bool invertCRC) {
assert_different_registers(crc, val, table);

if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}

update_byte_crc32(crc, val, table);

if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}

//

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1011,22 +1011,35 @@ class MacroAssembler: public Assembler {
int before = 0, int after = 0) PRODUCT_RETURN;

// Emitters for CRC32 calculation.
// A note on invertCRC:
// Unfortunately, internal representation of crc differs between CRC32 and CRC32C.
// CRC32 holds its current crc value in the externally visible representation.
// CRC32C holds its current crc value in internal format, ready for updating.
// Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
// In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
// The bool invertCRC parameter indicates whether bit-flipping is required before updates.
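// Illustrative sketch (an assumption added for clarity, not part of this change):
// with the external value crc_ext and the internal value crc_int related by
// crc_int == ~crc_ext, an update with invertCRC == true behaves like
//   crc = ~crc;                    // flip on entry: external -> internal
//   crc = table_update(crc, ...);  // kernel works on the internal format
//   crc = ~crc;                    // flip on exit:  internal -> external
// while CRC32C callers pass invertCRC == false and skip both flips.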
private:
void fold_byte_crc32(Register crc, Register table, Register val, Register tmp);
void fold_8bit_crc32(Register crc, Register table, Register tmp);
void update_byte_crc32( Register crc, Register val, Register table);
void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool invertCRC);
Register data);
void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
Register t0, Register t1, Register t2, Register t3);
public:
void update_byte_crc32( Register crc, Register val, Register table);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
bool invertCRC);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC);
void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3);
Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3);
Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3);
Register t0, Register t1, Register t2, Register t3,
bool invertCRC);

// Emitters for BigInteger.multiplyToLen intrinsic
// note: length of result array (zlen) is passed on the stack

@ -1,76 +0,0 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/metaspaceShared.hpp"

// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->klass_part()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();.
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//

//=====================================================================

// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
|
||||
// to be 'vtbl_list_size' instances of the vtable in order to
|
||||
// differentiate between the 'vtable_list_size' original Klass objects.
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
|
||||
void** vtable,
|
||||
char** md_top,
|
||||
char* md_end,
|
||||
char** mc_top,
|
||||
char* mc_end) {
|
||||
|
||||
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
|
||||
*(intptr_t *)(*md_top) = vtable_bytes;
|
||||
*md_top += sizeof(intptr_t);
|
||||
void** dummy_vtable = (void**)*md_top;
|
||||
*vtable = dummy_vtable;
|
||||
*md_top += vtable_bytes;
|
||||
|
||||
// Get ready to generate dummy methods.
|
||||
|
||||
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
|
||||
MacroAssembler* masm = new MacroAssembler(&cb);
|
||||
|
||||
__ unimplemented();
|
||||
}
|
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -73,7 +73,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 const char* error_message) {

  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Klass* klass = SystemDictionary::well_known_klass(klass_id);

  assert(temp_reg != Z_R0 && // Is used as base register!
         temp_reg != noreg && temp2_reg != noreg, "need valid registers!");
@@ -200,10 +200,13 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
          Address(method_temp,
                  NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
  // The following assumes that a method is normally compressed in the vmtarget field.
  __ load_heap_oop(method_temp,
                   Address(method_temp,
                           NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
  __ verify_oop(method_temp);
  __ z_lg(method_temp,
          Address(method_temp,
                  NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
                  NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));

  if (VerifyMethodHandles && !for_compiler_entry) {
    // Make sure recv is already on stack.
@@ -371,7 +374,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,

  Address member_clazz(   member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
  Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
  Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
  Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
  Address vmtarget_method(Z_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
  Register temp1_recv_klass = temp1;

  if (iid != vmIntrinsics::_linkToStatic) {
@@ -424,7 +428,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ z_lg(Z_method, member_vmtarget);
      __ load_heap_oop(Z_method, member_vmtarget);
      __ z_lg(Z_method, vmtarget_method);
      method_is_live = true;
      break;

@@ -432,7 +437,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ z_lg(Z_method, member_vmtarget);
      __ load_heap_oop(Z_method, member_vmtarget);
      __ z_lg(Z_method, vmtarget_method);
      method_is_live = true;
      break;

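The functional change in this file is one extra level of indirection when resolving a MethodHandle target: MemberName no longer holds the Method* directly, it points at a ResolvedMethodName whose vmtarget field does. A hedged C++ sketch of the load sequence the new code emits (the struct layout is illustrative, not HotSpot's real object model):

struct Method;                                    // opaque metadata, stand-in
struct ResolvedMethodName { Method* vmtarget; };  // new holder object
struct MemberName         { ResolvedMethodName* method; };

Method* resolve_vmtarget(const MemberName* mn) {
  // Old code: one z_lg from MemberName::vmtarget.
  // New code: load_heap_oop of MemberName::method, then z_lg of
  // ResolvedMethodName::vmtarget -- two dependent loads.
  return mn->method->vmtarget;
}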
@@ -1,5 +1,5 @@
//
// Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2016 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@@ -1562,7 +1562,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}

// Vector ideal reg.
const int Matcher::vector_ideal_reg(int size) {
const uint Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8 && size == 8, "");
  return Op_RegL;
}
@@ -1577,7 +1577,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

const int Matcher::vector_shift_count_ideal_reg(int size) {
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}
@@ -6768,6 +6768,7 @@ instruct sllI_reg_imm(iRegI dst, iRegI src, immI nbits) %{
  format %{ "SLL $dst,$src,$nbits\t# use RISC-like SLLG also for int" %}
  ins_encode %{
    int Nbit = $nbits$$constant;
    assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
    __ z_sllg($dst$$Register, $src$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
  %}
  ins_pipe(pipe_class_dummy);
@@ -6841,6 +6842,7 @@ instruct sraI_reg_imm(iRegI dst, immI src, flagsReg cr) %{
  format %{ "SRA $dst,$src" %}
  ins_encode %{
    int Nbit = $src$$constant;
    assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
    __ z_sra($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
  %}
  ins_pipe(pipe_class_dummy);
@@ -6893,6 +6895,7 @@ instruct srlI_reg_imm(iRegI dst, immI src) %{
  format %{ "SRL $dst,$src" %}
  ins_encode %{
    int Nbit = $src$$constant;
    assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
    __ z_srl($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
  %}
  ins_pipe(pipe_class_dummy);

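The new asserts encode the Java shift semantics the ideal graph is expected to have normalized already: int shift counts are taken mod 32 (JLS 15.19), so masking with BitsPerJavaInteger - 1 is a no-op on well-formed input and the emitters apply it only defensively. In plain C++ terms:

#include <cstdint>

// Java's int shifts use only the low 5 bits of the count (JLS 15.19);
// BitsPerJavaInteger - 1 == 31, matching the emitters' mask.
int32_t java_shl(int32_t x, int32_t nbits) { return x << (nbits & 31); }
int32_t java_shr(int32_t x, int32_t nbits) {
  return x >> (nbits & 31);  // assumes arithmetic right shift, as on mainstream compilers
}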
@@ -1,6 +1,6 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -623,26 +623,6 @@ class StubGenerator: public StubCodeGenerator {
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
#endif

  //----------------------------------------------------------------------
  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
  //
  // Arguments:
  //   trapping PC: ??
  //
  // Results:
  //   Posts an asynchronous exception, skips the trapping instruction.
  //
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    {
      address start = __ pc();
      __ unimplemented("StubRoutines::handler_for_unsafe_access", 86);
      return start;
    }
  }

  // Support for uint StubRoutine::zarch::partial_subtype_check(Klass
  // sub, Klass super);
  //
@@ -2330,26 +2310,25 @@ class StubGenerator: public StubCodeGenerator {
  }



  // Arguments:
  //   Z_ARG1 - int   crc
  //   Z_ARG2 - byte* buf
  //   Z_ARG3 - int   length (of buffer)
  //
  // Result:
  //   Z_RET  - int   crc result
  //
  // Compute CRC32 function.
  address generate_CRC32_updateBytes(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).
  /**
   * Arguments:
   *
   * Inputs:
   *   Z_ARG1 - int   crc
   *   Z_ARG2 - byte* buf
   *   Z_ARG3 - int   length (of buffer)
   *
   * Result:
   *   Z_RET  - int   crc result
   **/
  // Compute CRC function (generic, for all polynomials).
  void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {

    // Arguments to kernel_crc32:
    Register crc     = Z_ARG1;  // Current checksum, preset by caller or result from previous call, int.
    Register data    = Z_ARG2;  // source byte array
    Register dataLen = Z_ARG3;  // #bytes to process, int
    Register table   = Z_ARG4;  // crc table address
    // Register table = Z_ARG4; // crc table address. Preloaded and passed in by caller.
    const Register t0 = Z_R10;  // work reg for kernel* emitters
    const Register t1 = Z_R11;  // work reg for kernel* emitters
    const Register t2 = Z_R12;  // work reg for kernel* emitters
@@ -2361,16 +2340,50 @@ class StubGenerator: public StubCodeGenerator {
    // Crc used as int.
    __ z_llgfr(dataLen, dataLen);

    StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);

    __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill registers.
    __ z_stmg(Z_R10, Z_R13, 1*8, Z_SP);  // Spill regs 10..13 to make them available as work registers.
    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3);
    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, invertCRC);
    __ z_lmg(Z_R10, Z_R13, 1*8, Z_SP);   // Restore regs 10..13 from stack.
    __ resize_frame(+(6*8), Z_R0, true); // Remove the spill area from the frame again.

    __ z_llgfr(Z_RET, crc);  // Updated crc is function result. No copying required, just zero upper 32 bits.
    __ z_br(Z_R14);          // Result already in Z_RET == Z_ARG1.
  }


  // Compute CRC32 function.
  address generate_CRC32_updateBytes(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", name);

    BLOCK_COMMENT("CRC32_updateBytes {");
    Register table = Z_ARG4;  // crc32 table address.
    StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);

    generate_CRC_updateBytes(name, table, true);
    BLOCK_COMMENT("} CRC32_updateBytes");

    return __ addr_at(start_off);
  }


  // Compute CRC32C function.
  address generate_CRC32C_updateBytes(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", name);

    BLOCK_COMMENT("CRC32C_updateBytes {");
    Register table = Z_ARG4;  // crc32c table address.
    StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);

    generate_CRC_updateBytes(name, table, false);
    BLOCK_COMMENT("} CRC32C_updateBytes");

    return __ addr_at(start_off);
  }
@@ -2441,9 +2454,13 @@ class StubGenerator: public StubCodeGenerator {
    // Entry points that are platform specific.

    if (UseCRC32Intrinsics) {
      // We have no CRC32 table on z/Architecture.
      StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
      StubRoutines::_crc_table_adr     = (address)StubRoutines::zarch::_crc_table;
      StubRoutines::_updateBytesCRC32  = generate_CRC32_updateBytes("CRC32_updateBytes");
    }

    if (UseCRC32CIntrinsics) {
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
    }

    // Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
@@ -2461,8 +2478,6 @@ class StubGenerator: public StubCodeGenerator {
    StubRoutines::_throw_IncompatibleClassChangeError_entry = generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
    StubRoutines::_throw_NullPointerException_at_call_entry = generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);

    StubRoutines::zarch::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();

    // Support for verify_oop (must happen after universe_init).
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();

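The refactoring above folds the CRC32 and CRC32C stubs into one generic emitter that differs only in the table it is handed and in whether the crc register is inverted around the kernel (invertCRC). A minimal C++ sketch of the byte-wise, table-driven computation such a kernel performs — the real kernel_crc32_1word processes a word at a time, and the 256-entry table is assumed precomputed for the chosen polynomial:

#include <cstdint>
#include <cstddef>

// Byte-at-a-time, reflected table-driven CRC; illustrative only.
uint32_t crc_update_bytes(uint32_t crc, const uint8_t* buf, size_t len,
                          const uint32_t table[256], bool invertCRC) {
  if (invertCRC) crc = ~crc;              // pre-invert (CRC32 convention)
  while (len-- > 0) {
    crc = (crc >> 8) ^ table[(crc ^ *buf++) & 0xffu];
  }
  if (invertCRC) crc = ~crc;              // post-invert
  return crc;
}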
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,11 @@ class zarch {
  };

 private:
  static address _handler_for_unsafe_access_entry;

  static int _atomic_memory_operation_lock;

  static address _partial_subtype_check;
  static juint   _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
  static juint   _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];

  // Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
  static address _trot_table_addr;
@@ -91,11 +90,11 @@ class zarch {
  static int  atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
  static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }

  static address handler_for_unsafe_access_entry() { return _handler_for_unsafe_access_entry; }

  static address partial_subtype_check() { return _partial_subtype_check; }

  static void generate_load_absolute_address(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
  static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
  static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);

  // Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
  static void generate_load_trot_table_addr(MacroAssembler* masm, Register table);

@@ -1,6 +1,6 @@
/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
 * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -642,13 +642,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
  return entry;
}

// Unused, should never pass by.
address TemplateInterpreterGenerator::generate_continuation_for (TosState state) {
  address entry = __ pc();
  __ should_not_reach_here();
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for (TosState state, int step, size_t index_size) {
  address entry = __ pc();

@@ -683,6 +676,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for (TosState state,
  __ z_llgc(size, Address(cache, offset, flags_offset+(sizeof(size_t)-1)));
  __ z_sllg(size, size, Interpreter::logStackElementSize); // Each argument size in bytes.
  __ z_agr(Z_esp, size);                                   // Pop arguments.

  __ check_and_handle_popframe(Z_thread);
  __ check_and_handle_earlyret(Z_thread);

  __ dispatch_next(state, step);

  BLOCK_COMMENT("} return_entry");
@@ -1186,11 +1183,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // native_call: assert that mdo == NULL
  const bool check_for_mdo = !native_call DEBUG_ONLY(|| native_call);
  if (ProfileInterpreter && check_for_mdo) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    Label get_continue;

    __ load_and_test_long(Rmdp, method_(method_data));
@@ -1933,8 +1925,11 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  return entry_point;
}

// Method entry for static native methods:
//   int java.util.zip.CRC32.update(int crc, int b)

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

  if (UseCRC32Intrinsics) {
@@ -1964,7 +1959,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
    __ z_llgf(crc, 2 * wordSize, argP);  // Current crc state, zero extend to 64 bit to have a clean register.

    StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
    __ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1);
    __ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1, true);

    // Restore caller sp for c2i case.
    __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.
@@ -1983,9 +1978,11 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
}


// Method entry for static native methods:
//   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
//   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32Intrinsics) {
@@ -2020,10 +2017,10 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
      // data = buf + off
      BLOCK_COMMENT("CRC32_updateByteBuffer {");
      __ z_llgf(crc,    5*wordSize, argP);  // current crc state
      __ z_lg(data,     3*wordSize, argP);  // start of byte buffer
      __ z_lg(data,     3*wordSize, argP);  // start of byte buffer
      __ z_agf(data,    2*wordSize, argP);  // Add byte buffer offset.
      __ z_lgf(dataLen, 1*wordSize, argP);  // #bytes to process
    } else {  // Used for "updateBytes update".
    } else {  // Used for "updateBytes update".
      // crc @ (SP + 4W) (32bit)
      // buf @ (SP + 3W) (64bit ptr to byte array)
      // off @ (SP + 2W) (32bit)
@@ -2031,7 +2028,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
      // data = buf + off + base_offset
      BLOCK_COMMENT("CRC32_updateBytes {");
      __ z_llgf(crc,    4*wordSize, argP);  // current crc state
      __ z_lg(data,     3*wordSize, argP);  // start of byte buffer
      __ z_lg(data,     3*wordSize, argP);  // start of byte buffer
      __ z_agf(data,    2*wordSize, argP);  // Add byte buffer offset.
      __ z_lgf(dataLen, 1*wordSize, argP);  // #bytes to process
      __ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
@@ -2041,7 +2038,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI

      __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill registers.
      __ z_stmg(t0, t3, 1*8, Z_SP);        // Spill regs 10..13 to make them available as work registers.
      __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3);
      __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, true);
      __ z_lmg(t0, t3, 1*8, Z_SP);         // Restore regs 10..13 from stack.

      // Restore caller sp for c2i case.
@@ -2060,8 +2057,79 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
  return NULL;
}

// Not supported

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(           int crc, byte[] b,  int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native.
 * CRC32C also uses an "end" index instead of the length parameter CRC32 uses.
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32CIntrinsics) {
    uint64_t entry_off = __ offset();

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters.
    // Z_esp is the caller's operand stack pointer, i.e. it points to the parameters.
    const Register argP    = Z_esp;
    const Register crc     = Z_ARG1;  // crc value
    const Register data    = Z_ARG2;  // address of java byte array
    const Register dataLen = Z_ARG3;  // source data len
    const Register table   = Z_ARG4;  // address of crc32 table
    const Register t0      = Z_R10;   // work reg for kernel* emitters
    const Register t1      = Z_R11;   // work reg for kernel* emitters
    const Register t2      = Z_R12;   // work reg for kernel* emitters
    const Register t3      = Z_R13;   // work reg for kernel* emitters

    // Arguments are reversed on the java expression stack.
    // Calculate address of start element.
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateByteBuffer direct".
      // crc     @ (SP + 5W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to long array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off
      BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
      __ z_llgf(crc,    5*wordSize, argP);  // current crc state
      __ z_lg(data,     3*wordSize, argP);  // start of byte buffer
      __ z_agf(data,    2*wordSize, argP);  // Add byte buffer offset.
      __ z_lgf(dataLen, 1*wordSize, argP);  // #bytes to process, calculated as
      __ z_sgf(dataLen, Address(argP, 2*wordSize)); // (end_index - offset)
    } else {  // Used for "updateBytes update".
      // crc     @ (SP + 4W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to byte array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off + base_offset
      BLOCK_COMMENT("CRC32C_updateBytes {");
      __ z_llgf(crc,    4*wordSize, argP);  // current crc state
      __ z_lg(data,     3*wordSize, argP);  // start of byte buffer
      __ z_agf(data,    2*wordSize, argP);  // Add byte buffer offset.
      __ z_lgf(dataLen, 1*wordSize, argP);  // #bytes to process, calculated as
      __ z_sgf(dataLen, Address(argP, 2*wordSize)); // (end_index - offset)
      __ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
    }

    StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);

    __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill registers.
    __ z_stmg(t0, t3, 1*8, Z_SP);        // Spill regs 10..13 to make them available as work registers.
    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, false);
    __ z_lmg(t0, t3, 1*8, Z_SP);         // Restore regs 10..13 from stack.

    // Restore caller sp for c2i case.
    __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.

    __ z_br(Z_R14);

    BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
    return __ addr_at(entry_off);
  }

  return NULL;
}

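The only arithmetic that distinguishes the CRC32C entry from the CRC32 one is visible in the z_lgf/z_sgf pair: the Java method passes an exclusive end index, so the stub computes the length itself. Sketched in C++, with base_offset standing in for arrayOopDesc::base_offset_in_bytes(T_BYTE):

#include <cstdint>
#include <cstddef>

// Illustration of the operand math in generate_CRC32C_updateBytes_entry().
const uint8_t* crc32c_data(const uint8_t* buf, int32_t off, ptrdiff_t base_offset) {
  return buf + off + base_offset;   // data = buf + off + base_offset
}
int32_t crc32c_len(int32_t off, int32_t end) {
  return end - off;                 // dataLen = end_index - offset
}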
@@ -1,6 +1,6 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -3466,7 +3466,7 @@ void TemplateTable::invokevirtual_helper(Register index,
  __ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes()));
  __ mem2reg_opt(method,
                 Address(Z_tmp_2, index,
                         InstanceKlass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
                         Klass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
  __ profile_arguments_type(Z_ARG4, method, Z_ARG5, true);
  __ jump_from_interpreted(method, Z_ARG4);
  BLOCK_COMMENT("} invokevirtual_helper");
@@ -3708,7 +3708,7 @@ void TemplateTable::_new() {
  __ z_sllg(offset, offset, LogBytesPerWord); // Convert to offset.
  // Get InstanceKlass.
  Register iklass = cpool;
  __ z_lg(iklass, Address(cpool, offset, sizeof(ConstantPool)));
  __ load_resolved_klass_at_offset(cpool, offset, iklass);

  // Make sure klass is initialized & doesn't have finalizer.
  // Make sure klass is fully initialized.
@@ -3895,7 +3895,7 @@ void TemplateTable::checkcast() {

  __ z_lgr(Z_ARG4, Z_tos);  // Save receiver.
  __ z_sllg(index, index, LogBytesPerWord);  // index2bytes for addressing
  __ mem2reg_opt(klass, Address(cpool, index, sizeof(ConstantPool)));
  __ load_resolved_klass_at_offset(cpool, index, klass);

  __ bind(resolved);

@@ -3969,8 +3969,7 @@ void TemplateTable::instanceof() {

  __ load_klass(subklass, Z_tos);
  __ z_sllg(index, index, LogBytesPerWord);  // index2bytes for addressing
  __ mem2reg_opt(klass,
                 Address(cpool, index, sizeof(ConstantPool)));
  __ load_resolved_klass_at_offset(cpool, index, klass);

  __ bind(resolved);

@@ -111,13 +111,23 @@ void VM_Version::initialize() {
    ContendedPaddingWidth = cache_line_size;
  }

  // On z/Architecture, the CRC32 intrinsics had to be implemented "by hand".
  // They cannot be based on the CHECKSUM instruction which has been there
  // since the very beginning (of z/Architecture). It computes "some kind of" a checksum
  // which has nothing to do with the CRC32 algorithm.
  // On z/Architecture, the CRC32/CRC32C intrinsics are implemented "by hand".
  // TODO: Provide implementation based on the vector instructions available from z13.
  // Note: The CHECKSUM instruction, which has been there since the very beginning
  //       (of z/Architecture), computes "some kind of" a checksum.
  //       It has nothing to do with the CRC32 algorithm.
  if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
  }
  if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
  }

  // TODO: Provide implementation.
  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  // On z/Architecture, we take UseAES as the general switch to enable/disable the AES intrinsics.
  // The specific, and yet to be defined, switches UseAESxxxIntrinsics will then be set
@@ -195,11 +205,6 @@ void VM_Version::initialize() {
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
  }
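The FLAG_IS_DEFAULT/FLAG_SET_DEFAULT pairing above is the usual HotSpot ergonomics idiom: flip a flag's default only when the user has not set it explicitly on the command line. Modeled in plain C++ (the macros are HotSpot's; the struct here is a stand-in, not the real flag machinery):

struct Flag { bool value; bool set_by_user; };

// Same intent as: if (FLAG_IS_DEFAULT(f)) FLAG_SET_DEFAULT(f, true);
void apply_crc_ergonomics(Flag& UseCRC32Intrinsics, Flag& UseCRC32CIntrinsics) {
  if (!UseCRC32Intrinsics.set_by_user)  UseCRC32Intrinsics.value  = true;
  if (!UseCRC32CIntrinsics.set_by_user) UseCRC32CIntrinsics.value = true;
}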
@@ -83,7 +83,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  __ load_klass(rcvr_klass, Z_ARG1);

  // Set method (in case of interpreted method), and destination address.
  int entry_offset = in_bytes(InstanceKlass::vtable_start_offset()) +
  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
                     vtable_index * vtableEntry::size_in_bytes();

#ifndef PRODUCT
@@ -96,8 +96,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    // worst case actual size
    padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true);

    assert(Immediate::is_uimm12(in_bytes(InstanceKlass::vtable_length_offset())), "disp too large");
    __ z_cl(vtable_idx, in_bytes(InstanceKlass::vtable_length_offset()), rcvr_klass);
    assert(Immediate::is_uimm12(in_bytes(Klass::vtable_length_offset())), "disp too large");
    __ z_cl(vtable_idx, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
    __ z_brl(L);
    __ z_lghi(Z_ARG3, vtable_index);  // Debug code, don't optimize.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false);
@@ -187,11 +187,11 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
  __ load_klass(rcvr_klass, Z_ARG1);

  // Load start of itable entries into itable_entry.
  __ z_llgf(vtable_len, Address(rcvr_klass, InstanceKlass::vtable_length_offset()));
  __ z_llgf(vtable_len, Address(rcvr_klass, Klass::vtable_length_offset()));
  __ z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));

  // Loop over all itable entries until desired interfaceOop(Rinterface) found.
  const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
  const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset());
  // Count unused bytes.
  start_pc = __ pc();
  __ add2reg_with_index(itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), rcvr_klass, vtable_len);

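The change in this file is purely where the accessors live (Klass instead of InstanceKlass, after the vtable moved up the class hierarchy); the dispatch arithmetic is unchanged. Sketched as plain offset math:

#include <cstddef>

// entry_offset = vtable_start_offset + vtable_index * entry_size, as in
// create_vtable_stub(); the concrete byte values come from
// Klass::vtable_start_offset() and vtableEntry::size_in_bytes() at runtime.
size_t vtable_entry_offset(size_t vtable_start, size_t entry_size, int index) {
  return vtable_start + (size_t)index * entry_size;
}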
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -270,9 +270,7 @@ void AbstractInterpreter::layout_activation(Method* method,
    assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
    assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
  }
#ifdef _LP64
  assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
#endif

  *interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
  *interpreter_frame->register_addr(Llocals) = (intptr_t) locals;
@@ -283,9 +281,6 @@ void AbstractInterpreter::layout_activation(Method* method,
  *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
  // save the mirror in the interpreter frame
  *interpreter_frame->interpreter_frame_mirror_addr() = method->method_holder()->java_mirror();
#ifdef FAST_DISPATCH
  *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
#endif

#ifdef ASSERT
  BasicObjectLock* mp = (BasicObjectLock*)monitors;

@@ -34,10 +34,6 @@ class Bytes: AllStatic {

  // Can I count on address always being a pointer to an unsigned char? Yes.

  // Returns true if the byte ordering used by Java is different from the native byte ordering
  // of the underlying machine. For example, true for Intel x86, false for Solaris on SPARC.
  static inline bool is_Java_byte_ordering_different() { return false; }

  // Thus, a swap between native and Java ordering is always a no-op:
  static inline u2 swap_u2(u2 x) { return x; }
  static inline u4 swap_u4(u4 x) { return x; }

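SPARC is big-endian, which is also the byte order Java class files and the JVM specification use, so the swap helpers can be identity functions. For contrast, a little-endian port needs a real byte reverse:

#include <cstdint>

static inline uint32_t swap_u4_sparc(uint32_t x) { return x; }  // no-op: orders already match

static inline uint32_t swap_u4_little_endian(uint32_t x) {      // what an x86 port needs
  return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
         ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}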
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -22,10 +22,4 @@
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FpuStackSim.hpp"
#include "c1/c1_FrameMap.hpp"
#include "utilities/array.hpp"
#include "utilities/ostream.hpp"

// No FPU stack on SPARC
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -159,21 +159,12 @@

 public:

#ifdef _LP64
  static LIR_Opr as_long_opr(Register r) {
    return as_long_single_opr(r);
  }
  static LIR_Opr as_pointer_opr(Register r) {
    return as_long_single_opr(r);
  }
#else
  static LIR_Opr as_long_opr(Register r) {
    return as_long_pair_opr(r);
  }
  static LIR_Opr as_pointer_opr(Register r) {
    return as_opr(r);
  }
#endif
  static LIR_Opr as_float_opr(FloatRegister r) {
    return LIR_OprFact::single_fpu(r->encoding());
  }

@@ -556,11 +556,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
    // guarantee that 32-bit loads always sign extended but that isn't
    // true and since sign extension isn't free, it would impose a
    // slight cost.
#ifdef _LP64
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
@@ -576,12 +574,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
@@ -614,11 +607,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
@@ -711,7 +700,6 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
    case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
    case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
    case T_LONG  :
#ifdef _LP64
      if (unaligned || PatchALot) {
        // Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
        assert(G3_scratch != base, "can't handle this");
@@ -722,11 +710,6 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
      } else {
        __ stx(from_reg->as_register_lo(), base, offset);
      }
#else
      assert(Assembler::is_simm13(offset + 4), "must be");
      __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
      __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
      break;
    case T_ADDRESS:
    case T_METADATA:
@@ -778,12 +761,7 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stx(from_reg->as_register_lo(), base, disp);
#else
      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
      __ std(from_reg->as_register_hi(), base, disp);
#endif
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
@@ -826,40 +804,22 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
    case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
    case T_LONG  :
      if (!unaligned && !PatchALot) {
#ifdef _LP64
        __ ldx(base, offset, to_reg->as_register_lo());
#else
        assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
               "must be sequential");
        __ ldd(base, offset, to_reg->as_register_hi());
#endif
      } else {
#ifdef _LP64
        assert(base != to_reg->as_register_lo(), "can't handle this");
        assert(O7 != to_reg->as_register_lo(), "can't handle this");
        __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
        __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
        __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
        __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
        if (base == to_reg->as_register_lo()) {
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
          __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
        } else {
          __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
        }
#endif
      }
      break;
    case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
    case T_ADDRESS:
#ifdef _LP64
      if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
        __ lduw(base, offset, to_reg->as_register());
        __ decode_klass_not_null(to_reg->as_register());
      } else
#endif
      {
        __ ld_ptr(base, offset, to_reg->as_register());
      }
@@ -921,13 +881,7 @@ int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(base, disp, to_reg->as_register_lo());
#else
      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
             "must be sequential");
      __ ldd(base, disp, to_reg->as_register_hi());
#endif
      break;
    default      : ShouldNotReachHere();
  }
@@ -1107,16 +1061,9 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
      jlong con = c->as_jlong();

      if (to_reg->is_double_cpu()) {
#ifdef _LP64
        __ set(con, to_reg->as_register_lo());
#else
        __ set(low(con),  to_reg->as_register_lo());
        __ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
      } else if (to_reg->is_single_cpu()) {
        __ set(con, to_reg->as_register());
#endif
      } else {
        ShouldNotReachHere();
        assert(to_reg->is_double_fpu(), "wrong register kind");
@@ -1190,12 +1137,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
        __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
      } else {
        assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
        __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
        __ set(low(jlong_cast(c->as_jdouble())),  to_reg->as_register_lo());
        __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
      }

    }
@@ -1366,22 +1308,10 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
#ifdef _LP64
      __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
#else
      assert(to_reg->is_double_cpu() &&
             from_reg->as_register_hi() != to_reg->as_register_lo() &&
             from_reg->as_register_lo() != to_reg->as_register_hi(),
             "should both be long and not overlap");
      // long to long moves
      __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
      __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
#endif
#ifdef _LP64
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register_lo());
#endif
    } else {
      // int to int moves
      __ mov(from_reg->as_register(), to_reg->as_register());
@@ -1460,21 +1390,6 @@ void LIR_Assembler::return_op(LIR_Opr result) {
  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }
  // the poll may need a register so just pick one that isn't the return register
#if defined(TIERED) && !defined(_LP64)
  if (result->type_field() == LIR_OprDesc::long_type) {
    // Must move the result to G1
    // Must leave proper result in O0,O1 and G1 (TIERED only)
    __ sllx(I0, 32, G1);  // Shift bits into high G1
    __ srl (I1, 0, I1);   // Zero extend O1 (harmless?)
    __ or3 (I1, G1, G1);  // OR 64 bits into G1
#ifdef ASSERT
    // mangle it so any problems will show up
    __ set(0xdeadbeef, I0);
    __ set(0xdeadbeef, I1);
#endif
  }
#endif // TIERED
  __ set((intptr_t)os::get_polling_page(), L0);
  __ relocate(relocInfo::poll_return_type);
  __ ld_ptr(L0, 0, G0);
@@ -1568,23 +1483,11 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
      Register xhi = opr1->as_register_hi();
      if (opr2->is_constant() && opr2->as_jlong() == 0) {
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
#ifdef _LP64
        __ orcc(xhi, G0, G0);
#else
        __ orcc(xhi, xlo, G0);
#endif
      } else if (opr2->is_register()) {
        Register ylo = opr2->as_register_lo();
        Register yhi = opr2->as_register_hi();
#ifdef _LP64
        __ cmp(xlo, ylo);
#else
        __ subcc(xlo, ylo, xlo);
        __ subccc(xhi, yhi, xhi);
        if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
          __ orcc(xhi, xlo, G0);
        }
#endif
      } else {
        ShouldNotReachHere();
      }
@@ -1612,13 +1515,7 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
#ifdef _LP64
    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
#else
    __ lcmp(left->as_register_hi(), left->as_register_lo(),
            right->as_register_hi(), right->as_register_lo(),
            dst->as_register());
#endif
  } else {
    ShouldNotReachHere();
  }
@@ -1656,12 +1553,11 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
    ShouldNotReachHere();
  }
  Label skip;
#ifdef _LP64
  if (type == T_INT) {
    __ br(acond, false, Assembler::pt, skip);
  } else
#endif
  } else {
    __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
  }
  if (opr1->is_constant() && opr1->type() == T_INT) {
    Register dest = result->as_register();
    if (Assembler::is_simm13(opr1->as_jint())) {
@@ -1720,7 +1616,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
    }

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    Register dst_lo = dest->as_register_lo();
    Register op1_lo = left->as_pointer_register();
    Register op2_lo = right->as_pointer_register();
@@ -1736,28 +1631,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr

      default: ShouldNotReachHere();
    }
#else
    Register op1_lo = left->as_register_lo();
    Register op1_hi = left->as_register_hi();
    Register op2_lo = right->as_register_lo();
    Register op2_hi = right->as_register_hi();
    Register dst_lo = dest->as_register_lo();
    Register dst_hi = dest->as_register_hi();

    switch (code) {
      case lir_add:
        __ addcc(op1_lo, op2_lo, dst_lo);
        __ addc (op1_hi, op2_hi, dst_hi);
        break;

      case lir_sub:
        __ subcc(op1_lo, op2_lo, dst_lo);
        __ subc (op1_hi, op2_hi, dst_hi);
        break;

      default: ShouldNotReachHere();
    }
#endif
  } else {
    assert (right->is_single_cpu(), "Just Checking");

@@ -1852,23 +1725,14 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
      int simm13 = (int)c;
      switch (code) {
        case lir_logic_and:
#ifndef _LP64
          __ and3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_or:
#ifndef _LP64
          __ or3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
          break;

        case lir_logic_xor:
#ifndef _LP64
          __ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
          __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
          break;

@@ -1886,7 +1750,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
        default: ShouldNotReachHere();
      }
    } else {
#ifdef _LP64
      Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
                                                                        left->as_register_lo();
      Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
@@ -1898,26 +1761,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
        case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
        default: ShouldNotReachHere();
      }
#else
      switch (code) {
        case lir_logic_and:
          __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        case lir_logic_or:
          __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        case lir_logic_xor:
          __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
          __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
          break;

        default: ShouldNotReachHere();
      }
#endif
    }
  }
}
@@ -1975,12 +1818,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

#ifdef _LP64
  // higher 32bits must be null
  __ sra(dst_pos, 0, dst_pos);
  __ sra(src_pos, 0, src_pos);
  __ sra(length, 0, length);
#endif

  // set up the arraycopy stub information
  ArrayCopyStub* stub = op->stub();
@@ -2316,7 +2157,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
@@ -2325,7 +2165,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
        default: ShouldNotReachHere();
      }
    } else
#endif
      switch (code) {
        case lir_shl:  __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
        case lir_shr:  __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
@@ -2333,27 +2172,17 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
        default: ShouldNotReachHere();
      }
  } else {
#ifdef _LP64
    switch (code) {
      case lir_shl:  __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_shr:  __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
      default: ShouldNotReachHere();
    }
#else
    switch (code) {
      case lir_shl:  __ lshl  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      case lir_shr:  __ lshr  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
      default: ShouldNotReachHere();
    }
#endif
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
  if (left->type() == T_OBJECT) {
    count = count & 63;  // shouldn't shift by more than sizeof(intptr_t)
    Register l = left->as_register();
@@ -2366,7 +2195,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr de
    }
    return;
  }
#endif

  if (dest->is_single_cpu()) {
    count = count & 0x1F; // Java spec
@@ -2425,7 +2253,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
         op->tmp4()->as_register() == O1 &&
         op->klass()->as_register() == G5, "must be");

  LP64_ONLY( __ signx(op->len()->as_register()); )
  __ signx(op->len()->as_register());
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
@@ -2748,7 +2576,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
    Register new_value_hi = op->new_value()->as_register_hi();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
#ifdef _LP64
    __ mov(cmp_value_lo, t1);
    __ mov(new_value_lo, t2);
    // perform the compare and swap operation
@@ -2756,23 +2583,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    __ cmp(t1, t2);
#else
    // move high and low halves of long values into single registers
    __ sllx(cmp_value_hi, 32, t1);         // shift high half into temp reg
    __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
    __ or3(t1, cmp_value_lo, t1);          // t1 holds 64-bit compare value
    __ sllx(new_value_hi, 32, t2);
    __ srl(new_value_lo, 0, new_value_lo);
    __ or3(t2, new_value_lo, t2);          // t2 holds 64-bit value to swap
    // perform the compare and swap operation
    __ casx(addr, t1, t2);
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    // Produce icc flag for 32bit.
    __ sub(t1, t2, t2);
    __ srlx(t2, 32, t1);
    __ orcc(t2, t1, G0);
#endif
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value = op->cmp_value()->as_register();
@@ -2914,13 +2724,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif
  metadata2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
@@ -3200,12 +3005,7 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
    assert (left->is_double_cpu(), "Must be a long");
    Register Rlow = left->as_register_lo();
    Register Rhi  = left->as_register_hi();
#ifdef _LP64
    __ sub(G0, Rlow, dest->as_register_lo());
#else
    __ subcc(G0, Rlow, dest->as_register_lo());
    __ subc (G0, Rhi,  dest->as_register_hi());
#endif
  }
}

@@ -3245,9 +3045,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
  ShouldNotReachHere();
#endif

  NEEDS_CLEANUP;
  if (type == T_LONG) {
@@ -3491,31 +3289,6 @@ void LIR_Assembler::peephole(LIR_List* lir) {
        inst->insert_before(i + 1, delay_op);
        i++;
      }

#if defined(TIERED) && !defined(_LP64)
      // fixup the return value from G1 to O0/O1 for long returns.
      // It's done here instead of in LIRGenerator because there's
      // such a mismatch between the single reg and double reg
      // calling convention.
      LIR_OpJavaCall* callop = op->as_OpJavaCall();
      if (callop->result_opr() == FrameMap::out_long_opr) {
        LIR_OpJavaCall* call;
        LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
        for (int a = 0; a < arguments->length(); a++) {
          arguments[a] = callop->arguments()[a];
        }
        if (op->code() == lir_virtual_call) {
          call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                    callop->vtable_offset(), arguments, callop->info());
        } else {
          call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                    callop->addr(), arguments, callop->info());
        }
        inst->at_put(i - 1, call);
        inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
                                               T_LONG, lir_patch_none, NULL));
      }
#endif
      break;
    }
  }
@@ -3533,14 +3306,10 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
  } else if (data->is_oop()) {
    Register obj = data->as_register();
    Register narrow = tmp->as_register();
#ifdef _LP64
    assert(UseCompressedOops, "swap is 32bit only");
    __ encode_heap_oop(obj, narrow);
    __ swap(as_Address(addr), narrow);
    __ decode_heap_oop(narrow, obj);
#else
    __ swap(as_Address(addr), obj);
#endif
  } else {
    ShouldNotReachHere();
  }

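With the 32-bit path gone, lir_cas_long reduces to the straightforward 64-bit casx sequence: copy the compare and new values into temps, CAS, then derive the condition code by comparing what came back. The same contract in portable C++, with std::atomic used purely for illustration — the generated code is the raw instruction, not a library call:

#include <atomic>
#include <cstdint>

// casx writes the old memory value into the "new value" register; success
// is then just (observed == expected), which is what __ cmp(t1, t2) tests.
bool cas_long(std::atomic<int64_t>& addr, int64_t cmp_value, int64_t new_value) {
  int64_t expected = cmp_value;
  return addr.compare_exchange_strong(expected, new_value);
}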
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -61,11 +61,7 @@
                              ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);

  enum {
#ifdef _LP64
    _call_stub_size = 68,
#else
    _call_stub_size = 20,
#endif // _LP64
    _call_aot_stub_size = 0,
    _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
    _deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -70,7 +70,7 @@ LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::Oexcepti
|
||||
LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::Oissuing_pc_opr; }
|
||||
LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); }
|
||||
LIR_Opr LIRGenerator::syncTempOpr() { return new_register(T_OBJECT); }
|
||||
LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); }
|
||||
LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(T_LONG); }
|
||||
|
||||
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
|
||||
LIR_Opr opr;
|
||||
@ -215,13 +215,11 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
|
||||
}
|
||||
}
|
||||
} else {
|
||||
#ifdef _LP64
|
||||
if (index_opr->type() == T_INT) {
|
||||
LIR_Opr tmp = new_register(T_LONG);
|
||||
__ convert(Bytecodes::_i2l, index_opr, tmp);
|
||||
index_opr = tmp;
|
||||
}
|
||||
#endif
|
||||
|
||||
base_opr = new_pointer_register();
|
||||
assert (index_opr->is_register(), "Must be register");
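The i2l conversion above is required because a 32-bit T_INT index cannot be used directly in a 64-bit address computation: the upper half of the register must hold a proper sign extension, not leftover bits. The same requirement expressed in plain C++ (a sketch of the semantics, not the generated LIR):

    // i2l: widen a 32-bit index to 64 bits with an explicit sign extension
    // before it enters 64-bit pointer arithmetic.
    int64_t effective_index(int32_t index) {
      return static_cast<int64_t>(index);
    }
    // element address = base + effective_index(i) * element_size + header_offset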
@ -1317,20 +1315,12 @@ void LIRGenerator::trace_block_entry(BlockBegin* block) {

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
CodeEmitInfo* info) {
#ifdef _LP64
__ store(value, address, info);
#else
__ volatile_store_mem_reg(value, address, info);
#endif
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) {
#ifdef _LP64
__ load(address, result, info);
#else
__ volatile_load_mem_reg(address, result, info);
#endif
}


@ -1340,11 +1330,6 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
LIR_Opr index_op = offset;

bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#ifndef _LP64
if (is_volatile && type == T_LONG) {
__ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
} else
#endif
{
if (type == T_BOOLEAN) {
type = T_BYTE;
@ -1374,11 +1359,6 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,

void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) {
#ifndef _LP64
if (is_volatile && type == T_LONG) {
__ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
} else
#endif
{
LIR_Address* addr = new LIR_Address(src, offset, type);
__ load(addr, dst);
@ -1403,17 +1383,13 @@ void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
// Because we want a 2-arg form of xchg
__ move(data, dst);

assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type");
assert (!x->is_add() && (type == T_INT || (is_obj && UseCompressedOops)), "unexpected type");
LIR_Address* addr;
if (offset->is_constant()) {

#ifdef _LP64
jlong l = offset->as_jlong();
assert((jlong)((jint)l) == l, "offset too large for constant");
jint c = (jint)l;
#else
jint c = offset->as_jint();
#endif
addr = new LIR_Address(src.result(), c, type);
} else {
addr = new LIR_Address(src.result(), offset, type);
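The assert in the constant-offset path is the usual round-trip narrowing check: a 64-bit displacement is only usable in a LIR_Address if casting it down to 32 bits and back reproduces the original value. The idiom in isolation (hypothetical helper name; the check itself is taken from the hunk above):

    // True iff the 64-bit offset fits in a signed 32-bit displacement field.
    bool fits_in_jint(int64_t l) {
      return (int64_t)((int32_t)l) == l;
    }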

@ -48,16 +48,9 @@ LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
void LIR_Address::verify() const {
assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used");
assert(disp() == 0 || index()->is_illegal(), "can't have both");
#ifdef _LP64
assert(base()->is_cpu_register(), "wrong base operand");
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
"wrong type for addresses");
#else
assert(base()->is_single_cpu(), "wrong base operand");
assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
"wrong type for addresses");
#endif
}
#endif // PRODUCT

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,11 +32,7 @@ inline bool LinearScan::is_processed_reg_num(int reg_num) {
inline int LinearScan::num_physical_regs(BasicType type) {
// Sparc requires two cpu registers for long
// and two cpu registers for double
#ifdef _LP64
if (type == T_DOUBLE) {
#else
if (type == T_DOUBLE || type == T_LONG) {
#endif
return 2;
}
return 1;
@ -44,11 +40,7 @@ inline int LinearScan::num_physical_regs(BasicType type) {


inline bool LinearScan::requires_adjacent_regs(BasicType type) {
#ifdef _LP64
return type == T_DOUBLE;
#else
return type == T_DOUBLE || type == T_LONG;
#endif
}
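After this change only doubles still consume two physical registers in the linear-scan allocator; a long fits in one 64-bit cpu register and no longer needs an adjacent pair. A sketch of the two predicates once the conditionals collapse (assuming the _LP64 arms survive, as the hunks indicate):

    inline int LinearScan::num_physical_regs(BasicType type) {
      // Only double still spans two registers on 64-bit SPARC.
      if (type == T_DOUBLE) {
        return 2;
      }
      return 1;
    }

    inline bool LinearScan::requires_adjacent_regs(BasicType type) {
      return type == T_DOUBLE;
    }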

inline bool LinearScan::is_caller_save(int assigned_reg) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -273,13 +273,6 @@ void C1_MacroAssembler::initialize_object(
add(obj, hdr_size_in_bytes, t1); // compute address of first element
sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
initialize_body(t1, t2);
#ifndef _LP64
} else if (con_size_in_bytes < threshold * 2) {
// on v9 we can do double word stores to fill twice as much space.
assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
assert(con_size_in_bytes % 8 == 0, "double word aligned");
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += 2 * HeapWordSize) stx(G0, obj, i);
#endif
} else if (con_size_in_bytes <= threshold) {
// use explicit NULL stores
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += HeapWordSize) st_ptr(G0, obj, i);

@ -930,11 +930,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

Label not_already_dirty, restart, refill, young_card;

#ifdef _LP64
__ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
__ srl(addr, CardTableModRefBS::card_shift, addr);
#endif

AddressLiteral rs(byte_map_base);
__ set(rs, cardtable); // cardtable := <card table base>

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,6 @@ define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
define_pd_global(bool, IdealizeClearArrayNode, true);

#ifdef _LP64
// We need to make sure that all generated code is within
// 2 gigs of the libjvm.so runtime routines so we can use
// the faster "call" instruction rather than the expensive
@ -82,17 +81,6 @@ define_pd_global(intx, CodeCacheExpansionSize, 64*K);

// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
#else
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M);
define_pd_global(intx, ProfiledCodeHeapSize, 14*M);
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM, 4ULL*G);
#endif
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -114,14 +114,8 @@ static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
}

static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
#ifdef _LP64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
#else
// Guarantee use of ldd/std via some asm code, because compiler won't.
// See solaris_sparc.il.
_Copy_conjoint_jlongs_atomic(from, to, count);
#endif
}

static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
@ -162,7 +156,6 @@ static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count)
}

static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
#ifdef _LP64
guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0,
"unaligned fill words");
julong* to = (julong*)tohw;
@ -170,12 +163,6 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
while (count-- > 0) {
*to++ = v;
}
#else // _LP64
juint* to = (juint*)tohw;
while (count-- > 0) {
*to++ = value;
}
#endif // _LP64
}

typedef void (*_zero_Fn)(HeapWord* to, size_t count);
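The hunk shows the 64-bit fill loop writing v one julong at a time, but the line that builds v falls outside the context. The fill works by replicating the 32-bit pattern into both halves of a 64-bit word so each store covers two 32-bit units; a sketch of that widening (the construction is an assumption, since the defining line is not part of the hunk):

    // Duplicate a 32-bit fill pattern into both halves of a 64-bit word.
    julong widen_fill_pattern(juint value) {
      return ((julong)value << 32) | value;
    }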

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -114,11 +114,7 @@ address RegisterMap::pd_location(VMReg regname) const {
// register locations. When that is fixed we will return NULL
// (or assert here).
reg = regname->prev()->as_Register();
#ifdef _LP64
second_word = sizeof(jint);
#else
return NULL;
#endif // _LP64
} else {
reg = regname->as_Register();
}
@ -332,9 +328,7 @@ bool frame::safe_for_sender(JavaThread *thread) {

// Construct an unpatchable, deficient frame
void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
#ifdef _LP64
assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
#endif
_sp = sp;
_younger_sp = NULL;
_pc = pc;
@ -693,11 +687,9 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;

address l_addr = (address)l_scratch;
#ifdef _LP64
// On 64-bit the result for 1/8/16/32-bit result types is in the other
// word half
l_addr += wordSize/2;
#endif
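The wordSize/2 adjustment is an endianness detail: SPARC is big-endian, so when a 1/8/16/32-bit result is stored into a 64-bit stack slot, the meaningful bytes occupy the slot's high-address half. The same addressing in portable terms (a sketch assuming a big-endian 64-bit layout, as on SPARC):

    // On big-endian hardware the low 32 bits of a 64-bit slot sit at
    // byte offset +4, which is what l_addr += wordSize/2 selects.
    jint read_int_result(intptr_t* slot) {
      address p = (address)slot + sizeof(intptr_t) / 2;
      return *(jint*)p;
    }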

switch (type) {
case T_OBJECT:

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -100,11 +100,7 @@

// size of each block, in order of increasing address:
register_save_words = 16,
#ifdef _LP64
callee_aggregate_return_pointer_words = 0,
#else
callee_aggregate_return_pointer_words = 1,
#endif
callee_register_argument_save_area_words = 6,
// memory_parameter_words = <arbitrary>,


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,24 +38,14 @@ const bool CCallingConventionRequiresIntsAsLongs = true;

// The expected size in bytes of a cache line, used to pad data structures.
#if defined(TIERED)
#ifdef _LP64
// tiered, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128
#else
// tiered, 32-bit, medium machine
#define DEFAULT_CACHE_LINE_SIZE 64
#endif
// tiered, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128
#elif defined(COMPILER1)
// pure C1, 32-bit, small machine
#define DEFAULT_CACHE_LINE_SIZE 16
#elif defined(COMPILER2) || defined(SHARK)
#ifdef _LP64
// pure C2, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128
#else
// pure C2, 32-bit, medium machine
#define DEFAULT_CACHE_LINE_SIZE 64
#endif
// pure C2, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128
#endif

#if defined(SOLARIS)

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,18 +56,10 @@ define_pd_global(intx, InlineSmallCode, 1500);
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0))

#ifdef _LP64
// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
#define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
#else
define_pd_global(intx, CompilerThreadStackSize, 512);
define_pd_global(intx, ThreadStackSize, 512);
define_pd_global(intx, VMThreadStackSize, 512);
#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
#endif // _LP64

#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,13 +32,9 @@
#include "oops/oop.inline.hpp"

int InlineCacheBuffer::ic_stub_code_size() {
#ifdef _LP64
return (NativeMovConstReg::instruction_size + // sethi;add
NativeJump::instruction_size + // sethi; jmp; delay slot
(1*BytesPerInstWord) + 1); // flush + 1 extra byte
#else
return (2+2+ 1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
#endif
}

void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,11 +39,6 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"

#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros
@ -78,23 +73,12 @@ void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args
// own dispatch. The dispatch address is computed and placed in IdispatchAddress
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
assert_not_delayed();
#ifdef FAST_DISPATCH
// FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
// they both use I2.
assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
ldub(Lbcp, bcp_incr, Lbyte_code); // load next bytecode
add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
// add offset to correct dispatch table
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
#else
ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
// dispatch table to use
AddressLiteral tbl(Interpreter::dispatch_table(state));
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
set(tbl, G3_scratch); // compute addr of table
ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr
#endif
}


@ -281,23 +265,11 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* tab
// %%%%% maybe implement +VerifyActivationFrameSize here
//verify_thread(); //too slow; we will just verify on method entry & exit
if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
if (table == Interpreter::dispatch_table(state)) {
// use IdispatchTables
add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
// add offset to correct dispatch table
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // get entry addr
} else {
#endif
// dispatch table to use
AddressLiteral tbl(table);
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
set(tbl, G3_scratch); // compute addr of table
ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
#ifdef FAST_DISPATCH
}
#endif
// dispatch table to use
AddressLiteral tbl(table);
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
set(tbl, G3_scratch); // compute addr of table
ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
jmp( G3_scratch, 0 );
if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
else delayed()->nop();
@ -318,52 +290,32 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* tab
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
assert_not_delayed();

#ifdef _LP64
ldf(FloatRegisterImpl::D, r1, offset, d);
#else
ldf(FloatRegisterImpl::S, r1, offset, d);
ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
assert_not_delayed();

#ifdef _LP64
stf(FloatRegisterImpl::D, d, r1, offset);
// store something more useful here
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
stf(FloatRegisterImpl::S, d, r1, offset);
stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
assert_not_delayed();
#ifdef _LP64
ldx(r1, offset, rd);
#else
ld(r1, offset, rd);
ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
assert_not_delayed();

#ifdef _LP64
stx(l, r1, offset);
// store something more useful here
stx(G0, r1, offset+Interpreter::stackElementSize);
#else
st(l, r1, offset);
st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}
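These helpers exist because a long or double on the interpreter stack spans two stack elements; on LP64 the slot is 64-bit aligned, so one ldx/stx (or one D-format float access) replaces the pair of 32-bit accesses into successor registers. A sketch of the surviving 64-bit form of the long store (assuming the _LP64 arm is kept, as the hunk indicates):

    void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
      assert_not_delayed();
      stx(l, r1, offset);                                   // single 64-bit store
      stx(G0, r1, offset + Interpreter::stackElementSize);  // scrub the second slot
    }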

void InterpreterMacroAssembler::pop_i(Register r) {
@ -527,9 +479,7 @@ void InterpreterMacroAssembler::empty_expression_stack() {
sub( Lesp, Gframe_size, Gframe_size );
and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
mov(Gframe_size, SP);

bind(done);
@ -541,28 +491,20 @@ void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
Label Bad, OK;

// Saved SP must be aligned.
#ifdef _LP64
btst(2*BytesPerWord-1, Rsp);
#else
btst(LongAlignmentMask, Rsp);
#endif
br(Assembler::notZero, false, Assembler::pn, Bad);
delayed()->nop();

// Saved SP, plus register window size, must not be above FP.
add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP
#endif
cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

// Saved SP must not be ridiculously below current SP.
size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
set(maxstack, Rtemp);
sub(SP, Rtemp, Rtemp);
#ifdef _LP64
add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp
#endif
cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

ba_short(OK);
@ -584,9 +526,7 @@ void InterpreterMacroAssembler::verify_esp(Register Resp) {
delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
stop("too many pops: Lesp points into monitor area");
bind(OK1);
#ifdef _LP64
sub(Resp, STACK_BIAS, Resp);
#endif
cmp(Resp, SP);
brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
@ -696,21 +636,12 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
}

br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
delayed()->ldsw(Rtmp, 0, Rdst);
#else
delayed()->ld(Rtmp, 0, Rdst);
#endif

ldub(Lbcp, bcp_offset + 3, Rdst);
ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst);
ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
#else
// Unsigned load is faster than signed on some implementations
ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
#endif
or3(Rtmp, Rdst, Rdst );

bind(aligned);
@ -796,7 +727,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
sll(index, LogBytesPerHeapOop, tmp);
get_constant_pool(result);
// load pointer for resolved_references[] objArray
ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
ld_ptr(result, ConstantPool::cache_offset_in_bytes(), result);
ld_ptr(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
ld_ptr(result, 0, result);
// Add in the index
@ -805,6 +737,24 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
}


// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool,
Register Roffset, Register Rklass) {
// int value = *this_cp->int_at_addr(which);
// int resolved_klass_index = extract_low_short_from_int(value);
//
// Because SPARC is big-endian, the low_short is at (cpool->int_at_addr(which) + 2 bytes)
add(Roffset, Rcpool, Roffset);
lduh(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index

Register Rresolved_klasses = Rklass;
ld_ptr(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes(), Rresolved_klasses);
sll(Roffset, LogBytesPerWord, Roffset);
add(Roffset, Array<Klass*>::base_offset_in_bytes(), Roffset);
ld_ptr(Rresolved_klasses, Roffset, Rklass);
}
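The lduh at offset sizeof(ConstantPool) + 2 is the big-endian way of reading the low 16 bits of the 32-bit constant-pool entry: on SPARC the least-significant short of an int sits at the higher byte address. A portable sketch of the equivalence the comment describes (hypothetical helpers mirroring extract_low_short_from_int):

    // On big-endian hardware these two reads agree.
    uint16_t low_short_by_mask(int32_t value) {
      return (uint16_t)(value & 0xffff);
    }
    uint16_t low_short_by_address(const int32_t* entry) {
      return *(const uint16_t*)((const char*)entry + 2);  // what lduh(+2) loads
    }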


// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
@ -910,10 +860,8 @@ void InterpreterMacroAssembler::index_check_without_pop(Register array, Register
assert_not_delayed();

verify_oop(array);
#ifdef _LP64
// sign extend since tos (index) can be a 32bit value
sra(index, G0, index);
#endif // _LP64

// check array
Label ptr_ok;
@ -1191,11 +1139,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
// return tos
assert(Otos_l1 == Otos_i, "adjust code below");
switch (state) {
#ifdef _LP64
case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
#else
case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I1
#endif
case btos: // fall through
case ztos: // fall through
case ctos:
@ -1207,20 +1151,6 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}

#if defined(TIERED) && !defined(_LP64)
if (state == ltos) {
// C2 expects long results in G1. We can't tell if we're returning to interpreted
// or compiled code, so just be safe and use G1 and O0/O1.

// Shift bits into high (msb) of G1
sllx(Otos_l1->after_save(), 32, G1);
// Zero extend low bits
srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
or3 (Otos_l2->after_save(), G1, G1);
}
#endif /* COMPILER2 */

}

// Lock object
@ -1270,9 +1200,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
// Check if owner is self by comparing the value in the markOop of object
// with the stack pointer
sub(temp_reg, SP, temp_reg);
#ifdef _LP64
sub(temp_reg, STACK_BIAS, temp_reg);
#endif
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

// Composite "andcc" test:
@ -2711,11 +2639,7 @@ void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
if (is_native_call) {
stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
stx(O0, l_tmp);
#else
std(O0, l_tmp);
#endif
} else {
push(state);
}
@ -2724,11 +2648,7 @@ void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native
void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
if (is_native_call) {
ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
ldx(l_tmp, O0);
#else
ldd(l_tmp, O0);
#endif
} else {
pop(state);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,9 +70,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool check_exception=true
);

virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);

// base routine for all dispatches
void dispatch_base(TosState state, address* table);

@ -80,6 +77,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {}

virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg);

void jump_to_entry(address entry);

virtual void load_earlyret_value(TosState state);
@ -196,6 +196,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);

// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);

// common code

void field_offset_at(int n, Register tmp, Register dest, Register base);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,47 +53,24 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
Argument jni_arg(jni_offset(), false);
Register Rtmp = O0;

#ifdef _LP64
__ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_long_argument(Rtmp, jni_arg);
#else
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_argument(Rtmp, jni_arg);
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 0), Rtmp);
Argument successor(jni_arg.successor());
__ store_argument(Rtmp, successor);
#endif
}


void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
Argument jni_arg(jni_offset(), false);
#ifdef _LP64
FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
__ store_float_argument(Rtmp, jni_arg);
#else
Register Rtmp = O0;
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
__ store_argument(Rtmp, jni_arg);
#endif
}


void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
Argument jni_arg(jni_offset(), false);
#ifdef _LP64
FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::D, Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_double_argument(Rtmp, jni_arg);
#else
Register Rtmp = O0;
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_argument(Rtmp, jni_arg);
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
Argument successor(jni_arg.successor());
__ store_argument(Rtmp, successor);
#endif
}

void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
@ -171,7 +148,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
add_signature( non_float );
}

#ifdef _LP64
virtual void pass_float() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
_from -= Interpreter::stackElementSize;
@ -190,23 +166,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
_from -= 2*Interpreter::stackElementSize;
add_signature( long_sig );
}
#else
// pass_double() is pass_long() and pass_float() only on _LP64
virtual void pass_long() {
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
_to += 2;
_from -= 2*Interpreter::stackElementSize;
add_signature( non_float );
}

virtual void pass_float() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
_from -= Interpreter::stackElementSize;
add_signature( non_float );
}

#endif // _LP64

virtual void add_signature( intptr_t sig_type ) {
if ( _argcount < (sizeof (intptr_t))*4 ) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -88,9 +88,7 @@ private:
// _last_Java_sp will always be an unbiased stack pointer;
// if it is biased then some setter screwed up. This is
// deadly.
#ifdef _LP64
assert(((intptr_t)_last_Java_sp & 0xF) == 0, "Biased last_Java_sp");
#endif
return _last_Java_sp;
}


@ -152,39 +152,19 @@ address JNI_FastGetField::generate_fast_get_long_field() {
__ ld_ptr (O1, 0, O5);
__ add (O5, O4, O5);

#ifndef _LP64
assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small");
speculative_load_pclist[count++] = __ pc();
__ ld (O5, 0, G2);

speculative_load_pclist[count] = __ pc();
__ ld (O5, 4, O3);
#else
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
__ ldx (O5, 0, O3);
#endif

__ ld (cnt_addr, G1);
__ cmp (G1, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1);

#ifndef _LP64
__ mov (G2, O0);
__ retl ();
__ delayed()->mov (O3, O1);
#else
__ retl ();
__ delayed()->mov (O3, O0);
#endif

#ifndef _LP64
slowcase_entry_pclist[count-1] = __ pc();
slowcase_entry_pclist[count++] = __ pc() ;
#else
slowcase_entry_pclist[count++] = __ pc();
#endif

__ bind (label1);
__ mov (O7, G1);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,18 +55,10 @@ public:
static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; }
static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }

#ifdef _LP64
// Longs are stored in native format in one JavaCallArgument slot at *(to+1).
static inline void put_long(jlong from, intptr_t *to) { *(jlong *)(to + 1 + 0) = from; }
static inline void put_long(jlong from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = from; pos += 2; }
static inline void put_long(jlong *from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = *from; pos += 2; }
#else
// Longs are stored in reversed native word format in two JavaCallArgument slots at *to.
// The high half is in *(to+1) and the low half in *to.
static inline void put_long(jlong from, intptr_t *to) { put_int2r((jint *)&from, (jint *)to); }
static inline void put_long(jlong from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
static inline void put_long(jlong *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
#endif

// Oops are stored in native format in one JavaCallArgument slot at *to.
static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; }
@ -78,39 +70,21 @@ public:
static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; }
static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }

#ifdef _LP64
// Doubles are stored in native word format in one JavaCallArgument slot at *(to+1).
static inline void put_double(jdouble from, intptr_t *to) { *(jdouble *)(to + 1 + 0) = from; }
static inline void put_double(jdouble from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = from; pos += 2; }
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = *from; pos += 2; }
#else
// Doubles are stored in reversed native word format in two JavaCallArgument slots at *to.
static inline void put_double(jdouble from, intptr_t *to) { put_int2r((jint *)&from, (jint *)to); }
static inline void put_double(jdouble from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
#endif

// The get_xxx routines, on the other hand, actually _do_ fetch
// java primitive types from the interpreter stack.
static inline jint get_int(intptr_t *from) { return *(jint *)from; }

#ifdef _LP64
static inline jlong get_long(intptr_t *from) { return *(jlong *)from; }
#else
static inline jlong get_long(intptr_t *from) { return ((jlong)(*( signed int *)((jint *)from )) << 32) |
((jlong)(*(unsigned int *)((jint *)from + 1)) << 0); }
#endif

static inline oop get_obj(intptr_t *from) { return *(oop *)from; }
static inline jfloat get_float(intptr_t *from) { return *(jfloat *)from; }

#ifdef _LP64
static inline jdouble get_double(intptr_t *from) { return *(jdouble *)from; }
#else
static inline jdouble get_double(intptr_t *from) { jlong jl = ((jlong)(*( signed int *)((jint *)from )) << 32) |
((jlong)(*(unsigned int *)((jint *)from + 1)) << 0);
return *(jdouble *)&jl; }
#endif

};
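In the LP64 layout a long or double argument still accounts for two JavaCallArgument slots (hence pos += 2), but the payload is written once, in native format, into the second slot — that is the to + 1 + pos addressing. A self-contained sketch of the round trip (plain C++ with an array standing in for the argument slots):

    #include <cstdint>
    #include <cassert>

    int main() {
      intptr_t slots[4] = {0};
      int pos = 0;
      int64_t value = 0x1122334455667788LL;
      *(int64_t*)(slots + 1 + pos) = value;     // put_long: payload in slot pos+1
      pos += 2;                                 // but two slots are consumed
      assert(*(int64_t*)(slots + 1) == value);  // get_long reads one native slot
      return 0;
    }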


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,10 +39,6 @@

typedef int jint;

#ifdef _LP64
typedef long jlong;
#else
typedef long long jlong;
#endif
typedef long jlong;

typedef signed char jbyte;
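Collapsing the jlong typedef to a single typedef long jlong; leans on the LP64 data model, where long is 64 bits wide (on 32-bit SPARC it was 32 bits, hence the old long long branch). A one-line compile-time check of that assumption (a sketch; the header itself does not carry such an assert):

    static_assert(sizeof(long) == 8, "typedef long jlong requires an LP64 data model");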

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,16 +44,12 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand

void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
Handle obj = HotSpotObjectConstantImpl::object(constant);
Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj());
if (HotSpotObjectConstantImpl::compressed(constant)) {
#ifdef _LP64
int oop_index = _oop_recorder->find_index(value);
RelocationHolder rspec = oop_Relocation::spec(oop_index);
_instructions->relocate(pc, rspec, 1);
#else
JVMCI_ERROR("compressed oop on 32bit");
#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
move->set_data((intptr_t) value);
@ -69,14 +65,10 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
NativeMovConstReg32* move = nativeMovConstReg32_at(pc);
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
move->set_data((intptr_t)narrowOop);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
#else
JVMCI_ERROR("compressed Klass* on 32bit");
#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -296,11 +296,6 @@ void MacroAssembler::verify_thread() {
mov(G3, L3); // avoid clobbering G3
mov(G4, L4); // avoid clobbering G4
mov(G5_method, L5); // avoid clobbering G5_method
#if defined(COMPILER2) && !defined(_LP64)
// Save & restore possible 64-bit Long arguments in G-regs
srlx(G1,32,L0);
srlx(G4,32,L6);
#endif
call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
delayed()->mov(G2_thread, O0);

@ -309,15 +304,6 @@ void MacroAssembler::verify_thread() {
mov(L3, G3); // restore G3
mov(L4, G4); // restore G4
mov(L5, G5_method); // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
// Save & restore possible 64-bit Long arguments in G-regs
sllx(L0,32,G2); // Move old high G1 bits high in G2
srl(G1, 0,G1); // Clear current high G1 bits
or3 (G1,G2,G1); // Recover 64-bit G1
sllx(L6,32,G2); // Move old high G4 bits high in G2
srl(G4, 0,G4); // Clear current high G4 bits
or3 (G4,G2,G4); // Recover 64-bit G4
#endif
restore(O0, 0, G2_thread);
}
}
@ -387,7 +373,6 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
st_ptr(last_Java_pc, pc_addr);
}

#ifdef _LP64
#ifdef ASSERT
// Make sure that we have an odd stack
Label StackOk;
@ -400,9 +385,6 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
add( last_java_sp, STACK_BIAS, G4_scratch );
st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}

void MacroAssembler::reset_last_Java_frame(void) {
@ -658,11 +640,7 @@ void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index)

void MacroAssembler::card_table_write(jbyte* byte_map_base,
Register tmp, Register obj) {
#ifdef _LP64
srlx(obj, CardTableModRefBS::card_shift, obj);
#else
srl(obj, CardTableModRefBS::card_shift, obj);
#endif
assert(tmp != obj, "need separate temp reg");
set((address) byte_map_base, tmp);
stb(G0, tmp, obj);
@ -672,7 +650,6 @@ void MacroAssembler::card_table_write(jbyte* byte_map_base,
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
address save_pc;
int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY
assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
@ -719,9 +696,6 @@ void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, b
while (pc() < (save_pc + (7 * BytesPerInstWord)))
nop();
}
#else
Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}


@ -736,7 +710,6 @@ void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d)


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
if (worst_case) return 7;
intptr_t iaddr = (intptr_t) a;
int msb32 = (int) (iaddr >> 32);
@ -756,9 +729,6 @@ int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
}
}
return count;
#else
return 1;
#endif
}

int MacroAssembler::worst_case_insts_for_set() {
@ -1488,11 +1458,7 @@ void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresul


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
#ifdef _LP64
add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
#else
add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
#endif
bclr(1, Rresult);
sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}
@ -1531,22 +1497,12 @@ void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a,
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
assert_not_delayed();
#ifdef _LP64
bpr( rc_z, a, p, s1, L );
#else
tst(s1);
br ( zero, a, p, L );
#endif
}

void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
assert_not_delayed();
#ifdef _LP64
bpr( rc_nz, a, p, s1, L );
#else
tst(s1);
br ( notZero, a, p, L );
#endif
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.
@ -1862,14 +1818,12 @@ void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
bind( done );
}

#ifdef _LP64
void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
cmp(Ra, Rb);
mov(-1, Rresult);
movcc(equal, false, xcc, 0, Rresult);
movcc(greater, false, xcc, 1, Rresult);
}
#endif


void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
@ -2668,9 +2622,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// if compare/exchange succeeded we found an unlocked object and we now have locked it
// hence we are done
cmp(Rmark, Rscratch);
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
#endif
brx(Assembler::equal, false, Assembler::pt, done);
delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot

@ -2716,9 +2668,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,

// Stack-lock attempt failed - check for recursive stack-lock.
// See the comments below about how we might remove this case.
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
#endif
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
andcc(Rscratch, 0xfffff003, Rscratch);
br(Assembler::always, false, Assembler::pt, done);
@ -2800,9 +2750,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// control to the "slow" operators in synchronizer.cpp.

// RScratch contains the fetched obj->mark value from the failed CAS.
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
#endif
sub(Rscratch, SP, Rscratch);
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
andcc(Rscratch, 0xfffff003, Rscratch);
@ -3720,11 +3668,7 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {

Label not_already_dirty, restart, refill, young_card;

#ifdef _LP64
__ srlx(O0, CardTableModRefBS::card_shift, O0);
#else
__ srl(O0, CardTableModRefBS::card_shift, O0);
#endif
AddressLiteral addrlit(byte_map_base);
__ set(addrlit, O1); // O1 := <card table base>
__ ldub(O0, O1, O2); // O2 := [O0 + O1]
@ -3826,11 +3770,7 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val

if (G1RSBarrierRegionFilter) {
xor3(store_addr, new_val, tmp);
#ifdef _LP64
srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#else
srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#endif

// XXX Should I predict this taken or not? Does it matter?
cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
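The xor3/srlx pair is G1's cross-region filter: xoring the store address with the stored value keeps exactly the bits in which they differ, and if no difference survives the shift by the region-size log, both addresses fall in the same heap region and the post barrier can be skipped. The predicate in plain C++ (a sketch; the shift count corresponds to HeapRegion::LogOfHRGrainBytes):

    bool same_g1_region(uintptr_t store_addr, uintptr_t new_val, unsigned log_region_bytes) {
      // Zero iff the addresses agree in every bit above the region offset.
      return ((store_addr ^ new_val) >> log_region_bytes) == 0;
    }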

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -156,7 +156,6 @@ REGISTER_DECLARATION(Register, O5_savedSP , O5);
REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
// a copy of SP, so in 64-bit it's a biased value. The bias
// is added and removed as needed in the frame code.
REGISTER_DECLARATION(Register, IdispatchTables , I4); // Base address of the bytecode dispatch tables
REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
REGISTER_DECLARATION(Register, ImethodDataPtr , I2); // Pointer to the current method data

@ -228,7 +227,6 @@ REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is comi
#define O5_savedSP AS_REGISTER(Register, O5_savedSP)
#define IdispatchAddress AS_REGISTER(Register, IdispatchAddress)
#define ImethodDataPtr AS_REGISTER(Register, ImethodDataPtr)
#define IdispatchTables AS_REGISTER(Register, IdispatchTables)

#define Oexception AS_REGISTER(Register, Oexception)
#define Oissuing_pc AS_REGISTER(Register, Oissuing_pc)
@ -333,14 +331,12 @@ class AddressLiteral VALUE_OBJ_CLASS_SPEC {
return external_word_Relocation::spec(addr);
case relocInfo::internal_word_type:
return internal_word_Relocation::spec(addr);
#ifdef _LP64
case relocInfo::opt_virtual_call_type:
return opt_virtual_call_Relocation::spec();
case relocInfo::static_call_type:
return static_call_Relocation::spec();
case relocInfo::runtime_call_type:
return runtime_call_Relocation::spec();
#endif
case relocInfo::none:
return RelocationHolder();
default:
@ -396,12 +392,10 @@ class AddressLiteral VALUE_OBJ_CLASS_SPEC {
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}

#ifdef _LP64
// 32-bit complains about a multiple declaration for int*.
AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
#endif

AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
@ -464,16 +458,10 @@ class Argument VALUE_OBJ_CLASS_SPEC {
bool _is_in;

public:
#ifdef _LP64
enum {
n_register_parameters = 6, // only 6 registers may contain integer parameters
n_float_register_parameters = 16 // Can have up to 16 floating registers
};
#else
enum {
n_register_parameters = 6 // only 6 registers may contain integer parameters
};
#endif

// creation
Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
@ -489,7 +477,6 @@ class Argument VALUE_OBJ_CLASS_SPEC {
// locating register-based arguments:
bool is_register() const { return _number < n_register_parameters; }

#ifdef _LP64
// locating Floating Point register-based arguments:
bool is_float_register() const { return _number < n_float_register_parameters; }

@ -501,7 +488,6 @@ class Argument VALUE_OBJ_CLASS_SPEC {
assert(is_float_register(), "must be a register argument");
return as_FloatRegister(( number() *2 ));
}
#endif

Register as_register() const {
assert(is_register(), "must be a register argument");
@ -604,15 +590,15 @@ class MacroAssembler : public Assembler {
bool check_exception=true // flag which indicates if exception should be checked
);

public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}

// This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register scratch_reg);
  virtual void check_and_handle_earlyret(Register scratch_reg);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
@ -1217,9 +1203,7 @@ public:
  void lushr( Register Rin_high, Register Rin_low, Register Rcount,
              Register Rout_high, Register Rout_low, Register Rtemp );

#ifdef _LP64
  void lcmp( Register Ra, Register Rb, Register Rresult);
#endif

  // Load and store values by size and signed-ness
  void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -45,19 +45,11 @@ inline void MacroAssembler::pd_patch_instruction(address branch, address target)

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  ld( s1, simm13a, d);
#endif
}

#ifdef ASSERT
@ -68,35 +60,19 @@ inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d )
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  st( d, s1, simm13a);
#endif
}

#ifdef ASSERT
@ -107,84 +83,44 @@ inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a )
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  stx(d, a, offset);
#else
  st( d, a, offset);
#endif
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  stx(d, a, offset);
#else
  std(d, a, offset);
#endif
}

inline void MacroAssembler::stbool(Register d, const Address& a) { stb(d, a); }
@ -207,45 +143,25 @@ inline void MacroAssembler::casx( Register s1, Register s2, Register d) { casxa(
// Functions for isolating 64 bit atomic swaps for LP64
// cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
inline void MacroAssembler::cas_ptr( Register s1, Register s2, Register d) {
#ifdef _LP64
  casx( s1, s2, d );
#else
  cas( s1, s2, d );
#endif
}

// Functions for isolating 64 bit shifts for LP64

inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
@ -277,11 +193,7 @@ inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
  Assembler::bp(c, a, xcc, p, d, rt);
#else
  MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
@ -338,7 +250,6 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
}

inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
#ifdef _LP64
  intptr_t disp;
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
@ -355,9 +266,6 @@ inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
  } else {
    Assembler::call(d, rspec);
  }
#else
  Assembler::call( d, rspec );
#endif
}

inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
@ -414,12 +322,7 @@ inline void MacroAssembler::cmp( Register s1, int simm13a ) { subcc( s1, simm13
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}

@ -554,7 +457,6 @@ inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
}


#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // V9 ABI has F1, F3, F5 are used to pass instead of O0, O1, O2
@ -579,7 +481,6 @@ inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  else
    stx(s, a.as_address());
}
#endif

inline void MacroAssembler::round_to( Register r, int modulus ) {
  assert_not_delayed();
@ -640,22 +541,13 @@ inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm1
inline void MacroAssembler::clruw( Register s, Register d ) { srl( s, G0, d); }
inline void MacroAssembler::clruwu( Register d ) { srl( d, G0, d); }

#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif

inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
@ -1,120 +0,0 @@
/*
 * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"

// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
//   oop obj;
//   int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//

//=====================================================================

// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.

#define __ masm->

void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
                                              void** vtable,
                                              char** md_top,
                                              char* md_end,
                                              char** mc_top,
                                              char* mc_end) {

  intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
  *(intptr_t *)(*md_top) = vtable_bytes;
  *md_top += sizeof(intptr_t);
  void** dummy_vtable = (void**)*md_top;
  *vtable = dummy_vtable;
  *md_top += vtable_bytes;

  // Get ready to generate dummy methods.

  CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
  MacroAssembler* masm = new MacroAssembler(&cb);

  Label common_code;
  for (int i = 0; i < vtbl_list_size; ++i) {
    for (int j = 0; j < num_virtuals; ++j) {
      dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
      __ save(SP, -256, SP);
      int offset = (i << 8) + j;
      Register src = G0;
      if (!Assembler::is_simm13(offset)) {
        __ sethi(offset, L0);
        src = L0;
        offset = offset & ((1 << 10) - 1);
      }
      __ brx(Assembler::always, false, Assembler::pt, common_code);

      // Load L0 with a value indicating vtable/offset pair.
      //   -- bits[ 7..0] (8 bits) which virtual method in table?
      //   -- bits[13..8] (6 bits) which virtual method table?
      __ delayed()->or3(src, offset, L0);
    }
  }

  __ bind(common_code);

  // Expecting to be called with the "this" pointer in O0/I0 (where
  // "this" is a Klass object). In addition, L0 was set (above) to
  // identify the method and table.

  // Look up the correct vtable pointer.

  __ set((intptr_t)vtbl_list, L2);      // L2 = address of new vtable list.
  __ srl(L0, 8, L3);                    // Isolate L3 = vtable identifier.
  __ sll(L3, LogBytesPerWord, L3);
  __ ld_ptr(L2, L3, L3);                // L3 = new (correct) vtable pointer.
  __ st_ptr(L3, Address(I0, 0));        // Save correct vtable ptr in entry.

  // Restore registers and jump to the correct method;

  __ and3(L0, 255, L4);                 // Isolate L4 = method offset.
  __ sll(L4, LogBytesPerWord, L4);
  __ ld_ptr(L3, L4, L4);                // Get address of correct virtual method
  __ jmpl(L4, 0, G0);                   // Jump to correct method.
  __ delayed()->restore();              // Restore registers.

  __ flush();
  *mc_top = (char*)__ pc();

  guarantee(*mc_top <= mc_end, "Insufficient space for method wrappers.");
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -71,7 +71,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register temp_reg, Register temp2_reg,
                                 const char* error_message) {
  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Klass* klass = SystemDictionary::well_known_klass(klass_id);
  bool did_save = false;
  if (temp_reg == noreg || temp2_reg == noreg) {
    temp_reg = L1;
@ -181,8 +181,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
    __ verify_oop(method_temp);
    __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), method_temp);
    __ verify_oop(method_temp);
    // the following assumes that a Method* is normally compressed in the vmtarget field:
    __ ld_ptr( Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())), method_temp);
    __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), method_temp);
    __ verify_oop(method_temp);
    __ ld_ptr( Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())), method_temp);

    if (VerifyMethodHandles && !for_compiler_entry) {
      // make sure recv is already on stack
@ -332,7 +333,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,

  Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
  Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
  Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
  Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
  Address vmtarget_method( G5_method, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));

  Register temp1_recv_klass = temp1;
  if (iid != vmIntrinsics::_linkToStatic) {
@ -384,14 +386,16 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
    if (VerifyMethodHandles) {
      verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
    }
    __ ld_ptr(member_vmtarget, G5_method);
    __ load_heap_oop(member_vmtarget, G5_method);
    __ ld_ptr(vmtarget_method, G5_method);
    break;

  case vmIntrinsics::_linkToStatic:
    if (VerifyMethodHandles) {
      verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
    }
    __ ld_ptr(member_vmtarget, G5_method);
    __ load_heap_oop(member_vmtarget, G5_method);
    __ ld_ptr(vmtarget_method, G5_method);
    break;

  case vmIntrinsics::_linkToVirtual:
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -236,8 +236,6 @@ void NativeCall::test() {

//-------------------------------------------------------------------

#ifdef _LP64

void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
@ -290,8 +288,6 @@ void NativeFarCall::test() {
}
// End code for unit testing implementation of NativeFarCall class

#endif // _LP64

//-------------------------------------------------------------------


@ -304,18 +300,9 @@ void NativeMovConstReg::verify() {

  // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1) && rd == inv_rd(i1))) {
    fatal("not a set_metadata");
  }
#else
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a set_metadata");
  }
#endif
}


@ -324,23 +311,13 @@ void NativeMovConstReg::print() {
}


#ifdef _LP64
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
intptr_t NativeMovConstReg::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif


void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
@ -508,20 +485,12 @@ void NativeMovConstRegPatching::print() {


int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}


void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
@ -758,21 +727,12 @@ void NativeJump::verify() {
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op)) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1))) {
    fatal("not a jump_to instruction");
  }
#else
  // In LP64, the jump instruction location varies for non relocatable
  // jumps, for example it could be sethi, xor, jmp instead of the
  // 7 instructions for sethi. So let's check sethi only.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a jump_to instruction");
  }
#endif
}

Some files were not shown because too many files have changed in this diff.