Jesper Wilhelmsson 2017-05-05 17:48:45 +02:00
commit 7b8b238986
1724 changed files with 119255 additions and 38498 deletions

View File

@ -47,11 +47,9 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
$(SRC_DIR)/org.graalvm.compiler.common/src \
$(SRC_DIR)/org.graalvm.compiler.core/src \
$(SRC_DIR)/org.graalvm.compiler.core.common/src \
$(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
$(SRC_DIR)/org.graalvm.compiler.api.collections/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
$(SRC_DIR)/org.graalvm.compiler.asm/src \
$(SRC_DIR)/org.graalvm.compiler.bytecode/src \
@ -68,6 +66,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.phases.common/src \
$(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
$(SRC_DIR)/org.graalvm.compiler.virtual/src \
$(SRC_DIR)/org.graalvm.util/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \
@ -102,6 +101,7 @@ ifeq ($(INCLUDE_GRAAL), true)
SRC := \
$(SRC_DIR)/org.graalvm.compiler.options/src \
$(SRC_DIR)/org.graalvm.compiler.options.processor/src \
$(SRC_DIR)/org.graalvm.util/src \
, \
BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor, \
JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar, \
@ -114,9 +114,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
$(SRC_DIR)/org.graalvm.compiler.common/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
$(SRC_DIR)/org.graalvm.compiler.api.collections/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
$(SRC_DIR)/org.graalvm.compiler.code/src \
$(SRC_DIR)/org.graalvm.compiler.core.common/src \
@ -125,6 +123,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.nodeinfo/src \
$(SRC_DIR)/org.graalvm.compiler.options/src \
$(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
$(SRC_DIR)/org.graalvm.util/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \

View File

@ -37,7 +37,6 @@ SRC_DIR := $(HOTSPOT_TOPDIR)/src/$(MODULE)/share/classes
PROC_SRC_SUBDIRS := \
org.graalvm.compiler.code \
org.graalvm.compiler.common \
org.graalvm.compiler.core \
org.graalvm.compiler.core.aarch64 \
org.graalvm.compiler.core.amd64 \

View File

@ -1,53 +0,0 @@
#
# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
include $(SPEC)
include NativeCompilation.gmk
$(eval $(call IncludeCustomExtension, hotspot, lib/Lib-jdk.aot.gmk))
##############################################################################
# Build libjelfshim only when AOT is enabled.
ifeq ($(ENABLE_AOT), true)
JELFSHIM_NAME := jelfshim
$(eval $(call SetupNativeCompilation, BUILD_LIBJELFSHIM, \
TOOLCHAIN := TOOLCHAIN_DEFAULT, \
OPTIMIZATION := LOW, \
LIBRARY := $(JELFSHIM_NAME), \
OUTPUT_DIR := $(call FindLibDirForModule, $(MODULE)), \
SRC := $(HOTSPOT_TOPDIR)/src/jdk.aot/unix/native/libjelfshim, \
CFLAGS := $(CFLAGS_JDKLIB) $(ELF_CFLAGS) \
-DAOT_VERSION_STRING='"$(VERSION_STRING)"' \
-I$(SUPPORT_OUTPUTDIR)/headers/$(MODULE), \
LDFLAGS := $(LDFLAGS_JDKLIB), \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/lib$(JELFSHIM_NAME), \
LIBS := $(ELF_LIBS) $(LIBS_JDKLIB), \
))
TARGETS += $(BUILD_LIBJELFSHIM)
endif
##############################################################################

View File

@ -35,12 +35,15 @@ include $(SPEC)
include MakeBase.gmk
include TestFilesCompilation.gmk
$(eval $(call IncludeCustomExtension, hotspot, test/JtregNative.gmk))
################################################################################
# Targets for building the native tests themselves.
################################################################################
# Add more directories here when needed.
BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
$(HOTSPOT_TOPDIR)/test/gc/stress/gclocker \
$(HOTSPOT_TOPDIR)/test/native_sanity \
$(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
$(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
@ -53,6 +56,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
$(HOTSPOT_TOPDIR)/test/runtime/modules/getModuleJNI \
$(HOTSPOT_TOPDIR)/test/runtime/SameObject \
$(HOTSPOT_TOPDIR)/test/runtime/BoolReturn \
$(HOTSPOT_TOPDIR)/test/runtime/noClassDefFoundMsg \
$(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
$(HOTSPOT_TOPDIR)/test/compiler/calls \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
@ -65,6 +69,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ClassFileLoadHook \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ClassLoadPrepare \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/ModuleAwareAgents/ThreadStart \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/StartPhase/AllowedFunctions \
#
# Add conditional directories here when needed.
@ -91,6 +96,7 @@ ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassFileLoadHook := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassLoadPrepare := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAThreadStart := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
endif
ifeq ($(OPENJDK_TARGET_OS), linux)

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -3564,7 +3564,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
}
// Vector ideal reg.
const int Matcher::vector_ideal_reg(int len) {
const uint Matcher::vector_ideal_reg(int len) {
switch(len) {
case 8: return Op_VecD;
case 16: return Op_VecX;
@ -3573,7 +3573,7 @@ const int Matcher::vector_ideal_reg(int len) {
return 0;
}
const int Matcher::vector_shift_count_ideal_reg(int size) {
const uint Matcher::vector_shift_count_ideal_reg(int size) {
return Op_VecX;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -270,7 +270,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
get_constant_pool(result);
// load pointer for resolved_references[] objArray
ldr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
// JNIHandles::resolve(obj);
ldr(result, Address(result, 0));
// Add in the index
@ -278,6 +279,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
Register cpool, Register index, Register klass, Register temp) {
add(temp, cpool, index, LSL, LogBytesPerWord);
ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
add(klass, klass, temp, LSL, LogBytesPerWord);
ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
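
As context for the new load_resolved_klass_at_offset above: a resolved CONSTANT_Class pool slot now holds a 16-bit index into a side table of Klass pointers (cpool->_resolved_klasses) rather than the Klass pointer itself. Below is a hedged, self-contained C++ model of the four-instruction walk; the mock types and helper name are illustrative, not HotSpot's runtime code.

#include <cstddef>
#include <cstdint>
#include <cstring>

struct Klass;                                       // opaque
struct KlassArray { int length; Klass* data[1]; };  // stands in for Array<Klass*>

// header_size plays the role of sizeof(ConstantPool): pool entries start
// directly after the ConstantPool object itself.
Klass* load_resolved_klass(const char* cpool, size_t header_size,
                           const KlassArray* resolved_klasses, int cp_index) {
  // add + ldrh: read the 16-bit resolved_klass_index out of the pool slot
  // (low halfword of the slot on little-endian AArch64).
  uint16_t resolved_klass_index;
  std::memcpy(&resolved_klass_index,
              cpool + header_size + cp_index * sizeof(intptr_t),
              sizeof(resolved_klass_index));
  // ldr + add + ldr: index cpool->_resolved_klasses by that value.
  return resolved_klasses->data[resolved_klass_index];
}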

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -54,9 +54,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
int number_of_arguments,
bool check_exceptions);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true);
@ -67,6 +64,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void jump_to_entry(address entry);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// Interpreter-specific registers
void save_bcp() {
str(rbcp, Address(rfp, frame::interpreter_frame_bcp_offset * wordSize));
@ -123,6 +123,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
// load cpool->resolved_klass_at(index);
void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp);
void pop_ptr(Register r = r0);
void pop_i(Register r = r0);
void pop_l(Register r = r0);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
}
}
#endif // ASSERT
Handle obj = HotSpotObjectConstantImpl::object(constant);
Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj());
MacroAssembler::patch_oop(pc, (address)obj());
int oop_index = _oop_recorder->find_index(value);
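
A brief, hedged aside on the Handle obj(THREAD, ...) line above: it follows the repo-wide move to the explicit two-argument Handle constructor. The sketch below is my reading of the pattern, not text from this commit.

// Before: an implicit oop-to-Handle conversion that had to discover the
// current thread internally (e.g. via Thread::current()):
//   Handle obj = HotSpotObjectConstantImpl::object(constant);
// After: the owning thread is passed explicitly, so the handle area to
// allocate from is known without a lookup:
Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));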

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -77,12 +77,6 @@ class MacroAssembler: public Assembler {
bool check_exceptions // whether to check for pending exceptions after return
);
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
// Maximum size of class area in Metaspace when compressed
@ -97,6 +91,12 @@ class MacroAssembler: public Assembler {
> (1u << log2_intptr(CompressedClassSpaceSize))));
}
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// Biased locking support
// lock_reg and obj_reg must be loaded up with the appropriate values.
// swap_reg is killed.

View File

@ -1,126 +0,0 @@
/*
* Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
differentiate between the 'vtbl_list_size' original Klass objects.
#define __ masm->
extern "C" {
void aarch64_prolog(void);
}
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
#ifdef BUILTIN_SIM
// Write a dummy word to the writable shared metaspace.
// MetaspaceShared::initialize_shared_spaces will fill it with the
// address of aarch64_prolog().
address *prolog_ptr = (address*)*md_top;
*(intptr_t *)(*md_top) = (intptr_t)0;
(*md_top) += sizeof(intptr_t);
#endif
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
Label common_code;
for (int i = 0; i < vtbl_list_size; ++i) {
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
// We're called directly from C code.
#ifdef BUILTIN_SIM
__ c_stub_prolog(8, 0, MacroAssembler::ret_type_integral, prolog_ptr);
#endif
// Load rscratch1 with a value indicating vtable/offset pair.
// -- bits[ 7..0] (8 bits) which virtual method in table?
// -- bits[12..8] (5 bits) which virtual method table?
__ mov(rscratch1, (i << 8) + j);
__ b(common_code);
}
}
__ bind(common_code);
Register tmp0 = r10, tmp1 = r11; // AAPCS64 temporary registers
__ enter();
__ lsr(tmp0, rscratch1, 8); // isolate vtable identifier.
__ mov(tmp1, (address)vtbl_list); // address of list of vtable pointers.
__ ldr(tmp1, Address(tmp1, tmp0, Address::lsl(LogBytesPerWord))); // get correct vtable pointer.
__ str(tmp1, Address(c_rarg0)); // update vtable pointer in obj.
__ add(rscratch1, tmp1, rscratch1, ext::uxtb, LogBytesPerWord); // address of real method pointer.
__ ldr(rscratch1, Address(rscratch1)); // get real method pointer.
__ blrt(rscratch1, 8, 0, 1); // jump to the real method.
__ leave();
__ ret(lr);
*mc_top = (char*)__ pc();
}
#ifdef BUILTIN_SIM
void MetaspaceShared::relocate_vtbl_list(char **buffer) {
void **sim_entry = (void**)*buffer;
*sim_entry = (void*)aarch64_prolog;
*buffer += sizeof(intptr_t);
}
#endif
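
The deleted file above still documents how the dummy methods encode their identity; here is a small self-contained decode of the (i << 8) + j value the stub moved into rscratch1 (illustrative, matching the bit-layout comment in the deleted code):

#include <cassert>

int main() {
  int i = 5, j = 42;                         // which vtable, which virtual method
  int packed = (i << 8) + j;                 // value loaded into rscratch1
  int which_method = packed & 0xff;          // bits [7..0], the uxtb extraction
  int which_table  = (packed >> 8) & 0x1f;   // bits [12..8], the lsr #8 step
  assert(which_method == j && which_table == i);
  return 0;
}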

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -63,7 +63,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Klass* klass = SystemDictionary::well_known_klass(klass_id);
Register temp = rscratch2;
Register temp2 = rscratch1; // used by MacroAssembler::cmpptr
Label L_ok, L_bad;

View File

@ -402,14 +402,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
return entry;
}
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
// NULL last_sp until next java call
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
__ dispatch_next(state);
return entry;
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();
@ -444,6 +436,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ notify(Assembler::method_reentry);
}
#endif
__ check_and_handle_popframe(rthread);
__ check_and_handle_earlyret(rthread);
__ get_dispatch();
__ dispatch_next(state, step);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -3372,8 +3372,7 @@ void TemplateTable::_new() {
__ br(Assembler::NE, slow_case);
// get InstanceKlass
__ lea(r4, Address(r4, r3, Address::lsl(3)));
__ ldr(r4, Address(r4, sizeof(ConstantPool)));
__ load_resolved_klass_at_offset(r4, r3, r4, rscratch1);
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
@ -3526,8 +3525,7 @@ void TemplateTable::checkcast()
// Get superklass in r0 and subklass in r3
__ bind(quicked);
__ mov(r3, r0); // Save object in r3; r0 needed for subtype check
__ lea(r0, Address(r2, r19, Address::lsl(3)));
__ ldr(r0, Address(r0, sizeof(ConstantPool)));
__ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass
__ bind(resolved);
__ load_klass(r19, r3);
@ -3583,8 +3581,7 @@ void TemplateTable::instanceof() {
// Get superklass in r0 and subklass in r3
__ bind(quicked);
__ load_klass(r3, r0);
__ lea(r0, Address(r2, r19, Address::lsl(3)));
__ ldr(r0, Address(r0, sizeof(ConstantPool)));
__ load_resolved_klass_at_offset(r2, r19, r0, rscratch1);
__ bind(resolved);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -234,8 +234,15 @@ void AbstractInterpreter::layout_activation(Method* method,
#ifdef AARCH64
interpreter_frame->interpreter_frame_set_stack_top(stack_top);
// We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
// none of which are in use at the same time, so we just need to make sure there is enough room
// for the biggest user:
// -reserved slot for exception handler
// -reserved slots for JSR292. Method::extra_stack_entries() is the size.
// -3 reserved slots so get_method_counters() can save some registers before call_VM().
int max_stack = method->constMethod()->max_stack() + MAX2(3, Method::extra_stack_entries());
intptr_t* extended_sp = (intptr_t*) monbot -
(method->max_stack() + 1) * Interpreter::stackElementWords - // +1 is reserved slot for exception handler
(max_stack * Interpreter::stackElementWords) -
popframe_extra_args;
extended_sp = (intptr_t*)round_down((intptr_t)extended_sp, StackAlignmentInBytes);
interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);
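
A hedged worked example of the reservation comment above; the concrete return value of Method::extra_stack_entries() is an assumption (1, the JSR292 appendix slot, is typical). The key point is that the three users never overlap, so the code reserves the maximum, not the sum.

// Illustrative arithmetic only, not HotSpot code.
int extra_stack_entries = 1;                 // assumed JSR292 appendix slot count
int reserved = (3 > extra_stack_entries) ? 3 : extra_stack_entries;  // MAX2(3, ...)
int max_stack = 10 /* constMethod->max_stack(), say */ + reserved;   // == 13 slots
// The old code reserved max_stack() + 1 (exception handler only); the new
// code also leaves room for get_method_counters() to spill registers.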

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -1122,7 +1122,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg corresponding to specified size in bytes
const int Matcher::vector_ideal_reg(int size) {
const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize >= size, "");
switch(size) {
case 8: return Op_VecD;
@ -1132,7 +1132,7 @@ const int Matcher::vector_ideal_reg(int size) {
return 0;
}
const int Matcher::vector_shift_count_ideal_reg(int size) {
const uint Matcher::vector_shift_count_ideal_reg(int size) {
return vector_ideal_reg(size);
}

View File

@ -298,7 +298,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register cache = result;
// load pointer for resolved_references[] objArray
ldr(cache, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
ldr(cache, Address(result, ConstantPool::cache_offset_in_bytes()));
ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
// JNIHandles::resolve(result)
ldr(cache, Address(cache, 0));
// Add in the index
@ -308,6 +309,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
load_heap_oop(result, Address(cache, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
Register Rcpool, Register Rindex, Register Rklass) {
add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
ldrh(Rtemp, Address(Rtemp, sizeof(ConstantPool))); // Rtemp = resolved_klass_index
ldr(Rklass, Address(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes())); // Rklass = cpool->_resolved_klasses
add(Rklass, Rklass, AsmOperand(Rtemp, lsl, LogBytesPerWord));
ldr(Rklass, Address(Rklass, Array<Klass*>::base_offset_in_bytes()));
}
// Generate a subtype check: branch to not_subtype if sub_klass is
// not a subtype of super_klass.
// Profiling code for the subtype check failure (profile_typecheck_failed)
@ -2016,75 +2026,42 @@ void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
void InterpreterMacroAssembler::get_method_counters(Register method,
Register Rcounters,
Label& skip) {
Label& skip,
bool saveRegs,
Register reg1,
Register reg2,
Register reg3) {
const Address method_counters(method, Method::method_counters_offset());
Label has_counters;
ldr(Rcounters, method_counters);
cbnz(Rcounters, has_counters);
if (saveRegs) {
// Save and restore in-use caller-saved registers since they will be trashed by call_VM
assert(reg1 != noreg, "must specify reg1");
assert(reg2 != noreg, "must specify reg2");
#ifdef AARCH64
const Register tmp = Rcounters;
const int saved_regs_size = 20*wordSize;
// Note: call_VM will cut SP according to Rstack_top value before call, and restore SP to
// extended_sp value from frame after the call.
// So make sure there is enough stack space to save registers and adjust Rstack_top accordingly.
{
Label enough_stack_space;
check_extended_sp(tmp);
sub(Rstack_top, Rstack_top, saved_regs_size);
cmp(SP, Rstack_top);
b(enough_stack_space, ls);
align_reg(tmp, Rstack_top, StackAlignmentInBytes);
mov(SP, tmp);
str(tmp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
bind(enough_stack_space);
check_stack_top();
int offset = 0;
stp(R0, R1, Address(Rstack_top, offset)); offset += 2*wordSize;
stp(R2, R3, Address(Rstack_top, offset)); offset += 2*wordSize;
stp(R4, R5, Address(Rstack_top, offset)); offset += 2*wordSize;
stp(R6, R7, Address(Rstack_top, offset)); offset += 2*wordSize;
stp(R8, R9, Address(Rstack_top, offset)); offset += 2*wordSize;
stp(R10, R11, Address(Rstack_top, offset)); offset += 2*wordSize;
stp(R12, R13, Address(Rstack_top, offset)); offset += 2*wordSize;
stp(R14, R15, Address(Rstack_top, offset)); offset += 2*wordSize;
stp(R16, R17, Address(Rstack_top, offset)); offset += 2*wordSize;
stp(R18, LR, Address(Rstack_top, offset)); offset += 2*wordSize;
assert (offset == saved_regs_size, "should be");
}
assert(reg3 != noreg, "must specify reg3");
stp(reg1, reg2, Address(Rstack_top, -2*wordSize, pre_indexed));
stp(reg3, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
#else
push(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(R14));
#endif // AARCH64
assert(reg3 == noreg, "must not specify reg3");
push(RegisterSet(reg1) | RegisterSet(reg2));
#endif
}
mov(R1, method);
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::build_method_counters), R1);
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);
if (saveRegs) {
#ifdef AARCH64
{
int offset = 0;
ldp(R0, R1, Address(Rstack_top, offset)); offset += 2*wordSize;
ldp(R2, R3, Address(Rstack_top, offset)); offset += 2*wordSize;
ldp(R4, R5, Address(Rstack_top, offset)); offset += 2*wordSize;
ldp(R6, R7, Address(Rstack_top, offset)); offset += 2*wordSize;
ldp(R8, R9, Address(Rstack_top, offset)); offset += 2*wordSize;
ldp(R10, R11, Address(Rstack_top, offset)); offset += 2*wordSize;
ldp(R12, R13, Address(Rstack_top, offset)); offset += 2*wordSize;
ldp(R14, R15, Address(Rstack_top, offset)); offset += 2*wordSize;
ldp(R16, R17, Address(Rstack_top, offset)); offset += 2*wordSize;
ldp(R18, LR, Address(Rstack_top, offset)); offset += 2*wordSize;
assert (offset == saved_regs_size, "should be");
add(Rstack_top, Rstack_top, saved_regs_size);
}
ldp(reg3, ZR, Address(Rstack_top, 2*wordSize, post_indexed));
ldp(reg1, reg2, Address(Rstack_top, 2*wordSize, post_indexed));
#else
pop(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(R14));
#endif // AARCH64
pop(RegisterSet(reg1) | RegisterSet(reg2));
#endif
}
ldr(Rcounters, method_counters);
cbz(Rcounters, skip); // No MethodCounters created, OutOfMemory
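
To illustrate the AArch64 pairing introduced above, where pre-indexed stp pushes and post-indexed ldp pops in reverse order, here is a tiny self-contained model of the addressing modes (a sketch, not HotSpot code):

#include <cassert>
#include <cstdint>

struct Stack { intptr_t mem[8]; intptr_t* top = mem + 8; };
// stp a, b, [top, #-2*wordSize]!  (pre-indexed: decrement top, then store pair)
void stp_pre(Stack& s, intptr_t a, intptr_t b) { s.top -= 2; s.top[0] = a; s.top[1] = b; }
// ldp a, b, [top], #2*wordSize    (post-indexed: load pair, then increment top)
void ldp_post(Stack& s, intptr_t& a, intptr_t& b) { a = s.top[0]; b = s.top[1]; s.top += 2; }

int main() {
  Stack s; intptr_t reg1 = 1, reg2 = 2, reg3 = 3, zr = 0;
  stp_pre(s, reg1, reg2);   // save
  stp_pre(s, reg3, zr);
  ldp_post(s, reg3, zr);    // restore: the last pair saved comes off first
  ldp_post(s, reg1, reg2);
  assert(reg1 == 1 && reg2 == 2 && reg3 == 3);
  return 0;
}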

View File

@ -53,9 +53,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Template interpreter specific version of call_VM_helper
virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
virtual void check_and_handle_popframe();
virtual void check_and_handle_earlyret();
// base routine for all dispatches
typedef enum { DispatchDefault, DispatchNormal } DispatchTableMode;
void dispatch_base(TosState state, DispatchTableMode table_mode, bool verifyoop = true);
@ -63,6 +60,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
public:
InterpreterMacroAssembler(CodeBuffer* code);
virtual void check_and_handle_popframe();
virtual void check_and_handle_earlyret();
// Interpreter-specific registers
#if defined(AARCH64) && defined(ASSERT)
@ -141,6 +141,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Load object from cpool->resolved_references(*bcp+1)
void load_resolved_reference_at_index(Register result, Register tmp);
// load cpool->resolved_klass_at(index); Rtemp is corrupted upon return
void load_resolved_klass_at_offset(Register Rcpool, Register Rindex, Register Rklass);
void store_check_part1(Register card_table_base); // Sets card_table_base register.
void store_check_part2(Register obj, Register card_table_base, Register tmp);
@ -328,7 +331,13 @@ class InterpreterMacroAssembler: public MacroAssembler {
void trace_state(const char* msg) PRODUCT_RETURN;
void get_method_counters(Register method, Register Rcounters, Label& skip);
void get_method_counters(Register method,
Register Rcounters,
Label& skip,
bool saveRegs = false,
Register reg1 = noreg,
Register reg2 = noreg,
Register reg3 = noreg);
};
#endif // CPU_ARM_VM_INTERP_MASM_ARM_HPP

View File

@ -206,6 +206,9 @@ protected:
// may customize this version by overriding it for its purposes (e.g., to save/restore
// additional registers when doing a VM call).
virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
@ -213,10 +216,6 @@ protected:
virtual void check_and_handle_popframe() {}
virtual void check_and_handle_earlyret() {}
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// By default, we do not need relocation information for non
// patchable absolute addresses. However, when needed by some
// extensions, ignore_non_patchable_relocations can be modified,

View File

@ -1,99 +0,0 @@
/*
* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "assembler_arm.inline.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
differentiate between the 'vtbl_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
for (int i = 0; i < vtbl_list_size; ++i) {
Label common_code;
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*) __ pc();
__ mov(Rtemp, j); // Rtemp contains an index of a virtual method in the table
__ b(common_code);
}
InlinedAddress vtable_address((address)&vtbl_list[i]);
__ bind(common_code);
const Register tmp2 = AARCH64_ONLY(Rtemp2) NOT_AARCH64(R4);
assert_different_registers(Rtemp, tmp2);
#ifndef AARCH64
__ push(tmp2);
#endif // !AARCH64
// Do not use ldr_global since the code must be portable across all ARM architectures
__ ldr_literal(tmp2, vtable_address);
__ ldr(tmp2, Address(tmp2)); // get correct vtable address
__ ldr(Rtemp, Address::indexed_ptr(tmp2, Rtemp)); // get real method pointer
__ str(tmp2, Address(R0)); // update vtable. R0 = "this"
#ifndef AARCH64
__ pop(tmp2);
#endif // !AARCH64
__ jump(Rtemp);
__ bind_literal(vtable_address);
}
__ flush();
*mc_top = (char*) __ pc();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, Register temp1, Register temp2, SystemDictionary::WKID klass_id,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Klass* klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj);

View File

@ -270,12 +270,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry;
}
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
// Not used.
STOP("generate_continuation_for");
return NULL;
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();
@ -310,6 +304,9 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ convert_retval_to_tos(state);
#endif // !AARCH64
__ check_and_handle_popframe();
__ check_and_handle_earlyret();
__ dispatch_next(state, step);
return entry;
@ -1401,7 +1398,13 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
#ifdef AARCH64
// setup RmaxStack
__ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
__ add(RmaxStack, RmaxStack, MAX2(1, Method::extra_stack_entries())); // reserve slots for exception handler and JSR292 appendix argument
// We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
// none of which are in use at the same time, so we just need to make sure there is enough room
// for the biggest user:
// -reserved slot for exception handler
// -reserved slots for JSR292. Method::extra_stack_entries() is the size.
// -3 reserved slots so get_method_counters() can save some registers before call_VM().
__ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries()));
#endif // AARCH64
// see if we've got enough room on the stack for locals plus overhead.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2286,13 +2286,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
}
__ bind(no_mdo);
// Increment backedge counter in MethodCounters*
__ get_method_counters(Rmethod, Rcounters, dispatch);
// Note Rbumped_taken_count is a callee-saved register for ARM32, but caller-saved for ARM64
__ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
Rdisp, R3_bytecode,
AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
Rcnt, R4_tmp, eq, &backedge_counter_overflow);
} else {
// increment counter
__ get_method_counters(Rmethod, Rcounters, dispatch);
// Increment backedge counter in MethodCounters*
__ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
Rdisp, R3_bytecode,
AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
__ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter
__ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter
__ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter
@ -4367,10 +4372,9 @@ void TemplateTable::_new() {
#endif // AARCH64
// get InstanceKlass
__ add(Rklass, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
__ ldr(Rklass, Address(Rklass, sizeof(ConstantPool)));
__ cmp(Rtemp, JVM_CONSTANT_Class);
__ b(slow_case, ne);
__ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
@ -4642,8 +4646,7 @@ void TemplateTable::checkcast() {
// Get superklass in Rsuper and subklass in Rsub
__ bind(quicked);
__ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
__ ldr(Rsuper, Address(Rtemp, sizeof(ConstantPool)));
__ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
__ bind(resolved);
__ load_klass(Rsub, Robj);
@ -4716,8 +4719,7 @@ void TemplateTable::instanceof() {
// Get superklass in Rsuper and subklass in Rsub
__ bind(quicked);
__ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
__ ldr(Rsuper, Address(Rtemp, sizeof(ConstantPool)));
__ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
__ bind(resolved);
__ load_klass(Rsub, Robj);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3177,9 +3177,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
assert_different_registers(val, crc, res);
__ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
__ nand(crc, crc, crc); // ~crc
__ update_byte_crc32(crc, val, res);
__ nand(res, crc, crc); // ~crc
__ kernel_crc32_singleByteReg(crc, val, res, true);
__ mr(res, crc);
}
#undef __

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,18 +63,6 @@ void LIRItem::load_nonconstant() {
}
inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
LIR_Opr r = li.value()->operand();
if (r->is_register()) {
LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
} else {
// Constants or memory get loaded with sign extend on this platform.
ll->move(li.result(), dst);
}
}
//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------
@ -1419,10 +1407,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
arg2 = cc->at(1),
arg3 = cc->at(2);
// CCallingConventionRequiresIntsAsLongs
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
load_int_as_long(gen()->lir(), len, arg3);
len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
@ -1434,6 +1421,76 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}
}
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
assert(UseCRC32CIntrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);
switch (x->id()) {
case vmIntrinsics::_updateBytesCRC32C:
case vmIntrinsics::_updateDirectByteBufferCRC32C: {
bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
LIRItem crc(x->argument_at(0), this);
LIRItem buf(x->argument_at(1), this);
LIRItem off(x->argument_at(2), this);
LIRItem end(x->argument_at(3), this);
buf.load_item();
off.load_nonconstant();
end.load_nonconstant();
// len = end - off
LIR_Opr len = end.result();
LIR_Opr tmpA = new_register(T_INT);
LIR_Opr tmpB = new_register(T_INT);
__ move(end.result(), tmpA);
__ move(off.result(), tmpB);
__ sub(tmpA, tmpB, tmpA);
len = tmpA;
LIR_Opr index = off.result();
int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
if (off.result()->is_constant()) {
index = LIR_OprFact::illegalOpr;
offset += off.result()->as_jint();
}
LIR_Opr base_op = buf.result();
LIR_Address* a = NULL;
if (index->is_valid()) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index, tmp);
index = tmp;
__ add(index, LIR_OprFact::intptrConst(offset), index);
a = new LIR_Address(base_op, index, T_BYTE);
} else {
a = new LIR_Address(base_op, offset, T_BYTE);
}
BasicTypeList signature(3);
signature.append(T_INT);
signature.append(T_ADDRESS);
signature.append(T_INT);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
const LIR_Opr result_reg = result_register_for(x->type());
LIR_Opr arg1 = cc->at(0),
arg2 = cc->at(1),
arg3 = cc->at(2);
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
__ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
break;
}
default: {
ShouldNotReachHere();
}
}
}
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
assert(x->number_of_arguments() == 3, "wrong type");
assert(UseFMA, "Needs FMA instructions support.");
@ -1460,7 +1517,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
Unimplemented();
}
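
A hedged note on the len = end - off computation in the new do_update_CRC32C above: as far as I can tell, java.util.zip.CRC32C's updateBytes passes an exclusive end index where CRC32 passes a length, so the generator derives len before calling the leaf stub. A minimal model with illustrative signatures, not the JDK sources:

#include <cstdint>

static uint32_t crc32c_stub(uint32_t crc, const uint8_t* addr, int len) {
  (void)addr; (void)len; return crc;   // stand-in for the runtime leaf stub
}

uint32_t update_bytes_crc32c(uint32_t crc, const uint8_t* base, int off, int end) {
  int len = end - off;                 // what the new LIR sequence computes
  const uint8_t* addr = base + off;    // the array-header displacement is folded
                                       // in when off is a compile-time constant
  return crc32c_stub(crc, addr, len);
}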

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,8 +45,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg);
// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);
@ -79,6 +79,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Load object from cpool->resolved_references(index).
void load_resolved_reference_at_index(Register result, Register index, Label *is_null = NULL);
// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);
void load_receiver(Register Rparam_count, Register Rrecv_dst);
// helpers for expression stack

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -454,7 +454,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
Register tmp = index; // reuse
sldi(tmp, index, LogBytesPerHeapOop);
// Load pointer for resolved_references[] objArray.
ld(result, ConstantPool::resolved_references_offset_in_bytes(), result);
ld(result, ConstantPool::cache_offset_in_bytes(), result);
ld(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
ld(result, 0, result);
#ifdef ASSERT
@ -471,6 +472,25 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result, is_null);
}
// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass) {
// int value = *(Rcpool->int_at_addr(which));
// int resolved_klass_index = extract_low_short_from_int(value);
add(Roffset, Rcpool, Roffset);
#if defined(VM_LITTLE_ENDIAN)
lhz(Roffset, sizeof(ConstantPool), Roffset); // Roffset = resolved_klass_index
#else
lhz(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
#endif
ld(Rklass, ConstantPool::resolved_klasses_offset_in_bytes(), Rcpool); // Rklass = Rcpool->_resolved_klasses
sldi(Roffset, Roffset, LogBytesPerWord);
addi(Roffset, Roffset, Array<Klass*>::base_offset_in_bytes());
isync(); // Order load of instance Klass wrt. tags.
ldx(Rklass, Rklass, Roffset);
}
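
The #if defined(VM_LITTLE_ENDIAN) split above selects the byte offset of the low 16 bits of the pool slot (the extract_low_short_from_int of the comment). A self-contained illustration of why big-endian needs the +2 displacement (not HotSpot code):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Returns the low 16 bits of a 32-bit slot by byte address, the way the lhz
// displacement does: byte offset 0 on little-endian, 2 on big-endian.
uint16_t low_short_by_address(uint32_t slot, bool big_endian_host) {
  unsigned char bytes[4];
  std::memcpy(bytes, &slot, sizeof(slot));
  size_t off = big_endian_host ? 2 : 0;  // mirrors sizeof(ConstantPool) + 2 vs + 0
  uint16_t lo;
  std::memcpy(&lo, bytes + off, sizeof(lo));
  return lo;  // equals (uint16_t)slot when big_endian_host matches the real host
}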
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -4120,7 +4120,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
* @param table register pointing to CRC table
*/
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool loopAlignment, bool invertCRC) {
Register data, bool loopAlignment) {
assert_different_registers(crc, buf, len, table, data);
Label L_mainLoop, L_done;
@ -4131,10 +4131,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
clrldi_(len, len, 32); // Enforce 32 bit. Anything to do?
beq(CCR0, L_done);
if (invertCRC) {
nand(crc, crc, crc); // ~c
}
mtctr(len);
align(mainLoop_alignment);
BIND(L_mainLoop);
@ -4143,10 +4139,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
update_byte_crc32(crc, data, table);
bdnz(L_mainLoop); // Iterate.
if (invertCRC) {
nand(crc, crc, crc); // ~c
}
bind(L_done);
}
@ -4203,7 +4195,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
*/
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3) {
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@ -4217,14 +4210,16 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
const int complexThreshold = 2*mainLoop_stepping;
// Don't test for len <= 0 here. This pathological case should not occur anyway.
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
// for all well-behaved cases. The situation itself is detected and handled correctly
// within update_byteLoop_crc32.
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
BLOCK_COMMENT("kernel_crc32_2word {");
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
// Check for short (<mainLoop_stepping) buffer.
cmpdi(CCR0, len, complexThreshold);
@ -4245,7 +4240,7 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
}
update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
}
srdi(tmp2, len, log_stepping); // #iterations for mainLoop
@ -4281,9 +4276,11 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
// Process last few (<complexThreshold) bytes of buffer.
BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false, false);
update_byteLoop_crc32(crc, buf, len, table, data, false);
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_2word");
}
@ -4297,7 +4294,8 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
*/
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3) {
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@ -4311,14 +4309,16 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
const int complexThreshold = 2*mainLoop_stepping;
// Don't test for len <= 0 here. This pathological case should not occur anyway.
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).
// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
// for all well-behaved cases. The situation itself is detected and handled correctly
// within update_byteLoop_crc32.
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
BLOCK_COMMENT("kernel_crc32_1word {");
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
// Check for short (<mainLoop_stepping) buffer.
cmpdi(CCR0, len, complexThreshold);
@ -4339,7 +4339,7 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
}
update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
}
srdi(tmp2, len, log_stepping); // #iterations for mainLoop
@ -4374,9 +4374,11 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
// Process last few (<complexThreshold) bytes of buffer.
BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false, false);
update_byteLoop_crc32(crc, buf, len, table, data, false);
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_1word");
}
@ -4389,16 +4391,24 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
* Uses R7_ARG5, R8_ARG6 as work registers.
*/
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) {
Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Register data = t0; // Holds the current byte to be folded into crc.
BLOCK_COMMENT("kernel_crc32_1byte {");
// Process all bytes in a single-byte loop.
update_byteLoop_crc32(crc, buf, len, table, data, true, true);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
// Process all bytes in a single-byte loop.
update_byteLoop_crc32(crc, buf, len, table, data, true);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BLOCK_COMMENT("} kernel_crc32_1byte");
}
@ -4416,7 +4426,8 @@ void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len
*/
void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4) {
Register t0, Register t1, Register t2, Register t3, Register t4,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_alignedHead, L_tail, L_alignTail, L_start, L_end;
@ -4434,13 +4445,15 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
Register tc0 = t4;
Register tc1 = constants;
Register tc2 = barretConstants;
kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table);
kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
b(L_end);
BIND(L_start);
// 2. ~c
nand(crc, crc, crc);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
// 3. calculate from 0 to first 128bit-aligned address
clrldi_(prealign, buf, 57);
@ -4449,7 +4462,7 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
subfic(prealign, prealign, 128);
subf(len, prealign, len);
update_byteLoop_crc32(crc, buf, prealign, table, t2, false, false);
update_byteLoop_crc32(crc, buf, prealign, table, t2, false);
// 4. calculate from first 128bit-aligned address to last 128bit-aligned address
BIND(L_alignedHead);
@ -4464,12 +4477,14 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
cmpdi(CCR0, postalign, 0);
beq(CCR0, L_tail);
update_byteLoop_crc32(crc, buf, postalign, table, t2, false, false);
update_byteLoop_crc32(crc, buf, postalign, table, t2, false);
BIND(L_tail);
// 6. ~c
nand(crc, crc, crc);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BIND(L_end);
@ -4961,16 +4976,35 @@ void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Regi
offsetInt -= 8; ld(R31, offsetInt, R1_SP);
}
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp, bool invertCRC) {
assert_different_registers(crc, buf, /* len, not used!! */ table, tmp);
BLOCK_COMMENT("kernel_crc32_singleByte:");
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
lbz(tmp, 0, buf); // Byte from buffer, zero-extended.
lbz(tmp, 0, buf); // Byte from buffer, zero-extended.
update_byte_crc32(crc, tmp, table);
nand(crc, crc, crc); // ~c
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
}
void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table, bool invertCRC) {
assert_different_registers(crc, val, table);
BLOCK_COMMENT("kernel_crc32_singleByteReg:");
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
update_byte_crc32(crc, val, table);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
}
// dest_lo += src1 + src2

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -819,33 +819,47 @@ class MacroAssembler: public Assembler {
Register tmp6, Register tmp7, Register tmp8, Register tmp9, Register tmp10,
Register tmp11, Register tmp12, Register tmp13);
// CRC32 Intrinsics.
// Emitters for CRC32 calculation.
// A note on invertCRC:
// Unfortunately, the internal representation of crc differs between CRC32 and CRC32C.
// CRC32 holds its current crc value in the externally visible representation.
// CRC32C holds its current crc value in internal format, ready for updating.
// Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
// In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
// The bool invertCRC parameter indicates whether bit-flipping is required before updates.
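For reference, the convention can be modeled in a few lines of plain C++ (an illustrative sketch only, assuming the standard byte-at-a-time, table-driven CRC update; update_crc_model and its parameter names are invented here, not part of this patch):
  #include <cstddef>
  #include <cstdint>
  // Conditional bit-flip around a table-driven CRC update, as described above.
  static uint32_t update_crc_model(uint32_t crc, const uint8_t* buf, size_t len,
                                   const uint32_t table[256], bool invertCRC) {
    if (invertCRC) { crc = ~crc; }  // CRC32: external -> internal format.
    for (size_t i = 0; i < len; i++) {
      crc = (crc >> 8) ^ table[(crc ^ buf[i]) & 0xff];
    }
    if (invertCRC) { crc = ~crc; }  // CRC32: internal -> external format.
    return crc;
  }
A CRC32 caller passes invertCRC == true because its value lives in the external representation; a CRC32C caller passes invertCRC == false because its value stays in internal format between updates.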
void load_reverse_32(Register dst, Register src);
int crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3);
void fold_byte_crc32(Register crc, Register val, Register table, Register tmp);
void fold_8bit_crc32(Register crc, Register table, Register tmp);
void update_byte_crc32(Register crc, Register val, Register table);
void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool loopAlignment, bool invertCRC);
Register data, bool loopAlignment);
void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3);
void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3);
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC);
void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3);
Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC);
void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3);
Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
void kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4);
Register t0, Register t1, Register t2, Register t3, Register t4,
bool invertCRC);
void kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC);
void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
bool invertCRC);
//
// Debugging

View File

@ -1,78 +0,0 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->klass_part()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
// There are more general problems with CDS on ppc, so I cannot
// really test this. But having this instead of Unimplemented() allows
// us to pass TestOptionsWithRanges.java.
__ unimplemented();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -71,7 +71,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register temp_reg, Register temp2_reg,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Klass* klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj_reg);

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2012, 2016 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -2053,12 +2053,12 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg.
const int Matcher::vector_ideal_reg(int size) {
const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8 && size == 8, "");
return Op_RegL;
}
const int Matcher::vector_shift_count_ideal_reg(int size) {
const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported");
return Node::NotAMachineReg;
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3276,6 +3276,36 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Compute CRC32/CRC32C function.
void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process
const Register t0 = R2;
const Register t1 = R7;
const Register t2 = R8;
const Register t3 = R9;
const Register tc0 = R10;
const Register tc1 = R11;
const Register tc2 = R12;
BLOCK_COMMENT("Stub body {");
assert_different_registers(crc, data, dataLen, table);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr();
BLOCK_COMMENT("} Stub body");
}
/**
* Arguments:
*
@ -3296,14 +3326,14 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); // Remember stub start address (is rtn value).
const Register table = R6; // crc table address
#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process
const Register table = R6; // crc table address
#ifdef VM_LITTLE_ENDIAN
if (VM_Version::has_vpmsumb()) {
const Register constants = R2; // constants address
const Register bconstants = R8; // barret table address
@ -3321,7 +3351,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);
__ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4);
__ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
@ -3331,31 +3361,79 @@ class StubGenerator: public StubCodeGenerator {
} else
#endif
{
const Register t0 = R2;
const Register t1 = R7;
const Register t2 = R8;
const Register t3 = R9;
const Register tc0 = R10;
const Register tc1 = R11;
const Register tc2 = R12;
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, true);
}
return start;
}
/**
* Arguments:
*
* Inputs:
* R3_ARG1 - int crc
* R4_ARG2 - byte* buf
* R5_ARG3 - int length (of buffer)
*
* scratch:
* R2, R6-R12
*
* Output:
* R3_RET - int crc result
*/
// Compute CRC32C function.
address generate_CRC32C_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); // Remember stub start address (is rtn value).
const Register table = R6; // crc table address
#if 0 // no vector support yet for CRC32C
#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process
if (VM_Version::has_vpmsumb()) {
const Register constants = R2; // constants address
const Register bconstants = R8; // barret table address
const Register t0 = R9;
const Register t1 = R10;
const Register t2 = R11;
const Register t3 = R12;
const Register t4 = R7;
BLOCK_COMMENT("Stub body {");
assert_different_registers(crc, data, dataLen, table);
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table);
__ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr();
BLOCK_COMMENT("} Stub body");
} else
#endif
#endif
{
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, false);
}
return start;
}
// Initialization
void generate_initial() {
// Generates all stubs and initializes the entry points
@ -3383,6 +3461,12 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_crc_table_adr = (address)StubRoutines::ppc64::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
}
// CRC32C Intrinsics.
if (UseCRC32CIntrinsics) {
StubRoutines::_crc32c_table_addr = (address)StubRoutines::ppc64::_crc32c_table;
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
}
}
void generate_all() {

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,13 +55,16 @@ class ppc64 {
// CRC32 Intrinsics.
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint* _constants;
static juint* _barret_constants;
public:
// CRC32 Intrinsics.
static void generate_load_table_addr(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_constants_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_barret_constants_addr(MacroAssembler* masm, Register table);
static juint* generate_crc_constants();

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017 SAP SE. All rights reserved.
* Copyright (c) 2015, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -643,12 +643,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry;
}
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
__ unimplemented("generate_continuation_for");
return entry;
}
// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
@ -692,6 +686,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
#endif
__ sldi(size, size, Interpreter::logStackElementSize);
__ add(R15_esp, R15_esp, size);
__ check_and_handle_popframe(R11_scratch1);
__ check_and_handle_earlyret(R11_scratch1);
__ dispatch_next(state, step);
return entry;
}
@ -1894,7 +1892,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ lwz(crc, 2*wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
__ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);
__ kernel_crc32_singleByte(crc, data, dataLen, table, tmp, true);
// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@ -1910,7 +1908,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
return NULL;
}
// CRC32 Intrinsics.
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
@ -1986,7 +1984,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// Performance measurements show the 1word and 2word variants to be almost equivalent,
// with slight advantages for the 1word variant. We chose the 1word variant for
// code compactness.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, true);
// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@ -2002,8 +2000,88 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
return NULL;
}
// Not supported
/**
* Method entry for intrinsic-candidate (non-native) methods:
* int java.util.zip.CRC32C.updateBytes( int crc, byte[] b, int off, int end)
* int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
* Unlike CRC32, CRC32C does not have any methods marked as native.
* CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
**/
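Put concretely (a sketch under the same assumptions; crc32c_len is a hypothetical helper, not JDK code):
  // CRC32.updateBytes(crc, b, off, len)  processes b[off] .. b[off+len-1].
  // CRC32C.updateBytes(crc, b, off, end) processes b[off] .. b[end-1],
  // so this entry must derive the byte count itself:
  static inline int crc32c_len(int off, int end) { return end - off; }
This is the subtraction emitted below as __ sub(dataLen, dataLen, tmp).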
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32CIntrinsics) {
address start = __ pc(); // Remember stub start address (is rtn value).
// We don't generate a local frame and don't align the stack, because
// we don't even call stub code (we generate the code inline)
// and there is no safepoint on this path.
// Load parameters.
// R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
const Register argP = R15_esp;
const Register crc = R3_ARG1; // crc value
const Register data = R4_ARG2; // address of java byte array
const Register dataLen = R5_ARG3; // source data len
const Register table = R6_ARG4; // address of crc32c table
const Register t0 = R9; // scratch registers for crc calculation
const Register t1 = R10;
const Register t2 = R11;
const Register t3 = R12;
const Register tc0 = R2; // registers to hold pre-calculated column addresses
const Register tc1 = R7;
const Register tc2 = R8;
const Register tc3 = table; // table address is reconstructed at the end of kernel_crc32_* emitters
const Register tmp = t0; // Only used very locally to calculate byte buffer address.
// Arguments are reversed on java expression stack.
// Calculate address of start element.
if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateDirectByteBuffer".
BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
// crc @ (SP + 5W) (32bit)
// buf @ (SP + 3W) (64bit ptr to long array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off
__ ld( data, 3*wordSize, argP); // start of byte buffer
__ lwa( tmp, 2*wordSize, argP); // byte buffer offset
__ lwa( dataLen, 1*wordSize, argP); // #bytes to process
__ lwz( crc, 5*wordSize, argP); // current crc state
__ add( data, data, tmp); // Add byte buffer offset.
__ sub( dataLen, dataLen, tmp); // (end_index - offset)
} else { // Used for "updateBytes".
BLOCK_COMMENT("CRC32C_updateBytes {");
// crc @ (SP + 4W) (32bit)
// buf @ (SP + 3W) (64bit ptr to byte array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off + base_offset
__ ld( data, 3*wordSize, argP); // start of byte buffer
__ lwa( tmp, 2*wordSize, argP); // byte buffer offset
__ lwa( dataLen, 1*wordSize, argP); // #bytes to process
__ add( data, data, tmp); // add byte buffer offset
__ sub( dataLen, dataLen, tmp); // (end_index - offset)
__ lwz( crc, 4*wordSize, argP); // current crc state
__ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
// Performance measurements show the 1word and 2word variants to be almost equivalent,
// with slight advantages for the 1word variant. We chose the 1word variant for
// code compactness.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, false);
// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
__ blr();
BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
return start;
}
return NULL;
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 SAP SE. All rights reserved.
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3660,11 +3660,9 @@ void TemplateTable::_new() {
__ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
__ bne(CCR0, Lslow_case);
// Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
// Get instanceKlass
__ sldi(Roffset, Rindex, LogBytesPerWord);
__ addi(Rscratch, Rcpool, sizeof(ConstantPool));
__ isync(); // Order load of instance Klass wrt. tags.
__ ldx(RinstanceKlass, Roffset, Rscratch);
__ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);
// Make sure klass is fully initialized and get instance_size.
__ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
@ -3875,9 +3873,7 @@ void TemplateTable::checkcast() {
// Extract target class from constant pool.
__ bind(Lquicked);
__ sldi(Roffset, Roffset, LogBytesPerWord);
__ addi(Rcpool, Rcpool, sizeof(ConstantPool));
__ isync(); // Order load of specified Klass wrt. tags.
__ ldx(RspecifiedKlass, Rcpool, Roffset);
__ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);
// Do the checkcast.
__ bind(Lresolved);
@ -3939,9 +3935,7 @@ void TemplateTable::instanceof() {
// Extract target class from constant pool.
__ bind(Lquicked);
__ sldi(Roffset, Roffset, LogBytesPerWord);
__ addi(Rcpool, Rcpool, sizeof(ConstantPool));
__ isync(); // Order load of specified Klass wrt. tags.
__ ldx(RspecifiedKlass, Rcpool, Roffset);
__ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);
// Do the checkcast.
__ bind(Lresolved);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -172,18 +172,27 @@ void VM_Version::initialize() {
assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be non-negative");
// Implementation does not use any of the vector instructions
// available with Power8. Their exploitation is still pending.
// If defined(VM_LITTLE_ENDIAN) and running on Power8 or newer hardware,
// the implementation uses the vector instructions available with Power8.
// In all other cases, the implementation uses only generally available instructions.
if (!UseCRC32Intrinsics) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
}
}
if (UseCRC32CIntrinsics) {
if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
warning("CRC32C intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
// Implementation does not use any of the vector instructions available with Power8.
// Their exploitation is still pending (aka "work in progress").
if (!UseCRC32CIntrinsics) {
if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
}
}
// TODO: Provide implementation.
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
// The AES intrinsic stubs require AES instruction support.
@ -245,11 +254,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true;
}

View File

@ -28,8 +28,6 @@
#undef LUCY_DBG
#define NearLabel Label
// Immediate is an abstraction to represent the various immediate
// operands which exist on z/Architecture. Neither this class nor
// instances of it carry any state. It consists of methods only.

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3048,9 +3048,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
assert_different_registers(val, crc, res);
__ load_const_optimized(res, StubRoutines::crc_table_addr());
__ not_(crc, noreg, false); // ~crc
__ update_byte_crc32(crc, val, res);
__ not_(res, crc, false); // ~crc
__ kernel_crc32_singleByteReg(crc, val, res, true);
__ z_lgfr(res, crc);
}
#undef __

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,20 +61,6 @@ void LIRItem::load_nonconstant(int bits) {
}
}
inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
LIR_Opr r = li.value()->operand();
if (r->is_constant()) {
// Constants get loaded with sign extend on this platform.
ll->move(li.result(), dst);
} else {
if (!r->is_register()) {
li.load_item_force(dst);
}
LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
}
}
//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------
@ -1217,10 +1203,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
LIR_Opr arg2 = cc->at(1);
LIR_Opr arg3 = cc->at(2);
// CCallingConventionRequiresIntsAsLongs
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
load_int_as_long(gen()->lir(), len, arg3);
len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
@ -1233,7 +1218,70 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
Unimplemented();
assert(UseCRC32CIntrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);
switch (x->id()) {
case vmIntrinsics::_updateBytesCRC32C:
case vmIntrinsics::_updateDirectByteBufferCRC32C: {
bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
LIRItem crc(x->argument_at(0), this);
LIRItem buf(x->argument_at(1), this);
LIRItem off(x->argument_at(2), this);
LIRItem end(x->argument_at(3), this);
buf.load_item();
off.load_nonconstant();
end.load_nonconstant();
// len = end - off
LIR_Opr len = end.result();
LIR_Opr tmpA = new_register(T_INT);
LIR_Opr tmpB = new_register(T_INT);
__ move(end.result(), tmpA);
__ move(off.result(), tmpB);
__ sub(tmpA, tmpB, tmpA);
len = tmpA;
LIR_Opr index = off.result();
int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
if (off.result()->is_constant()) {
index = LIR_OprFact::illegalOpr;
offset += off.result()->as_jint();
}
LIR_Opr base_op = buf.result();
if (index->is_valid()) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index, tmp);
index = tmp;
}
LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE);
BasicTypeList signature(3);
signature.append(T_INT);
signature.append(T_ADDRESS);
signature.append(T_INT);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
const LIR_Opr result_reg = result_register_for (x->type());
LIR_Opr arg1 = cc->at(0);
LIR_Opr arg2 = cc->at(1);
LIR_Opr arg3 = cc->at(2);
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
__ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
break;
}
default: {
ShouldNotReachHere();
}
}
}
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
@ -1264,4 +1312,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -371,7 +371,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
Register tmp = index; // reuse
z_sllg(index, index, LogBytesPerHeapOop); // Offset into resolved references array.
// Load pointer for resolved_references[] objArray.
z_lg(result, ConstantPool::resolved_references_offset_in_bytes(), result);
z_lg(result, ConstantPool::cache_offset_in_bytes(), result);
z_lg(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
z_lg(result, 0, result); // Load resolved references array itself.
#ifdef ASSERT
@ -386,6 +387,16 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}
// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register cpool, Register offset, Register iklass) {
// int value = *(Rcpool->int_at_addr(which));
// int resolved_klass_index = extract_low_short_from_int(value);
z_llgh(offset, Address(cpool, offset, sizeof(ConstantPool) + 2)); // offset = resolved_klass_index (s390 is big-endian)
z_sllg(offset, offset, LogBytesPerWord); // Convert 'index' to 'offset'
z_lg(iklass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // iklass = cpool->_resolved_klasses
z_lg(iklass, Address(iklass, offset, Array<Klass*>::base_offset_in_bytes()));
}
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
int bcp_offset,

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,9 +48,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool allow_relocation,
bool check_exceptions);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);
@ -58,6 +55,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {}
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
void jump_to_entry(address entry, Register Rscratch);
virtual void load_earlyret_value(TosState state);
@ -115,6 +115,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void load_resolved_reference_at_index(Register result, Register index);
// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_offset(Register cpool, Register offset, Register iklass);
// Pop topmost element from stack. It just disappears. Useful if
// consumed previously by access via stackTop().

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1616,6 +1616,8 @@ void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& b
if (branch_target.is_bound()) {
address branch_addr = target(branch_target);
branch_optimized(cond, branch_addr);
} else if (branch_target.is_near()) {
z_brc(cond, branch_target); // Caller assures that the target will be in range for z_brc.
} else {
z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
}
@ -1674,7 +1676,8 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
bool has_sign) {
address branch_origin = pc();
bool x2_imm8 = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
bool is_RelAddr16 = (branch_target.is_bound() &&
bool is_RelAddr16 = branch_target.is_near() ||
(branch_target.is_bound() &&
RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
unsigned int casenum = (len64?2:0)+(has_sign?0:1);
@ -1744,13 +1747,21 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
Label& branch_target,
bool len64,
bool has_sign) {
unsigned int casenum = (len64?2:0)+(has_sign?0:1);
unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
if (branch_target.is_bound()) {
address branch_addr = target(branch_target);
compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
} else {
{
if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
switch (casenum) {
case 0: z_crj( r1, r2, cond, branch_target); break;
case 1: z_clrj( r1, r2, cond, branch_target); break;
case 2: z_cgrj( r1, r2, cond, branch_target); break;
case 3: z_clgrj(r1, r2, cond, branch_target); break;
default: ShouldNotReachHere(); break;
}
} else {
switch (casenum) {
case 0: z_cr( r1, r2); break;
case 1: z_clr(r1, r2); break;
@ -2741,11 +2752,11 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
BLOCK_COMMENT("lookup_interface_method {");
// Load start of itable entries into itable_entry_addr.
z_llgf(vtable_len, Address(recv_klass, InstanceKlass::vtable_length_offset()));
z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
// Loop over all itable entries until desired interfaceOop(Rinterface) found.
const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset());
const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
add2reg_with_index(itable_entry_addr,
vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
@ -5927,8 +5938,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
* @param len register containing number of bytes
* @param table register pointing to CRC table
*/
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool invertCRC) {
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
assert_different_registers(crc, buf, len, table, data);
Label L_mainLoop, L_done;
@ -5938,20 +5948,12 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
z_ltr(len, len);
z_brnh(L_done);
if (invertCRC) {
not_(crc, noreg, false); // ~c
}
bind(L_mainLoop);
z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
add2reg(buf, mainLoop_stepping); // Advance buffer position.
update_byte_crc32(crc, data, table);
z_brct(len, L_mainLoop); // Iterate.
if (invertCRC) {
not_(crc, noreg, false); // ~c
}
bind(L_done);
}
@ -5968,6 +5970,7 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
// c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
// crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
// #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
// Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
@ -5986,17 +5989,12 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true); // ((c >> 16) & 0xff) << 2
rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true); // ((c >> 24) & 0xff) << 2
// Load pre-calculated table values.
// Use columns 4..7 for big-endian.
z_ly(t3, Address(table, t3, (intptr_t)ix0));
// XOR indexed table values to calculate updated crc.
z_ly(t2, Address(table, t2, (intptr_t)ix1));
z_ly(t1, Address(table, t1, (intptr_t)ix2));
z_ly(t0, Address(table, t0, (intptr_t)ix3));
// Calculate new crc from table values.
z_xr(t2, t3);
z_xr(t0, t1);
z_xr(t0, t2); // Now crc contains the final checksum value.
z_xy(t2, Address(table, t3, (intptr_t)ix0));
z_xy(t0, Address(table, t1, (intptr_t)ix2));
z_xr(t0, t2); // Now t0 contains the updated CRC value.
lgr_if_needed(crc, t0);
}
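For comparison, a direct C++ rendering of the DOBIG4 step quoted in the comment above (a sketch only, assuming the zlib-style 8-column table where columns 4..7 hold the big-endian variant):
  #include <cstdint>
  // One 4-byte step of slicing-by-4, big-endian flavor.
  static uint32_t dobig4_model(uint32_t crc, uint32_t four_data_bytes,
                               const uint32_t crc_table[8][256]) {
    uint32_t c = crc ^ four_data_bytes;  // fold 4 input bytes into the crc
    return crc_table[4][ c        & 0xff] ^
           crc_table[5][(c >>  8) & 0xff] ^
           crc_table[6][(c >> 16) & 0xff] ^
           crc_table[7][ c >> 24        ];
  }
The emitter above computes the same four lookups, extracting the byte indices with rotate_then_insert and folding the table values with z_xy/z_xr.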
@ -6009,7 +6007,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
* uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!
*/
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) {
Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@ -6024,7 +6023,9 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).
not_(crc, noreg, false); // 1s complement of crc
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
#if 0
{
@ -6039,7 +6040,7 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc
z_sgfr(len, ctr); // Remaining len after alignment.
update_byteLoop_crc32(crc, buf, ctr, table, data, false);
update_byteLoop_crc32(crc, buf, ctr, table, data);
}
#endif
@ -6047,21 +6048,23 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
z_srag(ctr, len, log_stepping);
z_brnh(L_tail);
z_lrvr(crc, crc); // Revert byte order because we are dealing with big-endian data.
rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
BIND(L_mainLoop);
update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
z_brct(ctr, L_mainLoop); // Iterate.
z_lrvr(crc, crc); // Revert byte order back to original.
// Process last few (<8) bytes of buffer.
BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false);
update_byteLoop_crc32(crc, buf, len, table, data);
not_(crc, noreg, false); // 1s complement of crc
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}
/**
@ -6073,7 +6076,8 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
* uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!
*/
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) {
Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@ -6087,7 +6091,9 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
// The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping).
not_(crc, noreg, false); // 1s complement of crc
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
// Check for short (<4 bytes) buffer.
z_srag(ctr, len, log_stepping);
@ -6099,13 +6105,16 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
BIND(L_mainLoop);
update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
z_brct(ctr, L_mainLoop); // Iterate.
z_lrvr(crc, crc); // Revert byte order back to original.
// Process last few (<8) bytes of buffer.
BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false);
update_byteLoop_crc32(crc, buf, len, table, data);
not_(crc, noreg, false); // 1s complement of crc
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}
/**
@ -6115,22 +6124,51 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
* @param table register pointing to CRC table
*/
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) {
Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Register data = t0;
update_byteLoop_crc32(crc, buf, len, table, data, true);
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
update_byteLoop_crc32(crc, buf, len, table, data);
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC) {
assert_different_registers(crc, buf, len, table, tmp);
not_(crc, noreg, false); // ~c
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
z_llgc(tmp, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
update_byte_crc32(crc, tmp, table);
not_(crc, noreg, false); // ~c
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}
void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
bool invertCRC) {
assert_different_registers(crc, val, table);
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
update_byte_crc32(crc, val, table);
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}
//

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1011,22 +1011,35 @@ class MacroAssembler: public Assembler {
int before = 0, int after = 0) PRODUCT_RETURN;
// Emitters for CRC32 calculation.
// A note on invertCRC:
// Unfortunately, the internal representation of crc differs between CRC32 and CRC32C.
// CRC32 holds its current crc value in the externally visible representation.
// CRC32C holds its current crc value in internal format, ready for updating.
// Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
// In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
// The bool invertCRC parameter indicates whether bit-flipping is required before updates.
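Accordingly, the stub generators in this patch pin the flag per polynomial; schematically (register arguments as in the stub bodies below):
  // CRC32  stub body:  __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, true);
  // CRC32C stub body:  __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, false);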
private:
void fold_byte_crc32(Register crc, Register table, Register val, Register tmp);
void fold_8bit_crc32(Register crc, Register table, Register tmp);
void update_byte_crc32( Register crc, Register val, Register table);
void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool invertCRC);
Register data);
void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
Register t0, Register t1, Register t2, Register t3);
public:
void update_byte_crc32( Register crc, Register val, Register table);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
bool invertCRC);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC);
void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3);
Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3);
Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3);
Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
// Emitters for BigInteger.multiplyToLen intrinsic
// note: length of result array (zlen) is passed on the stack

View File

@ -1,76 +0,0 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->klass_part()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#undef __
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
__ unimplemented();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -73,7 +73,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Klass* klass = SystemDictionary::well_known_klass(klass_id);
assert(temp_reg != Z_R0 && // Is used as base register!
temp_reg != noreg && temp2_reg != noreg, "need valid registers!");

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2016 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -1562,7 +1562,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg.
const int Matcher::vector_ideal_reg(int size) {
const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8 && size == 8, "");
return Op_RegL;
}
@ -1577,7 +1577,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
return max_vector_size(bt); // Same as max.
}
const int Matcher::vector_shift_count_ideal_reg(int size) {
const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported");
return Node::NotAMachineReg;
}
@ -6768,6 +6768,7 @@ instruct sllI_reg_imm(iRegI dst, iRegI src, immI nbits) %{
format %{ "SLL $dst,$src,$nbits\t# use RISC-like SLLG also for int" %}
ins_encode %{
int Nbit = $nbits$$constant;
assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
__ z_sllg($dst$$Register, $src$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
%}
ins_pipe(pipe_class_dummy);
@ -6841,6 +6842,7 @@ instruct sraI_reg_imm(iRegI dst, immI src, flagsReg cr) %{
format %{ "SRA $dst,$src" %}
ins_encode %{
int Nbit = $src$$constant;
assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
__ z_sra($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
%}
ins_pipe(pipe_class_dummy);
@ -6893,6 +6895,7 @@ instruct srlI_reg_imm(iRegI dst, immI src) %{
format %{ "SRL $dst,$src" %}
ins_encode %{
int Nbit = $src$$constant;
assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
__ z_srl($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
%}
ins_pipe(pipe_class_dummy);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -623,26 +623,6 @@ class StubGenerator: public StubCodeGenerator {
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
#endif
//----------------------------------------------------------------------
// The following routine generates a subroutine to throw an asynchronous
// UnknownError when an unsafe access gets a fault that could not be
// reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
//
// Arguments:
// trapping PC: ??
//
// Results:
// Posts an asynchronous exception, skips the trapping instruction.
//
address generate_handler_for_unsafe_access() {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
{
address start = __ pc();
__ unimplemented("StubRoutines::handler_for_unsafe_access", 86);
return start;
}
}
// Support for uint StubRoutine::zarch::partial_subtype_check(Klass
// sub, Klass super);
//
@ -2330,26 +2310,25 @@ class StubGenerator: public StubCodeGenerator {
}
// Arguments:
// Z_ARG1 - int crc
// Z_ARG2 - byte* buf
// Z_ARG3 - int length (of buffer)
//
// Result:
// Z_RET - int crc result
//
// Compute CRC32 function.
address generate_CRC32_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
/**
* Arguments:
*
* Inputs:
* Z_ARG1 - int crc
* Z_ARG2 - byte* buf
* Z_ARG3 - int length (of buffer)
*
* Result:
* Z_RET - int crc result
**/
// Compute CRC function (generic, for all polynomials).
void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
// arguments to kernel_crc32:
Register crc = Z_ARG1; // Current checksum, preset by caller or result from previous call, int.
Register data = Z_ARG2; // source byte array
Register dataLen = Z_ARG3; // #bytes to process, int
Register table = Z_ARG4; // crc table address
// Register table = Z_ARG4; // crc table address. Preloaded and passed in by caller.
const Register t0 = Z_R10; // work reg for kernel* emitters
const Register t1 = Z_R11; // work reg for kernel* emitters
const Register t2 = Z_R12; // work reg for kernel* emitters
@ -2361,16 +2340,50 @@ class StubGenerator: public StubCodeGenerator {
// Crc used as int.
__ z_llgfr(dataLen, dataLen);
StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
__ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 4 registers.
__ z_stmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, invertCRC);
__ z_lmg(Z_R10, Z_R13, 1*8, Z_SP); // Restore regs 10..13 from stack.
__ resize_frame(+(6*8), Z_R0, true); // Remove the add'l spill space from the frame again.
__ z_llgfr(Z_RET, crc); // Updated crc is function result. No copying required, just zero upper 32 bits.
__ z_br(Z_R14); // Result already in Z_RET == Z_ARG1.
}
// Compute CRC32 function.
address generate_CRC32_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", name);
BLOCK_COMMENT("CRC32_updateBytes {");
Register table = Z_ARG4; // crc32 table address.
StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, true);
BLOCK_COMMENT("} CRC32_updateBytes");
return __ addr_at(start_off);
}
// Compute CRC32C function.
address generate_CRC32C_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", name);
BLOCK_COMMENT("CRC32C_updateBytes {");
Register table = Z_ARG4; // crc32c table address.
StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, false);
BLOCK_COMMENT("} CRC32C_updateBytes");
return __ addr_at(start_off);
}
@ -2441,9 +2454,13 @@ class StubGenerator: public StubCodeGenerator {
// Entry points that are platform specific.
if (UseCRC32Intrinsics) {
// We have no CRC32 table on z/Architecture.
StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
}
if (UseCRC32CIntrinsics) {
StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table;
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
}
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
@ -2461,8 +2478,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
StubRoutines::zarch::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();
// Support for verify_oop (must happen after universe_init).
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,12 +68,11 @@ class zarch {
};
private:
static address _handler_for_unsafe_access_entry;
static int _atomic_memory_operation_lock;
static address _partial_subtype_check;
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
static address _trot_table_addr;
@ -91,11 +90,11 @@ class zarch {
static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
static address handler_for_unsafe_access_entry() { return _handler_for_unsafe_access_entry; }
static address partial_subtype_check() { return _partial_subtype_check; }
static void generate_load_absolute_address(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
static void generate_load_trot_table_addr(MacroAssembler* masm, Register table);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -642,13 +642,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry;
}
// Unused, should never pass by.
address TemplateInterpreterGenerator::generate_continuation_for (TosState state) {
address entry = __ pc();
__ should_not_reach_here();
return entry;
}
address TemplateInterpreterGenerator::generate_return_entry_for (TosState state, int step, size_t index_size) {
address entry = __ pc();
@ -683,6 +676,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for (TosState state,
__ z_llgc(size, Address(cache, offset, flags_offset+(sizeof(size_t)-1)));
__ z_sllg(size, size, Interpreter::logStackElementSize); // Each argument size in bytes.
__ z_agr(Z_esp, size); // Pop arguments.
__ check_and_handle_popframe(Z_thread);
__ check_and_handle_earlyret(Z_thread);
__ dispatch_next(state, step);
BLOCK_COMMENT("} return_entry");
@ -1933,8 +1930,11 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}
// Method entry for static native methods:
// int java.util.zip.CRC32.update(int crc, int b)
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.update(int crc, int b)
*/
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
if (UseCRC32Intrinsics) {
@ -1964,7 +1964,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ z_llgf(crc, 2 * wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.
StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
__ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1);
__ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1, true);
// Restore caller sp for c2i case.
__ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.
@ -1983,9 +1983,11 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
}
// Method entry for static native methods:
// int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
// int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
* int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
*/
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32Intrinsics) {
@ -2020,10 +2022,10 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// data = buf + off
BLOCK_COMMENT("CRC32_updateByteBuffer {");
__ z_llgf(crc, 5*wordSize, argP); // current crc state
__ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process
} else { // Used for "updateBytes update".
} else { // Used for "updateBytes update".
// crc @ (SP + 4W) (32bit)
// buf @ (SP + 3W) (64bit ptr to byte array)
// off @ (SP + 2W) (32bit)
@ -2031,7 +2033,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// data = buf + off + base_offset
BLOCK_COMMENT("CRC32_updateBytes {");
__ z_llgf(crc, 4*wordSize, argP); // current crc state
__ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process
__ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
@ -2041,7 +2043,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
__ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_stmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, true);
__ z_lmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 back from stack.
// Restore caller sp for c2i case.
@ -2060,8 +2062,79 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
return NULL;
}
// Not supported
/**
* Method entry for intrinsic-candidate (non-native) methods:
* int java.util.zip.CRC32C.updateBytes( int crc, byte[] b, int off, int end)
* int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
* Unlike CRC32, CRC32C does not have any methods marked as native.
* CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
*/
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32CIntrinsics) {
uint64_t entry_off = __ offset();
// We don't generate a local frame and don't align the stack, because
// we call stub code and there is no safepoint on this path.
// Load parameters.
// Z_esp is the caller's operand stack pointer, i.e. it points to the parameters.
const Register argP = Z_esp;
const Register crc = Z_ARG1; // crc value
const Register data = Z_ARG2; // address of java byte array
const Register dataLen = Z_ARG3; // source data len
const Register table = Z_ARG4; // address of crc32 table
const Register t0 = Z_R10; // work reg for kernel* emitters
const Register t1 = Z_R11; // work reg for kernel* emitters
const Register t2 = Z_R12; // work reg for kernel* emitters
const Register t3 = Z_R13; // work reg for kernel* emitters
// Arguments are reversed on java expression stack.
// Calculate address of start element.
if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateByteBuffer direct".
// crc @ (SP + 5W) (32bit)
// buf @ (SP + 3W) (64bit ptr to long array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off
BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
__ z_llgf(crc, 5*wordSize, argP); // current crc state
__ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process, calculated as
__ z_sgf(dataLen, Address(argP, 2*wordSize)); // (end_index - offset)
} else { // Used for "updateBytes update".
// crc @ (SP + 4W) (32bit)
// buf @ (SP + 3W) (64bit ptr to byte array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off + base_offset
BLOCK_COMMENT("CRC32C_updateBytes {");
__ z_llgf(crc, 4*wordSize, argP); // current crc state
__ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process, calculated as
__ z_sgf(dataLen, Address(argP, 2*wordSize)); // (end_index - offset)
__ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}
StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);
__ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_stmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, false);
__ z_lmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 back from stack.
// Restore caller sp for c2i case.
__ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.
__ z_br(Z_R14);
BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
return __ addr_at(entry_off);
}
return NULL;
}
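The only wrinkle versus the CRC32 entries is visible in the length computation above: the CRC32C methods pass an end index, so the stub derives the byte count itself. In plain C++ terms (a sketch of the z_lgf/z_sgf pair, nothing more):

// #bytes = end_index - offset, computed after loading 'end' into dataLen.
static inline long crc32c_byte_count(int end_index, int offset) {
  return (long)end_index - (long)offset;
}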

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3466,7 +3466,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes()));
__ mem2reg_opt(method,
Address(Z_tmp_2, index,
InstanceKlass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
Klass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
__ profile_arguments_type(Z_ARG4, method, Z_ARG5, true);
__ jump_from_interpreted(method, Z_ARG4);
BLOCK_COMMENT("} invokevirtual_helper");
@ -3708,7 +3708,7 @@ void TemplateTable::_new() {
__ z_sllg(offset, offset, LogBytesPerWord); // Convert to to offset.
// Get InstanceKlass.
Register iklass = cpool;
__ z_lg(iklass, Address(cpool, offset, sizeof(ConstantPool)));
__ load_resolved_klass_at_offset(cpool, offset, iklass);
// Make sure klass is initialized & doesn't have finalizer.
// Make sure klass is fully initialized.
@ -3895,7 +3895,7 @@ void TemplateTable::checkcast() {
__ z_lgr(Z_ARG4, Z_tos); // Save receiver.
__ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
__ mem2reg_opt(klass, Address(cpool, index, sizeof(ConstantPool)));
__ load_resolved_klass_at_offset(cpool, index, klass);
__ bind(resolved);
@ -3969,8 +3969,7 @@ void TemplateTable::instanceof() {
__ load_klass(subklass, Z_tos);
__ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
__ mem2reg_opt(klass,
Address(cpool, index, sizeof(ConstantPool)));
__ load_resolved_klass_at_offset(cpool, index, klass);
__ bind(resolved);

View File

@ -111,13 +111,23 @@ void VM_Version::initialize() {
ContendedPaddingWidth = cache_line_size;
}
// On z/Architecture, the CRC32 intrinsics had to be implemented "by hand".
// They cannot be based on the CHECKSUM instruction which has been there
// since the very beginning (of z/Architecture). It computes "some kind of" a checksum
// which has nothing to do with the CRC32 algorithm.
// On z/Architecture, the CRC32/CRC32C intrinsics are implemented "by hand".
// TODO: Provide implementation based on the vector instructions available from z13.
// Note: The CHECKSUM instruction, which has been there since the very beginning
// (of z/Architecture), computes "some kind of" a checksum.
// It has nothing to do with the CRC32 algorithm.
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
}
if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
}
// TODO: Provide implementation.
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
// On z/Architecture, we take UseAES as the general switch to enable/disable the AES intrinsics.
// The specific, and yet to be defined, switches UseAESxxxIntrinsics will then be set
@ -195,11 +205,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
}

View File

@ -83,7 +83,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ load_klass(rcvr_klass, Z_ARG1);
// Set method (in case of interpreted method), and destination address.
int entry_offset = in_bytes(InstanceKlass::vtable_start_offset()) +
int entry_offset = in_bytes(Klass::vtable_start_offset()) +
vtable_index * vtableEntry::size_in_bytes();
#ifndef PRODUCT
@ -96,8 +96,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// worst case actual size
padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true);
assert(Immediate::is_uimm12(in_bytes(InstanceKlass::vtable_length_offset())), "disp too large");
__ z_cl(vtable_idx, in_bytes(InstanceKlass::vtable_length_offset()), rcvr_klass);
assert(Immediate::is_uimm12(in_bytes(Klass::vtable_length_offset())), "disp too large");
__ z_cl(vtable_idx, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
__ z_brl(L);
__ z_lghi(Z_ARG3, vtable_index); // Debug code, don't optimize.
__ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false);
@ -187,11 +187,11 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ load_klass(rcvr_klass, Z_ARG1);
// Load start of itable entries into itable_entry.
__ z_llgf(vtable_len, Address(rcvr_klass, InstanceKlass::vtable_length_offset()));
__ z_llgf(vtable_len, Address(rcvr_klass, Klass::vtable_length_offset()));
__ z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
// Loop over all itable entries until desired interfaceOop(Rinterface) found.
const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset());
const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
// Count unused bytes.
start_pc = __ pc();
__ add2reg_with_index(itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), rcvr_klass, vtable_len);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -270,9 +270,7 @@ void AbstractInterpreter::layout_activation(Method* method,
assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
}
#ifdef _LP64
assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
#endif
*interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
*interpreter_frame->register_addr(Llocals) = (intptr_t) locals;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -159,21 +159,12 @@
public:
#ifdef _LP64
static LIR_Opr as_long_opr(Register r) {
return as_long_single_opr(r);
}
static LIR_Opr as_pointer_opr(Register r) {
return as_long_single_opr(r);
}
#else
static LIR_Opr as_long_opr(Register r) {
return as_long_pair_opr(r);
}
static LIR_Opr as_pointer_opr(Register r) {
return as_opr(r);
}
#endif
static LIR_Opr as_float_opr(FloatRegister r) {
return LIR_OprFact::single_fpu(r->encoding());
}

View File

@ -556,11 +556,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
// guarantee that 32-bit loads always sign extended but that isn't
// true and since sign extension isn't free, it would impose a
// slight cost.
#ifdef _LP64
if (op->type() == T_INT) {
__ br(acond, false, Assembler::pn, *(op->label()));
} else
#endif
__ brx(acond, false, Assembler::pn, *(op->label()));
}
// The peephole pass fills the delay slot
@ -576,12 +574,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
Register rlo = dst->as_register_lo();
Register rhi = dst->as_register_hi();
Register rval = op->in_opr()->as_register();
#ifdef _LP64
__ sra(rval, 0, rlo);
#else
__ mov(rval, rlo);
__ sra(rval, BitsPerInt-1, rhi);
#endif
break;
}
case Bytecodes::_i2d:
@ -614,11 +607,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
Register rlo = op->in_opr()->as_register_lo();
Register rhi = op->in_opr()->as_register_hi();
Register rdst = dst->as_register();
#ifdef _LP64
__ sra(rlo, 0, rdst);
#else
__ mov(rlo, rdst);
#endif
break;
}
case Bytecodes::_d2f:
@ -711,7 +700,6 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
case T_INT : __ stw(from_reg->as_register(), base, offset); break;
case T_LONG :
#ifdef _LP64
if (unaligned || PatchALot) {
// Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
assert(G3_scratch != base, "can't handle this");
@ -722,11 +710,6 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
} else {
__ stx(from_reg->as_register_lo(), base, offset);
}
#else
assert(Assembler::is_simm13(offset + 4), "must be");
__ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
__ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
break;
case T_ADDRESS:
case T_METADATA:
@ -778,12 +761,7 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
case T_INT : __ stw(from_reg->as_register(), base, disp); break;
case T_LONG :
#ifdef _LP64
__ stx(from_reg->as_register_lo(), base, disp);
#else
assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
__ std(from_reg->as_register_hi(), base, disp);
#endif
break;
case T_ADDRESS:
__ st_ptr(from_reg->as_register(), base, disp);
@ -826,40 +804,22 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
case T_INT : __ ld(base, offset, to_reg->as_register()); break;
case T_LONG :
if (!unaligned && !PatchALot) {
#ifdef _LP64
__ ldx(base, offset, to_reg->as_register_lo());
#else
assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
"must be sequential");
__ ldd(base, offset, to_reg->as_register_hi());
#endif
} else {
#ifdef _LP64
assert(base != to_reg->as_register_lo(), "can't handle this");
assert(O7 != to_reg->as_register_lo(), "can't handle this");
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
__ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
__ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
__ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
if (base == to_reg->as_register_lo()) {
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
__ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
} else {
__ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
}
#endif
}
break;
case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
case T_ADDRESS:
#ifdef _LP64
if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
__ lduw(base, offset, to_reg->as_register());
__ decode_klass_not_null(to_reg->as_register());
} else
#endif
{
__ ld_ptr(base, offset, to_reg->as_register());
}
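The T_ADDRESS special case above loads a 32-bit compressed class pointer (lduw) and widens it via decode_klass_not_null. In effect (a sketch; the base and shift are chosen at VM startup, not fixed constants):

#include <cstdint>

// A narrow (32-bit) class pointer becomes a full address as base + (narrow << shift).
static inline uintptr_t decode_klass(uint32_t narrow_klass,
                                     uintptr_t compressed_class_base,
                                     unsigned shift) {
  return compressed_class_base + ((uintptr_t)narrow_klass << shift);
}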
@ -921,13 +881,7 @@ int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType
case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
case T_LONG :
#ifdef _LP64
__ ldx(base, disp, to_reg->as_register_lo());
#else
assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
"must be sequential");
__ ldd(base, disp, to_reg->as_register_hi());
#endif
break;
default : ShouldNotReachHere();
}
@ -1107,16 +1061,9 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
jlong con = c->as_jlong();
if (to_reg->is_double_cpu()) {
#ifdef _LP64
__ set(con, to_reg->as_register_lo());
#else
__ set(low(con), to_reg->as_register_lo());
__ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
} else if (to_reg->is_single_cpu()) {
__ set(con, to_reg->as_register());
#endif
} else {
ShouldNotReachHere();
assert(to_reg->is_double_fpu(), "wrong register kind");
@ -1190,12 +1137,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
__ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
} else {
assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
__ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
__ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
__ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
}
}
@ -1366,22 +1308,10 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
}
} else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
if (from_reg->is_double_cpu()) {
#ifdef _LP64
__ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
#else
assert(to_reg->is_double_cpu() &&
from_reg->as_register_hi() != to_reg->as_register_lo() &&
from_reg->as_register_lo() != to_reg->as_register_hi(),
"should both be long and not overlap");
// long to long moves
__ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
__ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
#endif
#ifdef _LP64
} else if (to_reg->is_double_cpu()) {
// int to int moves
__ mov(from_reg->as_register(), to_reg->as_register_lo());
#endif
} else {
// int to int moves
__ mov(from_reg->as_register(), to_reg->as_register());
@ -1461,20 +1391,6 @@ void LIR_Assembler::return_op(LIR_Opr result) {
__ reserved_stack_check();
}
// the poll may need a register so just pick one that isn't the return register
#if defined(TIERED) && !defined(_LP64)
if (result->type_field() == LIR_OprDesc::long_type) {
// Must move the result to G1
// Must leave proper result in O0,O1 and G1 (TIERED only)
__ sllx(I0, 32, G1); // Shift bits into high G1
__ srl (I1, 0, I1); // Zero extend O1 (harmless?)
__ or3 (I1, G1, G1); // OR 64 bits into G1
#ifdef ASSERT
// mangle it so any problems will show up
__ set(0xdeadbeef, I0);
__ set(0xdeadbeef, I1);
#endif
}
#endif // TIERED
__ set((intptr_t)os::get_polling_page(), L0);
__ relocate(relocInfo::poll_return_type);
__ ld_ptr(L0, 0, G0);
@ -1568,23 +1484,11 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
Register xhi = opr1->as_register_hi();
if (opr2->is_constant() && opr2->as_jlong() == 0) {
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
#ifdef _LP64
__ orcc(xhi, G0, G0);
#else
__ orcc(xhi, xlo, G0);
#endif
} else if (opr2->is_register()) {
Register ylo = opr2->as_register_lo();
Register yhi = opr2->as_register_hi();
#ifdef _LP64
__ cmp(xlo, ylo);
#else
__ subcc(xlo, ylo, xlo);
__ subccc(xhi, yhi, xhi);
if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
__ orcc(xhi, xlo, G0);
}
#endif
} else {
ShouldNotReachHere();
}
@ -1612,13 +1516,7 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
ShouldNotReachHere();
}
} else if (code == lir_cmp_l2i) {
#ifdef _LP64
__ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
#else
__ lcmp(left->as_register_hi(), left->as_register_lo(),
right->as_register_hi(), right->as_register_lo(),
dst->as_register());
#endif
} else {
ShouldNotReachHere();
}
@ -1656,11 +1554,9 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
ShouldNotReachHere();
}
Label skip;
#ifdef _LP64
if (type == T_INT) {
__ br(acond, false, Assembler::pt, skip);
} else
#endif
__ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
if (opr1->is_constant() && opr1->type() == T_INT) {
Register dest = result->as_register();
@ -1720,7 +1616,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
}
} else if (dest->is_double_cpu()) {
#ifdef _LP64
Register dst_lo = dest->as_register_lo();
Register op1_lo = left->as_pointer_register();
Register op2_lo = right->as_pointer_register();
@ -1736,28 +1631,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
default: ShouldNotReachHere();
}
#else
Register op1_lo = left->as_register_lo();
Register op1_hi = left->as_register_hi();
Register op2_lo = right->as_register_lo();
Register op2_hi = right->as_register_hi();
Register dst_lo = dest->as_register_lo();
Register dst_hi = dest->as_register_hi();
switch (code) {
case lir_add:
__ addcc(op1_lo, op2_lo, dst_lo);
__ addc (op1_hi, op2_hi, dst_hi);
break;
case lir_sub:
__ subcc(op1_lo, op2_lo, dst_lo);
__ subc (op1_hi, op2_hi, dst_hi);
break;
default: ShouldNotReachHere();
}
#endif
} else {
assert (right->is_single_cpu(), "Just Checking");
@ -1852,23 +1725,14 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
int simm13 = (int)c;
switch (code) {
case lir_logic_and:
#ifndef _LP64
__ and3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
__ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
break;
case lir_logic_or:
#ifndef _LP64
__ or3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
__ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
break;
case lir_logic_xor:
#ifndef _LP64
__ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
__ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
break;
@ -1886,7 +1750,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
default: ShouldNotReachHere();
}
} else {
#ifdef _LP64
Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
left->as_register_lo();
Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
@ -1898,26 +1761,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
default: ShouldNotReachHere();
}
#else
switch (code) {
case lir_logic_and:
__ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
__ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
break;
case lir_logic_or:
__ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
__ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
break;
case lir_logic_xor:
__ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
__ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
break;
default: ShouldNotReachHere();
}
#endif
}
}
}
@ -1975,12 +1818,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (basic_type == T_ARRAY) basic_type = T_OBJECT;
#ifdef _LP64
// higher 32 bits must be zero
__ sra(dst_pos, 0, dst_pos);
__ sra(src_pos, 0, src_pos);
__ sra(length, 0, length);
#endif
// set up the arraycopy stub information
ArrayCopyStub* stub = op->stub();
@ -2316,7 +2157,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
if (dest->is_single_cpu()) {
#ifdef _LP64
if (left->type() == T_OBJECT) {
switch (code) {
case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
@ -2325,7 +2165,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
default: ShouldNotReachHere();
}
} else
#endif
switch (code) {
case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
@ -2333,27 +2172,17 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
default: ShouldNotReachHere();
}
} else {
#ifdef _LP64
switch (code) {
case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
default: ShouldNotReachHere();
}
#else
switch (code) {
case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
default: ShouldNotReachHere();
}
#endif
}
}
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
if (left->type() == T_OBJECT) {
count = count & 63; // shouldn't shift by more than sizeof(intptr_t)
Register l = left->as_register();
@ -2366,7 +2195,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr de
}
return;
}
#endif
if (dest->is_single_cpu()) {
count = count & 0x1F; // Java spec
@ -2425,7 +2253,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
op->tmp4()->as_register() == O1 &&
op->klass()->as_register() == G5, "must be");
LP64_ONLY( __ signx(op->len()->as_register()); )
__ signx(op->len()->as_register());
if (UseSlowPath ||
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
@ -2748,7 +2576,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
Register new_value_hi = op->new_value()->as_register_hi();
Register t1 = op->tmp1()->as_register();
Register t2 = op->tmp2()->as_register();
#ifdef _LP64
__ mov(cmp_value_lo, t1);
__ mov(new_value_lo, t2);
// perform the compare and swap operation
@ -2756,23 +2583,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
// generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1.
__ cmp(t1, t2);
#else
// move high and low halves of long values into single registers
__ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
__ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
__ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
__ sllx(new_value_hi, 32, t2);
__ srl(new_value_lo, 0, new_value_lo);
__ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
// perform the compare and swap operation
__ casx(addr, t1, t2);
// generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1.
// Produce icc flag for 32bit.
__ sub(t1, t2, t2);
__ srlx(t2, 32, t1);
__ orcc(t2, t1, G0);
#endif
} else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
Register addr = op->addr()->as_pointer_register();
Register cmp_value = op->cmp_value()->as_register();
@ -2914,13 +2724,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
assert(data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register();
#ifdef _LP64
assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
Register tmp1 = op->tmp1()->as_register_lo();
#else
assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
Register tmp1 = op->tmp1()->as_register();
#endif
metadata2reg(md->constant_encoding(), mdo);
int mdo_offset_bias = 0;
if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
@ -3200,12 +3005,7 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
assert (left->is_double_cpu(), "Must be a long");
Register Rlow = left->as_register_lo();
Register Rhi = left->as_register_hi();
#ifdef _LP64
__ sub(G0, Rlow, dest->as_register_lo());
#else
__ subcc(G0, Rlow, dest->as_register_lo());
__ subc (G0, Rhi, dest->as_register_hi());
#endif
}
}
@ -3245,9 +3045,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
ShouldNotReachHere();
#endif
NEEDS_CLEANUP;
if (type == T_LONG) {
@ -3491,31 +3289,6 @@ void LIR_Assembler::peephole(LIR_List* lir) {
inst->insert_before(i + 1, delay_op);
i++;
}
#if defined(TIERED) && !defined(_LP64)
// fixup the return value from G1 to O0/O1 for long returns.
// It's done here instead of in LIRGenerator because there's
// such a mismatch between the single reg and double reg
// calling convention.
LIR_OpJavaCall* callop = op->as_OpJavaCall();
if (callop->result_opr() == FrameMap::out_long_opr) {
LIR_OpJavaCall* call;
LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
for (int a = 0; a < arguments->length(); a++) {
arguments[a] = callop->arguments()[a];
}
if (op->code() == lir_virtual_call) {
call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
callop->vtable_offset(), arguments, callop->info());
} else {
call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
callop->addr(), arguments, callop->info());
}
inst->at_put(i - 1, call);
inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
T_LONG, lir_patch_none, NULL));
}
#endif
break;
}
}
@ -3533,14 +3306,10 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
} else if (data->is_oop()) {
Register obj = data->as_register();
Register narrow = tmp->as_register();
#ifdef _LP64
assert(UseCompressedOops, "swap is 32bit only");
__ encode_heap_oop(obj, narrow);
__ swap(as_Address(addr), narrow);
__ decode_heap_oop(narrow, obj);
#else
__ swap(as_Address(addr), obj);
#endif
} else {
ShouldNotReachHere();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,11 +61,7 @@
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);
enum {
#ifdef _LP64
_call_stub_size = 68,
#else
_call_stub_size = 20,
#endif // _LP64
_call_aot_stub_size = 0,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
_deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,7 @@ LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::Oexcepti
LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::Oissuing_pc_opr; }
LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr() { return new_register(T_OBJECT); }
LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); }
LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(T_LONG); }
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
LIR_Opr opr;
@ -215,13 +215,11 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
}
}
} else {
#ifdef _LP64
if (index_opr->type() == T_INT) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index_opr, tmp);
index_opr = tmp;
}
#endif
base_opr = new_pointer_register();
assert (index_opr->is_register(), "Must be register");
@ -1310,20 +1308,12 @@ void LIRGenerator::trace_block_entry(BlockBegin* block) {
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
CodeEmitInfo* info) {
#ifdef _LP64
__ store(value, address, info);
#else
__ volatile_store_mem_reg(value, address, info);
#endif
}
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) {
#ifdef _LP64
__ load(address, result, info);
#else
__ volatile_load_mem_reg(address, result, info);
#endif
}
@ -1333,11 +1323,6 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
LIR_Opr index_op = offset;
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#ifndef _LP64
if (is_volatile && type == T_LONG) {
__ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
} else
#endif
{
if (type == T_BOOLEAN) {
type = T_BYTE;
@ -1367,11 +1352,6 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) {
#ifndef _LP64
if (is_volatile && type == T_LONG) {
__ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
} else
#endif
{
LIR_Address* addr = new LIR_Address(src, offset, type);
__ load(addr, dst);
@ -1396,17 +1376,13 @@ void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
// Because we want a 2-arg form of xchg
__ move(data, dst);
assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type");
assert (!x->is_add() && (type == T_INT || (is_obj && UseCompressedOops)), "unexpected type");
LIR_Address* addr;
if (offset->is_constant()) {
#ifdef _LP64
jlong l = offset->as_jlong();
assert((jlong)((jint)l) == l, "offset too large for constant");
jint c = (jint)l;
#else
jint c = offset->as_jint();
#endif
addr = new LIR_Address(src.result(), c, type);
} else {
addr = new LIR_Address(src.result(), offset, type);
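The constant-offset branch above first asserts that the jlong offset survives narrowing to jint before folding it into the address. The guard is the standard lossless round-trip check (sketch):

#include <cstdint>

static inline bool fits_in_jint(int64_t l) {
  return (int64_t)(int32_t)l == l; // narrowing then widening must reproduce l
}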

View File

@ -48,16 +48,9 @@ LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
void LIR_Address::verify() const {
assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used");
assert(disp() == 0 || index()->is_illegal(), "can't have both");
#ifdef _LP64
assert(base()->is_cpu_register(), "wrong base operand");
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
"wrong type for addresses");
#else
assert(base()->is_single_cpu(), "wrong base operand");
assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
"wrong type for addresses");
#endif
}
#endif // PRODUCT

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,11 +32,7 @@ inline bool LinearScan::is_processed_reg_num(int reg_num) {
inline int LinearScan::num_physical_regs(BasicType type) {
// Sparc requires two cpu registers for long
// and two cpu registers for double
#ifdef _LP64
if (type == T_DOUBLE) {
#else
if (type == T_DOUBLE || type == T_LONG) {
#endif
return 2;
}
return 1;
@ -44,11 +40,7 @@ inline int LinearScan::num_physical_regs(BasicType type) {
inline bool LinearScan::requires_adjacent_regs(BasicType type) {
#ifdef _LP64
return type == T_DOUBLE;
#else
return type == T_DOUBLE || type == T_LONG;
#endif
}
inline bool LinearScan::is_caller_save(int assigned_reg) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -273,13 +273,6 @@ void C1_MacroAssembler::initialize_object(
add(obj, hdr_size_in_bytes, t1); // compute address of first element
sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
initialize_body(t1, t2);
#ifndef _LP64
} else if (con_size_in_bytes < threshold * 2) {
// on v9 we can do double word stores to fill twice as much space.
assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
assert(con_size_in_bytes % 8 == 0, "double word aligned");
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += 2 * HeapWordSize) stx(G0, obj, i);
#endif
} else if (con_size_in_bytes <= threshold) {
// use explicit NULL stores
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += HeapWordSize) st_ptr(G0, obj, i);

View File

@ -930,11 +930,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Label not_already_dirty, restart, refill, young_card;
#ifdef _LP64
__ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
__ srl(addr, CardTableModRefBS::card_shift, addr);
#endif
AddressLiteral rs(byte_map_base);
__ set(rs, cardtable); // cardtable := <card table base>
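With the 32-bit srl path gone, the barrier always uses srlx to index the card table. What the shift-plus-base sequence computes is, in effect (a sketch of the convention, not VM code):

#include <cstdint>

// One card byte covers 2^card_shift bytes of heap.
static inline uint8_t* card_for(const void* heap_addr,
                                uint8_t* byte_map_base, unsigned card_shift) {
  return byte_map_base + ((uintptr_t)heap_addr >> card_shift);
}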

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,6 @@ define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
define_pd_global(bool, IdealizeClearArrayNode, true);
#ifdef _LP64
// We need to make sure that all generated code is within
// 2 gigs of the libjvm.so runtime routines so we can use
// the faster "call" instruction rather than the expensive
@ -82,17 +81,6 @@ define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
#else
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M);
define_pd_global(intx, ProfiledCodeHeapSize, 14*M);
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM, 4ULL*G);
#endif
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -114,14 +114,8 @@ static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
#ifdef _LP64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
#else
// Guarantee use of ldd/std via some asm code, because compiler won't.
// See solaris_sparc.il.
_Copy_conjoint_jlongs_atomic(from, to, count);
#endif
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
@ -162,7 +156,6 @@ static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count)
}
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
#ifdef _LP64
guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0,
"unaligned fill words");
julong* to = (julong*)tohw;
@ -170,12 +163,6 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
while (count-- > 0) {
*to++ = v;
}
#else // _LP64
juint* to = (juint*)tohw;
while (count-- > 0) {
*to++ = value;
}
#endif // _LP64
}
typedef void (*_zero_Fn)(HeapWord* to, size_t count);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -114,11 +114,7 @@ address RegisterMap::pd_location(VMReg regname) const {
// register locations. When that is fixed we will return NULL
// (or assert here).
reg = regname->prev()->as_Register();
#ifdef _LP64
second_word = sizeof(jint);
#else
return NULL;
#endif // _LP64
} else {
reg = regname->as_Register();
}
@ -332,9 +328,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// Construct an unpatchable, deficient frame
void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
#ifdef _LP64
assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
#endif
_sp = sp;
_younger_sp = NULL;
_pc = pc;
@ -693,11 +687,9 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
address l_addr = (address)l_scratch;
#ifdef _LP64
// On 64-bit the result for 1/8/16/32-bit result types is in the other
// word half
l_addr += wordSize/2;
#endif
switch (type) {
case T_OBJECT:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -100,11 +100,7 @@
// size of each block, in order of increasing address:
register_save_words = 16,
#ifdef _LP64
callee_aggregate_return_pointer_words = 0,
#else
callee_aggregate_return_pointer_words = 1,
#endif
callee_register_argument_save_area_words = 6,
// memory_parameter_words = <arbitrary>,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,24 +38,14 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
// The expected size in bytes of a cache line, used to pad data structures.
#if defined(TIERED)
#ifdef _LP64
// tiered, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128
#else
// tiered, 32-bit, medium machine
#define DEFAULT_CACHE_LINE_SIZE 64
#endif
// tiered, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128
#elif defined(COMPILER1)
// pure C1, 32-bit, small machine
#define DEFAULT_CACHE_LINE_SIZE 16
#elif defined(COMPILER2) || defined(SHARK)
#ifdef _LP64
// pure C2, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128
#else
// pure C2, 32-bit, medium machine
#define DEFAULT_CACHE_LINE_SIZE 64
#endif
// pure C2, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128
#endif
#if defined(SOLARIS)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,18 +56,11 @@ define_pd_global(intx, InlineSmallCode, 1500);
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0))
#ifdef _LP64
// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
#define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
#else
define_pd_global(intx, CompilerThreadStackSize, 512);
define_pd_global(intx, ThreadStackSize, 512);
define_pd_global(intx, VMThreadStackSize, 512);
#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
#endif // _LP64
#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,13 +32,9 @@
#include "oops/oop.inline.hpp"
int InlineCacheBuffer::ic_stub_code_size() {
#ifdef _LP64
return (NativeMovConstReg::instruction_size + // sethi;add
NativeJump::instruction_size + // sethi; jmp; delay slot
(1*BytesPerInstWord) + 1); // flush + 1 extra byte
#else
return (2+2+ 1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
#endif
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -318,52 +318,32 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* tab
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
assert_not_delayed();
#ifdef _LP64
ldf(FloatRegisterImpl::D, r1, offset, d);
#else
ldf(FloatRegisterImpl::S, r1, offset, d);
ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
assert_not_delayed();
#ifdef _LP64
stf(FloatRegisterImpl::D, d, r1, offset);
// store something more useful here
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
stf(FloatRegisterImpl::S, d, r1, offset);
stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
assert_not_delayed();
#ifdef _LP64
ldx(r1, offset, rd);
#else
ld(r1, offset, rd);
ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}
// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
assert_not_delayed();
#ifdef _LP64
stx(l, r1, offset);
// store something more useful here
stx(G0, r1, offset+Interpreter::stackElementSize);
#else
st(l, r1, offset);
st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}
void InterpreterMacroAssembler::pop_i(Register r) {
@ -527,9 +507,7 @@ void InterpreterMacroAssembler::empty_expression_stack() {
sub( Lesp, Gframe_size, Gframe_size );
and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
mov(Gframe_size, SP);
bind(done);
@ -541,28 +519,20 @@ void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
Label Bad, OK;
// Saved SP must be aligned.
#ifdef _LP64
btst(2*BytesPerWord-1, Rsp);
#else
btst(LongAlignmentMask, Rsp);
#endif
br(Assembler::notZero, false, Assembler::pn, Bad);
delayed()->nop();
// Saved SP, plus register window size, must not be above FP.
add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP
#endif
cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);
// Saved SP must not be ridiculously below current SP.
size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
set(maxstack, Rtemp);
sub(SP, Rtemp, Rtemp);
#ifdef _LP64
add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp
#endif
cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);
ba_short(OK);
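A note on the recurring STACK_BIAS arithmetic in these checks: on SPARC V9 the hardware %sp/%fp hold the true address minus a fixed bias of 2047, so comparisons against unbiased pointers must adjust first. As a sketch:

#include <cstdint>

static const uintptr_t kStackBias = 2047; // SPARC V9 ABI stack bias
static inline uintptr_t unbias_sp(uintptr_t biased_sp) { return biased_sp + kStackBias; }
static inline uintptr_t bias_sp(uintptr_t real_sp)     { return real_sp - kStackBias; }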
@ -584,9 +554,7 @@ void InterpreterMacroAssembler::verify_esp(Register Resp) {
delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
stop("too many pops: Lesp points into monitor area");
bind(OK1);
#ifdef _LP64
sub(Resp, STACK_BIAS, Resp);
#endif
cmp(Resp, SP);
brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
@ -696,21 +664,12 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
}
br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
delayed()->ldsw(Rtmp, 0, Rdst);
#else
delayed()->ld(Rtmp, 0, Rdst);
#endif
ldub(Lbcp, bcp_offset + 3, Rdst);
ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst);
ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
#else
// Unsigned load is faster than signed on some implementations
ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
#endif
or3(Rtmp, Rdst, Rdst );
bind(aligned);
@ -796,7 +755,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
sll(index, LogBytesPerHeapOop, tmp);
get_constant_pool(result);
// load pointer for resolved_references[] objArray
ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
ld_ptr(result, ConstantPool::cache_offset_in_bytes(), result);
ld_ptr(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
// JNIHandles::resolve(result)
ld_ptr(result, 0, result);
// Add in the index
@ -805,6 +765,24 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
}
// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool,
Register Roffset, Register Rklass) {
// int value = *this_cp->int_at_addr(which);
// int resolved_klass_index = extract_low_short_from_int(value);
//
// Because SPARC is big-endian, the low_short is at (cpool->int_at_addr(which) + 2 bytes)
add(Roffset, Rcpool, Roffset);
lduh(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
Register Rresolved_klasses = Rklass;
ld_ptr(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes(), Rresolved_klasses);
sll(Roffset, LogBytesPerWord, Roffset);
add(Roffset, Array<Klass*>::base_offset_in_bytes(), Roffset);
ld_ptr(Rresolved_klasses, Roffset, Rklass);
}
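Because the constant pool is read big-endian here, the low 16 bits of the 32-bit slot can be loaded directly as the half-word at byte offset +2; a C++ sketch of that equivalence (hypothetical helper name):

#include <cstdint>

uint16_t resolved_klass_index_be(const uint32_t* slot_addr) {
  // On a big-endian machine this equals (*slot_addr) & 0xffff,
  // i.e. extract_low_short_from_int(value):
  return *(const uint16_t*)((const char*)slot_addr + 2);  // what lduh(..., +2, ...) reads
}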
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
@ -910,10 +888,8 @@ void InterpreterMacroAssembler::index_check_without_pop(Register array, Register
assert_not_delayed();
verify_oop(array);
#ifdef _LP64
// sign extend since tos (index) can be a 32bit value
sra(index, G0, index);
#endif // _LP64
// check array
Label ptr_ok;
@ -1191,11 +1167,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
// return tos
assert(Otos_l1 == Otos_i, "adjust code below");
switch (state) {
#ifdef _LP64
case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
#else
case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I1
#endif
case btos: // fall through
case ztos: // fall through
case ctos:
@ -1207,20 +1179,6 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
#if defined(COMPILER2) && !defined(_LP64)
if (state == ltos) {
// C2 expects long results in G1. We can't tell if we're returning to interpreted
// or compiled code, so to be safe use G1 and O0/O1.
// Shift bits into high (msb) of G1
sllx(Otos_l1->after_save(), 32, G1);
// Zero extend low bits
srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
or3 (Otos_l2->after_save(), G1, G1);
}
#endif /* COMPILER2 */
}
// Lock object
@ -1270,9 +1228,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
// Check if owner is self by comparing the value in the markOop of object
// with the stack pointer
sub(temp_reg, SP, temp_reg);
#ifdef _LP64
sub(temp_reg, STACK_BIAS, temp_reg);
#endif
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
// Composite "andcc" test:
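The composite test referenced here reduces to an arithmetic range check; a C++ sketch (mask value as used in the nearby stack-lock code, STACK_BIAS assumed 0x7ff):

#include <cstdint>

bool owned_by_current_stack(uintptr_t displaced_mark, uintptr_t sp) {
  uintptr_t d = displaced_mark - sp - 0x7ff;  // the two subs above
  return (d & 0xfffff003) == 0;  // small, word-aligned distance: owner is this thread's stack
}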
@ -2711,11 +2667,7 @@ void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
if (is_native_call) {
stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
stx(O0, l_tmp);
#else
std(O0, l_tmp);
#endif
} else {
push(state);
}
@ -2724,11 +2676,7 @@ void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native
void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
if (is_native_call) {
ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
ldx(l_tmp, O0);
#else
ldd(l_tmp, O0);
#endif
} else {
pop(state);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,9 +70,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool check_exception=true
);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches
void dispatch_base(TosState state, address* table);
@ -80,6 +77,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {}
virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg);
void jump_to_entry(address entry);
virtual void load_earlyret_value(TosState state);
@ -196,6 +196,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);
// common code
void field_offset_at(int n, Register tmp, Register dest, Register base);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,47 +53,24 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
Argument jni_arg(jni_offset(), false);
Register Rtmp = O0;
#ifdef _LP64
__ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_long_argument(Rtmp, jni_arg);
#else
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_argument(Rtmp, jni_arg);
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 0), Rtmp);
Argument successor(jni_arg.successor());
__ store_argument(Rtmp, successor);
#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
Argument jni_arg(jni_offset(), false);
#ifdef _LP64
FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
__ store_float_argument(Rtmp, jni_arg);
#else
Register Rtmp = O0;
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
__ store_argument(Rtmp, jni_arg);
#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
Argument jni_arg(jni_offset(), false);
#ifdef _LP64
FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::D, Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_double_argument(Rtmp, jni_arg);
#else
Register Rtmp = O0;
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_argument(Rtmp, jni_arg);
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
Argument successor(jni_arg.successor());
__ store_argument(Rtmp, successor);
#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
@ -171,7 +148,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
add_signature( non_float );
}
#ifdef _LP64
virtual void pass_float() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
_from -= Interpreter::stackElementSize;
@ -190,23 +166,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
_from -= 2*Interpreter::stackElementSize;
add_signature( long_sig );
}
#else
// pass_double() is pass_long() and pass_float() only on _LP64
virtual void pass_long() {
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
_to += 2;
_from -= 2*Interpreter::stackElementSize;
add_signature( non_float );
}
virtual void pass_float() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
_from -= Interpreter::stackElementSize;
add_signature( non_float );
}
#endif // _LP64
virtual void add_signature( intptr_t sig_type ) {
if ( _argcount < (sizeof (intptr_t))*4 ) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -88,9 +88,7 @@ private:
// _last_Java_sp will always be an unbiased stack pointer;
// if it is biased then some setter screwed up. This is
// deadly.
#ifdef _LP64
assert(((intptr_t)_last_Java_sp & 0xF) == 0, "Biased last_Java_sp");
#endif
return _last_Java_sp;
}

View File

@ -152,39 +152,19 @@ address JNI_FastGetField::generate_fast_get_long_field() {
__ ld_ptr (O1, 0, O5);
__ add (O5, O4, O5);
#ifndef _LP64
assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small");
speculative_load_pclist[count++] = __ pc();
__ ld (O5, 0, G2);
speculative_load_pclist[count] = __ pc();
__ ld (O5, 4, O3);
#else
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
__ ldx (O5, 0, O3);
#endif
__ ld (cnt_addr, G1);
__ cmp (G1, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1);
#ifndef _LP64
__ mov (G2, O0);
__ retl ();
__ delayed()->mov (O3, O1);
#else
__ retl ();
__ delayed()->mov (O3, O0);
#endif
#ifndef _LP64
slowcase_entry_pclist[count-1] = __ pc();
slowcase_entry_pclist[count++] = __ pc() ;
#else
slowcase_entry_pclist[count++] = __ pc();
#endif
__ bind (label1);
__ mov (O7, G1);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,18 +55,10 @@ public:
static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; }
static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }
#ifdef _LP64
// Longs are stored in native format in one JavaCallArgument slot at *(to+1).
static inline void put_long(jlong from, intptr_t *to) { *(jlong *)(to + 1 + 0) = from; }
static inline void put_long(jlong from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = from; pos += 2; }
static inline void put_long(jlong *from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = *from; pos += 2; }
#else
// Longs are stored in reversed native word format in two JavaCallArgument slots at *to.
// The high half is in *(to+1) and the low half in *to.
static inline void put_long(jlong from, intptr_t *to) { put_int2r((jint *)&from, (jint *)to); }
static inline void put_long(jlong from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
static inline void put_long(jlong *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
#endif
// Oops are stored in native format in one JavaCallArgument slot at *to.
static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; }
@ -78,39 +70,21 @@ public:
static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; }
static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }
#ifdef _LP64
// Doubles are stored in native word format in one JavaCallArgument slot at *(to+1).
static inline void put_double(jdouble from, intptr_t *to) { *(jdouble *)(to + 1 + 0) = from; }
static inline void put_double(jdouble from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = from; pos += 2; }
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = *from; pos += 2; }
#else
// Doubles are stored in reversed native word format in two JavaCallArgument slots at *to.
static inline void put_double(jdouble from, intptr_t *to) { put_int2r((jint *)&from, (jint *)to); }
static inline void put_double(jdouble from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
#endif
// The get_xxx routines, on the other hand, actually _do_ fetch
// java primitive types from the interpreter stack.
static inline jint get_int(intptr_t *from) { return *(jint *)from; }
#ifdef _LP64
static inline jlong get_long(intptr_t *from) { return *(jlong *)from; }
#else
static inline jlong get_long(intptr_t *from) { return ((jlong)(*( signed int *)((jint *)from )) << 32) |
((jlong)(*(unsigned int *)((jint *)from + 1)) << 0); }
#endif
static inline oop get_obj(intptr_t *from) { return *(oop *)from; }
static inline jfloat get_float(intptr_t *from) { return *(jfloat *)from; }
#ifdef _LP64
static inline jdouble get_double(intptr_t *from) { return *(jdouble *)from; }
#else
static inline jdouble get_double(intptr_t *from) { jlong jl = ((jlong)(*( signed int *)((jint *)from )) << 32) |
((jlong)(*(unsigned int *)((jint *)from + 1)) << 0);
return *(jdouble *)&jl; }
#endif
};
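A compact model of the LP64 convention documented above (a sketch, not the VM's code): longs and doubles still consume two JavaCallArgument slots, but the value lives in the second slot, in native format.

#include <cstdint>

typedef intptr_t slot_t;

void put_long_model(int64_t from, slot_t* to, int& pos) {
  *(int64_t*)(to + 1 + pos) = from;  // value lands in slot pos+1
  pos += 2;                          // still advances by two slots
}

int64_t get_long_model(slot_t* from) {
  return *(int64_t*)from;  // fetched back in native format
}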

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,10 +39,6 @@
typedef int jint;
#ifdef _LP64
typedef long jlong;
#else
typedef long long jlong;
#endif
typedef long jlong;
typedef signed char jbyte;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,16 +44,12 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
Handle obj = HotSpotObjectConstantImpl::object(constant);
Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj());
if (HotSpotObjectConstantImpl::compressed(constant)) {
#ifdef _LP64
int oop_index = _oop_recorder->find_index(value);
RelocationHolder rspec = oop_Relocation::spec(oop_index);
_instructions->relocate(pc, rspec, 1);
#else
JVMCI_ERROR("compressed oop on 32bit");
#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
move->set_data((intptr_t) value);
@ -69,14 +65,10 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
NativeMovConstReg32* move = nativeMovConstReg32_at(pc);
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
move->set_data((intptr_t)narrowOop);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
#else
JVMCI_ERROR("compressed Klass* on 32bit");
#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -296,11 +296,6 @@ void MacroAssembler::verify_thread() {
mov(G3, L3); // avoid clobbering G3
mov(G4, L4); // avoid clobbering G4
mov(G5_method, L5); // avoid clobbering G5_method
#if defined(COMPILER2) && !defined(_LP64)
// Save & restore possible 64-bit Long arguments in G-regs
srlx(G1,32,L0);
srlx(G4,32,L6);
#endif
call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
delayed()->mov(G2_thread, O0);
@ -309,15 +304,6 @@ void MacroAssembler::verify_thread() {
mov(L3, G3); // restore G3
mov(L4, G4); // restore G4
mov(L5, G5_method); // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
// Save & restore possible 64-bit Long arguments in G-regs
sllx(L0,32,G2); // Move old high G1 bits high in G2
srl(G1, 0,G1); // Clear current high G1 bits
or3 (G1,G2,G1); // Recover 64-bit G1
sllx(L6,32,G2); // Move old high G4 bits high in G2
srl(G4, 0,G4); // Clear current high G4 bits
or3 (G4,G2,G4); // Recover 64-bit G4
#endif
restore(O0, 0, G2_thread);
}
}
@ -387,7 +373,6 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
st_ptr(last_Java_pc, pc_addr);
}
#ifdef _LP64
#ifdef ASSERT
// Make sure that we have an odd stack
Label StackOk;
@ -400,9 +385,6 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
add( last_java_sp, STACK_BIAS, G4_scratch );
st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}
void MacroAssembler::reset_last_Java_frame(void) {
@ -658,11 +640,7 @@ void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index)
void MacroAssembler::card_table_write(jbyte* byte_map_base,
Register tmp, Register obj) {
#ifdef _LP64
srlx(obj, CardTableModRefBS::card_shift, obj);
#else
srl(obj, CardTableModRefBS::card_shift, obj);
#endif
assert(tmp != obj, "need separate temp reg");
set((address) byte_map_base, tmp);
stb(G0, tmp, obj);
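In C++ the card mark amounts to one shift and one byte store (a sketch; card_shift is assumed to be 9, i.e. 512-byte cards, and the stored G0 is the dirty value 0):

#include <cstdint>

void card_table_write_model(uint8_t* byte_map_base, uintptr_t obj) {
  byte_map_base[obj >> 9] = 0;  // srlx(obj, card_shift, obj); stb(G0, tmp, obj)
}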
@ -672,7 +650,6 @@ void MacroAssembler::card_table_write(jbyte* byte_map_base,
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
address save_pc;
int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY
assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
@ -719,9 +696,6 @@ void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, b
while (pc() < (save_pc + (7 * BytesPerInstWord)))
nop();
}
#else
Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}
@ -736,7 +710,6 @@ void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d)
int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
if (worst_case) return 7;
intptr_t iaddr = (intptr_t) a;
int msb32 = (int) (iaddr >> 32);
@ -756,9 +729,6 @@ int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
}
}
return count;
#else
return 1;
#endif
}
int MacroAssembler::worst_case_insts_for_set() {
@ -1488,11 +1458,7 @@ void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresul
void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
#ifdef _LP64
add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
#else
add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
#endif
bclr(1, Rresult);
sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}
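calc_frame_size in C++ (a sketch with assumed names): clear the low bit of the word count so the subsequent shift yields a 16-byte multiple, then scale words to bytes.

int calc_frame_size_model(int extra_words, int sp_offset_words) {
  int words = (extra_words + sp_offset_words) & ~1;  // bclr(1, Rresult)
  return words << 3;  // sll by LogBytesPerWord (3 on LP64): total frame bytes
}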
@ -1531,22 +1497,12 @@ void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a,
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
assert_not_delayed();
#ifdef _LP64
bpr( rc_z, a, p, s1, L );
#else
tst(s1);
br ( zero, a, p, L );
#endif
}
void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
assert_not_delayed();
#ifdef _LP64
bpr( rc_nz, a, p, s1, L );
#else
tst(s1);
br ( notZero, a, p, L );
#endif
}
// Compare registers and branch with nop in delay slot or cbcond without delay slot.
@ -1862,14 +1818,12 @@ void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
bind( done );
}
#ifdef _LP64
void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
cmp(Ra, Rb);
mov(-1, Rresult);
movcc(equal, false, xcc, 0, Rresult);
movcc(greater, false, xcc, 1, Rresult);
}
#endif
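lcmp computes a branch-free three-way compare via conditional moves; in C++ (a sketch):

#include <cstdint>

int lcmp_model(int64_t a, int64_t b) {
  int r = -1;         // mov(-1, Rresult): assume "less"
  if (a == b) r = 0;  // movcc(equal,   false, xcc, 0, Rresult)
  if (a > b)  r = 1;  // movcc(greater, false, xcc, 1, Rresult)
  return r;
}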
void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
@ -2668,9 +2622,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// if compare/exchange succeeded we found an unlocked object and we now have locked it
// hence we are done
cmp(Rmark, Rscratch);
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
#endif
brx(Assembler::equal, false, Assembler::pt, done);
delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
@ -2716,9 +2668,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// Stack-lock attempt failed - check for recursive stack-lock.
// See the comments below about how we might remove this case.
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
#endif
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
andcc(Rscratch, 0xfffff003, Rscratch);
br(Assembler::always, false, Assembler::pt, done);
@ -2800,9 +2750,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// control to the "slow" operators in synchronizer.cpp.
// RScratch contains the fetched obj->mark value from the failed CAS.
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
#endif
sub(Rscratch, SP, Rscratch);
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
andcc(Rscratch, 0xfffff003, Rscratch);
@ -3720,11 +3668,7 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
Label not_already_dirty, restart, refill, young_card;
#ifdef _LP64
__ srlx(O0, CardTableModRefBS::card_shift, O0);
#else
__ srl(O0, CardTableModRefBS::card_shift, O0);
#endif
AddressLiteral addrlit(byte_map_base);
__ set(addrlit, O1); // O1 := <card table base>
__ ldub(O0, O1, O2); // O2 := [O0 + O1]
@ -3826,11 +3770,7 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
if (G1RSBarrierRegionFilter) {
xor3(store_addr, new_val, tmp);
#ifdef _LP64
srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#else
srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#endif
// XXX Should I predict this taken or not? Does it matter?
cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
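The region filter skips the post-barrier when the field address and the new value fall in the same heap region; the xor/shift detects this without comparing region boundaries (a sketch):

#include <cstdint>

bool same_heap_region(uintptr_t store_addr, uintptr_t new_val, int log_region_bytes) {
  return ((store_addr ^ new_val) >> log_region_bytes) == 0;  // xor3 + srlx, compared to G0
}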

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -333,14 +333,12 @@ class AddressLiteral VALUE_OBJ_CLASS_SPEC {
return external_word_Relocation::spec(addr);
case relocInfo::internal_word_type:
return internal_word_Relocation::spec(addr);
#ifdef _LP64
case relocInfo::opt_virtual_call_type:
return opt_virtual_call_Relocation::spec();
case relocInfo::static_call_type:
return static_call_Relocation::spec();
case relocInfo::runtime_call_type:
return runtime_call_Relocation::spec();
#endif
case relocInfo::none:
return RelocationHolder();
default:
@ -396,12 +394,10 @@ class AddressLiteral VALUE_OBJ_CLASS_SPEC {
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
#ifdef _LP64
// 32-bit complains about a multiple declaration for int*.
AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
#endif
AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
@ -464,16 +460,10 @@ class Argument VALUE_OBJ_CLASS_SPEC {
bool _is_in;
public:
#ifdef _LP64
enum {
n_register_parameters = 6, // only 6 registers may contain integer parameters
n_float_register_parameters = 16 // Can have up to 16 floating registers
};
#else
enum {
n_register_parameters = 6 // only 6 registers may contain integer parameters
};
#endif
// creation
Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
@ -489,7 +479,6 @@ class Argument VALUE_OBJ_CLASS_SPEC {
// locating register-based arguments:
bool is_register() const { return _number < n_register_parameters; }
#ifdef _LP64
// locating Floating Point register-based arguments:
bool is_float_register() const { return _number < n_float_register_parameters; }
@ -501,7 +490,6 @@ class Argument VALUE_OBJ_CLASS_SPEC {
assert(is_float_register(), "must be a register argument");
return as_FloatRegister(( number() *2 ));
}
#endif
Register as_register() const {
assert(is_register(), "must be a register argument");
@ -604,15 +592,15 @@ class MacroAssembler : public Assembler {
bool check_exception=true // flag which indicates if exception should be checked
);
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg);
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// Support for NULL-checks
//
// Generates code that causes a NULL OS exception if the content of reg is NULL.
@ -1217,9 +1205,7 @@ public:
void lushr( Register Rin_high, Register Rin_low, Register Rcount,
Register Rout_high, Register Rout_low, Register Rtemp );
#ifdef _LP64
void lcmp( Register Ra, Register Rb, Register Rresult);
#endif
// Load and store values by size and signed-ness
void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,19 +45,11 @@ inline void MacroAssembler::pd_patch_instruction(address branch, address target)
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
#else
ld( s1, simm13a, d);
#endif
}
#ifdef ASSERT
@ -68,35 +60,19 @@ inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d )
#endif
inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
ldx(s1, s2, d);
#else
ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
ldx(a, d, offset);
#else
ld( a, d, offset);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a);
#else
st( d, s1, simm13a);
#endif
}
#ifdef ASSERT
@ -107,84 +83,44 @@ inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a )
#endif
inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
stx(d, s1, s2);
#else
st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
stx(d, a, offset);
#else
st( d, a, offset);
#endif
}
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ldd(s1, s2, d);
#endif
}
inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
#else
Assembler::ldd(s1, simm13a, d);
#endif
}
inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
ldx(s1, s2, d);
#else
ldd(s1, s2, d);
#endif
}
inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
ldx(a, d, offset);
#else
ldd(a, d, offset);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::std(d, s1, s2);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a);
#else
Assembler::std(d, s1, simm13a);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
stx(d, s1, s2);
#else
std(d, s1, s2);
#endif
}
inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
stx(d, a, offset);
#else
std(d, a, offset);
#endif
}
inline void MacroAssembler::stbool(Register d, const Address& a) { stb(d, a); }
@ -207,45 +143,25 @@ inline void MacroAssembler::casx( Register s1, Register s2, Register d) { casxa(
// Functions for isolating 64 bit atomic swaps for LP64
// cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
inline void MacroAssembler::cas_ptr( Register s1, Register s2, Register d) {
#ifdef _LP64
casx( s1, s2, d );
#else
cas( s1, s2, d );
#endif
}
// Functions for isolating 64 bit shifts for LP64
inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, s2, d);
#else
Assembler::sll( s1, s2, d);
#endif
}
inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, imm6a, d);
#else
Assembler::sll( s1, imm6a, d);
#endif
}
inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, s2, d);
#else
Assembler::srl( s1, s2, d);
#endif
}
inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, imm6a, d);
#else
Assembler::srl( s1, imm6a, d);
#endif
}
inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
@ -277,11 +193,7 @@ inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
Assembler::bp(c, a, xcc, p, d, rt);
#else
MacroAssembler::br(c, a, p, d, rt);
#endif
}
inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
@ -338,7 +250,6 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
}
inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
#ifdef _LP64
intptr_t disp;
// NULL is ok because it will be relocated later.
// Must change NULL to a reachable address in order to
@ -355,9 +266,6 @@ inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
} else {
Assembler::call(d, rspec);
}
#else
Assembler::call( d, rspec );
#endif
}
inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
@ -414,12 +322,7 @@ inline void MacroAssembler::cmp( Register s1, int simm13a ) { subcc( s1, simm13
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
Unimplemented();
#else
Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
return thepc;
}
@ -554,7 +457,6 @@ inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
}
#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register())
// V9 ABI: F1, F3, F5 are used to pass floats instead of O0, O1, O2
@ -579,7 +481,6 @@ inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
else
stx(s, a.as_address());
}
#endif
inline void MacroAssembler::round_to( Register r, int modulus ) {
assert_not_delayed();
@ -640,22 +541,13 @@ inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm1
inline void MacroAssembler::clruw( Register s, Register d ) { srl( s, G0, d); }
inline void MacroAssembler::clruwu( Register d ) { srl( d, G0, d); }
#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif
inline void MacroAssembler::ld( const Address& a, Register d, int offset) {

View File

@ -1,120 +0,0 @@
/*
* Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtbl_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
Label common_code;
for (int i = 0; i < vtbl_list_size; ++i) {
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
__ save(SP, -256, SP);
int offset = (i << 8) + j;
Register src = G0;
if (!Assembler::is_simm13(offset)) {
__ sethi(offset, L0);
src = L0;
offset = offset & ((1 << 10) - 1);
}
__ brx(Assembler::always, false, Assembler::pt, common_code);
// Load L0 with a value indicating vtable/offset pair.
// -- bits[ 7..0] (8 bits) which virtual method in table?
// -- bits[13..8] (6 bits) which virtual method table?
__ delayed()->or3(src, offset, L0);
}
}
__ bind(common_code);
// Expecting to be called with the "this" pointer in O0/I0 (where
// "this" is a Klass object). In addition, L0 was set (above) to
// identify the method and table.
// Look up the correct vtable pointer.
__ set((intptr_t)vtbl_list, L2); // L2 = address of new vtable list.
__ srl(L0, 8, L3); // Isolate L3 = vtable identifier.
__ sll(L3, LogBytesPerWord, L3);
__ ld_ptr(L2, L3, L3); // L3 = new (correct) vtable pointer.
__ st_ptr(L3, Address(I0, 0)); // Save correct vtable ptr in entry.
// Restore registers and jump to the correct method.
__ and3(L0, 255, L4); // Isolate L4 = method offset.
__ sll(L4, LogBytesPerWord, L4);
__ ld_ptr(L3, L4, L4); // Get address of correct virtual method
__ jmpl(L4, 0, G0); // Jump to correct method.
__ delayed()->restore(); // Restore registers.
__ flush();
*mc_top = (char*)__ pc();
guarantee(*mc_top <= mc_end, "Insufficient space for method wrappers.");
}
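Decoding the vtable/offset pair the dummies build in L0, per the bit layout in the comments above (a sketch):

int which_method(int l0) { return l0 & 255; }        // bits [7..0]: method within the table
int which_table(int l0)  { return (l0 >> 8) & 63; }  // bits [13..8]: which vtable in vtbl_list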

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,7 +71,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register temp_reg, Register temp2_reg,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Klass* klass = SystemDictionary::well_known_klass(klass_id);
bool did_save = false;
if (temp_reg == noreg || temp2_reg == noreg) {
temp_reg = L1;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -236,8 +236,6 @@ void NativeCall::test() {
//-------------------------------------------------------------------
#ifdef _LP64
void NativeFarCall::set_destination(address dest) {
// Address materialized in the instruction stream, so nothing to do.
return;
@ -290,8 +288,6 @@ void NativeFarCall::test() {
}
// End code for unit testing implementation of NativeFarCall class
#endif // _LP64
//-------------------------------------------------------------------
@ -304,18 +300,9 @@ void NativeMovConstReg::verify() {
// verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
Register rd = inv_rd(i0);
#ifndef _LP64
if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
rd == inv_rs1(i1) && rd == inv_rd(i1))) {
fatal("not a set_metadata");
}
#else
if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
fatal("not a set_metadata");
}
#endif
}
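The sethi/add pattern splits a 32-bit immediate into a 22-bit high part and a 10-bit low part; a C++ sketch of the split and of what data32() reassembles:

#include <cstdint>

uint32_t hi22(uint32_t x) { return x >> 10; }   // payload of sethi %hi22(imm)
uint32_t lo10(uint32_t x) { return x & 0x3ff; } // payload of add ..., %lo10(imm)

uint32_t data32_model(uint32_t imm) {
  return (hi22(imm) << 10) | lo10(imm);  // round-trips: equals imm
}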
@ -324,23 +311,13 @@ void NativeMovConstReg::print() {
}
#ifdef _LP64
intptr_t NativeMovConstReg::data() const {
return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
intptr_t NativeMovConstReg::data() const {
return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif
void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
set_data64_sethi(addr_at(sethi_offset), x);
#else
set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
#endif
set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any
@ -508,20 +485,12 @@ void NativeMovConstRegPatching::print() {
int NativeMovConstRegPatching::data() const {
#ifdef _LP64
return data64(addr_at(sethi_offset), long_at(add_offset));
#else
return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}
void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
set_data64_sethi(addr_at(sethi_offset), x);
#else
set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any
@ -758,21 +727,12 @@ void NativeJump::verify() {
assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
// verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
Register rd = inv_rd(i0);
#ifndef _LP64
if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
(is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op)) &&
inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
rd == inv_rs1(i1))) {
fatal("not a jump_to instruction");
}
#else
// In LP64, the jump instruction location varies for non-relocatable
// jumps; for example it could be sethi, xor, jmp instead of the
// 7 instructions for sethi. So let's check sethi only.
if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
fatal("not a jump_to instruction");
}
#endif
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -121,11 +121,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
bool is_safepoint_poll() {
int x = long_at(0);
#ifdef _LP64
return is_op3(x, Assembler::ldx_op3, Assembler::ldst_op) &&
#else
return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
#endif
(inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
}
@ -432,22 +428,6 @@ class NativeCallReg: public NativeInstruction {
// instructions in the sparcv9 vm. Used to call native methods which may be loaded
// anywhere in the address space, possibly out of reach of a call instruction.
#ifndef _LP64
// On 32-bit systems, a far call is the same as a near one.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall : public NativeCall {
public:
friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
{ return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
friend NativeFarCall* nativeFarCall_before(address return_address)
{ return (NativeFarCall*)nativeCall_before(return_address); }
};
#else
// The format of this extended-range call is:
// jumpl_to addr, lreg
// == sethi %hi54(addr), O7 ; jumpl O7, %lo10(addr), O7 ; <delay>
@ -515,7 +495,6 @@ class NativeFarCall: public NativeInstruction {
static void replace_mt_safe(address instr_addr, address code_buffer);
};
#endif // _LP64
// An interface for accessing/manipulating 32 bit native set_metadata imm, reg instructions
// (used to manipulate inlined data references, etc.)
@ -567,13 +546,8 @@ class NativeMovConstReg: public NativeInstruction {
public:
enum Sparc_specific_constants {
sethi_offset = 0,
#ifdef _LP64
add_offset = 7 * BytesPerInstWord,
instruction_size = 8 * BytesPerInstWord
#else
add_offset = 4,
instruction_size = 8
#endif
};
address instruction_address() const { return addr_at(0); }
@ -626,11 +600,7 @@ inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
public:
enum Sparc_specific_constants {
sethi_offset = 0,
#ifdef _LP64
nop_offset = 7 * BytesPerInstWord,
#else
nop_offset = sethi_offset + BytesPerInstWord,
#endif
add_offset = nop_offset + BytesPerInstWord,
instruction_size = add_offset + BytesPerInstWord
};
@ -705,11 +675,7 @@ class NativeMovRegMem: public NativeInstruction {
offset_width = 13,
sethi_offset = 0,
#ifdef _LP64
add_offset = 7 * BytesPerInstWord,
#else
add_offset = 4,
#endif
ldst_offset = add_offset + BytesPerInstWord
};
bool is_immediate() const {
@ -720,11 +686,7 @@ class NativeMovRegMem: public NativeInstruction {
address instruction_address() const { return addr_at(0); }
address next_instruction_address() const {
#ifdef _LP64
return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
#else
return addr_at(is_immediate() ? 4 : 12);
#endif
}
intptr_t offset() const {
return is_immediate()? inv_simm(long_at(0), offset_width) :
@ -777,19 +739,13 @@ class NativeJump: public NativeInstruction {
public:
enum Sparc_specific_constants {
sethi_offset = 0,
#ifdef _LP64
jmpl_offset = 7 * BytesPerInstWord,
instruction_size = 9 * BytesPerInstWord // includes delay slot
#else
jmpl_offset = 1 * BytesPerInstWord,
instruction_size = 3 * BytesPerInstWord // includes delay slot
#endif
};
address instruction_address() const { return addr_at(0); }
address next_instruction_address() const { return addr_at(instruction_size); }
#ifdef _LP64
address jump_destination() const {
return (address) data64(instruction_address(), long_at(jmpl_offset));
}
@ -797,15 +753,6 @@ class NativeJump: public NativeInstruction {
set_data64_sethi( instruction_address(), (intptr_t)dest);
set_long_at(jmpl_offset, set_data32_simm13( long_at(jmpl_offset), (intptr_t)dest));
}
#else
address jump_destination() const {
return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
}
void set_jump_destination(address dest) {
set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), (intptr_t)dest));
set_long_at(jmpl_offset, set_data32_simm13( long_at(jmpl_offset), (intptr_t)dest));
}
#endif
// Creation
friend inline NativeJump* nativeJump_at(address address) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -93,7 +93,6 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
case Assembler::branch_op:
{
#ifdef _LP64
jint inst2;
guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
if (format() != 0) {
@ -121,17 +120,6 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
} else {
ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
}
#else
guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
inst &= ~Assembler::hi22( -1);
inst |= Assembler::hi22((intptr_t)x);
// (ignore offset; it doesn't play into the sethi)
if (verify_only) {
guarantee(ip->long_at(0) == inst, "instructions must match");
} else {
ip->set_long_at(0, inst);
}
#endif
}
break;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,12 +34,8 @@
// There is no need for format bits; the instructions are
// sufficiently self-identifying.
#ifndef _LP64
format_width = 0
#else
// Except narrow oops in the 64-bit VM.
format_width = 1
#endif
};

View File

@ -127,56 +127,10 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
// OopMap* map = new OopMap(*total_frame_words, 0);
OopMap* map = new OopMap(frame_size_in_slots, 0);
#if !defined(_LP64)
// Save 64-bit O registers; they will get their heads chopped off on a 'save'.
__ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
__ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
__ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
__ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
__ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
__ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */
__ save(SP, -frame_size, SP);
#ifndef _LP64
// Reload the 64 bit Oregs. Although they are now Iregs we load them
// to Oregs here to avoid interrupts cutting off their heads
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
__ stx(O0, SP, o0_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());
__ stx(O1, SP, o1_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());
__ stx(O2, SP, o2_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());
__ stx(O3, SP, o3_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());
__ stx(O4, SP, o4_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());
__ stx(O5, SP, o5_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */
#ifdef _LP64
int debug_offset = 0;
#else
int debug_offset = 4;
#endif
// Save the G's
__ stx(G1, SP, g1_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
@ -192,18 +146,6 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
// This is really a waste but we'll keep things as they were for now
if (true) {
#ifndef _LP64
map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
}
@ -250,70 +192,22 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
__ ldx(SP, g4_offset+STACK_BIAS, G4);
__ ldx(SP, g5_offset+STACK_BIAS, G5);
#if !defined(_LP64)
// Restore the 64-bit O's.
__ ldx(SP, o0_offset+STACK_BIAS, O0);
__ ldx(SP, o1_offset+STACK_BIAS, O1);
__ ldx(SP, o2_offset+STACK_BIAS, O2);
__ ldx(SP, o3_offset+STACK_BIAS, O3);
__ ldx(SP, o4_offset+STACK_BIAS, O4);
__ ldx(SP, o5_offset+STACK_BIAS, O5);
// And temporarily place them in TLS
__ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
__ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
__ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
__ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
__ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
__ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */
// Restore flags
__ ldxfsr(SP, fsr_offset+STACK_BIAS);
__ restore();
#if !defined(_LP64)
// Now reload the 64-bit Oregs after we've restored the window.
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */
}
// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
#if !defined(_LP64)
// 32bit build returns longs in G1
__ ldx(SP, g1_offset+STACK_BIAS, G1);
// Retrieve the 64-bit O's.
__ ldx(SP, o0_offset+STACK_BIAS, O0);
__ ldx(SP, o1_offset+STACK_BIAS, O1);
// and save to TLS
__ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
__ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */
__ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
__ restore();
#if !defined(_LP64)
// Now reload the 64-bit Oregs after we've restored the window.
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */
}
// Is vector's size (in bytes) bigger than a size saved by default?
@ -410,11 +304,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
case T_CHAR:
case T_BYTE:
case T_BOOLEAN:
#ifndef _LP64
case T_OBJECT:
case T_ARRAY:
case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
if (int_reg < int_reg_max) {
Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
regs[i].set1(r->as_VMReg());
@ -423,7 +312,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
}
break;
#ifdef _LP64
case T_LONG:
assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
// fall-through
@ -439,15 +327,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
slot += 2;
}
break;
#else
case T_LONG:
assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
// On 32-bit SPARC always put longs on the stack to keep the pressure off
// integer argument registers. They should be used for oops.
slot = round_to(slot, 2); // align
regs[i].set2(VMRegImpl::stack2reg(slot));
slot += 2;
#endif
break;
case T_FLOAT:
@ -554,7 +433,6 @@ void AdapterGenerator::patch_callers_callsite() {
// The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
#ifdef _LP64
// mov(s,d)
__ mov(G1, L1);
__ mov(G4, L4);
@ -571,20 +449,6 @@ void AdapterGenerator::patch_callers_callsite() {
__ mov(L1, G1);
__ mov(L4, G4);
__ mov(L5, G5_method);
#else
__ stx(G1, FP, -8 + STACK_BIAS);
__ stx(G4, FP, -16 + STACK_BIAS);
__ mov(G5_method, L5);
__ mov(G5_method, O0); // VM needs target method
__ mov(I7, O1); // VM needs caller's callsite
// Must be a leaf call...
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
__ delayed()->mov(G2_thread, L7_thread_cache);
__ mov(L7_thread_cache, G2_thread);
__ ldx(FP, -8 + STACK_BIAS, G1);
__ ldx(FP, -16 + STACK_BIAS, G4);
__ mov(L5, G5_method);
#endif /* _LP64 */
__ restore(); // Restore args
__ bind(L);
@ -605,28 +469,9 @@ RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
const int st_off, bool is_stack) {
#ifdef _LP64
// In V9, longs are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
__ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
// Misaligned store of 64-bit data
__ stw(r, base, arg_slot(st_off)); // lo bits
__ srlx(r, 32, r);
__ stw(r, base, next_arg_slot(st_off)); // hi bits
#else
if (is_stack) {
// Misaligned store of 64-bit data
__ stw(r, base, arg_slot(st_off)); // lo bits
__ srlx(r, 32, r);
__ stw(r, base, next_arg_slot(st_off)); // hi bits
} else {
__ stw(r->successor(), base, arg_slot(st_off) ); // lo bits
__ stw(r , base, next_arg_slot(st_off)); // hi bits
}
#endif // COMPILER2
#endif // _LP64
}
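The removed 32-bit branch split the long into two misaligned 32-bit stores, where the LP64 path stores it whole into the second interpreter slot; the split in C++ (a sketch):

#include <cstdint>

void store_long_split_model(int64_t r, uint32_t* lo_slot, uint32_t* hi_slot) {
  *lo_slot = (uint32_t)r;          // stw(r, base, arg_slot(st_off)): lo bits
  *hi_slot = (uint32_t)(r >> 32);  // srlx(r, 32, r); stw(...): hi bits
}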
void AdapterGenerator::store_c2i_object(Register r, Register base,
@ -642,15 +487,9 @@ void AdapterGenerator::store_c2i_int(Register r, Register base,
// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
// In V9, doubles are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
__ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
// Need to marshal 64-bit value from misaligned Lesp loads
__ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
__ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
#endif
}
void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
@ -957,22 +796,17 @@ void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
if (!r_2->is_valid()) {
__ ld(Gargs, arg_slot(ld_off), r);
} else {
#ifdef _LP64
// In V9, longs are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
next_arg_slot(ld_off) : arg_slot(ld_off);
__ ldx(Gargs, slot, r);
#else
fatal("longs should be on stack");
#endif
}
} else {
assert(r_1->is_FloatRegister(), "");
if (!r_2->is_valid()) {
__ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
} else {
#ifdef _LP64
// In V9, doubles are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot. This code also handles longs that
// are passed on the stack, but need a stack-to-stack move through a
@ -980,11 +814,6 @@ void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
next_arg_slot(ld_off) : arg_slot(ld_off);
__ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
// Need to marshal 64-bit value from misaligned Lesp loads
__ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
__ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
}
}
// Was the argument really intended to be on the stack, but was loaded
@ -1157,7 +986,6 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// See int_stk_helper for a further discussion.
int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
#ifdef _LP64
// V9 convention: All things "as-if" on double-wide stack slots.
// Hoist any int/ptr/long's in the first 6 to int regs.
// Hoist any flt/dbl's in the first 16 dbl regs.
@ -1241,44 +1069,6 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
if (off > max_stack_slots) max_stack_slots = off;
}
}
#else // _LP64
// V8 convention: first 6 things in O-regs, rest on stack.
// Alignment is willy-nilly.
for (int i = 0; i < total_args_passed; i++) {
switch (sig_bt[i]) {
case T_ADDRESS: // raw pointers, like current thread, for VM calls
case T_ARRAY:
case T_BOOLEAN:
case T_BYTE:
case T_CHAR:
case T_FLOAT:
case T_INT:
case T_OBJECT:
case T_METADATA:
case T_SHORT:
regs[i].set1(int_stk_helper(i));
break;
case T_DOUBLE:
case T_LONG:
assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
break;
case T_VOID: regs[i].set_bad(); break;
default:
ShouldNotReachHere();
}
if (regs[i].first()->is_stack()) {
int off = regs[i].first()->reg2stack();
if (off > max_stack_slots) max_stack_slots = off;
}
if (regs[i].second()->is_stack()) {
int off = regs[i].second()->reg2stack();
if (off > max_stack_slots) max_stack_slots = off;
}
}
#endif // _LP64
return round_to(max_stack_slots + 1, 2);
}
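// Illustrative sketch (assumes HotSpot's round_to(x, s) rounds x up to a
// multiple of the power-of-two s): the final rounding keeps the outgoing
// area an even number of slots, i.e. doubleword aligned:
//
//   static int round_to_2(int x) { return (x + 1) & ~1; }  // 5 -> 6, 6 -> 6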
@ -1406,12 +1196,7 @@ static void object_move(MacroAssembler* masm,
Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
__ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
__ ld_ptr(rHandle, 0, L4);
#ifdef _LP64
__ movr( Assembler::rc_z, L4, G0, rHandle );
#else
__ tst( L4 );
__ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
if (dst.first()->is_stack()) {
__ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
}
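// Illustrative sketch (not part of the patch): the kept movr and the
// removed tst/movcc both substitute a NULL handle when the oop itself is
// NULL, i.e.
//
//   handle = (oop_value == 0) ? NULL : handle;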
@ -1432,12 +1217,7 @@ static void object_move(MacroAssembler* masm,
}
map->set_oop(VMRegImpl::stack2reg(oop_slot));
__ add(SP, offset + STACK_BIAS, rHandle);
#ifdef _LP64
__ movr( Assembler::rc_z, rOop, G0, rHandle );
#else
__ tst( rOop );
__ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
if (dst.first()->is_stack()) {
__ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
@ -2068,11 +1848,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
// Check for a valid (non-zero) hash code and get its value.
#ifdef _LP64
__ srlx(header, markOopDesc::hash_shift, hash);
#else
__ srl(header, markOopDesc::hash_shift, hash);
#endif
__ andcc(hash, mask, hash);
__ br(Assembler::equal, false, Assembler::pn, slowCase);
__ delayed()->nop();
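// Illustrative sketch (not part of the patch): the builds differed only in
// shift width (srlx vs srl); with hash_mask standing for the mask assembled
// above, the decode is
//
//   uintptr_t hash = (header >> markOopDesc::hash_shift) & hash_mask;
//   if (hash == 0) goto slowCase;  // no hash code installed yet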
@ -2408,7 +2184,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// We have all of the arguments setup at this point. We MUST NOT touch any Oregs
// except O6/O7. So if we must call out we must push a new frame. We immediately
// push a new frame and flush the windows.
#ifdef _LP64
intptr_t thepc = (intptr_t) __ pc();
{
address here = __ pc();
@ -2416,9 +2191,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ call(here + 8, relocInfo::none);
__ delayed()->nop();
}
#else
intptr_t thepc = __ load_pc_address(O7, 0);
#endif /* _LP64 */
// We use the same pc/oopMap repeatedly when we call out
oop_maps->add_gc_map(thepc - start, map);
@ -2553,13 +2325,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Transition from _thread_in_Java to _thread_in_native.
__ set(_thread_in_native, G3_scratch);
#ifdef _LP64
AddressLiteral dest(native_func);
__ relocate(relocInfo::runtime_call_type);
__ jumpl_to(dest, O7, O7);
#else
__ call(native_func, relocInfo::runtime_call_type);
#endif
__ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
__ restore_thread(L7_thread_cache); // restore G2_thread
@ -2574,9 +2342,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
case T_DOUBLE: break; // Got it where we want it (unless slow-path)
// In the 64-bit build the result is in O0; in the 32-bit build it is in O0,O1
case T_LONG:
#ifndef _LP64
__ mov(O1, I1);
#endif
// Fall thru
case T_OBJECT: // Really a handle
case T_ARRAY:
@ -2797,16 +2562,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Return
#ifndef _LP64
if (ret_type == T_LONG) {
// Must leave proper result in O0,O1 and G1 (c2/tiered only)
__ sllx(I0, 32, G1); // Shift bits into high G1
__ srl (I1, 0, I1); // Zero extend O1 (harmless?)
__ or3 (I1, G1, G1); // OR 64 bits into G1
}
#endif
__ ret();
__ delayed()->restore();
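// Illustrative sketch (not part of the patch): the removed 32-bit epilogue
// packed the two 32-bit halves of a long result into one 64-bit register,
// the inverse of the split in store_c2i_long:
//
//   uint64_t g1 = (static_cast<uint64_t>(i0) << 32) | static_cast<uint32_t>(i1);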
@ -2868,10 +2623,6 @@ static void gen_new_frame(MacroAssembler* masm, bool deopt) {
#ifdef ASSERT
// make sure that the frames are aligned properly
#ifndef _LP64
__ btst(wordSize*2-1, SP);
__ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
#endif
#endif
// Deopt needs to pass some extra live values from frame to frame
@ -2989,13 +2740,7 @@ void SharedRuntime::generate_deopt_blob() {
pad += 1000; // Increase the buffer size when compiling for JVMCI
}
#endif
#ifdef _LP64
CodeBuffer buffer("deopt_blob", 2100+pad, 512);
#else
// Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
// Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
CodeBuffer buffer("deopt_blob", 1600+pad, 512);
#endif /* _LP64 */
MacroAssembler* masm = new MacroAssembler(&buffer);
FloatRegister Freturn0 = F0;
Register Greturn1 = G1;
@ -3006,9 +2751,6 @@ void SharedRuntime::generate_deopt_blob() {
Register G4deopt_mode = G4_scratch;
int frame_size_words;
Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
#if !defined(_LP64) && defined(COMPILER2)
Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
#endif
Label cont;
OopMapSet *oop_maps = new OopMapSet();
@ -3220,30 +2962,13 @@ void SharedRuntime::generate_deopt_blob() {
// to the interpreter entry point
__ save(SP, -frame_size_words*wordSize, SP);
__ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
#if !defined(_LP64)
#if defined(COMPILER2)
// 32-bit 1-register longs return longs in G1
__ stx(Greturn1, saved_Greturn1_addr);
#endif
__ set_last_Java_frame(SP, noreg);
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
#else
// LP64 uses g4 in set_last_Java_frame
__ mov(G4deopt_mode, O1);
__ set_last_Java_frame(SP, G0);
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
__ reset_last_Java_frame();
__ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
#if !defined(_LP64) && defined(COMPILER2)
// In 32 bit, C2 returns longs in G1 so restore the saved G1 into
// I0/I1 if the return value is long.
Label not_long;
__ cmp_and_br_short(O0,T_LONG, Assembler::notEqual, Assembler::pt, not_long);
__ ldd(saved_Greturn1_addr,I0);
__ bind(not_long);
#endif
__ ret();
__ delayed()->restore();
@ -3273,13 +2998,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
}
#endif
#ifdef _LP64
CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else
// Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
// Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
#endif
MacroAssembler* masm = new MacroAssembler(&buffer);
Register O2UnrollBlock = O2;
Register O2klass_index = O2;

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -311,7 +311,6 @@ reg_class o7_regI(R_O7);
// ----------------------------
// Pointer Register Classes
// ----------------------------
#ifdef _LP64
// 64-bit build means 64-bit pointers means hi/lo pairs
reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
@ -344,40 +343,6 @@ reg_class o1_regP(R_O1H,R_O1);
reg_class o2_regP(R_O2H,R_O2);
reg_class o7_regP(R_O7H,R_O7);
#else // _LP64
// 32-bit build means 32-bit pointers means 1 register.
reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5,
R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
// Lock encodings use G3 and G4 internally
reg_class lock_ptr_reg(R_G1, R_G5,
R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.
reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5,
R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
// R_L7 is the lowest-priority callee-save (i.e., NS) register
// We use it to save R_G2 across calls out of Java.
reg_class l7_regP(R_L7);
// Other special pointer regs
reg_class g1_regP(R_G1);
reg_class g2_regP(R_G2);
reg_class g3_regP(R_G3);
reg_class g4_regP(R_G4);
reg_class g5_regP(R_G5);
reg_class i0_regP(R_I0);
reg_class o0_regP(R_O0);
reg_class o1_regP(R_O1);
reg_class o2_regP(R_O2);
reg_class o7_regP(R_O7);
#endif // _LP64
// ----------------------------
// Long Register Classes
@ -386,12 +351,10 @@ reg_class o7_regP(R_O7);
// Note: O7 is never in this class; it is sometimes used as an encoding temp.
reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
#ifdef _LP64
// 64-bit, longs in 1 register: use all 64-bit integer registers
// 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's.
,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
#endif // _LP64
);
reg_class g1_regL(R_G1H,R_G1);
@ -533,10 +496,8 @@ static Register reg_to_register_object(int register_encoding);
// instructions which either zero-fill or sign-fill).
bool can_branch_register( Node *bol, Node *cmp ) {
if( !BranchOnRegister ) return false;
#ifdef _LP64
if( cmp->Opcode() == Op_CmpP )
return true; // No problems with pointer compares
#endif
if( cmp->Opcode() == Op_CmpL )
return true; // No problems with long compares
@ -617,15 +578,11 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
}
int MachCallRuntimeNode::ret_addr_offset() {
#ifdef _LP64
if (MacroAssembler::is_far_target(entry_point())) {
return NativeFarCall::instruction_size;
} else {
return NativeCall::instruction_size;
}
#else
return NativeCall::instruction_size; // call; delay slot
#endif
}
// Indicate if the safepoint node needs the polling page as an input.
@ -1024,7 +981,6 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, RelocationHolder co
#ifdef ASSERT
if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
// Trash argument dump slots.
__ set(0xb0b8ac0db0b8ac0d, G1);
__ mov(G1, G5);
@ -1034,22 +990,6 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, RelocationHolder co
__ stx(G1, SP, STACK_BIAS + 0x98);
__ stx(G1, SP, STACK_BIAS + 0xA0);
__ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
// this is also a native call, so smash the first 7 stack locations,
// and the various registers
// Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
// while [SP+0x44..0x58] are the argument dump slots.
__ set((intptr_t)0xbaadf00d, G1);
__ mov(G1, G5);
__ sllx(G1, 32, G1);
__ or3(G1, G5, G1);
__ mov(G1, G5);
__ stx(G1, SP, 0x40);
__ stx(G1, SP, 0x48);
__ stx(G1, SP, 0x50);
__ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
}
#endif /*ASSERT*/
}
@ -1262,11 +1202,7 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
if(do_polling() && ra_->C->is_method_compilation()) {
st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
#else
st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
#endif
}
if(do_polling()) {
@ -1472,75 +1408,10 @@ static void mach_spill_copy_implementation_helper(const MachNode* mach,
// hardware does the flop for me. Doubles are always aligned, so no problem
// there. Misaligned sources only come from native-long-returns (handled
// special below).
#ifndef _LP64
if (src_first_rc == rc_int && // source is already big-endian
src_second_rc != rc_bad && // 64-bit move
((dst_first & 1) != 0 || dst_second != dst_first + 1)) { // misaligned dst
assert((src_first & 1) == 0 && src_second == src_first + 1, "source must be aligned");
// Do the big-endian flop.
OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ;
enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
}
#endif
// --------------------------------------
// Check for integer reg-reg copy
if (src_first_rc == rc_int && dst_first_rc == rc_int) {
#ifndef _LP64
if (src_first == R_O0_num && src_second == R_O1_num) { // Check for the evil O0/O1 native long-return case
// Note: The _first and _second suffixes refer to the addresses of the 2 halves of the 64-bit value
// as stored in memory. On a big-endian machine like SPARC, this means that the _second
// operand contains the least significant word of the 64-bit value and vice versa.
OptoReg::Name tmp = OptoReg::Name(R_O7_num);
assert((dst_first & 1) == 0 && dst_second == dst_first + 1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
// Shift O0 left in-place, zero-extend O1, then OR them into the dst
if ( cbuf ) {
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020);
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000);
emit3 (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second]);
#ifndef PRODUCT
} else {
print_helper(st, "SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
print_helper(st, "SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
print_helper(st, "OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
#endif
}
return;
} else if (dst_first == R_I0_num && dst_second == R_I1_num) {
// returning a long value in I0/I1
// a SpillCopy must be able to target a return instruction's reg_class
// Note: The _first and _second suffixes refer to the addresses of the 2 halves of the 64-bit value
// as stored in memory. On a big-endian machine like SPARC, this means that the _second
// operand contains the least significant word of the 64-bit value and vice versa.
OptoReg::Name tdest = dst_first;
if (src_first == dst_first) {
tdest = OptoReg::Name(R_O7_num);
}
if (cbuf) {
assert((src_first & 1) == 0 && (src_first + 1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
// Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
// ShrL_reg_imm6
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000);
// ShrR_reg_imm6 src, 0, dst
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000);
if (tdest != dst_first) {
emit3 (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest]);
}
}
#ifndef PRODUCT
else {
print_helper(st, "SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
print_helper(st, "SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
if (tdest != dst_first) {
print_helper(st, "MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
}
}
#endif // PRODUCT
return size+8;
}
#endif // !_LP64
// Else normal reg-reg copy
assert(src_second != dst_first, "smashed second before evacuating it");
impl_mov_helper(cbuf, src_first, dst_first, Assembler::or_op3, 0, "MOV ", st);
@ -1614,58 +1485,6 @@ static void mach_spill_copy_implementation_helper(const MachNode* mach,
}
assert(src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad");
#ifndef _LP64
// In the LP64 build, all registers can be moved as aligned/adjacent
// pairs, so there's never any need to move the high bits separately.
// The 32-bit builds have to deal with the 32-bit ABI which can force
// all sorts of silly alignment problems.
// Check for integer reg-reg copy. Hi bits are stuck up in the top
// 32-bits of a 64-bit register, but are needed in low bits of another
// register (else it's a hi-bits-to-hi-bits copy which should have
// happened already as part of a 64-bit move)
if (src_second_rc == rc_int && dst_second_rc == rc_int) {
assert((src_second & 1) == 1, "it's the evil O0/O1 native return case");
assert((dst_second & 1) == 0, "should have moved with 1 64-bit move");
// Shift src_second down to dst_second's low bits.
if (cbuf) {
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
#ifndef PRODUCT
} else {
print_helper(st, "SRLX R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second - 1), OptoReg::regname(dst_second));
#endif
}
return;
}
// Check for high word integer store. Must down-shift the hi bits
// into a temp register, then fall into the case of storing int bits.
if (src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second & 1) == 1) {
// Shift src_second down to dst_second's low bits.
if (cbuf) {
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
#ifndef PRODUCT
} else {
print_helper(st, "SRLX R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second-1), OptoReg::regname(R_O7_num));
#endif
}
src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
}
// Check for high word integer load
if (dst_second_rc == rc_int && src_second_rc == rc_stack)
return impl_helper(this, cbuf, ra_, true, ra_->reg2offset(src_second), dst_second, Assembler::lduw_op3, "LDUW", size, st);
// Check for high word integer store
if (src_second_rc == rc_int && dst_second_rc == rc_stack)
return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stw_op3, "STW ", size, st);
// Check for high word float store
if (src_second_rc == rc_float && dst_second_rc == rc_stack)
return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stf_op3, "STF ", size, st);
#endif // !_LP64
Unimplemented();
}
@ -1743,7 +1562,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
#ifndef PRODUCT
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
#ifdef _LP64
if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
@ -1762,11 +1580,6 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
st->print_cr("\tCMP R_G5,R_G3" );
st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else // _LP64
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
st->print_cr("\tCMP R_G5,R_G3" );
st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif // _LP64
}
#endif
@ -1874,9 +1687,7 @@ const bool Matcher::match_rule_supported(int opcode) {
if (!UsePopCountInstruction)
return false;
case Op_CompareAndSwapL:
#ifdef _LP64
case Op_CompareAndSwapP:
#endif
if (!VM_Version::supports_cx8())
return false;
break;
@ -1919,12 +1730,12 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg
const int Matcher::vector_ideal_reg(int size) {
const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8, "");
return Op_RegD;
}
const int Matcher::vector_shift_count_ideal_reg(int size) {
const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported");
return Node::NotAMachineReg;
}
@ -1992,13 +1803,11 @@ const bool Matcher::require_postalloc_expand = false;
const bool Matcher::need_masked_shift_count = false;
bool Matcher::narrow_oop_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedOops, "only for compressed oops code");
return false;
}
bool Matcher::narrow_klass_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedClassPointers, "only for compressed klass code");
return false;
}
@ -2027,11 +1836,7 @@ const bool Matcher::rematerialize_float_constants = false;
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif
// No-op on SPARC.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
@ -2050,11 +1855,7 @@ bool Matcher::float_in_double() { return false; }
// The relevant question is how the int is callee-saved. In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits; in not-_LP64 only the low 32 bits are written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif
// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
@ -2068,7 +1869,6 @@ bool Matcher::can_be_java_arg( int reg ) {
reg == R_I3_num ||
reg == R_I4_num ||
reg == R_I5_num ) return true;
#ifdef _LP64
// 64-bit builds can pass 64-bit pointers and longs in
// the high I registers
if( reg == R_I0H_num ||
@ -2082,14 +1882,6 @@ bool Matcher::can_be_java_arg( int reg ) {
return true;
}
#else
// 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
// Longs cannot be passed in O regs, because O regs become I regs
// after a 'save' and I regs get their high bits chopped off on
// interrupt.
if( reg == R_G1H_num || reg == R_G1_num ) return true;
if( reg == R_G4H_num || reg == R_G4_num ) return true;
#endif
// A few float args in registers
if( reg >= R_F0_num && reg <= R_F7_num ) return true;
@ -2152,19 +1944,11 @@ void Compile::reshape_address(AddPNode* addp) {
// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
#ifdef _LP64
#define immX immL
#define immX13 immL13
#define immX13m7 immL13m7
#define iRegX iRegL
#define g1RegX g1RegL
#else
#define immX immI
#define immX13 immI13
#define immX13m7 immI13m7
#define iRegX iRegI
#define g1RegX g1RegI
#endif
//----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output
@ -2326,7 +2110,6 @@ encode %{
emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
%}
#ifdef _LP64
/* %%% merge with enc_to_bool */
enc_class enc_convP2B( iRegI dst, iRegP src ) %{
MacroAssembler _masm(&cbuf);
@ -2335,7 +2118,6 @@ encode %{
Register dst_reg = reg_to_register_object($dst$$reg);
__ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
%}
#endif
enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
// (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
@ -2626,16 +2408,6 @@ encode %{
// to G1 so the register allocator will not have to deal with the misaligned register
// pair.
enc_class adjust_long_from_native_call %{
#ifndef _LP64
if (returns_long()) {
// sllx O0,32,O0
emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
// srl O1,0,O1
emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
// or O0,O1,G1
emit3 ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
}
#endif
%}
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime
@ -3102,11 +2874,7 @@ frame %{
cisc_spilling_operand_name(indOffset);
// Number of stack slots consumed by a Monitor enter
#ifdef _LP64
sync_stack_slots(2);
#else
sync_stack_slots(1);
#endif
// Compiled code's Frame Pointer
frame_pointer(R_SP);
@ -3124,13 +2892,8 @@ frame %{
// Number of outgoing stack slots killed above the out_preserve_stack_slots
// for calls to C. Supports the var-args backing area for register parms.
// ADLC doesn't support parsing expressions, so I folded the math by hand.
#ifdef _LP64
// (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
varargs_C_out_slots_killed(12);
#else
// (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
varargs_C_out_slots_killed( 7);
#endif
// The after-PROLOG location of the return address. Location of
// return address specifies a type (REG or STACK) and a number
@ -3161,17 +2924,10 @@ frame %{
// opcodes. This simplifies the register allocator.
c_return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
#endif
return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
(is_outgoing?lo_out:lo_in)[ideal_reg] );
%}
@ -3179,17 +2935,10 @@ frame %{
// Location of compiled Java return values. Same as C
return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
#endif
return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
(is_outgoing?lo_out:lo_in)[ideal_reg] );
%}
@ -3444,7 +3193,6 @@ operand immP() %{
interface(CONST_INTER);
%}
#ifdef _LP64
// Pointer Immediate: 64-bit
operand immP_set() %{
predicate(!VM_Version::is_niagara_plus());
@ -3478,7 +3226,6 @@ operand immP_no_oop_cheap() %{
format %{ %}
interface(CONST_INTER);
%}
#endif
operand immP13() %{
predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
@ -3919,11 +3666,7 @@ operand flagsRegP() %{
constraint(ALLOC_IN_RC(int_flags));
match(RegFlags);
#ifdef _LP64
format %{ "xcc_P" %}
#else
format %{ "icc_P" %}
#endif
interface(REG_INTER);
%}
@ -4500,7 +4243,6 @@ pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr )
MS : R(2);
%}
#ifdef _LP64
pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
instruction_count(1); multiple_bundles;
dst : C(write)+1;
@ -4509,7 +4251,6 @@ pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
BR : E(2);
MS : E(2);
%}
#endif
// Integer ALU reg operation
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
@ -4614,13 +4355,8 @@ pipe_class loadConP( iRegP dst, immP src ) %{
// Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
#ifdef _LP64
instruction_count(0); multiple_bundles;
fixed_latency(6);
#else
dst : E(write);
IALU : R;
#endif
%}
// Long Constant small
@ -5361,7 +5097,6 @@ instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
ins_pipe(istore_mem_reg);
%}
#ifdef _LP64
// Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
match(Set dst src);
@ -5381,27 +5116,6 @@ instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg);
%}
#else // _LP64
// Load pointer from stack slot, 32-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
match(Set dst src);
ins_cost(MEMORY_REF_COST);
format %{ "LDUW $src,$dst\t!ptr" %}
opcode(Assembler::lduw_op3, Assembler::ldst_op);
ins_encode(simple_form3_mem_reg( src, dst ) );
ins_pipe(iload_mem);
%}
// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
match(Set dst src);
ins_cost(MEMORY_REF_COST);
format %{ "STW $src,$dst\t!ptr" %}
opcode(Assembler::stw_op3, Assembler::ldst_op);
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg);
%}
#endif // _LP64
//------------Special Nop instructions for bundling - no match rules-----------
// Nop using the A0 functional unit
@ -5858,17 +5572,10 @@ instruct loadP(iRegP dst, memory mem) %{
ins_cost(MEMORY_REF_COST);
size(4);
#ifndef _LP64
format %{ "LDUW $mem,$dst\t! ptr" %}
ins_encode %{
__ lduw($mem$$Address, $dst$$Register);
%}
#else
format %{ "LDX $mem,$dst\t! ptr" %}
ins_encode %{
__ ldx($mem$$Address, $dst$$Register);
%}
#endif
ins_pipe(iload_mem);
%}
@ -5891,17 +5598,10 @@ instruct loadKlass(iRegP dst, memory mem) %{
ins_cost(MEMORY_REF_COST);
size(4);
#ifndef _LP64
format %{ "LDUW $mem,$dst\t! klass ptr" %}
ins_encode %{
__ lduw($mem$$Address, $dst$$Register);
%}
#else
format %{ "LDX $mem,$dst\t! klass ptr" %}
ins_encode %{
__ ldx($mem$$Address, $dst$$Register);
%}
#endif
ins_pipe(iload_mem);
%}
@ -5969,26 +5669,6 @@ instruct loadConI13( iRegI dst, immI13 src ) %{
ins_pipe(ialu_imm);
%}
#ifndef _LP64
instruct loadConP(iRegP dst, immP con) %{
match(Set dst con);
ins_cost(DEFAULT_COST * 3/2);
format %{ "SET $con,$dst\t!ptr" %}
ins_encode %{
relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
intptr_t val = $con$$constant;
if (constant_reloc == relocInfo::oop_type) {
__ set_oop_constant((jobject) val, $dst$$Register);
} else if (constant_reloc == relocInfo::metadata_type) {
__ set_metadata_constant((Metadata*)val, $dst$$Register);
} else { // non-oop pointers, e.g. card mark base, heap top
assert(constant_reloc == relocInfo::none, "unexpected reloc type");
__ set(val, $dst$$Register);
}
%}
ins_pipe(loadConP);
%}
#else
instruct loadConP_set(iRegP dst, immP_set con) %{
match(Set dst con);
ins_cost(DEFAULT_COST * 3/2);
@ -6032,7 +5712,6 @@ instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
%}
ins_pipe(loadConP);
%}
#endif // _LP64
instruct loadConP0(iRegP dst, immP0 src) %{
match(Set dst src);
@ -6186,19 +5865,6 @@ instruct prefetchAlloc_bis( iRegP dst ) %{
%}
// Next code is used for finding next cache line address to prefetch.
#ifndef _LP64
instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
match(Set dst (CastX2P (AndI (CastP2X src) mask)));
ins_cost(DEFAULT_COST);
size(4);
format %{ "AND $src,$mask,$dst\t! next cache line address" %}
ins_encode %{
__ and3($src$$Register, $mask$$constant, $dst$$Register);
%}
ins_pipe(ialu_reg_imm);
%}
#else
instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
match(Set dst (CastX2P (AndL (CastP2X src) mask)));
ins_cost(DEFAULT_COST);
@ -6210,7 +5876,6 @@ instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
%}
ins_pipe(ialu_reg_imm);
%}
#endif
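// Illustrative sketch (mask value is an assumption, not from the patch):
// cacheLineAdr clears low address bits to form the next cache line address
// to prefetch; only the immediate type (immI13 vs immL13) differed:
//
//   uintptr_t line = addr & ~(uintptr_t)(line_size - 1);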
//----------Store Instructions-------------------------------------------------
// Store Byte
@ -6322,13 +5987,8 @@ instruct storeP(memory dst, sp_ptr_RegP src) %{
match(Set dst (StoreP dst src));
ins_cost(MEMORY_REF_COST);
#ifndef _LP64
format %{ "STW $src,$dst\t! ptr" %}
opcode(Assembler::stw_op3, 0, REGP_OP);
#else
format %{ "STX $src,$dst\t! ptr" %}
opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
ins_encode( form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_spORreg);
%}
@ -6337,13 +5997,8 @@ instruct storeP0(memory dst, immP0 src) %{
match(Set dst (StoreP dst src));
ins_cost(MEMORY_REF_COST);
#ifndef _LP64
format %{ "STW $src,$dst\t! ptr" %}
opcode(Assembler::stw_op3, 0, REGP_OP);
#else
format %{ "STX $src,$dst\t! ptr" %}
opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
ins_encode( form3_mem_reg( dst, R_G0 ) );
ins_pipe(istore_mem_zero);
%}
@ -7094,13 +6749,8 @@ instruct loadPLocked(iRegP dst, memory mem) %{
match(Set dst (LoadPLocked mem));
ins_cost(MEMORY_REF_COST);
#ifndef _LP64
format %{ "LDUW $mem,$dst\t! ptr" %}
opcode(Assembler::lduw_op3, 0, REGP_OP);
#else
format %{ "LDX $mem,$dst\t! ptr" %}
opcode(Assembler::ldx_op3, 0, REGP_OP);
#endif
ins_encode( form3_mem_reg( mem, dst ) );
ins_pipe(iload_mem);
%}
@ -7171,9 +6821,7 @@ instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI r
%}
instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
#ifdef _LP64
predicate(VM_Version::supports_cx8());
#endif
match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp1);
@ -7184,13 +6832,8 @@ instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI r
"MOV 1,$res\n\t"
"MOVne xcc,R_G0,$res"
%}
#ifdef _LP64
ins_encode( enc_casx(mem_ptr, oldval, newval),
enc_lflags_ne_to_boolean(res) );
#else
ins_encode( enc_casi(mem_ptr, oldval, newval),
enc_iflags_ne_to_boolean(res) );
#endif
ins_pipe( long_memory_op );
%}
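// Illustrative sketch (not part of the patch): the kept casx-plus-flags
// encoding computes what GCC's builtin expresses directly:
//
//   bool res = __sync_bool_compare_and_swap(mem_ptr, oldval, newval);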
@ -7268,17 +6911,6 @@ instruct xchgI( memory mem, iRegI newval) %{
ins_pipe( long_memory_op );
%}
#ifndef _LP64
instruct xchgP( memory mem, iRegP newval) %{
match(Set newval (GetAndSetP mem newval));
format %{ "SWAP [$mem],$newval" %}
size(4);
ins_encode %{
__ swap($mem$$Address, $newval$$Register);
%}
ins_pipe( long_memory_op );
%}
#endif
instruct xchgN( memory mem, iRegN newval) %{
match(Set newval (GetAndSetN mem newval));
@ -7740,7 +7372,6 @@ instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
%}
// Register Shift Right Immediate with a CastP2X
#ifdef _LP64
instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
match(Set dst (URShiftL (CastP2X src1) src2));
size(4);
@ -7749,16 +7380,6 @@ instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
ins_pipe(ialu_reg_imm);
%}
#else
instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{
match(Set dst (URShiftI (CastP2X src1) src2));
size(4);
format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %}
opcode(Assembler::srl_op3, Assembler::arith_op);
ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
ins_pipe(ialu_reg_imm);
%}
#endif
//----------Floating Point Arithmetic Instructions-----------------------------
@ -8001,21 +7622,6 @@ instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
ins_pipe(ialu_reg_imm);
%}
#ifndef _LP64
// Use sp_ptr_RegP to match G2 (TLS register) without spilling.
instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
match(Set dst (OrI src1 (CastP2X src2)));
size(4);
format %{ "OR $src1,$src2,$dst" %}
opcode(Assembler::or_op3, Assembler::arith_op);
ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
ins_pipe(ialu_reg_reg);
%}
#else
instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
match(Set dst (OrL src1 (CastP2X src2)));
@ -8027,8 +7633,6 @@ instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
ins_pipe(ialu_reg_reg);
%}
#endif
// Xor Instructions
// Register Xor
instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
@ -8088,17 +7692,6 @@ instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
ins_pipe(ialu_reg_ialu);
%}
#ifndef _LP64
instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
match(Set dst (Conv2B src));
effect( KILL ccr );
ins_cost(DEFAULT_COST*2);
format %{ "CMP R_G0,$src\n\t"
"ADDX R_G0,0,$dst" %}
ins_encode( enc_to_bool( src, dst ) );
ins_pipe(ialu_reg_ialu);
%}
#else
instruct convP2B( iRegI dst, iRegP src ) %{
match(Set dst (Conv2B src));
ins_cost(DEFAULT_COST*2);
@ -8107,7 +7700,6 @@ instruct convP2B( iRegI dst, iRegP src ) %{
ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
ins_pipe(ialu_clr_and_mover);
%}
#endif
instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
match(Set dst (CmpLTMask src zero));
@ -8750,16 +8342,10 @@ instruct convL2F_reg(regF dst, iRegL src) %{
instruct convL2I_reg(iRegI dst, iRegL src) %{
match(Set dst (ConvL2I src));
#ifndef _LP64
format %{ "MOV $src.lo,$dst\t! long->int" %}
ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
ins_pipe(ialu_move_reg_I_to_L);
#else
size(4);
format %{ "SRA $src,R_G0,$dst\t! long->int" %}
ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
ins_pipe(ialu_reg);
#endif
%}
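// Illustrative sketch (not part of the patch): both variants keep the low
// 32 bits; the retained SRA form also sign-extends them within the 64-bit
// register, which in C++ is just the narrowing conversion
//
//   int32_t i = static_cast<int32_t>(l);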
// Register Shift Right Immediate
@ -9528,11 +9114,7 @@ instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, fla
size(4);
ins_cost(BRANCH_COST);
#ifdef _LP64
format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
#else
format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
#endif
ins_encode %{
Label* L = $labl$$label;
assert(__ use_cbcond(*L), "back to back cbcond");
@ -9550,11 +9132,7 @@ instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, f
size(4);
ins_cost(BRANCH_COST);
#ifdef _LP64
format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
#else
format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
#endif
ins_encode %{
Label* L = $labl$$label;
assert(__ use_cbcond(*L), "back to back cbcond");
@ -9822,11 +9400,7 @@ instruct safePoint_poll(iRegP poll) %{
effect(USE poll);
size(4);
#ifdef _LP64
format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
#else
format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
#endif
ins_encode %{
__ relocate(relocInfo::poll_type);
__ ld_ptr($poll$$Register, 0, G0);
@ -10259,15 +9833,15 @@ instruct array_equalsC(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI resul
instruct has_negatives(o0RegP pAryR, g3RegI iSizeR, notemp_iRegI resultR,
iRegL tmp1L, iRegL tmp2L, iRegL tmp3L, iRegL tmp4L,
flagsReg ccr)
%{
match(Set resultR (HasNegatives pAryR iSizeR));
effect(TEMP resultR, TEMP tmp1L, TEMP tmp2L, TEMP tmp3L, TEMP tmp4L, USE pAryR, USE iSizeR, KILL ccr);
format %{ "has negatives byte[] $pAryR,$iSizeR -> $resultR // KILL $tmp1L,$tmp2L,$tmp3L,$tmp4L" %}
ins_encode %{
__ has_negatives($pAryR$$Register, $iSizeR$$Register,
$resultR$$Register,
$tmp1L$$Register, $tmp2L$$Register,
$tmp3L$$Register, $tmp4L$$Register);
%}
ins_pipe(long_memory_op);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -216,9 +216,7 @@ class StubGenerator: public StubCodeGenerator {
__ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
__ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes
__ sub(FP, t, Gargs); // setup parameter pointer
#ifdef _LP64
__ add( Gargs, STACK_BIAS, Gargs ); // Account for LP64 stack bias
#endif
__ mov(SP, O5_savedSP);
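// Note (not part of the patch): the SPARC V9 ABI biases %sp/%fp by
// STACK_BIAS (0x7ff = 2047), so a usable pointer is biased_reg + STACK_BIAS;
// the 32-bit ABI had no bias, hence the formerly conditional add:
//
//   char* real_gargs = biased_gargs + 2047;  // sketch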
@ -271,27 +269,8 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
__ BIND(is_long);
#ifdef _LP64
__ ba(exit);
__ delayed()->st_long(O0, addr, G0); // store entire long
#else
#if defined(COMPILER2)
// All return values are where we want them, except for Longs. C2 returns
// longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
// Since the interpreter will return longs in G1 and O0/O1 in the 32bit
// build we simply always use G1.
// Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
// do this here. Unfortunately if we did a rethrow we'd see a machepilog node
// first which would move g1 -> O0/O1 and destroy the exception we were throwing.
__ ba(exit);
__ delayed()->stx(G1, addr, G0); // store entire long
#else
__ st(O1, addr, BytesPerInt);
__ ba(exit);
__ delayed()->st(O0, addr, G0);
#endif /* COMPILER2 */
#endif /* _LP64 */
}
return start;
}
@ -746,22 +725,10 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
Label miss;
#if defined(COMPILER2) && !defined(_LP64)
// Do not use a 'save' because it blows the 64-bit O registers.
__ add(SP,-4*wordSize,SP); // Make space for 4 temps (stack must be 2 words aligned)
__ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
__ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
__ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
__ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
Register Rret = O0;
Register Rsub = O1;
Register Rsuper = O2;
#else
__ save_frame(0);
Register Rret = I0;
Register Rsub = I1;
Register Rsuper = I2;
#endif
Register L0_ary_len = L0;
Register L1_ary_ptr = L1;
@ -775,32 +742,14 @@ class StubGenerator: public StubCodeGenerator {
// Match falls through here.
__ addcc(G0,0,Rret); // set Z flags, Z result
#if defined(COMPILER2) && !defined(_LP64)
__ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
__ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
__ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
__ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
__ retl(); // Result in Rret is zero; flags set to Z
__ delayed()->add(SP,4*wordSize,SP);
#else
__ ret(); // Result in Rret is zero; flags set to Z
__ delayed()->restore();
#endif
__ BIND(miss);
__ addcc(G0,1,Rret); // set NZ flags, NZ result
#if defined(COMPILER2) && !defined(_LP64)
__ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
__ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
__ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
__ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
__ retl(); // Result in Rret is != 0; flags set to NZ
__ delayed()->add(SP,4*wordSize,SP);
#else
__ ret(); // Result in Rret is != 0; flags set to NZ
__ delayed()->restore();
#endif
return start;
}
@ -828,11 +777,11 @@ class StubGenerator: public StubCodeGenerator {
// Rtmp - scratch
//
void assert_clean_int(Register Rint, Register Rtmp) {
#if defined(ASSERT) && defined(_LP64)
#if defined(ASSERT)
__ signx(Rint, Rtmp);
__ cmp(Rint, Rtmp);
__ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
#endif
#endif
}
//
@ -1019,10 +968,11 @@ class StubGenerator: public StubCodeGenerator {
// than prefetch distance.
__ set(prefetch_count, O4);
__ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
__ sub(count, prefetch_count, count);
__ sub(count, O4, count);
(this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
__ add(count, prefetch_count, count); // restore count
__ set(prefetch_count, O4);
__ add(count, O4, count);
} // prefetch_count > 0
@ -1043,11 +993,12 @@ class StubGenerator: public StubCodeGenerator {
// than prefetch distance.
__ set(prefetch_count, O4);
__ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
__ sub(count, prefetch_count, count);
__ sub(count, O4, count);
Label L_copy_prefetch;
(this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
__ add(count, prefetch_count, count); // restore count
__ set(prefetch_count, O4);
__ add(count, O4, count);
} // prefetch_count > 0
@ -1269,17 +1220,6 @@ class StubGenerator: public StubCodeGenerator {
// Aligned arrays have 4-byte alignment in the 32-bit VM
// and 8-byte alignment in the 64-bit VM, so we do it only for the 32-bit VM.
//
#ifndef _LP64
// copy a 4-bytes word if necessary to align 'to' to 8 bytes
__ andcc(to, 7, G0);
__ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
__ delayed()->ld(from, 0, O3);
__ inc(from, 4);
__ inc(to, 4);
__ dec(count, 4);
__ st(O3, to, -4);
__ BIND(L_skip_alignment);
#endif
} else {
// copy bytes to align 'to' on 8 byte boundary
__ andcc(to, 7, G1); // misaligned bytes
@ -1296,9 +1236,7 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->inc(to);
__ BIND(L_skip_alignment);
}
#ifdef _LP64
if (!aligned)
#endif
{
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise fall through to the next
@ -1395,14 +1333,12 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->stb(O3, end_to, 0);
__ BIND(L_skip_alignment);
}
#ifdef _LP64
if (aligned) {
// Both arrays are aligned to 8-bytes in 64-bits VM.
// The 'count' is decremented in copy_16_bytes_backward_with_shift()
// in unaligned case.
__ dec(count, 16);
} else
#endif
{
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise jump to the next
@ -1490,17 +1426,6 @@ class StubGenerator: public StubCodeGenerator {
// Aligned arrays have 4-byte alignment in the 32-bit VM
// and 8-byte alignment in the 64-bit VM.
//
#ifndef _LP64
// copy a 2-elements word if necessary to align 'to' to 8 bytes
__ andcc(to, 7, G0);
__ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
__ delayed()->ld(from, 0, O3);
__ inc(from, 4);
__ inc(to, 4);
__ dec(count, 2);
__ st(O3, to, -4);
__ BIND(L_skip_alignment);
#endif
} else {
// copy 1 element if necessary to align 'to' on an 4 bytes
__ andcc(to, 3, G0);
@ -1524,9 +1449,7 @@ class StubGenerator: public StubCodeGenerator {
__ sth(O4, to, -2);
__ BIND(L_skip_alignment2);
}
#ifdef _LP64
if (!aligned)
#endif
{
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise fall through to the next
@ -1643,9 +1566,7 @@ class StubGenerator: public StubCodeGenerator {
__ dec(count, 1 << (shift - 1));
__ BIND(L_skip_align2);
}
#ifdef _LP64
if (!aligned) {
#endif
// align to 8 bytes, we know we are 4 byte aligned to start
__ andcc(to, 7, G0);
__ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
@ -1654,9 +1575,7 @@ class StubGenerator: public StubCodeGenerator {
__ inc(to, 4);
__ dec(count, 1 << shift);
__ BIND(L_fill_32_bytes);
#ifdef _LP64
}
#endif
if (t == T_INT) {
// Zero extend value
@ -1857,14 +1776,12 @@ class StubGenerator: public StubCodeGenerator {
__ sth(O4, end_to, 0);
__ BIND(L_skip_alignment2);
}
#ifdef _LP64
if (aligned) {
// Both arrays are aligned to 8-bytes in 64-bits VM.
// The 'count' is decremented in copy_16_bytes_backward_with_shift()
// in unaligned case.
__ dec(count, 8);
} else
#endif
{
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise jump to the next
@ -1974,9 +1891,7 @@ class StubGenerator: public StubCodeGenerator {
// Aligned arrays have 4-byte alignment in the 32-bit VM
// and 8-byte alignment in the 64-bit VM.
//
#ifdef _LP64
if (!aligned)
#endif
{
// The next check could be put under 'ifndef' since the code in
// generate_disjoint_long_copy_core() has its own checks and sets 'offset'.
@ -2463,16 +2378,12 @@ class StubGenerator: public StubCodeGenerator {
__ mov(to, G1);
__ mov(count, G5);
gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
#ifdef _LP64
assert_clean_int(count, O3); // Make sure 'count' is clean int.
if (UseCompressedOops) {
generate_disjoint_int_copy_core(aligned);
} else {
generate_disjoint_long_copy_core(aligned);
}
#else
generate_disjoint_int_copy_core(aligned);
#endif
// O0 is used as temp register
gen_write_ref_array_post_barrier(G1, G5, O0);
@ -2518,15 +2429,11 @@ class StubGenerator: public StubCodeGenerator {
__ mov(count, G5);
gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
#ifdef _LP64
if (UseCompressedOops) {
generate_conjoint_int_copy_core(aligned);
} else {
generate_conjoint_long_copy_core(aligned);
}
#else
generate_conjoint_int_copy_core(aligned);
#endif
// O0 is used as temp register
gen_write_ref_array_post_barrier(G1, G5, O0);
@ -3138,7 +3045,6 @@ class StubGenerator: public StubCodeGenerator {
"arrayof_jint_disjoint_arraycopy");
StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
"arrayof_jint_arraycopy");
#ifdef _LP64
// In 64 bit we need both aligned and unaligned versions of jint arraycopy.
// entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry,
@ -3146,14 +3052,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry,
&entry_jint_arraycopy,
"jint_arraycopy");
#else
// In 32-bit, jints are always HeapWordSize aligned, so always use the aligned version
// (in fact in 32-bit we always have a pre-loop part even in the aligned version,
// because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
StubRoutines::_jint_arraycopy = StubRoutines::_arrayof_jint_arraycopy;
#endif
//*** jlong
// It is always aligned
@ -3178,7 +3076,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, entry, NULL,
"arrayof_oop_arraycopy_uninit",
/*dest_uninitialized*/true);
#ifdef _LP64
if (UseCompressedOops) {
// With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy.
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry,
@ -3193,7 +3090,6 @@ class StubGenerator: public StubCodeGenerator {
"oop_arraycopy_uninit",
/*dest_uninitialized*/true);
} else
#endif
{
// oop arraycopy is always aligned on 32bit and 64bit without compressed oops
StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy;
@ -5104,17 +5000,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine();
StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
#if !defined(COMPILER2) && !defined(_LP64)
StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
StubRoutines::_atomic_add_entry = generate_atomic_add();
StubRoutines::_atomic_xchg_ptr_entry = StubRoutines::_atomic_xchg_entry;
StubRoutines::_atomic_cmpxchg_ptr_entry = StubRoutines::_atomic_cmpxchg_entry;
StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry;
#endif // COMPILER2 !=> _LP64
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
@ -5222,11 +5107,9 @@ class StubGenerator: public StubCodeGenerator {
void stub_prolog(StubCodeDesc* cdesc) {
# ifdef ASSERT
// put extra information in the stub code, to make it more readable
#ifdef _LP64
// Write the high part of the address
// [RGV] Check if there is a dependency on the size of this prolog
__ emit_data((intptr_t)cdesc >> 32, relocInfo::none);
#endif
__ emit_data((intptr_t)cdesc, relocInfo::none);
__ emit_data(++_stub_count, relocInfo::none);
# endif

View File

@ -57,13 +57,9 @@
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef _LP64
// The sethi() instruction generates lots more instructions when shell
// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K;
#else
int TemplateInterpreter::InterpreterCodeSize = 230 * K;
#endif
// Generation of Interpreter
//
@ -75,41 +71,6 @@ int TemplateInterpreter::InterpreterCodeSize = 230 * K;
//----------------------------------------------------------------------------------------------------
#ifndef _LP64
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
Argument argv(0, true);
// We are in the jni transition frame. Save the last_java_frame corresponding to the
// outer interpreter frame
//
__ set_last_Java_frame(FP, noreg);
// make sure the interpreter frame we've pushed has a valid return pc
__ mov(O7, I7);
__ mov(Lmethod, G3_scratch);
__ mov(Llocals, G4_scratch);
__ save_frame(0);
__ mov(G2_thread, L7_thread_cache);
__ add(argv.address_in_frame(), O3);
__ mov(G2_thread, O0);
__ mov(G3_scratch, O1);
__ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
__ delayed()->mov(G4_scratch, O2);
__ mov(L7_thread_cache, G2_thread);
__ reset_last_Java_frame();
// load the register arguments (the C code packed them as varargs)
for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
__ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
}
__ ret();
__ delayed()->
restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
return entry;
}
#else
// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2 etc..
// Doubles are passed in D0, D2, D4
@ -206,7 +167,6 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
return entry;
}
#endif
void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
@ -253,11 +213,7 @@ void TemplateInterpreterGenerator::save_native_result(void) {
// save and restore any potential method result value around the unlocking operation
__ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
__ stx(O0, l_tmp);
#else
__ std(O0, l_tmp);
#endif
}
void TemplateInterpreterGenerator::restore_native_result(void) {
@ -266,11 +222,7 @@ void TemplateInterpreterGenerator::restore_native_result(void) {
// Restore any method result value
__ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
__ ldx(l_tmp, O0);
#else
__ ldd(l_tmp, O0);
#endif
}
address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
@ -340,22 +292,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ profile_return_type(O0, G3_scratch, G1_scratch);
}
#if !defined(_LP64) && defined(COMPILER2)
// All return values are where we want them, except for Longs. C2 returns
// longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
// Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
// build even if we are returning from interpreted code, we just do a little
// stupid shuffling.
// Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
// do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
// first which would move G1 -> O0/O1 and destroy the exception we were throwing.
if (state == ltos) {
__ srl (G1, 0, O1);
__ srlx(G1, 32, O0);
}
#endif // !_LP64 && COMPILER2
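
The shuffle above is just a 64-bit to two-halves split. A minimal C++ sketch of the same move (helper name invented for illustration, not HotSpot code):

#include <cstdint>

// Split a 64-bit C2 long result (G1) into the interpreter's O0/O1 halves.
static void split_long(uint64_t g1, uint32_t& o0_hi, uint32_t& o1_lo) {
  o1_lo = (uint32_t)g1;          // srl  G1,  0, O1 -- zero-extend the low word
  o0_hi = (uint32_t)(g1 >> 32);  // srlx G1, 32, O0 -- keep the high word
}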
// The callee returns with the stack possibly adjusted by adapter transition
// We remove that possible adjustment here.
// All interpreter local registers are untouched. Any result is passed back
@ -374,6 +310,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size); // argument size in words
__ sll(parameter_size, Interpreter::logStackElementSize, parameter_size); // each argument size in bytes
__ add(Lesp, parameter_size, Lesp); // pop arguments
__ check_and_handle_popframe(Gtemp);
__ check_and_handle_earlyret(Gtemp);
__ dispatch_next(state, step);
return entry;
@ -438,9 +378,6 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i); break;
case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i); break;
case T_LONG :
#ifndef _LP64
__ mov(O1, Itos_l2); // move other half of long
#endif // ifdef or no ifdef, fall through to the T_INT case
case T_INT : __ mov(O0, Itos_i); break;
case T_VOID : /* nothing to do */ break;
case T_FLOAT : assert(F0 == Ftos_f, "fix this code" ); break;
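
The sll/sra pairs in the T_BYTE and T_SHORT cases are the usual shift-up-then-arithmetic-shift-down narrowing. A hedged C++ equivalent (helper names invented; assumes arithmetic right shift on signed values, which mainstream targets provide):

#include <cstdint>

// Sign-extend a byte/short result held in a 32-bit register, as the
// sll(24)/sra(24) and sll(16)/sra(16) pairs do.
static int32_t narrow_byte (int32_t v) { return (int32_t)((uint32_t)v << 24) >> 24; }
static int32_t narrow_short(int32_t v) { return (int32_t)((uint32_t)v << 16) >> 16; }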
@ -466,12 +403,6 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
}
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
__ dispatch_next(state);
return entry;
}
//
// Helpers for commoning out cases in the various type of method entries.
//
@ -886,9 +817,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
__ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache
__ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
__ add( Lmonitors, STACK_BIAS, Lmonitors ); // Account for 64 bit stack bias
#endif
__ sub(Lmonitors, BytesPerWord, Lesp); // set Lesp
// setup interpreter activation registers
@ -1483,12 +1412,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Move the result handler address
__ mov(Lscratch, G3_scratch);
// return possible result to the outer frame
#ifndef _LP64
__ mov(O0, I0);
__ restore(O1, G0, O1);
#else
__ restore(O0, G0, O0);
#endif /* _LP64 */
// Move result handler to expected register
__ mov(G3_scratch, Lscratch);
@ -1568,17 +1492,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
restore_native_result();
}
#if defined(COMPILER2) && !defined(_LP64)
// C2 expects long results in G1; we can't tell if we're returning to interpreted
// or compiled code, so just be safe.
__ sllx(O0, 32, G1); // Shift bits into high G1
__ srl (O1, 0, O1); // Zero extend O1
__ or3 (O1, G1, G1); // OR 64 bits into G1
#endif /* COMPILER2 && !_LP64 */
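
That sequence is the inverse of the split done on return entry: merge the O0/O1 halves back into one 64-bit value. In C++ terms (sketch only):

#include <cstdint>

// Pack the high (O0) and low (O1) words back into the single 64-bit
// register C2 expects: sllx + srl + or3.
static uint64_t pack_long(uint32_t o0_hi, uint32_t o1_lo) {
  return ((uint64_t)o0_hi << 32) | o1_lo;
}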
// dispose of return address and remove activation
#ifdef ASSERT
{


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -248,12 +248,7 @@ void TemplateTable::iconst(int value) {
void TemplateTable::lconst(int value) {
transition(vtos, ltos);
assert(value >= 0, "check this code");
#ifdef _LP64
__ set(value, Otos_l);
#else
__ set(value, Otos_l2);
__ clr( Otos_l1);
#endif
}
@ -406,24 +401,12 @@ void TemplateTable::ldc2_w() {
// Check out Conversions.java for an example.
// Also ConstantPool::header_size() is 20, which makes it very difficult
// to double-align double on the constant pool. SG, 11/7/97
#ifdef _LP64
__ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
FloatRegister f = Ftos_d;
__ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
__ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
f->successor());
#endif
__ push(dtos);
__ ba_short(exit);
__ bind(Long);
#ifdef _LP64
__ ldx(G3_scratch, base_offset, Otos_l);
#else
__ ld(G3_scratch, base_offset, Otos_l);
__ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
__ push(ltos);
__ bind(exit);
@ -1128,19 +1111,11 @@ void TemplateTable::lop2(Operation op) {
transition(ltos, ltos);
__ pop_l(O2);
switch (op) {
#ifdef _LP64
case add: __ add(O2, Otos_l, Otos_l); break;
case sub: __ sub(O2, Otos_l, Otos_l); break;
case _and: __ and3(O2, Otos_l, Otos_l); break;
case _or: __ or3(O2, Otos_l, Otos_l); break;
case _xor: __ xor3(O2, Otos_l, Otos_l); break;
#else
case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
#endif
default: ShouldNotReachHere();
}
}
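
On the removed 32-bit path the addcc/addc (and subcc/subc) pairs implement 64-bit arithmetic over register halves through the carry flag. A minimal C++ rendering of the add case (function name invented):

#include <cstdint>

// 64-bit add from 32-bit halves: addcc adds the low words and sets the
// carry; addc folds that carry into the high-word add.
static void ladd32(uint32_t a_hi, uint32_t a_lo, uint32_t b_hi, uint32_t b_lo,
                   uint32_t& r_hi, uint32_t& r_lo) {
  r_lo = a_lo + b_lo;
  uint32_t carry = (r_lo < a_lo) ? 1 : 0;  // unsigned wrap-around => carry out
  r_hi = a_hi + b_hi + carry;
}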
@ -1171,14 +1146,10 @@ void TemplateTable::idiv() {
Label regular;
__ cmp(Otos_i, -1);
__ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
// Don't put set in delay slot
// Set will turn into multiple instructions in 64 bit mode
__ delayed()->nop();
__ set(min_int, G4_scratch);
#else
__ delayed()->set(min_int, G4_scratch);
#endif
Label done;
__ cmp(O1, G4_scratch);
__ br(Assembler::equal, true, Assembler::pt, done);
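
These branches guard the one quotient that overflows two's-complement division, min_int / -1, which the JVM defines to yield min_int. A sketch of the check (divide-by-zero is handled elsewhere and omitted here):

#include <climits>

// idiv with the overflow guard the template implements.
static int idiv(int dividend, int divisor) {
  if (divisor == -1 && dividend == INT_MIN) return INT_MIN;
  return dividend / divisor;
}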
@ -1202,11 +1173,7 @@ void TemplateTable::irem() {
void TemplateTable::lmul() {
transition(ltos, ltos);
__ pop_l(O2);
#ifdef _LP64
__ mulx(Otos_l, O2, Otos_l);
#else
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
}
@ -1216,15 +1183,9 @@ void TemplateTable::ldiv() {
// check for zero
__ pop_l(O2);
#ifdef _LP64
__ tst(Otos_l);
__ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
__ sdivx(O2, Otos_l, Otos_l);
#else
__ orcc(Otos_l1, Otos_l2, G0);
__ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}
@ -1233,17 +1194,11 @@ void TemplateTable::lrem() {
// check for zero
__ pop_l(O2);
#ifdef _LP64
__ tst(Otos_l);
__ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
__ sdivx(O2, Otos_l, Otos_l2);
__ mulx (Otos_l2, Otos_l, Otos_l2);
__ sub (O2, Otos_l2, Otos_l);
#else
__ orcc(Otos_l1, Otos_l2, G0);
__ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}
@ -1251,11 +1206,7 @@ void TemplateTable::lshl() {
transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
__ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
__ sllx(O2, Otos_i, Otos_l);
#else
__ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}
@ -1263,11 +1214,7 @@ void TemplateTable::lshr() {
transition(itos, ltos); // %%%% see lshl comment
__ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
__ srax(O2, Otos_i, Otos_l);
#else
__ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}
@ -1276,11 +1223,7 @@ void TemplateTable::lushr() {
transition(itos, ltos); // %%%% see lshl comment
__ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
__ srlx(O2, Otos_i, Otos_l);
#else
__ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}
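
The three long-shift templates map one-to-one onto sllx/srax/srlx. A C++ sketch of the distinction, with the shift count masked to 6 bits as the JVM requires (assumes arithmetic right shift on signed values, true on mainstream targets):

#include <cstdint>

static int64_t lshl (int64_t v, int s) { return (int64_t)((uint64_t)v << (s & 63)); }  // sllx
static int64_t lshr (int64_t v, int s) { return v >> (s & 63); }                       // srax: sign-propagating
static int64_t lushr(int64_t v, int s) { return (int64_t)((uint64_t)v >> (s & 63)); }  // srlx: zero-filling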
@ -1293,15 +1236,9 @@ void TemplateTable::fop2(Operation op) {
case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
case rem:
assert(Ftos_f == F0, "just checking");
#ifdef _LP64
// LP64 calling conventions use F1, F3 for passing 2 floats
__ pop_f(F1);
__ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
__ pop_i(O0);
__ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
__ ld( __ d_tmp, O1 );
#endif
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
assert( Ftos_f == F0, "fix this code" );
break;
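
SharedRuntime::frem computes Java's % on floats, which the JLS defines with C fmod semantics (truncated division, result takes the dividend's sign). Roughly:

#include <cmath>

// What the frem runtime call computes for Ftos_f and the popped operand.
static float frem(float dividend, float divisor) {
  return std::fmod(dividend, divisor);
}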
@ -1319,18 +1256,9 @@ void TemplateTable::dop2(Operation op) {
case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
case rem:
#ifdef _LP64
// Pass arguments in D0, D2
__ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
__ pop_d( F0 );
#else
// Pass arguments in O0O1, O2O3
__ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
__ ldd( __ d_tmp, O2 );
__ pop_d(Ftos_f);
__ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
__ ldd( __ d_tmp, O0 );
#endif
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
assert( Ftos_d == F0, "fix this code" );
break;
@ -1348,11 +1276,7 @@ void TemplateTable::ineg() {
void TemplateTable::lneg() {
transition(ltos, ltos);
#ifdef _LP64
__ sub(G0, Otos_l, Otos_l);
#else
__ lneg(Otos_l1, Otos_l2);
#endif
}
@ -1437,15 +1361,8 @@ void TemplateTable::convert() {
Label done;
switch (bytecode()) {
case Bytecodes::_i2l:
#ifdef _LP64
// Sign extend the 32 bits
__ sra ( Otos_i, 0, Otos_l );
#else
__ addcc(Otos_i, 0, Otos_l2);
__ br(Assembler::greaterEqual, true, Assembler::pt, done);
__ delayed()->clr(Otos_l1);
__ set(~0, Otos_l1);
#endif
break;
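
On LP64 a single sra with shift 0 sign-extends the 32-bit value into 64 bits; the removed 32-bit path synthesizes the high word from the sign instead. Roughly, with invented names:

#include <cstdint>

// i2l without a 64-bit register: low half is the value, high half is
// all-ones or all-zeroes depending on the sign -- exactly what the
// addcc/br/set sequence computes.
static void i2l(int32_t i, int32_t& hi, int32_t& lo) {
  lo = i;
  hi = (i < 0) ? ~0 : 0;
}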
case Bytecodes::_i2f:
@ -1476,12 +1393,8 @@ void TemplateTable::convert() {
break;
case Bytecodes::_l2i:
#ifndef _LP64
__ mov(Otos_l2, Otos_i);
#else
// Sign-extend into the high 32 bits
__ sra(Otos_l, 0, Otos_i);
#endif
break;
case Bytecodes::_l2f:
@ -1512,11 +1425,7 @@ void TemplateTable::convert() {
case Bytecodes::_f2l:
// must uncache tos
__ push_f();
#ifdef _LP64
__ pop_f(F1);
#else
__ pop_i(O0);
#endif
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
break;
@ -1528,13 +1437,8 @@ void TemplateTable::convert() {
case Bytecodes::_d2l:
// must uncache tos
__ push_d();
#ifdef _LP64
// LP64 calling conventions pass first double arg in D0
__ pop_d( Ftos_d );
#else
__ pop_i( O0 );
__ pop_i( O1 );
#endif
__ call_VM_leaf(Lscratch,
bytecode() == Bytecodes::_d2i
? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
@ -1554,13 +1458,8 @@ void TemplateTable::convert() {
void TemplateTable::lcmp() {
transition(ltos, itos);
#ifdef _LP64
__ pop_l(O1); // pop off value 1, value 2 is in O0
__ lcmp( O1, Otos_l, Otos_i );
#else
__ pop_l(O2); // cmp O2,3 to O0,1
__ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}
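
lcmp pops value1 and three-way-compares it against the long on top of the stack, leaving -1, 0 or 1. In C++ terms (sketch):

#include <cstdint>

static int lcmp(int64_t value1, int64_t value2) {
  return (value1 < value2) ? -1 : (value1 == value2) ? 0 : 1;
}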
@ -1756,7 +1655,6 @@ void TemplateTable::ret() {
__ access_local_returnAddress(G3_scratch, Otos_i);
// Otos_i contains the bci, compute the bcp from that
#ifdef _LP64
#ifdef ASSERT
// jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
// the result. The return address (really a BCI) was stored with an
@ -1771,7 +1669,6 @@ void TemplateTable::ret() {
__ stop("BCI is in the wrong register half?");
__ bind (zzz) ;
}
#endif
#endif
__ profile_ret(vtos, Otos_i, G4_scratch);
@ -1808,10 +1705,8 @@ void TemplateTable::tableswitch() {
// load lo, hi
__ ld(O1, 1 * BytesPerInt, O2); // Low Byte
__ ld(O1, 2 * BytesPerInt, O3); // High Byte
#ifdef _LP64
// Sign extend the 32 bits
__ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */
// check against lo & hi
__ cmp( Otos_i, O2);
@ -3346,9 +3241,7 @@ void TemplateTable::_new() {
__ br(Assembler::notEqual, false, Assembler::pn, slow_case);
__ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
// get InstanceKlass
//__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
__ add(Roffset, sizeof(ConstantPool), Roffset);
__ ld_ptr(Rscratch, Roffset, RinstanceKlass);
__ load_resolved_klass_at_offset(Rscratch, Roffset, RinstanceKlass);
// make sure klass is fully initialized:
__ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
@ -3400,11 +3293,7 @@ void TemplateTable::_new() {
// Check if tlab should be discarded (refill_waste_limit >= free)
__ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
__ sub(RendValue, RoldTopValue, RfreeValue);
#ifdef _LP64
__ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
#else
__ srl(RfreeValue, LogHeapWordSize, RfreeValue);
#endif
__ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
// increment waste limit to prevent getting stuck on this slow path
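
The discard test compares the TLAB's remaining free words against its refill-waste limit. A hedged C++ reading of the sub/srlx/cmp sequence (names and the log_heap_word_size parameter stand in for HotSpot globals):

#include <cstddef>
#include <cstdint>

// Retire (refill) the TLAB only when the space wasted by abandoning it --
// its remaining free words -- is at or below the waste limit.
static bool discard_tlab(uintptr_t top, uintptr_t end,
                         size_t refill_waste_limit_words, int log_heap_word_size) {
  size_t free_words = (size_t)(end - top) >> log_heap_word_size;  // sub + srlx
  return refill_waste_limit_words >= free_words;                  // greaterEqualUnsigned
}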
@ -3574,8 +3463,9 @@ void TemplateTable::checkcast() {
// Extract target class from constant pool
__ bind(quicked);
__ add(Roffset, sizeof(ConstantPool), Roffset);
__ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
__ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);
__ bind(resolved);
__ load_klass(Otos_i, RobjKlass); // get value klass
@ -3631,9 +3521,9 @@ void TemplateTable::instanceof() {
// Extract target class from constant pool
__ bind(quicked);
__ add(Roffset, sizeof(ConstantPool), Roffset);
__ get_constant_pool(Lscratch);
__ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
__ load_resolved_klass_at_offset(Lscratch, Roffset, RspecifiedKlass);
__ bind(resolved);
__ load_klass(Otos_i, RobjKlass); // get value klass


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,12 +70,10 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
}
#ifdef _LP64
// 32-bit oops don't make sense for the 64-bit VM on sparc
// since the 32-bit VM has the same registers and smaller objects.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
#endif // _LP64
#ifdef COMPILER2
// Indirect branch is the same cost as direct
if (FLAG_IS_DEFAULT(UseJumpTables)) {


@ -232,7 +232,7 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
return basic + slop;
} else {
const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
const int basic = 34 * BytesPerInstWord +
// shift;add for load_klass (only shift with zero heap based)
(UseCompressedClassPointers ?
MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
@ -257,7 +257,6 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
// ld [ %g3 + 0xe8 ], %l2
// sll %l2, 2, %l2
// add %l2, 0x134, %l2
// and %l2, -8, %l2 ! NOT_LP64 only
// add %g3, %l2, %l2
// add %g3, 4, %g3
// ld [ %l2 ], %l5


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -21,6 +21,8 @@
* questions.
*/
#include "precompiled.hpp"
#include "aot/compiledIC_aot.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -509,7 +509,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
get_constant_pool(result);
// load pointer for resolved_references[] objArray
movptr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
movptr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
movptr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
// JNIHandles::resolve(obj);
movptr(result, Address(result, 0));
// Add in the index
@ -517,6 +518,14 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
// load cpool->resolved_klass_at(index)
void InterpreterMacroAssembler::load_resolved_klass_at_index(Register cpool,
Register index, Register klass) {
movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
Register resolved_klasses = cpool;
movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes()));
movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
}
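
The helper makes the new indirection explicit: the constant-pool slot holds an index into a side table of resolved klasses, not the Klass* itself. A self-contained C++ sketch with stand-in types (not the real HotSpot declarations):

#include <cstdint>

struct Klass;                                        // opaque stand-in
struct KlassArray { Klass** data; Klass* at(int i) { return data[i]; } };
struct ConstantPool {
  int32_t*    slots;                                 // one word per cp entry
  KlassArray* resolved_klasses;                      // filled at resolution time
};

// What load_resolved_klass_at_index computes: cp slot -> table index -> Klass*.
static Klass* load_resolved_klass(ConstantPool* cp, int cp_index) {
  int resolved_index = cp->slots[cp_index];          // the movw from the cp slot
  return cp->resolved_klasses->at(resolved_index);   // the indexed table load
}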
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,9 +48,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
int number_of_arguments,
bool check_exceptions);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true);
@ -61,6 +58,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void jump_to_entry(address entry);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
void load_earlyret_value(TosState state);
// Interpreter-specific registers
@ -123,6 +123,11 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
// load cpool->resolved_klass_at(index)
void load_resolved_klass_at_index(Register cpool, // the constant pool (corrupted on return)
Register index, // the constant pool index (corrupted on return)
Register klass); // contains the Klass on return
NOT_LP64(void f2ieee();) // truncate ftos to 32bits
NOT_LP64(void d2ieee();) // truncate dtos to 64bits


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,7 +65,7 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
Handle obj = HotSpotObjectConstantImpl::object(constant);
Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj());
if (HotSpotObjectConstantImpl::compressed(constant)) {
#ifdef _LP64


@ -71,12 +71,6 @@ class MacroAssembler: public Assembler {
bool check_exceptions // whether to check for pending exceptions after return
);
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
// helpers for FPU flag access
@ -87,6 +81,12 @@ class MacroAssembler: public Assembler {
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// Support for NULL-checks
//
// Generates code that causes a NULL OS exception if the content of reg is NULL.


@ -1,124 +0,0 @@
/*
* Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtbl_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
Label common_code;
for (int i = 0; i < vtbl_list_size; ++i) {
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
// Load rax, with a value indicating vtable/offset pair.
// -- bits[ 7..0] (8 bits) which virtual method in table?
// -- bits[12..8] (5 bits) which virtual method table?
// -- must fit in 13-bit instruction immediate field.
__ movl(rax, (i << 8) + j);
__ jmp(common_code);
}
}
__ bind(common_code);
#ifdef WIN32
// Expecting to be called with "thiscall" conventions -- the arguments
// are on the stack, except that the "this" pointer is in rcx.
#else
// Expecting to be called with Unix conventions -- the arguments
// are on the stack, including the "this" pointer.
#endif
// In addition, rax was set (above) to the offset of the method in the
// table.
#ifdef WIN32
__ push(rcx); // save "this"
#endif
__ mov(rcx, rax);
__ shrptr(rcx, 8); // isolate vtable identifier.
__ shlptr(rcx, LogBytesPerWord);
Address index(noreg, rcx, Address::times_1);
ExternalAddress vtbl((address)vtbl_list);
__ movptr(rdx, ArrayAddress(vtbl, index)); // get correct vtable address.
#ifdef WIN32
__ pop(rcx); // restore "this"
#else
__ movptr(rcx, Address(rsp, BytesPerWord)); // fetch "this"
#endif
__ movptr(Address(rcx, 0), rdx); // update vtable pointer.
__ andptr(rax, 0x00ff); // isolate vtable method index
__ shlptr(rax, LogBytesPerWord);
__ addptr(rax, rdx); // address of real method pointer.
__ jmp(Address(rax, 0)); // get real method pointer.
__ flush();
*mc_top = (char*)__ pc();
}
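
The token moved into rax is just (table << 8) + method, and the common stub decodes it with the shrptr/andptr pair. A tiny C++ check of that encoding (helper names invented for illustration):

#include <cassert>

inline int make_token(int table, int method) { return (table << 8) + method; }
inline int token_table (int token) { return token >> 8;   }  // shrptr rcx, 8
inline int token_method(int token) { return token & 0xff; }  // andptr rax, 0x00ff

int main() {
  int t = make_token(3, 17);
  assert(token_table(t) == 3 && token_method(t) == 17);
  return 0;
}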

Some files were not shown because too many files have changed in this diff.