Jesper Wilhelmsson 2017-04-24 21:34:24 +02:00
commit dd358a3bac
1680 changed files with 117458 additions and 37755 deletions

View File

@ -47,11 +47,9 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
- $(SRC_DIR)/org.graalvm.compiler.common/src \
$(SRC_DIR)/org.graalvm.compiler.core/src \
$(SRC_DIR)/org.graalvm.compiler.core.common/src \
$(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
- $(SRC_DIR)/org.graalvm.compiler.api.collections/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
$(SRC_DIR)/org.graalvm.compiler.asm/src \
$(SRC_DIR)/org.graalvm.compiler.bytecode/src \
@ -68,6 +66,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.phases.common/src \
$(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
$(SRC_DIR)/org.graalvm.compiler.virtual/src \
+ $(SRC_DIR)/org.graalvm.util/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \
@ -102,6 +101,7 @@ ifeq ($(INCLUDE_GRAAL), true)
SRC := \
$(SRC_DIR)/org.graalvm.compiler.options/src \
$(SRC_DIR)/org.graalvm.compiler.options.processor/src \
+ $(SRC_DIR)/org.graalvm.util/src \
, \
BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor, \
JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar, \
@ -114,9 +114,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
- $(SRC_DIR)/org.graalvm.compiler.common/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
- $(SRC_DIR)/org.graalvm.compiler.api.collections/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
$(SRC_DIR)/org.graalvm.compiler.code/src \
$(SRC_DIR)/org.graalvm.compiler.core.common/src \
@ -125,6 +123,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.nodeinfo/src \
$(SRC_DIR)/org.graalvm.compiler.options/src \
$(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
+ $(SRC_DIR)/org.graalvm.util/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
$(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \

View File

@ -37,7 +37,6 @@ SRC_DIR := $(HOTSPOT_TOPDIR)/src/$(MODULE)/share/classes
PROC_SRC_SUBDIRS := \
org.graalvm.compiler.code \
- org.graalvm.compiler.common \
org.graalvm.compiler.core \
org.graalvm.compiler.core.aarch64 \
org.graalvm.compiler.core.amd64 \

View File

@ -1,53 +0,0 @@
#
# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
include $(SPEC)
include NativeCompilation.gmk
$(eval $(call IncludeCustomExtension, hotspot, lib/Lib-jdk.aot.gmk))
##############################################################################
# Build libjelfshim only when AOT is enabled.
ifeq ($(ENABLE_AOT), true)
JELFSHIM_NAME := jelfshim
$(eval $(call SetupNativeCompilation, BUILD_LIBJELFSHIM, \
TOOLCHAIN := TOOLCHAIN_DEFAULT, \
OPTIMIZATION := LOW, \
LIBRARY := $(JELFSHIM_NAME), \
OUTPUT_DIR := $(call FindLibDirForModule, $(MODULE)), \
SRC := $(HOTSPOT_TOPDIR)/src/jdk.aot/unix/native/libjelfshim, \
CFLAGS := $(CFLAGS_JDKLIB) $(ELF_CFLAGS) \
-DAOT_VERSION_STRING='"$(VERSION_STRING)"' \
-I$(SUPPORT_OUTPUTDIR)/headers/$(MODULE), \
LDFLAGS := $(LDFLAGS_JDKLIB), \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/lib$(JELFSHIM_NAME), \
LIBS := $(ELF_LIBS) $(LIBS_JDKLIB), \
))
TARGETS += $(BUILD_LIBJELFSHIM)
endif
##############################################################################

View File

@ -35,12 +35,15 @@ include $(SPEC)
include MakeBase.gmk
include TestFilesCompilation.gmk
+ $(eval $(call IncludeCustomExtension, hotspot, test/JtregNative.gmk))
################################################################################
# Targets for building the native tests themselves.
################################################################################
# Add more directories here when needed.
- BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
+ BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
+ $(HOTSPOT_TOPDIR)/test/gc/stress/gclocker \
$(HOTSPOT_TOPDIR)/test/native_sanity \
$(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
$(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \

View File

@ -1,5 +1,5 @@
//
- // Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ // Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -3564,7 +3564,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
}
// Vector ideal reg.
- const int Matcher::vector_ideal_reg(int len) {
+ const uint Matcher::vector_ideal_reg(int len) {
switch(len) {
case 8: return Op_VecD;
case 16: return Op_VecX;
@ -3573,7 +3573,7 @@ const int Matcher::vector_ideal_reg(int len) {
return 0;
}
- const int Matcher::vector_shift_count_ideal_reg(int size) {
+ const uint Matcher::vector_shift_count_ideal_reg(int size) {
return Op_VecX;
}

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -54,9 +54,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
int number_of_arguments,
bool check_exceptions);
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true);
@ -67,6 +64,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void jump_to_entry(address entry);
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
// Interpreter-specific registers
void save_bcp() {
str(rbcp, Address(rfp, frame::interpreter_frame_bcp_offset * wordSize));

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
}
}
#endif // ASSERT
- Handle obj = HotSpotObjectConstantImpl::object(constant);
+ Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj());
MacroAssembler::patch_oop(pc, (address)obj());
int oop_index = _oop_recorder->find_index(value);

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -77,12 +77,6 @@ class MacroAssembler: public Assembler {
bool check_exceptions // whether to check for pending exceptions after return
);
- // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
- // The implementation is only non-empty for the InterpreterMacroAssembler,
- // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
// Maximum size of class area in Metaspace when compressed
@ -97,6 +91,12 @@ class MacroAssembler: public Assembler {
> (1u << log2_intptr(CompressedClassSpaceSize))));
}
+ // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
+ // The implementation is only non-empty for the InterpreterMacroAssembler,
+ // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
// Biased locking support
// lock_reg and obj_reg must be loaded up with the appropriate values.
// swap_reg is killed.
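Reading this hunk together with the template-interpreter hunks further down (which begin calling the two hooks from generate_return_entry_for), the pattern being set up appears to be the usual virtual no-op hook: the base assembler declares check_and_handle_popframe/earlyret but emits nothing, and only the interpreter assembler overrides them. A minimal sketch of that pattern, with illustrative class and register names rather than HotSpot's real hierarchy:

    #include <cstdio>

    struct Register { int id; };

    // Base assembler: the hooks exist so shared code can call them unconditionally,
    // but outside the interpreter they emit nothing.
    class BaseAsm {
    public:
      virtual ~BaseAsm() {}
      virtual void check_and_handle_popframe(Register) {}   // no-op by default
      virtual void check_and_handle_earlyret(Register) {}   // no-op by default
    };

    // Interpreter variant: the only place PopFrame / ForceEarlyReturn are handled.
    class InterpAsm : public BaseAsm {
    public:
      void check_and_handle_popframe(Register thread) override {
        std::printf("emit JVMTI PopFrame check using r%d\n", thread.id);
      }
      void check_and_handle_earlyret(Register thread) override {
        std::printf("emit ForceEarlyReturn check using r%d\n", thread.id);
      }
    };

    int main() {
      InterpAsm masm;
      BaseAsm& as_base = masm;
      as_base.check_and_handle_popframe(Register{28});  // dispatches to the interpreter override
      return 0;
    }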

View File

@ -1,126 +0,0 @@
/*
* Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#define __ masm->
extern "C" {
void aarch64_prolog(void);
}
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
#ifdef BUILTIN_SIM
// Write a dummy word to the writable shared metaspace.
// MetaspaceShared::initialize_shared_spaces will fill it with the
// address of aarch64_prolog().
address *prolog_ptr = (address*)*md_top;
*(intptr_t *)(*md_top) = (intptr_t)0;
(*md_top) += sizeof(intptr_t);
#endif
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
Label common_code;
for (int i = 0; i < vtbl_list_size; ++i) {
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
// We're called directly from C code.
#ifdef BUILTIN_SIM
__ c_stub_prolog(8, 0, MacroAssembler::ret_type_integral, prolog_ptr);
#endif
// Load rscratch1 with a value indicating vtable/offset pair.
// -- bits[ 7..0] (8 bits) which virtual method in table?
// -- bits[12..8] (5 bits) which virtual method table?
__ mov(rscratch1, (i << 8) + j);
__ b(common_code);
}
}
__ bind(common_code);
Register tmp0 = r10, tmp1 = r11; // AAPCS64 temporary registers
__ enter();
__ lsr(tmp0, rscratch1, 8); // isolate vtable identifier.
__ mov(tmp1, (address)vtbl_list); // address of list of vtable pointers.
__ ldr(tmp1, Address(tmp1, tmp0, Address::lsl(LogBytesPerWord))); // get correct vtable pointer.
__ str(tmp1, Address(c_rarg0)); // update vtable pointer in obj.
__ add(rscratch1, tmp1, rscratch1, ext::uxtb, LogBytesPerWord); // address of real method pointer.
__ ldr(rscratch1, Address(rscratch1)); // get real method pointer.
__ blrt(rscratch1, 8, 0, 1); // jump to the real method.
__ leave();
__ ret(lr);
*mc_top = (char*)__ pc();
}
#ifdef BUILTIN_SIM
void MetaspaceShared::relocate_vtbl_list(char **buffer) {
void **sim_entry = (void**)*buffer;
*sim_entry = (void*)aarch64_prolog;
*buffer += sizeof(intptr_t);
}
#endif

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -63,7 +63,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
- KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+ Klass* klass = SystemDictionary::well_known_klass(klass_id);
Register temp = rscratch2;
Register temp2 = rscratch1; // used by MacroAssembler::cmpptr
Label L_ok, L_bad;

View File

@ -402,14 +402,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
return entry;
}
- address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
- address entry = __ pc();
- // NULL last_sp until next java call
- __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
- __ dispatch_next(state);
- return entry;
- }
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();
@ -444,6 +436,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ notify(Assembler::method_reentry);
}
#endif
+ __ check_and_handle_popframe(rthread);
+ __ check_and_handle_earlyret(rthread);
__ get_dispatch();
__ dispatch_next(state, step);

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -234,8 +234,15 @@ void AbstractInterpreter::layout_activation(Method* method,
#ifdef AARCH64
interpreter_frame->interpreter_frame_set_stack_top(stack_top);
+ // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
+ // none of which are at the same time, so we just need to make sure there is enough room
+ // for the biggest user:
+ // -reserved slot for exception handler
+ // -reserved slots for JSR292. Method::extra_stack_entries() is the size.
+ // -3 reserved slots so get_method_counters() can save some registers before call_VM().
+ int max_stack = method->constMethod()->max_stack() + MAX2(3, Method::extra_stack_entries());
intptr_t* extended_sp = (intptr_t*) monbot -
- (method->max_stack() + 1) * Interpreter::stackElementWords - // +1 is reserved slot for exception handler
+ (max_stack * Interpreter::stackElementWords) -
popframe_extra_args;
extended_sp = (intptr_t*)round_down((intptr_t)extended_sp, StackAlignmentInBytes);
interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);
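The reserved-slot arithmetic introduced above can be restated in plain C++. A minimal sketch, assuming illustrative parameter names instead of HotSpot's Method/ConstMethod accessors:

    #include <algorithm>
    #include <cstdio>

    // The extra slots are shared by three users that are never live at the same time,
    // so the reservation only has to fit the largest one:
    //   1 slot       - exception handler scratch
    //   jsr292_slots - JSR292 appendix (Method::extra_stack_entries())
    //   3 slots      - registers saved by get_method_counters() before call_VM()
    int effective_max_stack(int raw_max_stack, int jsr292_slots) {
      // MAX2(3, Method::extra_stack_entries()) in the hunk; 3 already covers the 1-slot user.
      return raw_max_stack + std::max(3, jsr292_slots);
    }

    int main() {
      // e.g. a method with max_stack = 10 and no JSR292 appendix reserves 13 stack words
      std::printf("%d\n", effective_max_stack(10, 0));
      return 0;
    }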

View File

@ -1,5 +1,5 @@
//
- // Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ // Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -1122,7 +1122,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
}
// Vector ideal reg corresponding to specified size in bytes
- const int Matcher::vector_ideal_reg(int size) {
+ const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize >= size, "");
switch(size) {
case 8: return Op_VecD;
@ -1132,7 +1132,7 @@ const int Matcher::vector_ideal_reg(int size) {
return 0;
}
- const int Matcher::vector_shift_count_ideal_reg(int size) {
+ const uint Matcher::vector_shift_count_ideal_reg(int size) {
return vector_ideal_reg(size);
}

View File

@ -2016,75 +2016,42 @@ void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
void InterpreterMacroAssembler::get_method_counters(Register method,
Register Rcounters,
- Label& skip) {
+ Label& skip,
+ bool saveRegs,
+ Register reg1,
+ Register reg2,
+ Register reg3) {
const Address method_counters(method, Method::method_counters_offset());
Label has_counters;
ldr(Rcounters, method_counters);
cbnz(Rcounters, has_counters);
+ if (saveRegs) {
+ // Save and restore in use caller-saved registers since they will be trashed by call_VM
+ assert(reg1 != noreg, "must specify reg1");
+ assert(reg2 != noreg, "must specify reg2");
#ifdef AARCH64
- const Register tmp = Rcounters;
- const int saved_regs_size = 20*wordSize;
- // Note: call_VM will cut SP according to Rstack_top value before call, and restore SP to
- // extended_sp value from frame after the call.
- // So make sure there is enough stack space to save registers and adjust Rstack_top accordingly.
- {
- Label enough_stack_space;
- check_extended_sp(tmp);
- sub(Rstack_top, Rstack_top, saved_regs_size);
- cmp(SP, Rstack_top);
- b(enough_stack_space, ls);
- align_reg(tmp, Rstack_top, StackAlignmentInBytes);
- mov(SP, tmp);
- str(tmp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
- bind(enough_stack_space);
- check_stack_top();
- int offset = 0;
- stp(R0, R1, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R2, R3, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R4, R5, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R6, R7, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R8, R9, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R10, R11, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R12, R13, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R14, R15, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R16, R17, Address(Rstack_top, offset)); offset += 2*wordSize;
- stp(R18, LR, Address(Rstack_top, offset)); offset += 2*wordSize;
- assert (offset == saved_regs_size, "should be");
- }
+ assert(reg3 != noreg, "must specify reg3");
+ stp(reg1, reg2, Address(Rstack_top, -2*wordSize, pre_indexed));
+ stp(reg3, ZR, Address(Rstack_top, -2*wordSize, pre_indexed));
#else
- push(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(R14));
- #endif // AARCH64
+ assert(reg3 == noreg, "must not specify reg3");
+ push(RegisterSet(reg1) | RegisterSet(reg2));
+ #endif
+ }
mov(R1, method);
- call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::build_method_counters), R1);
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);
+ if (saveRegs) {
#ifdef AARCH64
- {
- int offset = 0;
- ldp(R0, R1, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R2, R3, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R4, R5, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R6, R7, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R8, R9, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R10, R11, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R12, R13, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R14, R15, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R16, R17, Address(Rstack_top, offset)); offset += 2*wordSize;
- ldp(R18, LR, Address(Rstack_top, offset)); offset += 2*wordSize;
- assert (offset == saved_regs_size, "should be");
- add(Rstack_top, Rstack_top, saved_regs_size);
- }
+ ldp(reg3, ZR, Address(Rstack_top, 2*wordSize, post_indexed));
+ ldp(reg1, reg2, Address(Rstack_top, 2*wordSize, post_indexed));
#else
- pop(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(R14));
- #endif // AARCH64
+ pop(RegisterSet(reg1) | RegisterSet(reg2));
+ #endif
+ }
ldr(Rcounters, method_counters);
cbz(Rcounters, skip); // No MethodCounters created, OutOfMemory

View File

@ -53,9 +53,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Template interpreter specific version of call_VM_helper
virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
- virtual void check_and_handle_popframe();
- virtual void check_and_handle_earlyret();
// base routine for all dispatches
typedef enum { DispatchDefault, DispatchNormal } DispatchTableMode;
void dispatch_base(TosState state, DispatchTableMode table_mode, bool verifyoop = true);
@ -63,6 +60,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
public:
InterpreterMacroAssembler(CodeBuffer* code);
+ virtual void check_and_handle_popframe();
+ virtual void check_and_handle_earlyret();
// Interpreter-specific registers
#if defined(AARCH64) && defined(ASSERT)
@ -328,7 +328,13 @@ class InterpreterMacroAssembler: public MacroAssembler {
void trace_state(const char* msg) PRODUCT_RETURN;
- void get_method_counters(Register method, Register Rcounters, Label& skip);
+ void get_method_counters(Register method,
+ Register Rcounters,
+ Label& skip,
+ bool saveRegs = false,
+ Register reg1 = noreg,
+ Register reg2 = noreg,
+ Register reg3 = noreg);
};
#endif // CPU_ARM_VM_INTERP_MASM_ARM_HPP

View File

@ -206,6 +206,9 @@ protected:
// may customize this version by overriding it for its purposes (e.g., to save/restore
// additional registers when doing a VM call).
virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
+ public:
+ MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
@ -213,10 +216,6 @@ protected:
virtual void check_and_handle_popframe() {}
virtual void check_and_handle_earlyret() {}
- public:
- MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// By default, we do not need relocation information for non
// patchable absolute addresses. However, when needed by some
// extensions, ignore_non_patchable_relocations can be modified,

View File

@ -1,99 +0,0 @@
/*
* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "assembler_arm.inline.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
for (int i = 0; i < vtbl_list_size; ++i) {
Label common_code;
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*) __ pc();
__ mov(Rtemp, j); // Rtemp contains an index of a virtual method in the table
__ b(common_code);
}
InlinedAddress vtable_address((address)&vtbl_list[i]);
__ bind(common_code);
const Register tmp2 = AARCH64_ONLY(Rtemp2) NOT_AARCH64(R4);
assert_different_registers(Rtemp, tmp2);
#ifndef AARCH64
__ push(tmp2);
#endif // !AARCH64
// Do not use ldr_global since the code must be portable across all ARM architectures
__ ldr_literal(tmp2, vtable_address);
__ ldr(tmp2, Address(tmp2)); // get correct vtable address
__ ldr(Rtemp, Address::indexed_ptr(tmp2, Rtemp)); // get real method pointer
__ str(tmp2, Address(R0)); // update vtable. R0 = "this"
#ifndef AARCH64
__ pop(tmp2);
#endif // !AARCH64
__ jump(Rtemp);
__ bind_literal(vtable_address);
}
__ flush();
*mc_top = (char*) __ pc();
}

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, Register temp1, Register temp2, SystemDictionary::WKID klass_id,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
- KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+ Klass* klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj);

View File

@ -270,12 +270,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry;
}
- address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
- // Not used.
- STOP("generate_continuation_for");
- return NULL;
- }
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();
@ -310,6 +304,9 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ convert_retval_to_tos(state);
#endif // !AARCH64
+ __ check_and_handle_popframe();
+ __ check_and_handle_earlyret();
__ dispatch_next(state, step);
return entry;
@ -1401,7 +1398,13 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
#ifdef AARCH64
// setup RmaxStack
__ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
- __ add(RmaxStack, RmaxStack, MAX2(1, Method::extra_stack_entries())); // reserve slots for exception handler and JSR292 appendix argument
+ // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
+ // none of which are at the same time, so we just need to make sure there is enough room
+ // for the biggest user:
+ // -reserved slot for exception handler
+ // -reserved slots for JSR292. Method::extra_stack_entries() is the size.
+ // -3 reserved slots so get_method_counters() can save some registers before call_VM().
+ __ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries()));
#endif // AARCH64
// see if we've got enough room on the stack for locals plus overhead.

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -2286,13 +2286,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
}
__ bind(no_mdo);
// Increment backedge counter in MethodCounters*
- __ get_method_counters(Rmethod, Rcounters, dispatch);
+ // Note Rbumped_taken_count is a callee saved registers for ARM32, but caller saved for ARM64
+ __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
+ Rdisp, R3_bytecode,
+ AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
Rcnt, R4_tmp, eq, &backedge_counter_overflow);
} else {
- // increment counter
- __ get_method_counters(Rmethod, Rcounters, dispatch);
+ // Increment backedge counter in MethodCounters*
+ __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
+ Rdisp, R3_bytecode,
+ AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg));
__ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter
__ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter
__ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter

View File

@ -1,6 +1,6 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -3177,9 +3177,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
assert_different_registers(val, crc, res);
__ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
- __ nand(crc, crc, crc); // ~crc
- __ update_byte_crc32(crc, val, res);
- __ nand(res, crc, crc); // ~crc
+ __ kernel_crc32_singleByteReg(crc, val, res, true);
+ __ mr(res, crc);
}
#undef __

View File

@ -1,6 +1,6 @@
/*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -63,18 +63,6 @@ void LIRItem::load_nonconstant() {
}
- inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
- LIR_Opr r = li.value()->operand();
- if (r->is_register()) {
- LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
- ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
- } else {
- // Constants or memory get loaded with sign extend on this platform.
- ll->move(li.result(), dst);
- }
- }
//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------
@ -1419,10 +1407,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
arg2 = cc->at(1),
arg3 = cc->at(2);
- // CCallingConventionRequiresIntsAsLongs
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
- load_int_as_long(gen()->lir(), len, arg3);
+ len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
@ -1434,6 +1421,76 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}
}
+ void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
+ assert(UseCRC32CIntrinsics, "or should not be here");
+ LIR_Opr result = rlock_result(x);
+ switch (x->id()) {
+ case vmIntrinsics::_updateBytesCRC32C:
+ case vmIntrinsics::_updateDirectByteBufferCRC32C: {
+ bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
+ LIRItem crc(x->argument_at(0), this);
+ LIRItem buf(x->argument_at(1), this);
+ LIRItem off(x->argument_at(2), this);
+ LIRItem end(x->argument_at(3), this);
+ buf.load_item();
+ off.load_nonconstant();
+ end.load_nonconstant();
+ // len = end - off
+ LIR_Opr len = end.result();
+ LIR_Opr tmpA = new_register(T_INT);
+ LIR_Opr tmpB = new_register(T_INT);
+ __ move(end.result(), tmpA);
+ __ move(off.result(), tmpB);
+ __ sub(tmpA, tmpB, tmpA);
+ len = tmpA;
+ LIR_Opr index = off.result();
+ int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
+ if (off.result()->is_constant()) {
+ index = LIR_OprFact::illegalOpr;
+ offset += off.result()->as_jint();
+ }
+ LIR_Opr base_op = buf.result();
+ LIR_Address* a = NULL;
+ if (index->is_valid()) {
+ LIR_Opr tmp = new_register(T_LONG);
+ __ convert(Bytecodes::_i2l, index, tmp);
+ index = tmp;
+ __ add(index, LIR_OprFact::intptrConst(offset), index);
+ a = new LIR_Address(base_op, index, T_BYTE);
+ } else {
+ a = new LIR_Address(base_op, offset, T_BYTE);
+ }
+ BasicTypeList signature(3);
+ signature.append(T_INT);
+ signature.append(T_ADDRESS);
+ signature.append(T_INT);
+ CallingConvention* cc = frame_map()->c_calling_convention(&signature);
+ const LIR_Opr result_reg = result_register_for(x->type());
+ LIR_Opr arg1 = cc->at(0),
+ arg2 = cc->at(1),
+ arg3 = cc->at(2);
+ crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
+ __ leal(LIR_OprFact::address(a), arg2);
+ __ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.
+ __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
+ __ move(result_reg, result);
+ break;
+ }
+ default: {
+ ShouldNotReachHere();
+ }
+ }
+ }
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
assert(x->number_of_arguments() == 3, "wrong type");
assert(UseFMA, "Needs FMA instructions support.");
@ -1460,7 +1517,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}
- void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
- Unimplemented();
- }
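The length and address setup in the new do_update_CRC32C above follows from the CRC32C intrinsic passing (crc, buf, off, end) rather than an explicit length, so the generator materializes len = end - off and folds the byte-array header (plus a constant off, when known) into the address handed to the stub. A plain-C++ restatement of just that arithmetic, with an illustrative header size (HotSpot derives it from arrayOopDesc::base_offset_in_bytes(T_BYTE)):

    #include <cstdint>
    #include <cstdio>

    struct StubArgs { const uint8_t* addr; int len; };

    // is_byte_array distinguishes updateBytesCRC32C (byte[] with an object header)
    // from updateDirectByteBufferCRC32C (raw address, no header).
    StubArgs crc32c_stub_args(const uint8_t* buf, int off, int end,
                              bool is_byte_array, int array_base_offset /* e.g. 16 */) {
      int len = end - off;                                  // len = end - off
      int offset = is_byte_array ? array_base_offset : 0;   // fold the header in
      return StubArgs{ buf + offset + off, len };
    }

    int main() {
      uint8_t data[64] = {0};
      StubArgs a = crc32c_stub_args(data, 3, 20, /*is_byte_array=*/false, 16);
      std::printf("len=%d offset=%td\n", a.len, a.addr - data);  // len=17 offset=3
      return 0;
    }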

View File

@ -45,8 +45,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
+ virtual void check_and_handle_popframe(Register scratch_reg);
+ virtual void check_and_handle_earlyret(Register scratch_reg);
// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);

View File

@ -1,6 +1,6 @@
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -4120,7 +4120,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
 * @param table register pointing to CRC table
 */
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
- Register data, bool loopAlignment, bool invertCRC) {
+ Register data, bool loopAlignment) {
assert_different_registers(crc, buf, len, table, data);
Label L_mainLoop, L_done;
@ -4131,10 +4131,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
clrldi_(len, len, 32); // Enforce 32 bit. Anything to do?
beq(CCR0, L_done);
- if (invertCRC) {
- nand(crc, crc, crc); // ~c
- }
mtctr(len);
align(mainLoop_alignment);
BIND(L_mainLoop);
@ -4143,10 +4139,6 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
update_byte_crc32(crc, data, table);
bdnz(L_mainLoop); // Iterate.
- if (invertCRC) {
- nand(crc, crc, crc); // ~c
- }
bind(L_done);
}
@ -4203,7 +4195,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
 */
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
- Register tc0, Register tc1, Register tc2, Register tc3) {
+ Register tc0, Register tc1, Register tc2, Register tc3,
+ bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@ -4217,14 +4210,16 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
const int complexThreshold = 2*mainLoop_stepping;
// Don't test for len <= 0 here. This pathological case should not occur anyway.
- // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
- // The situation itself is detected and handled correctly by the conditional branches
- // following aghi(len, -stepping) and aghi(len, +stepping).
+ // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
+ // for all well-behaved cases. The situation itself is detected and handled correctly
+ // within update_byteLoop_crc32.
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
BLOCK_COMMENT("kernel_crc32_2word {");
- nand(crc, crc, crc); // ~c
+ if (invertCRC) {
+ nand(crc, crc, crc); // 1s complement of crc
+ }
// Check for short (<mainLoop_stepping) buffer.
cmpdi(CCR0, len, complexThreshold);
@ -4245,7 +4240,7 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
}
- update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
+ update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
}
srdi(tmp2, len, log_stepping); // #iterations for mainLoop
@ -4281,9 +4276,11 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
// Process last few (<complexThreshold) bytes of buffer.
BIND(L_tail);
- update_byteLoop_crc32(crc, buf, len, table, data, false, false);
- nand(crc, crc, crc); // ~c
+ update_byteLoop_crc32(crc, buf, len, table, data, false);
+ if (invertCRC) {
+ nand(crc, crc, crc); // 1s complement of crc
+ }
BLOCK_COMMENT("} kernel_crc32_2word");
}
@ -4297,7 +4294,8 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
 */
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
- Register tc0, Register tc1, Register tc2, Register tc3) {
+ Register tc0, Register tc1, Register tc2, Register tc3,
+ bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail;
@ -4311,14 +4309,16 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
const int complexThreshold = 2*mainLoop_stepping;
// Don't test for len <= 0 here. This pathological case should not occur anyway.
- // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
- // The situation itself is detected and handled correctly by the conditional branches
- // following aghi(len, -stepping) and aghi(len, +stepping).
+ // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
+ // for all well-behaved cases. The situation itself is detected and handled correctly
+ // within update_byteLoop_crc32.
assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
BLOCK_COMMENT("kernel_crc32_1word {");
- nand(crc, crc, crc); // ~c
+ if (invertCRC) {
+ nand(crc, crc, crc); // 1s complement of crc
+ }
// Check for short (<mainLoop_stepping) buffer.
cmpdi(CCR0, len, complexThreshold);
@ -4339,7 +4339,7 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
blt(CCR0, L_tail); // For less than one mainloop_stepping left, do only tail processing
mr(len, tmp); // remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
}
- update_byteLoop_crc32(crc, buf, tmp2, table, data, false, false);
+ update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
}
srdi(tmp2, len, log_stepping); // #iterations for mainLoop
@ -4374,9 +4374,11 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
// Process last few (<complexThreshold) bytes of buffer.
BIND(L_tail);
- update_byteLoop_crc32(crc, buf, len, table, data, false, false);
- nand(crc, crc, crc); // ~c
+ update_byteLoop_crc32(crc, buf, len, table, data, false);
+ if (invertCRC) {
+ nand(crc, crc, crc); // 1s complement of crc
+ }
BLOCK_COMMENT("} kernel_crc32_1word");
}
@ -4389,16 +4391,24 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
 * Uses R7_ARG5, R8_ARG6 as work registers.
 */
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
- Register t0, Register t1, Register t2, Register t3) {
+ Register t0, Register t1, Register t2, Register t3,
+ bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Register data = t0; // Holds the current byte to be folded into crc.
BLOCK_COMMENT("kernel_crc32_1byte {");
- // Process all bytes in a single-byte loop.
- update_byteLoop_crc32(crc, buf, len, table, data, true, true);
+ if (invertCRC) {
+ nand(crc, crc, crc); // 1s complement of crc
+ }
+ // Process all bytes in a single-byte loop.
+ update_byteLoop_crc32(crc, buf, len, table, data, true);
+ if (invertCRC) {
+ nand(crc, crc, crc); // 1s complement of crc
+ }
BLOCK_COMMENT("} kernel_crc32_1byte");
}
@ -4416,7 +4426,8 @@ void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len
 */
void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants,
- Register t0, Register t1, Register t2, Register t3, Register t4) {
+ Register t0, Register t1, Register t2, Register t3, Register t4,
+ bool invertCRC) {
assert_different_registers(crc, buf, len, table);
Label L_alignedHead, L_tail, L_alignTail, L_start, L_end;
@ -4434,13 +4445,15 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
Register tc0 = t4;
Register tc1 = constants;
Register tc2 = barretConstants;
- kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table);
+ kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
b(L_end);
BIND(L_start);
// 2. ~c
- nand(crc, crc, crc);
+ if (invertCRC) {
+ nand(crc, crc, crc); // 1s complement of crc
+ }
// 3. calculate from 0 to first 128bit-aligned address
clrldi_(prealign, buf, 57);
@ -4449,7 +4462,7 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
subfic(prealign, prealign, 128); subfic(prealign, prealign, 128);
subf(len, prealign, len); subf(len, prealign, len);
update_byteLoop_crc32(crc, buf, prealign, table, t2, false, false); update_byteLoop_crc32(crc, buf, prealign, table, t2, false);
// 4. calculate from first 128bit-aligned address to last 128bit-aligned address // 4. calculate from first 128bit-aligned address to last 128bit-aligned address
BIND(L_alignedHead); BIND(L_alignedHead);
@ -4464,12 +4477,14 @@ void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Regi
cmpdi(CCR0, postalign, 0); cmpdi(CCR0, postalign, 0);
beq(CCR0, L_tail); beq(CCR0, L_tail);
update_byteLoop_crc32(crc, buf, postalign, table, t2, false, false); update_byteLoop_crc32(crc, buf, postalign, table, t2, false);
BIND(L_tail); BIND(L_tail);
// 6. ~c // 6. ~c
nand(crc, crc, crc); if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
BIND(L_end); BIND(L_end);
@ -4961,16 +4976,35 @@ void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Regi
offsetInt -= 8; ld(R31, offsetInt, R1_SP); offsetInt -= 8; ld(R31, offsetInt, R1_SP);
} }
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) { void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp, bool invertCRC) {
assert_different_registers(crc, buf, /* len, not used!! */ table, tmp); assert_different_registers(crc, buf, /* len, not used!! */ table, tmp);
BLOCK_COMMENT("kernel_crc32_singleByte:"); BLOCK_COMMENT("kernel_crc32_singleByte:");
nand(crc, crc, crc); // ~c if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
lbz(tmp, 0, buf); // Byte from buffer, zero-extended. lbz(tmp, 0, buf); // Byte from buffer, zero-extended.
update_byte_crc32(crc, tmp, table); update_byte_crc32(crc, tmp, table);
nand(crc, crc, crc); // ~c if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
}
void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table, bool invertCRC) {
assert_different_registers(crc, val, table);
BLOCK_COMMENT("kernel_crc32_singleByteReg:");
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
update_byte_crc32(crc, val, table);
if (invertCRC) {
nand(crc, crc, crc); // 1s complement of crc
}
} }
// dest_lo += src1 + src2 // dest_lo += src1 + src2
View File
@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved. * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -819,33 +819,47 @@ class MacroAssembler: public Assembler {
Register tmp6, Register tmp7, Register tmp8, Register tmp9, Register tmp10, Register tmp6, Register tmp7, Register tmp8, Register tmp9, Register tmp10,
Register tmp11, Register tmp12, Register tmp13); Register tmp11, Register tmp12, Register tmp13);
// CRC32 Intrinsics. // Emitters for CRC32 calculation.
// A note on invertCRC:
// Unfortunately, internal representation of crc differs between CRC32 and CRC32C.
// CRC32 holds its current crc value in the externally visible representation. // CRC32 holds its current crc value in the externally visible representation.
// CRC32C holds its current crc value in internal format, ready for updating. // CRC32C holds its current crc value in internal format, ready for updating.
// Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
// In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
// The bool invertCRC parameter indicates whether bit-flipping is required before updates.
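A minimal C++ sketch of the calling pattern this note implies (illustrative only, not part of this change; fold_bytes stands in for the table-driven folding the emitters below perform, and CRC32C would use the Castagnoli polynomial 0x82F63B78 with its own table):

    #include <cstddef>
    #include <cstdint>

    // Stand-in for the table-driven folding done by the emitters below:
    // plain bitwise, reflected CRC-32 update with polynomial 0xEDB88320.
    static uint32_t fold_bytes(uint32_t crc, const uint8_t* buf, size_t len) {
      for (size_t i = 0; i < len; i++) {
        crc ^= buf[i];
        for (int b = 0; b < 8; b++) {
          crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
        }
      }
      return crc;
    }

    // How the invertCRC flag is meant to be used around an update.
    static uint32_t update_crc(uint32_t crc, const uint8_t* buf, size_t len, bool invertCRC) {
      if (invertCRC) crc = ~crc;        // CRC32: external form -> internal form
      crc = fold_bytes(crc, buf, len);
      if (invertCRC) crc = ~crc;        // CRC32: internal form -> external form
      return crc;                       // CRC32C callers pass invertCRC == false
    }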
void load_reverse_32(Register dst, Register src); void load_reverse_32(Register dst, Register src);
int crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3); int crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3);
void fold_byte_crc32(Register crc, Register val, Register table, Register tmp); void fold_byte_crc32(Register crc, Register val, Register table, Register tmp);
void fold_8bit_crc32(Register crc, Register table, Register tmp); void fold_8bit_crc32(Register crc, Register table, Register tmp);
void update_byte_crc32(Register crc, Register val, Register table); void update_byte_crc32(Register crc, Register val, Register table);
void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool loopAlignment, bool invertCRC); Register data, bool loopAlignment);
void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc, void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
Register t0, Register t1, Register t2, Register t3, Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3); Register tc0, Register tc1, Register tc2, Register tc3);
void kernel_crc32_2word(Register crc, Register buf, Register len, Register table, void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3, Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3); Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC);
void kernel_crc32_1word(Register crc, Register buf, Register len, Register table, void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3, Register t0, Register t1, Register t2, Register t3,
Register tc0, Register tc1, Register tc2, Register tc3); Register tc0, Register tc1, Register tc2, Register tc3,
bool invertCRC);
void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table, void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3); Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
void kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table, void kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants, Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4); Register t0, Register t1, Register t2, Register t3, Register t4,
bool invertCRC);
void kernel_crc32_1word_aligned(Register crc, Register buf, Register len, void kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
Register constants, Register barretConstants, Register constants, Register barretConstants,
Register t0, Register t1, Register t2); Register t0, Register t1, Register t2);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp); void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC);
void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
bool invertCRC);
// //
// Debugging // Debugging
View File
@ -1,78 +0,0 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->klass_part()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
// There are more general problems with CDS on ppc, so I cannot
// really test this. But having this instead of Unimplemented() allows
// us to pass TestOptionsWithRanges.java.
__ unimplemented();
}
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -71,7 +71,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register temp_reg, Register temp2_reg, Register temp_reg, Register temp2_reg,
const char* error_message) { const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id); InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id); Klass* klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad; Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {"); BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj_reg); __ verify_oop(obj_reg);
View File
@ -1,5 +1,5 @@
// //
// Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2012, 2016 SAP SE. All rights reserved. // Copyright (c) 2012, 2016 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
@ -2053,12 +2053,12 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
} }
// Vector ideal reg. // Vector ideal reg.
const int Matcher::vector_ideal_reg(int size) { const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8 && size == 8, ""); assert(MaxVectorSize == 8 && size == 8, "");
return Op_RegL; return Op_RegL;
} }
const int Matcher::vector_shift_count_ideal_reg(int size) { const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported"); fatal("vector shift is not supported");
return Node::NotAMachineReg; return Node::NotAMachineReg;
} }
View File
@ -1,6 +1,6 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved. * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -3276,6 +3276,36 @@ class StubGenerator: public StubCodeGenerator {
return start; return start;
} }
// Compute CRC32/CRC32C function.
void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process
const Register t0 = R2;
const Register t1 = R7;
const Register t2 = R8;
const Register t3 = R9;
const Register tc0 = R10;
const Register tc1 = R11;
const Register tc2 = R12;
BLOCK_COMMENT("Stub body {");
assert_different_registers(crc, data, dataLen, table);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr();
BLOCK_COMMENT("} Stub body");
}
/** /**
* Arguments: * Arguments:
* *
@ -3296,14 +3326,14 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name); StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); // Remember stub start address (is rtn value). address start = __ function_entry(); // Remember stub start address (is rtn value).
const Register table = R6; // crc table address
#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32: // arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call. const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process const Register dataLen = R5_ARG3; // #bytes to process
const Register table = R6; // crc table address
#ifdef VM_LITTLE_ENDIAN
if (VM_Version::has_vpmsumb()) { if (VM_Version::has_vpmsumb()) {
const Register constants = R2; // constants address const Register constants = R2; // constants address
const Register bconstants = R8; // barret table address const Register bconstants = R8; // barret table address
@ -3321,7 +3351,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants); StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants); StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);
__ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4); __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);
BLOCK_COMMENT("return"); BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET). __ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
@ -3331,31 +3361,79 @@ class StubGenerator: public StubCodeGenerator {
} else } else
#endif #endif
{ {
const Register t0 = R2; StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
const Register t1 = R7; generate_CRC_updateBytes(name, table, true);
const Register t2 = R8; }
const Register t3 = R9;
const Register tc0 = R10; return start;
const Register tc1 = R11; }
const Register tc2 = R12;
/**
* Arguments:
*
* Inputs:
* R3_ARG1 - int crc
* R4_ARG2 - byte* buf
* R5_ARG3 - int length (of buffer)
*
* scratch:
* R2, R6-R12
*
* Output:
* R3_RET - int crc result
*/
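Viewed from the runtime, the stub generated below follows the register interface documented above. An illustrative C-level prototype (the name is hypothetical, not part of this change) could be:

    // Hypothetical C view of the generated stub: the updated crc comes back
    // in R3_RET, which aliases R3_ARG1, so the stub needs no copy before returning.
    extern "C" int CRC32C_updateBytes_stub(int crc, const unsigned char* buf, int len);

The C1 LIRGenerator change in this commit builds its leaf call with exactly this (T_INT, T_ADDRESS, T_INT) signature.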
// Compute CRC32C function.
address generate_CRC32C_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); // Remember stub start address (is rtn value).
const Register table = R6; // crc table address
#if 0 // no vector support yet for CRC32C
#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
const Register dataLen = R5_ARG3; // #bytes to process
if (VM_Version::has_vpmsumb()) {
const Register constants = R2; // constants address
const Register bconstants = R8; // barret table address
const Register t0 = R9;
const Register t1 = R10;
const Register t2 = R11;
const Register t3 = R12;
const Register t4 = R7;
BLOCK_COMMENT("Stub body {"); BLOCK_COMMENT("Stub body {");
assert_different_registers(crc, data, dataLen, table); assert_different_registers(crc, data, dataLen, table);
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table); StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table); __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);
BLOCK_COMMENT("return"); BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET). __ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr(); __ blr();
BLOCK_COMMENT("} Stub body"); BLOCK_COMMENT("} Stub body");
} else
#endif
#endif
{
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, false);
} }
return start; return start;
} }
// Initialization // Initialization
void generate_initial() { void generate_initial() {
// Generates all stubs and initializes the entry points // Generates all stubs and initializes the entry points
@ -3383,6 +3461,12 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_crc_table_adr = (address)StubRoutines::ppc64::_crc_table; StubRoutines::_crc_table_adr = (address)StubRoutines::ppc64::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes"); StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
} }
// CRC32C Intrinsics.
if (UseCRC32CIntrinsics) {
StubRoutines::_crc32c_table_addr = (address)StubRoutines::ppc64::_crc32c_table;
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
}
} }
void generate_all() { void generate_all() {

View File
/* /*
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved. * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -55,13 +55,16 @@ class ppc64 {
// CRC32 Intrinsics. // CRC32 Intrinsics.
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE]; static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint* _constants; static juint* _constants;
static juint* _barret_constants; static juint* _barret_constants;
public: public:
// CRC32 Intrinsics. // CRC32 Intrinsics.
static void generate_load_table_addr(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table); static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_constants_addr(MacroAssembler* masm, Register table); static void generate_load_crc_constants_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_barret_constants_addr(MacroAssembler* masm, Register table); static void generate_load_crc_barret_constants_addr(MacroAssembler* masm, Register table);
static juint* generate_crc_constants(); static juint* generate_crc_constants();
File diff suppressed because it is too large
View File
@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017 SAP SE. All rights reserved. * Copyright (c) 2015, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -643,12 +643,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry; return entry;
} }
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
__ unimplemented("generate_continuation_for");
return entry;
}
// This entry is returned to when a call returns to the interpreter. // This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped. // When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
@ -692,6 +686,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
#endif #endif
__ sldi(size, size, Interpreter::logStackElementSize); __ sldi(size, size, Interpreter::logStackElementSize);
__ add(R15_esp, R15_esp, size); __ add(R15_esp, R15_esp, size);
__ check_and_handle_popframe(R11_scratch1);
__ check_and_handle_earlyret(R11_scratch1);
__ dispatch_next(state, step); __ dispatch_next(state, step);
return entry; return entry;
} }
@ -1894,7 +1892,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ lwz(crc, 2*wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register. __ lwz(crc, 2*wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table); StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
__ kernel_crc32_singleByte(crc, data, dataLen, table, tmp); __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp, true);
// Restore caller sp for c2i case and return. // Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started. __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@ -1910,7 +1908,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
return NULL; return NULL;
} }
// CRC32 Intrinsics.
/** /**
* Method entry for static native methods: * Method entry for static native methods:
* int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len) * int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
@ -1986,7 +1984,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// Performance measurements show the 1word and 2word variants to be almost equivalent, // Performance measurements show the 1word and 2word variants to be almost equivalent,
// with very light advantages for the 1word variant. We chose the 1word variant for // with very light advantages for the 1word variant. We chose the 1word variant for
// code compactness. // code compactness.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3); __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, true);
// Restore caller sp for c2i case and return. // Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started. __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
@ -2002,8 +2000,88 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
return NULL; return NULL;
} }
// Not supported
/**
* Method entry for intrinsic-candidate (non-native) methods:
* int java.util.zip.CRC32C.updateBytes( int crc, byte[] b, int off, int end)
* int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
* Unlike CRC32, CRC32C does not have any methods marked as native
* CRC32C also uses an "end" variable instead of the length variable CRC32 uses
**/
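Since the Java methods pass an end index rather than a length, the entry below derives the byte count and the start address itself. A small self-contained sketch of that arithmetic (illustrative only; names are hypothetical):

    #include <cstddef>

    // dataLen = end - off; data = buf + off (+ array header for updateBytes).
    static const unsigned char* crc32c_range(const unsigned char* base, int off, int end,
                                             size_t header_bytes,  // base_offset_in_bytes(T_BYTE) for updateBytes, 0 for a direct buffer
                                             int* dataLen) {
      *dataLen = end - off;
      return base + header_bytes + off;
    }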
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32CIntrinsics) {
address start = __ pc(); // Remember stub start address (is rtn value).
// We don't generate a local frame and don't align the stack, because
// we don't even call stub code (we generate the code inline)
// and there is no safepoint on this path.
// Load parameters.
// R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
const Register argP = R15_esp;
const Register crc = R3_ARG1; // crc value
const Register data = R4_ARG2; // address of java byte array
const Register dataLen = R5_ARG3; // source data len
const Register table = R6_ARG4; // address of crc32c table
const Register t0 = R9; // scratch registers for crc calculation
const Register t1 = R10;
const Register t2 = R11;
const Register t3 = R12;
const Register tc0 = R2; // registers to hold pre-calculated column addresses
const Register tc1 = R7;
const Register tc2 = R8;
const Register tc3 = table; // table address is reconstructed at the end of kernel_crc32_* emitters
const Register tmp = t0; // Only used very locally to calculate byte buffer address.
// Arguments are reversed on java expression stack.
// Calculate address of start element.
if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateDirectByteBuffer".
BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
// crc @ (SP + 5W) (32bit)
// buf @ (SP + 3W) (64bit ptr to long array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off
__ ld( data, 3*wordSize, argP); // start of byte buffer
__ lwa( tmp, 2*wordSize, argP); // byte buffer offset
__ lwa( dataLen, 1*wordSize, argP); // #bytes to process
__ lwz( crc, 5*wordSize, argP); // current crc state
__ add( data, data, tmp); // Add byte buffer offset.
__ sub( dataLen, dataLen, tmp); // (end_index - offset)
} else { // Used for "updateBytes update".
BLOCK_COMMENT("CRC32C_updateBytes {");
// crc @ (SP + 4W) (32bit)
// buf @ (SP + 3W) (64bit ptr to byte array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off + base_offset
__ ld( data, 3*wordSize, argP); // start of byte buffer
__ lwa( tmp, 2*wordSize, argP); // byte buffer offset
__ lwa( dataLen, 1*wordSize, argP); // #bytes to process
__ add( data, data, tmp); // add byte buffer offset
__ sub( dataLen, dataLen, tmp); // (end_index - offset)
__ lwz( crc, 4*wordSize, argP); // current crc state
__ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
// Performance measurements show the 1word and 2word variants to be almost equivalent,
// with very light advantages for the 1word variant. We chose the 1word variant for
// code compactness.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, false);
// Restore caller sp for c2i case and return.
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
__ blr();
BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
return start;
}
return NULL; return NULL;
} }
View File
@ -1,6 +1,6 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved. * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -172,18 +172,27 @@ void VM_Version::initialize() {
assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive"); assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
// Implementation does not use any of the vector instructions // If defined(VM_LITTLE_ENDIAN) and running on Power8 or newer hardware,
// available with Power8. Their exploitation is still pending. // the implementation uses the vector instructions available with Power8.
// In all other cases, the implementation uses only generally available instructions.
if (!UseCRC32Intrinsics) { if (!UseCRC32Intrinsics) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true); FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
} }
} }
if (UseCRC32CIntrinsics) { // Implementation does not use any of the vector instructions available with Power8.
if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) // Their exploitation is still pending (aka "work in progress").
warning("CRC32C intrinsics are not available on this CPU"); if (!UseCRC32CIntrinsics) {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false); if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
}
}
// TODO: Provide implementation.
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
} }
// The AES intrinsic stubs require AES instruction support. // The AES intrinsic stubs require AES instruction support.
@ -245,11 +254,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
} }
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true; UseMultiplyToLenIntrinsic = true;
} }
View File
@ -28,8 +28,6 @@
#undef LUCY_DBG #undef LUCY_DBG
#define NearLabel Label
// Immediate is an abstraction to represent the various immediate // Immediate is an abstraction to represent the various immediate
// operands which exist on z/Architecture. Neither this class nor // operands which exist on z/Architecture. Neither this class nor
// instances hereof have an own state. It consists of methods only. // instances hereof have an own state. It consists of methods only.
View File
@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -3048,9 +3048,8 @@ void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
assert_different_registers(val, crc, res); assert_different_registers(val, crc, res);
__ load_const_optimized(res, StubRoutines::crc_table_addr()); __ load_const_optimized(res, StubRoutines::crc_table_addr());
__ not_(crc, noreg, false); // ~crc __ kernel_crc32_singleByteReg(crc, val, res, true);
__ update_byte_crc32(crc, val, res); __ z_lgfr(res, crc);
__ not_(res, crc, false); // ~crc
} }
#undef __ #undef __
View File
@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -61,20 +61,6 @@ void LIRItem::load_nonconstant(int bits) {
} }
} }
inline void load_int_as_long(LIR_List *ll, LIRItem &li, LIR_Opr dst) {
LIR_Opr r = li.value()->operand();
if (r->is_constant()) {
// Constants get loaded with sign extend on this platform.
ll->move(li.result(), dst);
} else {
if (!r->is_register()) {
li.load_item_force(dst);
}
LIR_Opr dst_l = FrameMap::as_long_opr(dst->as_register());
ll->convert(Bytecodes::_i2l, li.result(), dst_l); // Convert.
}
}
//-------------------------------------------------------------- //--------------------------------------------------------------
// LIRGenerator // LIRGenerator
//-------------------------------------------------------------- //--------------------------------------------------------------
@ -1217,10 +1203,9 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
LIR_Opr arg2 = cc->at(1); LIR_Opr arg2 = cc->at(1);
LIR_Opr arg3 = cc->at(2); LIR_Opr arg3 = cc->at(2);
// CCallingConventionRequiresIntsAsLongs
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits. crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2); __ leal(LIR_OprFact::address(a), arg2);
load_int_as_long(gen()->lir(), len, arg3); len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args()); __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result); __ move(result_reg, result);
@ -1233,7 +1218,70 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
} }
void LIRGenerator::do_update_CRC32C(Intrinsic* x) { void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
Unimplemented(); assert(UseCRC32CIntrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);
switch (x->id()) {
case vmIntrinsics::_updateBytesCRC32C:
case vmIntrinsics::_updateDirectByteBufferCRC32C: {
bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
LIRItem crc(x->argument_at(0), this);
LIRItem buf(x->argument_at(1), this);
LIRItem off(x->argument_at(2), this);
LIRItem end(x->argument_at(3), this);
buf.load_item();
off.load_nonconstant();
end.load_nonconstant();
// len = end - off
LIR_Opr len = end.result();
LIR_Opr tmpA = new_register(T_INT);
LIR_Opr tmpB = new_register(T_INT);
__ move(end.result(), tmpA);
__ move(off.result(), tmpB);
__ sub(tmpA, tmpB, tmpA);
len = tmpA;
LIR_Opr index = off.result();
int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
if (off.result()->is_constant()) {
index = LIR_OprFact::illegalOpr;
offset += off.result()->as_jint();
}
LIR_Opr base_op = buf.result();
if (index->is_valid()) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index, tmp);
index = tmp;
}
LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE);
BasicTypeList signature(3);
signature.append(T_INT);
signature.append(T_ADDRESS);
signature.append(T_INT);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
const LIR_Opr result_reg = result_register_for (x->type());
LIR_Opr arg1 = cc->at(0);
LIR_Opr arg2 = cc->at(1);
LIR_Opr arg3 = cc->at(2);
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
__ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.
__ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
__ move(result_reg, result);
break;
}
default: {
ShouldNotReachHere();
}
}
} }
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) { void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
@ -1264,4 +1312,3 @@ void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) { void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
fatal("vectorizedMismatch intrinsic is not implemented on this platform"); fatal("vectorizedMismatch intrinsic is not implemented on this platform");
} }
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -48,9 +48,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool allow_relocation, bool allow_relocation,
bool check_exceptions); bool check_exceptions);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// Base routine for all dispatches. // Base routine for all dispatches.
void dispatch_base(TosState state, address* table); void dispatch_base(TosState state, address* table);
@ -58,6 +55,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c) InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {} : MacroAssembler(c) {}
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
void jump_to_entry(address entry, Register Rscratch); void jump_to_entry(address entry, Register Rscratch);
virtual void load_earlyret_value(TosState state); virtual void load_earlyret_value(TosState state);
View File
@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1616,6 +1616,8 @@ void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& b
if (branch_target.is_bound()) { if (branch_target.is_bound()) {
address branch_addr = target(branch_target); address branch_addr = target(branch_target);
branch_optimized(cond, branch_addr); branch_optimized(cond, branch_addr);
} else if (branch_target.is_near()) {
z_brc(cond, branch_target); // Caller assures that the target will be in range for z_brc.
} else { } else {
z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time. z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
} }
@ -1674,7 +1676,8 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
bool has_sign) { bool has_sign) {
address branch_origin = pc(); address branch_origin = pc();
bool x2_imm8 = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2)); bool x2_imm8 = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
bool is_RelAddr16 = (branch_target.is_bound() && bool is_RelAddr16 = branch_target.is_near() ||
(branch_target.is_bound() &&
RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin)); RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
unsigned int casenum = (len64?2:0)+(has_sign?0:1); unsigned int casenum = (len64?2:0)+(has_sign?0:1);
@ -1744,13 +1747,21 @@ void MacroAssembler::compare_and_branch_optimized(Register r1,
Label& branch_target, Label& branch_target,
bool len64, bool len64,
bool has_sign) { bool has_sign) {
unsigned int casenum = (len64?2:0)+(has_sign?0:1); unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
if (branch_target.is_bound()) { if (branch_target.is_bound()) {
address branch_addr = target(branch_target); address branch_addr = target(branch_target);
compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign); compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
} else { } else {
{ if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
switch (casenum) {
case 0: z_crj( r1, r2, cond, branch_target); break;
case 1: z_clrj( r1, r2, cond, branch_target); break;
case 2: z_cgrj( r1, r2, cond, branch_target); break;
case 3: z_clgrj(r1, r2, cond, branch_target); break;
default: ShouldNotReachHere(); break;
}
} else {
switch (casenum) { switch (casenum) {
case 0: z_cr( r1, r2); break; case 0: z_cr( r1, r2); break;
case 1: z_clr(r1, r2); break; case 1: z_clr(r1, r2); break;
@ -2741,11 +2752,11 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
BLOCK_COMMENT("lookup_interface_method {"); BLOCK_COMMENT("lookup_interface_method {");
// Load start of itable entries into itable_entry_addr. // Load start of itable entries into itable_entry_addr.
z_llgf(vtable_len, Address(recv_klass, InstanceKlass::vtable_length_offset())); z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes())); z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
// Loop over all itable entries until desired interfaceOop(Rinterface) found. // Loop over all itable entries until desired interfaceOop(Rinterface) found.
const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset()); const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
add2reg_with_index(itable_entry_addr, add2reg_with_index(itable_entry_addr,
vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
@ -5927,8 +5938,7 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
* @param len register containing number of bytes * @param len register containing number of bytes
* @param table register pointing to CRC table * @param table register pointing to CRC table
*/ */
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
Register data, bool invertCRC) {
assert_different_registers(crc, buf, len, table, data); assert_different_registers(crc, buf, len, table, data);
Label L_mainLoop, L_done; Label L_mainLoop, L_done;
@ -5938,20 +5948,12 @@ void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register
z_ltr(len, len); z_ltr(len, len);
z_brnh(L_done); z_brnh(L_done);
if (invertCRC) {
not_(crc, noreg, false); // ~c
}
bind(L_mainLoop); bind(L_mainLoop);
z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register. z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
add2reg(buf, mainLoop_stepping); // Advance buffer position. add2reg(buf, mainLoop_stepping); // Advance buffer position.
update_byte_crc32(crc, data, table); update_byte_crc32(crc, data, table);
z_brct(len, L_mainLoop); // Iterate. z_brct(len, L_mainLoop); // Iterate.
if (invertCRC) {
not_(crc, noreg, false); // ~c
}
bind(L_done); bind(L_done);
} }
@ -5968,6 +5970,7 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
// c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ // c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
// crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] // crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
// #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
// Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
const int ix0 = 4*(4*CRC32_COLUMN_SIZE); const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
const int ix1 = 5*(4*CRC32_COLUMN_SIZE); const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
const int ix2 = 6*(4*CRC32_COLUMN_SIZE); const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
@ -5986,17 +5989,12 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true); // ((c >> 16) & 0xff) << 2 rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true); // ((c >> 16) & 0xff) << 2
rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true); // ((c >> 24) & 0xff) << 2 rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true); // ((c >> 24) & 0xff) << 2
// Load pre-calculated table values. // XOR indexed table values to calculate updated crc.
// Use columns 4..7 for big-endian.
z_ly(t3, Address(table, t3, (intptr_t)ix0));
z_ly(t2, Address(table, t2, (intptr_t)ix1)); z_ly(t2, Address(table, t2, (intptr_t)ix1));
z_ly(t1, Address(table, t1, (intptr_t)ix2));
z_ly(t0, Address(table, t0, (intptr_t)ix3)); z_ly(t0, Address(table, t0, (intptr_t)ix3));
z_xy(t2, Address(table, t3, (intptr_t)ix0));
// Calculate new crc from table values. z_xy(t0, Address(table, t1, (intptr_t)ix2));
z_xr(t2, t3); z_xr(t0, t2); // Now t0 contains the updated CRC value.
z_xr(t0, t1);
z_xr(t0, t2); // Now crc contains the final checksum value.
lgr_if_needed(crc, t0); lgr_if_needed(crc, t0);
} }
@ -6009,7 +6007,8 @@ void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register tab
* uses Z_R10..Z_R13 as work register. Must be saved/restored by caller! * uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!
*/ */
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table, void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) { Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table); assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail; Label L_mainLoop, L_tail;
@ -6024,7 +6023,9 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
// The situation itself is detected and handled correctly by the conditional branches // The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping). // following aghi(len, -stepping) and aghi(len, +stepping).
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc not_(crc, noreg, false); // 1s complement of crc
}
#if 0 #if 0
{ {
@ -6039,7 +6040,7 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc
z_sgfr(len, ctr); // Remaining len after alignment. z_sgfr(len, ctr); // Remaining len after alignment.
update_byteLoop_crc32(crc, buf, ctr, table, data, false); update_byteLoop_crc32(crc, buf, ctr, table, data);
} }
#endif #endif
@ -6059,9 +6060,11 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
// Process last few (<8) bytes of buffer. // Process last few (<8) bytes of buffer.
BIND(L_tail); BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false); update_byteLoop_crc32(crc, buf, len, table, data);
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc not_(crc, noreg, false); // 1s complement of crc
}
} }
/** /**
@ -6073,7 +6076,8 @@ void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len
* uses Z_R10..Z_R13 as work register. Must be saved/restored by caller! * uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!
*/ */
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table, void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) { Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table); assert_different_registers(crc, buf, len, table);
Label L_mainLoop, L_tail; Label L_mainLoop, L_tail;
@ -6087,7 +6091,9 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
// The situation itself is detected and handled correctly by the conditional branches // The situation itself is detected and handled correctly by the conditional branches
// following aghi(len, -stepping) and aghi(len, +stepping). // following aghi(len, -stepping) and aghi(len, +stepping).
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc not_(crc, noreg, false); // 1s complement of crc
}
// Check for short (<4 bytes) buffer. // Check for short (<4 bytes) buffer.
z_srag(ctr, len, log_stepping); z_srag(ctr, len, log_stepping);
@ -6099,13 +6105,16 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
BIND(L_mainLoop); BIND(L_mainLoop);
update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3); update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
z_brct(ctr, L_mainLoop); // Iterate. z_brct(ctr, L_mainLoop); // Iterate.
z_lrvr(crc, crc); // Revert byte order back to original. z_lrvr(crc, crc); // Revert byte order back to original.
// Process last few (<8) bytes of buffer. // Process last few (<8) bytes of buffer.
BIND(L_tail); BIND(L_tail);
update_byteLoop_crc32(crc, buf, len, table, data, false); update_byteLoop_crc32(crc, buf, len, table, data);
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc not_(crc, noreg, false); // 1s complement of crc
}
} }
/** /**
@ -6115,22 +6124,51 @@ void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len
* @param table register pointing to CRC table * @param table register pointing to CRC table
*/ */
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table, void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3) { Register t0, Register t1, Register t2, Register t3,
bool invertCRC) {
assert_different_registers(crc, buf, len, table); assert_different_registers(crc, buf, len, table);
Register data = t0; Register data = t0;
update_byteLoop_crc32(crc, buf, len, table, data, true); if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
update_byteLoop_crc32(crc, buf, len, table, data);
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
} }
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) { void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC) {
assert_different_registers(crc, buf, len, table, tmp); assert_different_registers(crc, buf, len, table, tmp);
not_(crc, noreg, false); // ~c if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
z_llgc(tmp, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register. z_llgc(tmp, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
update_byte_crc32(crc, tmp, table); update_byte_crc32(crc, tmp, table);
not_(crc, noreg, false); // ~c if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
}
void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
bool invertCRC) {
assert_different_registers(crc, val, table);
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
update_byte_crc32(crc, val, table);
if (invertCRC) {
not_(crc, noreg, false); // 1s complement of crc
}
} }
// //
View File
@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1011,22 +1011,35 @@ class MacroAssembler: public Assembler {
int before = 0, int after = 0) PRODUCT_RETURN; int before = 0, int after = 0) PRODUCT_RETURN;
// Emitters for CRC32 calculation. // Emitters for CRC32 calculation.
// A note on invertCRC:
// Unfortunately, internal representation of crc differs between CRC32 and CRC32C.
// CRC32 holds its current crc value in the externally visible representation.
// CRC32C holds its current crc value in internal format, ready for updating.
// Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
// In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
// The bool invertCRC parameter indicates whether bit-flipping is required before updates.
private: private:
void fold_byte_crc32(Register crc, Register table, Register val, Register tmp); void fold_byte_crc32(Register crc, Register table, Register val, Register tmp);
void fold_8bit_crc32(Register crc, Register table, Register tmp); void fold_8bit_crc32(Register crc, Register table, Register tmp);
void update_byte_crc32( Register crc, Register val, Register table);
void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
Register data, bool invertCRC); Register data);
void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc, void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
Register t0, Register t1, Register t2, Register t3); Register t0, Register t1, Register t2, Register t3);
public: public:
void update_byte_crc32( Register crc, Register val, Register table); void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp); bool invertCRC);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC);
void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table, void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3); Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
void kernel_crc32_1word(Register crc, Register buf, Register len, Register table, void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3); Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
void kernel_crc32_2word(Register crc, Register buf, Register len, Register table, void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3); Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
// Emitters for BigInteger.multiplyToLen intrinsic // Emitters for BigInteger.multiplyToLen intrinsic
// note: length of result array (zlen) is passed on the stack // note: length of result array (zlen) is passed on the stack
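As a rough illustration of the invertCRC convention described in the note above, the following standalone C++ sketch (not HotSpot code; the bitwise kernel and polynomial constants are assumptions for illustration only) shows where the bit flips land for CRC32 versus CRC32C:

#include <cstdint>
#include <cstddef>

// Bitwise (reflected) CRC kernel operating on the "internal", ready-to-update state.
static uint32_t update_crc(uint32_t crc, const uint8_t* buf, size_t len, uint32_t poly) {
  for (size_t i = 0; i < len; i++) {
    crc ^= buf[i];
    for (int b = 0; b < 8; b++) {
      crc = (crc >> 1) ^ ((crc & 1) ? poly : 0);
    }
  }
  return crc;
}

// CRC32 keeps the externally visible value between calls, so the state is
// bit-flipped before and after the kernel (this corresponds to invertCRC == true).
static uint32_t crc32_update(uint32_t crc, const uint8_t* buf, size_t len) {
  return ~update_crc(~crc, buf, len, 0xEDB88320u);
}

// CRC32C keeps the internal state between calls (invertCRC == false); the flip
// only happens when the value is handed to the outside world (getValue()).
static uint32_t crc32c_update(uint32_t crc_internal, const uint8_t* buf, size_t len) {
  return update_crc(crc_internal, buf, len, 0x82F63B78u);
}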

View File

@ -1,76 +0,0 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->klass_part()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();.
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#undef __
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
__ unimplemented();
}

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -73,7 +73,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
const char* error_message) { const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id); InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id); Klass* klass = SystemDictionary::well_known_klass(klass_id);
assert(temp_reg != Z_R0 && // Is used as base register! assert(temp_reg != Z_R0 && // Is used as base register!
temp_reg != noreg && temp2_reg != noreg, "need valid registers!"); temp_reg != noreg && temp2_reg != noreg, "need valid registers!");

View File

@ -1,5 +1,5 @@
// //
// Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2016 SAP SE. All rights reserved. // Copyright (c) 2016 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
@ -1562,7 +1562,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
} }
// Vector ideal reg. // Vector ideal reg.
const int Matcher::vector_ideal_reg(int size) { const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8 && size == 8, ""); assert(MaxVectorSize == 8 && size == 8, "");
return Op_RegL; return Op_RegL;
} }
@ -1577,7 +1577,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
return max_vector_size(bt); // Same as max. return max_vector_size(bt); // Same as max.
} }
const int Matcher::vector_shift_count_ideal_reg(int size) { const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported"); fatal("vector shift is not supported");
return Node::NotAMachineReg; return Node::NotAMachineReg;
} }
@ -6768,6 +6768,7 @@ instruct sllI_reg_imm(iRegI dst, iRegI src, immI nbits) %{
format %{ "SLL $dst,$src,$nbits\t# use RISC-like SLLG also for int" %} format %{ "SLL $dst,$src,$nbits\t# use RISC-like SLLG also for int" %}
ins_encode %{ ins_encode %{
int Nbit = $nbits$$constant; int Nbit = $nbits$$constant;
assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
__ z_sllg($dst$$Register, $src$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0); __ z_sllg($dst$$Register, $src$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
%} %}
ins_pipe(pipe_class_dummy); ins_pipe(pipe_class_dummy);
@ -6841,6 +6842,7 @@ instruct sraI_reg_imm(iRegI dst, immI src, flagsReg cr) %{
format %{ "SRA $dst,$src" %} format %{ "SRA $dst,$src" %}
ins_encode %{ ins_encode %{
int Nbit = $src$$constant; int Nbit = $src$$constant;
assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
__ z_sra($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0); __ z_sra($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
%} %}
ins_pipe(pipe_class_dummy); ins_pipe(pipe_class_dummy);
@ -6893,6 +6895,7 @@ instruct srlI_reg_imm(iRegI dst, immI src) %{
format %{ "SRL $dst,$src" %} format %{ "SRL $dst,$src" %}
ins_encode %{ ins_encode %{
int Nbit = $src$$constant; int Nbit = $src$$constant;
assert((Nbit & (BitsPerJavaInteger - 1)) == Nbit, "Check shift mask in ideal graph");
__ z_srl($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0); __ z_srl($dst$$Register, Nbit & (BitsPerJavaInteger - 1), Z_R0);
%} %}
ins_pipe(pipe_class_dummy); ins_pipe(pipe_class_dummy);
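The asserts added above document that the ideal graph is expected to deliver an already-masked shift count. As a reminder of the Java semantics they rely on, here is a minimal sketch (plain C++, not generated code) of the int shift masking defined by JLS 15.19, which is exactly the (BitsPerJavaInteger - 1) mask the SLL/SRA/SRL encodings apply:

#include <cstdint>

// Java int shifts use only the low five bits of the count (mask 31).
static int32_t java_ishl(int32_t x, int32_t nbits) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) << (nbits & 31));
}
static int32_t java_ishr(int32_t x, int32_t nbits) {
  return x >> (nbits & 31);   // arithmetic shift; assumes the usual two's complement target
}
static int32_t java_iushr(int32_t x, int32_t nbits) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) >> (nbits & 31));
}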

View File

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -623,26 +623,6 @@ class StubGenerator: public StubCodeGenerator {
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)-> #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
#endif #endif
//----------------------------------------------------------------------
// The following routine generates a subroutine to throw an asynchronous
// UnknownError when an unsafe access gets a fault that could not be
// reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
//
// Arguments:
// trapping PC: ??
//
// Results:
// Posts an asynchronous exception, skips the trapping instruction.
//
address generate_handler_for_unsafe_access() {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
{
address start = __ pc();
__ unimplemented("StubRoutines::handler_for_unsafe_access", 86);
return start;
}
}
// Support for uint StubRoutine::zarch::partial_subtype_check(Klass // Support for uint StubRoutine::zarch::partial_subtype_check(Klass
// sub, Klass super); // sub, Klass super);
// //
@ -2330,26 +2310,25 @@ class StubGenerator: public StubCodeGenerator {
} }
/**
// Arguments: * Arguments:
// Z_ARG1 - int crc *
// Z_ARG2 - byte* buf * Inputs:
// Z_ARG3 - int length (of buffer) * Z_ARG1 - int crc
// * Z_ARG2 - byte* buf
// Result: * Z_ARG3 - int length (of buffer)
// Z_RET - int crc result *
// * Result:
// Compute CRC32 function. * Z_RET - int crc result
address generate_CRC32_updateBytes(const char* name) { **/
__ align(CodeEntryAlignment); // Compute CRC function (generic, for all polynomials).
StubCodeMark mark(this, "StubRoutines", name); void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
// arguments to kernel_crc32: // arguments to kernel_crc32:
Register crc = Z_ARG1; // Current checksum, preset by caller or result from previous call, int. Register crc = Z_ARG1; // Current checksum, preset by caller or result from previous call, int.
Register data = Z_ARG2; // source byte array Register data = Z_ARG2; // source byte array
Register dataLen = Z_ARG3; // #bytes to process, int Register dataLen = Z_ARG3; // #bytes to process, int
Register table = Z_ARG4; // crc table address // Register table = Z_ARG4; // crc table address. Preloaded and passed in by caller.
const Register t0 = Z_R10; // work reg for kernel* emitters const Register t0 = Z_R10; // work reg for kernel* emitters
const Register t1 = Z_R11; // work reg for kernel* emitters const Register t1 = Z_R11; // work reg for kernel* emitters
const Register t2 = Z_R12; // work reg for kernel* emitters const Register t2 = Z_R12; // work reg for kernel* emitters
@ -2361,16 +2340,50 @@ class StubGenerator: public StubCodeGenerator {
// Crc used as int. // Crc used as int.
__ z_llgfr(dataLen, dataLen); __ z_llgfr(dataLen, dataLen);
StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
__ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers. __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_stmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs 10..11 to make them available as work registers. __ z_stmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs 10..11 to make them available as work registers.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3); __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, invertCRC);
__ z_lmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs 10..11 back from stack. __ z_lmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs 10..11 back from stack.
__ resize_frame(+(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers. __ resize_frame(+(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_llgfr(Z_RET, crc); // Updated crc is function result. No copying required, just zero upper 32 bits. __ z_llgfr(Z_RET, crc); // Updated crc is function result. No copying required, just zero upper 32 bits.
__ z_br(Z_R14); // Result already in Z_RET == Z_ARG1. __ z_br(Z_R14); // Result already in Z_RET == Z_ARG1.
}
// Compute CRC32 function.
address generate_CRC32_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", name);
BLOCK_COMMENT("CRC32_updateBytes {");
Register table = Z_ARG4; // crc32 table address.
StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, true);
BLOCK_COMMENT("} CRC32_updateBytes");
return __ addr_at(start_off);
}
// Compute CRC32C function.
address generate_CRC32C_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", name);
BLOCK_COMMENT("CRC32C_updateBytes {");
Register table = Z_ARG4; // crc32c table address.
StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, false);
BLOCK_COMMENT("} CRC32C_updateBytes");
return __ addr_at(start_off); return __ addr_at(start_off);
} }
@ -2441,11 +2454,15 @@ class StubGenerator: public StubCodeGenerator {
// Entry points that are platform specific. // Entry points that are platform specific.
if (UseCRC32Intrinsics) { if (UseCRC32Intrinsics) {
// We have no CRC32 table on z/Architecture.
StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table; StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes"); StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
} }
if (UseCRC32CIntrinsics) {
StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table;
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
}
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction. // Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
StubRoutines::zarch::_trot_table_addr = (address)StubRoutines::zarch::_trot_table; StubRoutines::zarch::_trot_table_addr = (address)StubRoutines::zarch::_trot_table;
} }
@ -2461,8 +2478,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false); StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false); StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
StubRoutines::zarch::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();
// Support for verify_oop (must happen after universe_init). // Support for verify_oop (must happen after universe_init).
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine(); StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();
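The CRC stub refactor above factors the byte-processing loop into one generic routine that receives a preloaded table and an invertCRC flag, with a thin wrapper per flavor. A rough standalone C++ analogue of that structure (the table layout and helper names are assumptions, not the StubGenerator API) could look like:

#include <cstdint>
#include <cstddef>

// Generic byte-at-a-time (Sarwate) update; the caller preloads the right table,
// mirroring how generate_CRC_updateBytes receives the table register from its caller.
static uint32_t crc_update_bytes(uint32_t crc, const uint8_t* buf, size_t len,
                                 const uint32_t table[256], bool invert_crc) {
  if (invert_crc) crc = ~crc;                        // 1s complement of crc (CRC32 flavor only)
  for (size_t i = 0; i < len; i++) {
    crc = (crc >> 8) ^ table[(crc ^ buf[i]) & 0xFFu];
  }
  if (invert_crc) crc = ~crc;                        // back to the external representation
  return crc;
}

// Per-flavor wrappers: CRC32 loads the CRC32 table and flips, CRC32C loads the
// CRC32C table and does not, matching the two generate_CRC32*_updateBytes stubs.
static uint32_t crc32_update_bytes(uint32_t crc, const uint8_t* buf, size_t len,
                                   const uint32_t crc32_table[256]) {
  return crc_update_bytes(crc, buf, len, crc32_table, /*invert_crc=*/true);
}
static uint32_t crc32c_update_bytes(uint32_t crc, const uint8_t* buf, size_t len,
                                    const uint32_t crc32c_table[256]) {
  return crc_update_bytes(crc, buf, len, crc32c_table, /*invert_crc=*/false);
}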

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -68,12 +68,11 @@ class zarch {
}; };
private: private:
static address _handler_for_unsafe_access_entry;
static int _atomic_memory_operation_lock; static int _atomic_memory_operation_lock;
static address _partial_subtype_check; static address _partial_subtype_check;
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE]; static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction. // Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
static address _trot_table_addr; static address _trot_table_addr;
@ -91,11 +90,11 @@ class zarch {
static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; } static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; } static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
static address handler_for_unsafe_access_entry() { return _handler_for_unsafe_access_entry; }
static address partial_subtype_check() { return _partial_subtype_check; } static address partial_subtype_check() { return _partial_subtype_check; }
static void generate_load_absolute_address(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table); static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction. // Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
static void generate_load_trot_table_addr(MacroAssembler* masm, Register table); static void generate_load_trot_table_addr(MacroAssembler* masm, Register table);

View File

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -642,13 +642,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
return entry; return entry;
} }
// Unused, should never pass by.
address TemplateInterpreterGenerator::generate_continuation_for (TosState state) {
address entry = __ pc();
__ should_not_reach_here();
return entry;
}
address TemplateInterpreterGenerator::generate_return_entry_for (TosState state, int step, size_t index_size) { address TemplateInterpreterGenerator::generate_return_entry_for (TosState state, int step, size_t index_size) {
address entry = __ pc(); address entry = __ pc();
@ -683,6 +676,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for (TosState state,
__ z_llgc(size, Address(cache, offset, flags_offset+(sizeof(size_t)-1))); __ z_llgc(size, Address(cache, offset, flags_offset+(sizeof(size_t)-1)));
__ z_sllg(size, size, Interpreter::logStackElementSize); // Each argument size in bytes. __ z_sllg(size, size, Interpreter::logStackElementSize); // Each argument size in bytes.
__ z_agr(Z_esp, size); // Pop arguments. __ z_agr(Z_esp, size); // Pop arguments.
__ check_and_handle_popframe(Z_thread);
__ check_and_handle_earlyret(Z_thread);
__ dispatch_next(state, step); __ dispatch_next(state, step);
BLOCK_COMMENT("} return_entry"); BLOCK_COMMENT("} return_entry");
@ -1933,8 +1930,11 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point; return entry_point;
} }
// Method entry for static native methods:
// int java.util.zip.CRC32.update(int crc, int b) /**
* Method entry for static native methods:
* int java.util.zip.CRC32.update(int crc, int b)
*/
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
if (UseCRC32Intrinsics) { if (UseCRC32Intrinsics) {
@ -1964,7 +1964,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
__ z_llgf(crc, 2 * wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register. __ z_llgf(crc, 2 * wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.
StubRoutines::zarch::generate_load_crc_table_addr(_masm, table); StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
__ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1); __ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1, true);
// Restore caller sp for c2i case. // Restore caller sp for c2i case.
__ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started. __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.
@ -1983,9 +1983,11 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
} }
// Method entry for static native methods: /**
// int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) * Method entry for static native methods:
// int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) * int java.util.zip.CRC32.updateBytes( int crc, byte[] b, int off, int len)
* int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
*/
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32Intrinsics) { if (UseCRC32Intrinsics) {
@ -2041,7 +2043,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
__ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers. __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_stmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers. __ z_stmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3); __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, true);
__ z_lmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 back from stack. __ z_lmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 back from stack.
// Restore caller sp for c2i case. // Restore caller sp for c2i case.
@ -2060,8 +2062,79 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
return NULL; return NULL;
} }
// Not supported
/**
* Method entry for intrinsic-candidate (non-native) methods:
* int java.util.zip.CRC32C.updateBytes( int crc, byte[] b, int off, int end)
* int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
* Unlike CRC32, CRC32C does not have any methods marked as native.
* CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
*/
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32CIntrinsics) {
uint64_t entry_off = __ offset();
// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.
// Load parameters.
// Z_esp is callers operand stack pointer, i.e. it points to the parameters.
const Register argP = Z_esp;
const Register crc = Z_ARG1; // crc value
const Register data = Z_ARG2; // address of java byte array
const Register dataLen = Z_ARG3; // source data len
const Register table = Z_ARG4; // address of crc32 table
const Register t0 = Z_R10; // work reg for kernel* emitters
const Register t1 = Z_R11; // work reg for kernel* emitters
const Register t2 = Z_R12; // work reg for kernel* emitters
const Register t3 = Z_R13; // work reg for kernel* emitters
// Arguments are reversed on java expression stack.
// Calculate address of start element.
if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateByteBuffer direct".
// crc @ (SP + 5W) (32bit)
// buf @ (SP + 3W) (64bit ptr to long array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off
BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
__ z_llgf(crc, 5*wordSize, argP); // current crc state
__ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process, calculated as
__ z_sgf(dataLen, Address(argP, 2*wordSize)); // (end_index - offset)
} else { // Used for "updateBytes update".
// crc @ (SP + 4W) (32bit)
// buf @ (SP + 3W) (64bit ptr to byte array)
// off @ (SP + 2W) (32bit)
// dataLen @ (SP + 1W) (32bit)
// data = buf + off + base_offset
BLOCK_COMMENT("CRC32C_updateBytes {");
__ z_llgf(crc, 4*wordSize, argP); // current crc state
__ z_lg(data, 3*wordSize, argP); // start of byte buffer
__ z_agf(data, 2*wordSize, argP); // Add byte buffer offset.
__ z_lgf(dataLen, 1*wordSize, argP); // #bytes to process, calculated as
__ z_sgf(dataLen, Address(argP, 2*wordSize)); // (end_index - offset)
__ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}
StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);
__ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 5 registers.
__ z_stmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers.
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, false);
__ z_lmg(t0, t3, 1*8, Z_SP); // Spill regs 10..13 back from stack.
// Restore caller sp for c2i case.
__ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.
__ z_br(Z_R14);
BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
return __ addr_at(entry_off);
}
return NULL; return NULL;
} }
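One detail worth keeping in mind from the new entry above: CRC32C.updateBytes/updateDirectByteBuffer pass an exclusive end index where CRC32 passes a length, so the entry derives the byte count itself (the z_lgf/z_sgf pair on dataLen). A tiny sketch of the two conventions (helper names are made up for illustration, not interpreter code):

#include <cstddef>

// CRC32-style entries receive the length directly.
static size_t crc32_byte_count(int len) { return static_cast<size_t>(len); }

// CRC32C-style entries receive an exclusive end index and must compute
// #bytes to process as (end_index - offset), as the interpreter entry does.
static size_t crc32c_byte_count(int off, int end) { return static_cast<size_t>(end - off); }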

View File

@ -3466,7 +3466,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes())); __ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes()));
__ mem2reg_opt(method, __ mem2reg_opt(method,
Address(Z_tmp_2, index, Address(Z_tmp_2, index,
InstanceKlass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes()))); Klass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
__ profile_arguments_type(Z_ARG4, method, Z_ARG5, true); __ profile_arguments_type(Z_ARG4, method, Z_ARG5, true);
__ jump_from_interpreted(method, Z_ARG4); __ jump_from_interpreted(method, Z_ARG4);
BLOCK_COMMENT("} invokevirtual_helper"); BLOCK_COMMENT("} invokevirtual_helper");

View File

@ -111,13 +111,23 @@ void VM_Version::initialize() {
ContendedPaddingWidth = cache_line_size; ContendedPaddingWidth = cache_line_size;
} }
// On z/Architecture, the CRC32 intrinsics had to be implemented "by hand". // On z/Architecture, the CRC32/CRC32C intrinsics are implemented "by hand".
// They cannot be based on the CHECKSUM instruction which has been there // TODO: Provide implementation based on the vector instructions available from z13.
// since the very beginning (of z/Architecture). It computes "some kind of" a checksum // Note: The CHECKSUM instruction, which has been there since the very beginning
// which has nothing to do with the CRC32 algorithm. // (of z/Architecture), computes "some kind of" a checksum.
// It has nothing to do with the CRC32 algorithm.
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true); FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
} }
if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
}
// TODO: Provide implementation.
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
// On z/Architecture, we take UseAES as the general switch to enable/disable the AES intrinsics. // On z/Architecture, we take UseAES as the general switch to enable/disable the AES intrinsics.
// The specific, and yet to be defined, switches UseAESxxxIntrinsics will then be set // The specific, and yet to be defined, switches UseAESxxxIntrinsics will then be set
@ -195,11 +205,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
} }
if (UseAdler32Intrinsics) {
warning("Adler32Intrinsics not available on this CPU.");
FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true); FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
} }

View File

@ -83,7 +83,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ load_klass(rcvr_klass, Z_ARG1); __ load_klass(rcvr_klass, Z_ARG1);
// Set method (in case of interpreted method), and destination address. // Set method (in case of interpreted method), and destination address.
int entry_offset = in_bytes(InstanceKlass::vtable_start_offset()) + int entry_offset = in_bytes(Klass::vtable_start_offset()) +
vtable_index * vtableEntry::size_in_bytes(); vtable_index * vtableEntry::size_in_bytes();
#ifndef PRODUCT #ifndef PRODUCT
@ -96,8 +96,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// worst case actual size // worst case actual size
padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true); padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true);
assert(Immediate::is_uimm12(in_bytes(InstanceKlass::vtable_length_offset())), "disp to large"); assert(Immediate::is_uimm12(in_bytes(Klass::vtable_length_offset())), "disp to large");
__ z_cl(vtable_idx, in_bytes(InstanceKlass::vtable_length_offset()), rcvr_klass); __ z_cl(vtable_idx, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
__ z_brl(L); __ z_brl(L);
__ z_lghi(Z_ARG3, vtable_index); // Debug code, don't optimize. __ z_lghi(Z_ARG3, vtable_index); // Debug code, don't optimize.
__ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false); __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false);
@ -187,11 +187,11 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ load_klass(rcvr_klass, Z_ARG1); __ load_klass(rcvr_klass, Z_ARG1);
// Load start of itable entries into itable_entry. // Load start of itable entries into itable_entry.
__ z_llgf(vtable_len, Address(rcvr_klass, InstanceKlass::vtable_length_offset())); __ z_llgf(vtable_len, Address(rcvr_klass, Klass::vtable_length_offset()));
__ z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes())); __ z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
// Loop over all itable entries until desired interfaceOop(Rinterface) found. // Loop over all itable entries until desired interfaceOop(Rinterface) found.
const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset()); const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
// Count unused bytes. // Count unused bytes.
start_pc = __ pc(); start_pc = __ pc();
__ add2reg_with_index(itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), rcvr_klass, vtable_len); __ add2reg_with_index(itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), rcvr_klass, vtable_len);
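The vtable/itable stub changes above only retarget the offsets from InstanceKlass to Klass; the addressing itself is unchanged. A rough standalone model of that computation follows (struct and constant names are stand-ins, not the real Klass layout):

#include <cstddef>

// Stand-in for the real vtableEntry: one method slot per entry.
struct DummyVtableEntry { void* method; };

static size_t vtable_method_slot_offset(size_t vtable_start_offset, int vtable_index) {
  // entry_offset = vtable_start_offset + vtable_index * vtableEntry::size_in_bytes()
  //              + method_offset_in_bytes(), all relative to the receiver's Klass.
  return vtable_start_offset
       + static_cast<size_t>(vtable_index) * sizeof(DummyVtableEntry)
       + offsetof(DummyVtableEntry, method);
}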

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -270,9 +270,7 @@ void AbstractInterpreter::layout_activation(Method* method,
assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area"); assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area"); assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
} }
#ifdef _LP64
assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd"); assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
#endif
*interpreter_frame->register_addr(Lmethod) = (intptr_t) method; *interpreter_frame->register_addr(Lmethod) = (intptr_t) method;
*interpreter_frame->register_addr(Llocals) = (intptr_t) locals; *interpreter_frame->register_addr(Llocals) = (intptr_t) locals;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -159,21 +159,12 @@
public: public:
#ifdef _LP64
static LIR_Opr as_long_opr(Register r) { static LIR_Opr as_long_opr(Register r) {
return as_long_single_opr(r); return as_long_single_opr(r);
} }
static LIR_Opr as_pointer_opr(Register r) { static LIR_Opr as_pointer_opr(Register r) {
return as_long_single_opr(r); return as_long_single_opr(r);
} }
#else
static LIR_Opr as_long_opr(Register r) {
return as_long_pair_opr(r);
}
static LIR_Opr as_pointer_opr(Register r) {
return as_opr(r);
}
#endif
static LIR_Opr as_float_opr(FloatRegister r) { static LIR_Opr as_float_opr(FloatRegister r) {
return LIR_OprFact::single_fpu(r->encoding()); return LIR_OprFact::single_fpu(r->encoding());
} }

View File

@ -556,11 +556,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
// guarantee that 32-bit loads always sign extended but that isn't // guarantee that 32-bit loads always sign extended but that isn't
// true and since sign extension isn't free, it would impose a // true and since sign extension isn't free, it would impose a
// slight cost. // slight cost.
#ifdef _LP64
if (op->type() == T_INT) { if (op->type() == T_INT) {
__ br(acond, false, Assembler::pn, *(op->label())); __ br(acond, false, Assembler::pn, *(op->label()));
} else } else
#endif
__ brx(acond, false, Assembler::pn, *(op->label())); __ brx(acond, false, Assembler::pn, *(op->label()));
} }
// The peephole pass fills the delay slot // The peephole pass fills the delay slot
@ -576,12 +574,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
Register rlo = dst->as_register_lo(); Register rlo = dst->as_register_lo();
Register rhi = dst->as_register_hi(); Register rhi = dst->as_register_hi();
Register rval = op->in_opr()->as_register(); Register rval = op->in_opr()->as_register();
#ifdef _LP64
__ sra(rval, 0, rlo); __ sra(rval, 0, rlo);
#else
__ mov(rval, rlo);
__ sra(rval, BitsPerInt-1, rhi);
#endif
break; break;
} }
case Bytecodes::_i2d: case Bytecodes::_i2d:
@ -614,11 +607,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
Register rlo = op->in_opr()->as_register_lo(); Register rlo = op->in_opr()->as_register_lo();
Register rhi = op->in_opr()->as_register_hi(); Register rhi = op->in_opr()->as_register_hi();
Register rdst = dst->as_register(); Register rdst = dst->as_register();
#ifdef _LP64
__ sra(rlo, 0, rdst); __ sra(rlo, 0, rdst);
#else
__ mov(rlo, rdst);
#endif
break; break;
} }
case Bytecodes::_d2f: case Bytecodes::_d2f:
@ -711,7 +700,6 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
case T_SHORT : __ sth(from_reg->as_register(), base, offset); break; case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
case T_INT : __ stw(from_reg->as_register(), base, offset); break; case T_INT : __ stw(from_reg->as_register(), base, offset); break;
case T_LONG : case T_LONG :
#ifdef _LP64
if (unaligned || PatchALot) { if (unaligned || PatchALot) {
// Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem) // Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
assert(G3_scratch != base, "can't handle this"); assert(G3_scratch != base, "can't handle this");
@ -722,11 +710,6 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
} else { } else {
__ stx(from_reg->as_register_lo(), base, offset); __ stx(from_reg->as_register_lo(), base, offset);
} }
#else
assert(Assembler::is_simm13(offset + 4), "must be");
__ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
__ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
break; break;
case T_ADDRESS: case T_ADDRESS:
case T_METADATA: case T_METADATA:
@ -778,12 +761,7 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
case T_SHORT : __ sth(from_reg->as_register(), base, disp); break; case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
case T_INT : __ stw(from_reg->as_register(), base, disp); break; case T_INT : __ stw(from_reg->as_register(), base, disp); break;
case T_LONG : case T_LONG :
#ifdef _LP64
__ stx(from_reg->as_register_lo(), base, disp); __ stx(from_reg->as_register_lo(), base, disp);
#else
assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
__ std(from_reg->as_register_hi(), base, disp);
#endif
break; break;
case T_ADDRESS: case T_ADDRESS:
__ st_ptr(from_reg->as_register(), base, disp); __ st_ptr(from_reg->as_register(), base, disp);
@ -826,40 +804,22 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
case T_INT : __ ld(base, offset, to_reg->as_register()); break; case T_INT : __ ld(base, offset, to_reg->as_register()); break;
case T_LONG : case T_LONG :
if (!unaligned && !PatchALot) { if (!unaligned && !PatchALot) {
#ifdef _LP64
__ ldx(base, offset, to_reg->as_register_lo()); __ ldx(base, offset, to_reg->as_register_lo());
#else
assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
"must be sequential");
__ ldd(base, offset, to_reg->as_register_hi());
#endif
} else { } else {
#ifdef _LP64
assert(base != to_reg->as_register_lo(), "can't handle this"); assert(base != to_reg->as_register_lo(), "can't handle this");
assert(O7 != to_reg->as_register_lo(), "can't handle this"); assert(O7 != to_reg->as_register_lo(), "can't handle this");
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo()); __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
__ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
__ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo()); __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
__ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo()); __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
if (base == to_reg->as_register_lo()) {
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
__ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
} else {
__ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
}
#endif
} }
break; break;
case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break; case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
case T_ADDRESS: case T_ADDRESS:
#ifdef _LP64
if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) { if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
__ lduw(base, offset, to_reg->as_register()); __ lduw(base, offset, to_reg->as_register());
__ decode_klass_not_null(to_reg->as_register()); __ decode_klass_not_null(to_reg->as_register());
} else } else
#endif
{ {
__ ld_ptr(base, offset, to_reg->as_register()); __ ld_ptr(base, offset, to_reg->as_register());
} }
@ -921,13 +881,7 @@ int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType
case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break; case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break; case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
case T_LONG : case T_LONG :
#ifdef _LP64
__ ldx(base, disp, to_reg->as_register_lo()); __ ldx(base, disp, to_reg->as_register_lo());
#else
assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
"must be sequential");
__ ldd(base, disp, to_reg->as_register_hi());
#endif
break; break;
default : ShouldNotReachHere(); default : ShouldNotReachHere();
} }
@ -1107,16 +1061,9 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
jlong con = c->as_jlong(); jlong con = c->as_jlong();
if (to_reg->is_double_cpu()) { if (to_reg->is_double_cpu()) {
#ifdef _LP64
__ set(con, to_reg->as_register_lo()); __ set(con, to_reg->as_register_lo());
#else
__ set(low(con), to_reg->as_register_lo());
__ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
} else if (to_reg->is_single_cpu()) { } else if (to_reg->is_single_cpu()) {
__ set(con, to_reg->as_register()); __ set(con, to_reg->as_register());
#endif
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
assert(to_reg->is_double_fpu(), "wrong register kind"); assert(to_reg->is_double_fpu(), "wrong register kind");
@ -1190,12 +1137,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
__ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg()); __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
} else { } else {
assert(to_reg->is_double_cpu(), "Must be a long register."); assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
__ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo()); __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
__ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
__ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
} }
} }
@ -1366,22 +1308,10 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
} }
} else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) { } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
if (from_reg->is_double_cpu()) { if (from_reg->is_double_cpu()) {
#ifdef _LP64
__ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register()); __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
#else
assert(to_reg->is_double_cpu() &&
from_reg->as_register_hi() != to_reg->as_register_lo() &&
from_reg->as_register_lo() != to_reg->as_register_hi(),
"should both be long and not overlap");
// long to long moves
__ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
__ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
#endif
#ifdef _LP64
} else if (to_reg->is_double_cpu()) { } else if (to_reg->is_double_cpu()) {
// int to int moves // int to int moves
__ mov(from_reg->as_register(), to_reg->as_register_lo()); __ mov(from_reg->as_register(), to_reg->as_register_lo());
#endif
} else { } else {
// int to int moves // int to int moves
__ mov(from_reg->as_register(), to_reg->as_register()); __ mov(from_reg->as_register(), to_reg->as_register());
@ -1461,20 +1391,6 @@ void LIR_Assembler::return_op(LIR_Opr result) {
__ reserved_stack_check(); __ reserved_stack_check();
} }
// the poll may need a register so just pick one that isn't the return register // the poll may need a register so just pick one that isn't the return register
#if defined(TIERED) && !defined(_LP64)
if (result->type_field() == LIR_OprDesc::long_type) {
// Must move the result to G1
// Must leave proper result in O0,O1 and G1 (TIERED only)
__ sllx(I0, 32, G1); // Shift bits into high G1
__ srl (I1, 0, I1); // Zero extend O1 (harmless?)
__ or3 (I1, G1, G1); // OR 64 bits into G1
#ifdef ASSERT
// mangle it so any problems will show up
__ set(0xdeadbeef, I0);
__ set(0xdeadbeef, I1);
#endif
}
#endif // TIERED
__ set((intptr_t)os::get_polling_page(), L0); __ set((intptr_t)os::get_polling_page(), L0);
__ relocate(relocInfo::poll_return_type); __ relocate(relocInfo::poll_return_type);
__ ld_ptr(L0, 0, G0); __ ld_ptr(L0, 0, G0);
@ -1568,23 +1484,11 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
Register xhi = opr1->as_register_hi(); Register xhi = opr1->as_register_hi();
if (opr2->is_constant() && opr2->as_jlong() == 0) { if (opr2->is_constant() && opr2->as_jlong() == 0) {
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases"); assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
#ifdef _LP64
__ orcc(xhi, G0, G0); __ orcc(xhi, G0, G0);
#else
__ orcc(xhi, xlo, G0);
#endif
} else if (opr2->is_register()) { } else if (opr2->is_register()) {
Register ylo = opr2->as_register_lo(); Register ylo = opr2->as_register_lo();
Register yhi = opr2->as_register_hi(); Register yhi = opr2->as_register_hi();
#ifdef _LP64
__ cmp(xlo, ylo); __ cmp(xlo, ylo);
#else
__ subcc(xlo, ylo, xlo);
__ subccc(xhi, yhi, xhi);
if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
__ orcc(xhi, xlo, G0);
}
#endif
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
} }
@ -1612,13 +1516,7 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
ShouldNotReachHere(); ShouldNotReachHere();
} }
} else if (code == lir_cmp_l2i) { } else if (code == lir_cmp_l2i) {
#ifdef _LP64
__ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register()); __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
#else
__ lcmp(left->as_register_hi(), left->as_register_lo(),
right->as_register_hi(), right->as_register_lo(),
dst->as_register());
#endif
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
} }
@ -1656,11 +1554,9 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
ShouldNotReachHere(); ShouldNotReachHere();
} }
Label skip; Label skip;
#ifdef _LP64
if (type == T_INT) { if (type == T_INT) {
__ br(acond, false, Assembler::pt, skip); __ br(acond, false, Assembler::pt, skip);
} else } else
#endif
__ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
if (opr1->is_constant() && opr1->type() == T_INT) { if (opr1->is_constant() && opr1->type() == T_INT) {
Register dest = result->as_register(); Register dest = result->as_register();
@ -1720,7 +1616,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
} }
} else if (dest->is_double_cpu()) { } else if (dest->is_double_cpu()) {
#ifdef _LP64
Register dst_lo = dest->as_register_lo(); Register dst_lo = dest->as_register_lo();
Register op1_lo = left->as_pointer_register(); Register op1_lo = left->as_pointer_register();
Register op2_lo = right->as_pointer_register(); Register op2_lo = right->as_pointer_register();
@ -1736,28 +1631,6 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
#else
Register op1_lo = left->as_register_lo();
Register op1_hi = left->as_register_hi();
Register op2_lo = right->as_register_lo();
Register op2_hi = right->as_register_hi();
Register dst_lo = dest->as_register_lo();
Register dst_hi = dest->as_register_hi();
switch (code) {
case lir_add:
__ addcc(op1_lo, op2_lo, dst_lo);
__ addc (op1_hi, op2_hi, dst_hi);
break;
case lir_sub:
__ subcc(op1_lo, op2_lo, dst_lo);
__ subc (op1_hi, op2_hi, dst_hi);
break;
default: ShouldNotReachHere();
}
#endif
} else { } else {
assert (right->is_single_cpu(), "Just Checking"); assert (right->is_single_cpu(), "Just Checking");
@ -1852,23 +1725,14 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
int simm13 = (int)c; int simm13 = (int)c;
switch (code) { switch (code) {
case lir_logic_and: case lir_logic_and:
#ifndef _LP64
__ and3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
__ and3 (left->as_register_lo(), simm13, dest->as_register_lo()); __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
break; break;
case lir_logic_or: case lir_logic_or:
#ifndef _LP64
__ or3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
__ or3 (left->as_register_lo(), simm13, dest->as_register_lo()); __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
break; break;
case lir_logic_xor: case lir_logic_xor:
#ifndef _LP64
__ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
#endif
__ xor3 (left->as_register_lo(), simm13, dest->as_register_lo()); __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
break; break;
@ -1886,7 +1750,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
} else { } else {
#ifdef _LP64
Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
left->as_register_lo(); left->as_register_lo();
Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() : Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
@ -1898,26 +1761,6 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break; case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
#else
switch (code) {
case lir_logic_and:
__ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
__ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
break;
case lir_logic_or:
__ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
__ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
break;
case lir_logic_xor:
__ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
__ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
break;
default: ShouldNotReachHere();
}
#endif
} }
} }
} }
@ -1975,12 +1818,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (basic_type == T_ARRAY) basic_type = T_OBJECT; if (basic_type == T_ARRAY) basic_type = T_OBJECT;
#ifdef _LP64
// higher 32bits must be null // higher 32bits must be null
__ sra(dst_pos, 0, dst_pos); __ sra(dst_pos, 0, dst_pos);
__ sra(src_pos, 0, src_pos); __ sra(src_pos, 0, src_pos);
__ sra(length, 0, length); __ sra(length, 0, length);
#endif
// set up the arraycopy stub information // set up the arraycopy stub information
ArrayCopyStub* stub = op->stub(); ArrayCopyStub* stub = op->stub();
@ -2316,7 +2157,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
if (dest->is_single_cpu()) { if (dest->is_single_cpu()) {
#ifdef _LP64
if (left->type() == T_OBJECT) { if (left->type() == T_OBJECT) {
switch (code) { switch (code) {
case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break; case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
@ -2325,7 +2165,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
} else } else
#endif
switch (code) { switch (code) {
case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break; case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break; case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
@ -2333,27 +2172,17 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
} else { } else {
#ifdef _LP64
switch (code) { switch (code) {
case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
#else
switch (code) {
case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
default: ShouldNotReachHere();
}
#endif
} }
} }
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
if (left->type() == T_OBJECT) { if (left->type() == T_OBJECT) {
count = count & 63; // shouldn't shift by more than sizeof(intptr_t) count = count & 63; // shouldn't shift by more than sizeof(intptr_t)
Register l = left->as_register(); Register l = left->as_register();
@ -2366,7 +2195,6 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr de
} }
return; return;
} }
#endif
if (dest->is_single_cpu()) { if (dest->is_single_cpu()) {
count = count & 0x1F; // Java spec count = count & 0x1F; // Java spec
@ -2425,7 +2253,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
op->tmp4()->as_register() == O1 && op->tmp4()->as_register() == O1 &&
op->klass()->as_register() == G5, "must be"); op->klass()->as_register() == G5, "must be");
LP64_ONLY( __ signx(op->len()->as_register()); ) __ signx(op->len()->as_register());
if (UseSlowPath || if (UseSlowPath ||
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
@ -2748,7 +2576,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
Register new_value_hi = op->new_value()->as_register_hi(); Register new_value_hi = op->new_value()->as_register_hi();
Register t1 = op->tmp1()->as_register(); Register t1 = op->tmp1()->as_register();
Register t2 = op->tmp2()->as_register(); Register t2 = op->tmp2()->as_register();
#ifdef _LP64
__ mov(cmp_value_lo, t1); __ mov(cmp_value_lo, t1);
__ mov(new_value_lo, t2); __ mov(new_value_lo, t2);
// perform the compare and swap operation // perform the compare and swap operation
@ -2756,23 +2583,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
// generate condition code - if the swap succeeded, t2 ("new value" reg) was // generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1. // overwritten with the original value in "addr" and will be equal to t1.
__ cmp(t1, t2); __ cmp(t1, t2);
#else
// move high and low halves of long values into single registers
__ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
__ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
__ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
__ sllx(new_value_hi, 32, t2);
__ srl(new_value_lo, 0, new_value_lo);
__ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
// perform the compare and swap operation
__ casx(addr, t1, t2);
// generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1.
// Produce icc flag for 32bit.
__ sub(t1, t2, t2);
__ srlx(t2, 32, t1);
__ orcc(t2, t1, G0);
#endif
} else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
Register addr = op->addr()->as_pointer_register(); Register addr = op->addr()->as_pointer_register();
Register cmp_value = op->cmp_value()->as_register(); Register cmp_value = op->cmp_value()->as_register();
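The retained 64-bit compare-and-swap path above relies on casx leaving the old memory contents in the "new value" register, so success is detected with a plain register compare. A minimal standalone sketch of that idea in portable C++ (illustrative only, not HotSpot code; the names are made up):

    #include <atomic>
    #include <cstdint>

    // t2 plays the role of the "new value" register: the CAS hands back the old
    // memory contents, so comparing it with the expected value (t1) afterwards
    // tells us whether the swap took place.
    static bool cas_long_succeeded(std::atomic<int64_t>& addr,
                                   int64_t cmp_value, int64_t new_value) {
      int64_t t1 = cmp_value;
      int64_t t2 = t1;
      addr.compare_exchange_strong(t2, new_value);  // on failure t2 <- old value; on success it stays t1
      return t1 == t2;                              // equal => the store happened
    }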
@ -2914,13 +2724,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
assert(data->is_CounterData(), "need CounterData for calls"); assert(data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register(); Register mdo = op->mdo()->as_register();
#ifdef _LP64
assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
Register tmp1 = op->tmp1()->as_register_lo(); Register tmp1 = op->tmp1()->as_register_lo();
#else
assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
Register tmp1 = op->tmp1()->as_register();
#endif
metadata2reg(md->constant_encoding(), mdo); metadata2reg(md->constant_encoding(), mdo);
int mdo_offset_bias = 0; int mdo_offset_bias = 0;
if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) + if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
@ -3200,12 +3005,7 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
assert (left->is_double_cpu(), "Must be a long"); assert (left->is_double_cpu(), "Must be a long");
Register Rlow = left->as_register_lo(); Register Rlow = left->as_register_lo();
Register Rhi = left->as_register_hi(); Register Rhi = left->as_register_hi();
#ifdef _LP64
__ sub(G0, Rlow, dest->as_register_lo()); __ sub(G0, Rlow, dest->as_register_lo());
#else
__ subcc(G0, Rlow, dest->as_register_lo());
__ subc (G0, Rhi, dest->as_register_hi());
#endif
} }
} }
@ -3245,9 +3045,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest,
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
ShouldNotReachHere(); ShouldNotReachHere();
#endif
NEEDS_CLEANUP; NEEDS_CLEANUP;
if (type == T_LONG) { if (type == T_LONG) {
@ -3491,31 +3289,6 @@ void LIR_Assembler::peephole(LIR_List* lir) {
inst->insert_before(i + 1, delay_op); inst->insert_before(i + 1, delay_op);
i++; i++;
} }
#if defined(TIERED) && !defined(_LP64)
// fixup the return value from G1 to O0/O1 for long returns.
// It's done here instead of in LIRGenerator because there's
// such a mismatch between the single reg and double reg
// calling convention.
LIR_OpJavaCall* callop = op->as_OpJavaCall();
if (callop->result_opr() == FrameMap::out_long_opr) {
LIR_OpJavaCall* call;
LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
for (int a = 0; a < arguments->length(); a++) {
arguments[a] = callop->arguments()[a];
}
if (op->code() == lir_virtual_call) {
call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
callop->vtable_offset(), arguments, callop->info());
} else {
call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
callop->addr(), arguments, callop->info());
}
inst->at_put(i - 1, call);
inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
T_LONG, lir_patch_none, NULL));
}
#endif
break; break;
} }
} }
@ -3533,14 +3306,10 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
} else if (data->is_oop()) { } else if (data->is_oop()) {
Register obj = data->as_register(); Register obj = data->as_register();
Register narrow = tmp->as_register(); Register narrow = tmp->as_register();
#ifdef _LP64
assert(UseCompressedOops, "swap is 32bit only"); assert(UseCompressedOops, "swap is 32bit only");
__ encode_heap_oop(obj, narrow); __ encode_heap_oop(obj, narrow);
__ swap(as_Address(addr), narrow); __ swap(as_Address(addr), narrow);
__ decode_heap_oop(narrow, obj); __ decode_heap_oop(narrow, obj);
#else
__ swap(as_Address(addr), obj);
#endif
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -61,11 +61,7 @@
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias); ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);
enum { enum {
#ifdef _LP64
_call_stub_size = 68, _call_stub_size = 68,
#else
_call_stub_size = 20,
#endif // _LP64
_call_aot_stub_size = 0, _call_aot_stub_size = 0,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128), _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
_deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64) _deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,7 @@ LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::Oexcepti
LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::Oissuing_pc_opr; } LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::Oissuing_pc_opr; }
LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); } LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr() { return new_register(T_OBJECT); } LIR_Opr LIRGenerator::syncTempOpr() { return new_register(T_OBJECT); }
LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); } LIR_Opr LIRGenerator::getThreadTemp() { return rlock_callee_saved(T_LONG); }
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) { LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
LIR_Opr opr; LIR_Opr opr;
@ -215,13 +215,11 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
} }
} }
} else { } else {
#ifdef _LP64
if (index_opr->type() == T_INT) { if (index_opr->type() == T_INT) {
LIR_Opr tmp = new_register(T_LONG); LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index_opr, tmp); __ convert(Bytecodes::_i2l, index_opr, tmp);
index_opr = tmp; index_opr = tmp;
} }
#endif
base_opr = new_pointer_register(); base_opr = new_pointer_register();
assert (index_opr->is_register(), "Must be register"); assert (index_opr->is_register(), "Must be register");
@ -1310,20 +1308,12 @@ void LIRGenerator::trace_block_entry(BlockBegin* block) {
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address, void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
CodeEmitInfo* info) { CodeEmitInfo* info) {
#ifdef _LP64
__ store(value, address, info); __ store(value, address, info);
#else
__ volatile_store_mem_reg(value, address, info);
#endif
} }
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result, void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) { CodeEmitInfo* info) {
#ifdef _LP64
__ load(address, result, info); __ load(address, result, info);
#else
__ volatile_load_mem_reg(address, result, info);
#endif
} }
@ -1333,11 +1323,6 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
LIR_Opr index_op = offset; LIR_Opr index_op = offset;
bool is_obj = (type == T_ARRAY || type == T_OBJECT); bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#ifndef _LP64
if (is_volatile && type == T_LONG) {
__ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
} else
#endif
{ {
if (type == T_BOOLEAN) { if (type == T_BOOLEAN) {
type = T_BYTE; type = T_BYTE;
@ -1367,11 +1352,6 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset, void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
BasicType type, bool is_volatile) { BasicType type, bool is_volatile) {
#ifndef _LP64
if (is_volatile && type == T_LONG) {
__ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
} else
#endif
{ {
LIR_Address* addr = new LIR_Address(src, offset, type); LIR_Address* addr = new LIR_Address(src, offset, type);
__ load(addr, dst); __ load(addr, dst);
@ -1396,17 +1376,13 @@ void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
// Because we want a 2-arg form of xchg // Because we want a 2-arg form of xchg
__ move(data, dst); __ move(data, dst);
assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type"); assert (!x->is_add() && (type == T_INT || (is_obj && UseCompressedOops)), "unexpected type");
LIR_Address* addr; LIR_Address* addr;
if (offset->is_constant()) { if (offset->is_constant()) {
#ifdef _LP64
jlong l = offset->as_jlong(); jlong l = offset->as_jlong();
assert((jlong)((jint)l) == l, "offset too large for constant"); assert((jlong)((jint)l) == l, "offset too large for constant");
jint c = (jint)l; jint c = (jint)l;
#else
jint c = offset->as_jint();
#endif
addr = new LIR_Address(src.result(), c, type); addr = new LIR_Address(src.result(), c, type);
} else { } else {
addr = new LIR_Address(src.result(), offset, type); addr = new LIR_Address(src.result(), offset, type);

View File

@ -48,16 +48,9 @@ LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
void LIR_Address::verify() const { void LIR_Address::verify() const {
assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used"); assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used");
assert(disp() == 0 || index()->is_illegal(), "can't have both"); assert(disp() == 0 || index()->is_illegal(), "can't have both");
#ifdef _LP64
assert(base()->is_cpu_register(), "wrong base operand"); assert(base()->is_cpu_register(), "wrong base operand");
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand"); assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA, assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
"wrong type for addresses"); "wrong type for addresses");
#else
assert(base()->is_single_cpu(), "wrong base operand");
assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
"wrong type for addresses");
#endif
} }
#endif // PRODUCT #endif // PRODUCT

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -32,11 +32,7 @@ inline bool LinearScan::is_processed_reg_num(int reg_num) {
inline int LinearScan::num_physical_regs(BasicType type) { inline int LinearScan::num_physical_regs(BasicType type) {
// Sparc requires two cpu registers for long // Sparc requires two cpu registers for long
// and two cpu registers for double // and two cpu registers for double
#ifdef _LP64
if (type == T_DOUBLE) { if (type == T_DOUBLE) {
#else
if (type == T_DOUBLE || type == T_LONG) {
#endif
return 2; return 2;
} }
return 1; return 1;
@ -44,11 +40,7 @@ inline int LinearScan::num_physical_regs(BasicType type) {
inline bool LinearScan::requires_adjacent_regs(BasicType type) { inline bool LinearScan::requires_adjacent_regs(BasicType type) {
#ifdef _LP64
return type == T_DOUBLE; return type == T_DOUBLE;
#else
return type == T_DOUBLE || type == T_LONG;
#endif
} }
inline bool LinearScan::is_caller_save(int assigned_reg) { inline bool LinearScan::is_caller_save(int assigned_reg) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -273,13 +273,6 @@ void C1_MacroAssembler::initialize_object(
add(obj, hdr_size_in_bytes, t1); // compute address of first element add(obj, hdr_size_in_bytes, t1); // compute address of first element
sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
initialize_body(t1, t2); initialize_body(t1, t2);
#ifndef _LP64
} else if (con_size_in_bytes < threshold * 2) {
// on v9 we can do double word stores to fill twice as much space.
assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
assert(con_size_in_bytes % 8 == 0, "double word aligned");
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += 2 * HeapWordSize) stx(G0, obj, i);
#endif
} else if (con_size_in_bytes <= threshold) { } else if (con_size_in_bytes <= threshold) {
// use explicit NULL stores // use explicit NULL stores
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += HeapWordSize) st_ptr(G0, obj, i); for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += HeapWordSize) st_ptr(G0, obj, i);

View File

@ -930,11 +930,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Label not_already_dirty, restart, refill, young_card; Label not_already_dirty, restart, refill, young_card;
#ifdef _LP64
__ srlx(addr, CardTableModRefBS::card_shift, addr); __ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
__ srl(addr, CardTableModRefBS::card_shift, addr);
#endif
AddressLiteral rs(byte_map_base); AddressLiteral rs(byte_map_base);
__ set(rs, cardtable); // cardtable := <card table base> __ set(rs, cardtable); // cardtable := <card table base>

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,6 @@ define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, false); define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
define_pd_global(bool, IdealizeClearArrayNode, true); define_pd_global(bool, IdealizeClearArrayNode, true);
#ifdef _LP64
// We need to make sure that all generated code is within // We need to make sure that all generated code is within
// 2 gigs of the libjvm.so runtime routines so we can use // 2 gigs of the libjvm.so runtime routines so we can use
// the faster "call" instruction rather than the expensive // the faster "call" instruction rather than the expensive
@ -82,17 +81,6 @@ define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags // Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 128ULL*G); define_pd_global(uint64_t,MaxRAM, 128ULL*G);
#else
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M);
define_pd_global(intx, ProfiledCodeHeapSize, 14*M);
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM, 4ULL*G);
#endif
define_pd_global(uintx, CodeCacheMinBlockLength, 4); define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -114,14 +114,8 @@ static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
} }
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
#ifdef _LP64
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
#else
// Guarantee use of ldd/std via some asm code, because compiler won't.
// See solaris_sparc.il.
_Copy_conjoint_jlongs_atomic(from, to, count);
#endif
} }
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
@ -162,7 +156,6 @@ static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count)
} }
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) { static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
#ifdef _LP64
guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0, guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0,
"unaligned fill words"); "unaligned fill words");
julong* to = (julong*)tohw; julong* to = (julong*)tohw;
@ -170,12 +163,6 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
while (count-- > 0) { while (count-- > 0) {
*to++ = v; *to++ = v;
} }
#else // _LP64
juint* to = (juint*)tohw;
while (count-- > 0) {
*to++ = value;
}
#endif // _LP64
} }
typedef void (*_zero_Fn)(HeapWord* to, size_t count); typedef void (*_zero_Fn)(HeapWord* to, size_t count);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -114,11 +114,7 @@ address RegisterMap::pd_location(VMReg regname) const {
// register locations. When that is fixed we will return NULL // register locations. When that is fixed we will return NULL
// (or assert here). // (or assert here).
reg = regname->prev()->as_Register(); reg = regname->prev()->as_Register();
#ifdef _LP64
second_word = sizeof(jint); second_word = sizeof(jint);
#else
return NULL;
#endif // _LP64
} else { } else {
reg = regname->as_Register(); reg = regname->as_Register();
} }
@ -332,9 +328,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// Construct an unpatchable, deficient frame // Construct an unpatchable, deficient frame
void frame::init(intptr_t* sp, address pc, CodeBlob* cb) { void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
#ifdef _LP64
assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp"); assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
#endif
_sp = sp; _sp = sp;
_younger_sp = NULL; _younger_sp = NULL;
_pc = pc; _pc = pc;
@ -693,11 +687,9 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset; intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
address l_addr = (address)l_scratch; address l_addr = (address)l_scratch;
#ifdef _LP64
// On 64-bit the result for 1/8/16/32-bit result types is in the other // On 64-bit the result for 1/8/16/32-bit result types is in the other
// word half // word half
l_addr += wordSize/2; l_addr += wordSize/2;
#endif
switch (type) { switch (type) {
case T_OBJECT: case T_OBJECT:

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -100,11 +100,7 @@
// size of each block, in order of increasing address: // size of each block, in order of increasing address:
register_save_words = 16, register_save_words = 16,
#ifdef _LP64
callee_aggregate_return_pointer_words = 0, callee_aggregate_return_pointer_words = 0,
#else
callee_aggregate_return_pointer_words = 1,
#endif
callee_register_argument_save_area_words = 6, callee_register_argument_save_area_words = 6,
// memory_parameter_words = <arbitrary>, // memory_parameter_words = <arbitrary>,

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -38,24 +38,14 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
// The expected size in bytes of a cache line, used to pad data structures. // The expected size in bytes of a cache line, used to pad data structures.
#if defined(TIERED) #if defined(TIERED)
#ifdef _LP64
// tiered, 64-bit, large machine // tiered, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128 #define DEFAULT_CACHE_LINE_SIZE 128
#else
// tiered, 32-bit, medium machine
#define DEFAULT_CACHE_LINE_SIZE 64
#endif
#elif defined(COMPILER1) #elif defined(COMPILER1)
// pure C1, 32-bit, small machine // pure C1, 32-bit, small machine
#define DEFAULT_CACHE_LINE_SIZE 16 #define DEFAULT_CACHE_LINE_SIZE 16
#elif defined(COMPILER2) || defined(SHARK) #elif defined(COMPILER2) || defined(SHARK)
#ifdef _LP64
// pure C2, 64-bit, large machine // pure C2, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128 #define DEFAULT_CACHE_LINE_SIZE 128
#else
// pure C2, 32-bit, medium machine
#define DEFAULT_CACHE_LINE_SIZE 64
#endif
#endif #endif
#if defined(SOLARIS) #if defined(SOLARIS)

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -56,18 +56,11 @@ define_pd_global(intx, InlineSmallCode, 1500);
#define DEFAULT_STACK_RED_PAGES (1) #define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0)) #define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0))
#ifdef _LP64
// Stack slots are 2X larger in LP64 than in the 32 bit VM. // Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, CompilerThreadStackSize, 1024); define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize, 1024); define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024); define_pd_global(intx, VMThreadStackSize, 1024);
#define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2)) #define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
#else
define_pd_global(intx, CompilerThreadStackSize, 512);
define_pd_global(intx, ThreadStackSize, 512);
define_pd_global(intx, VMThreadStackSize, 512);
#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
#endif // _LP64
#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -32,13 +32,9 @@
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
int InlineCacheBuffer::ic_stub_code_size() { int InlineCacheBuffer::ic_stub_code_size() {
#ifdef _LP64
return (NativeMovConstReg::instruction_size + // sethi;add return (NativeMovConstReg::instruction_size + // sethi;add
NativeJump::instruction_size + // sethi; jmp; delay slot NativeJump::instruction_size + // sethi; jmp; delay slot
(1*BytesPerInstWord) + 1); // flush + 1 extra byte (1*BytesPerInstWord) + 1); // flush + 1 extra byte
#else
return (2+2+ 1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
#endif
} }
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) { void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -318,52 +318,32 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* tab
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) { void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
assert_not_delayed(); assert_not_delayed();
#ifdef _LP64
ldf(FloatRegisterImpl::D, r1, offset, d); ldf(FloatRegisterImpl::D, r1, offset, d);
#else
ldf(FloatRegisterImpl::S, r1, offset, d);
ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
} }
// Known good alignment in _LP64 but unknown otherwise // Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) { void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
assert_not_delayed(); assert_not_delayed();
#ifdef _LP64
stf(FloatRegisterImpl::D, d, r1, offset); stf(FloatRegisterImpl::D, d, r1, offset);
// store something more useful here // store something more useful here
debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);) debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
stf(FloatRegisterImpl::S, d, r1, offset);
stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
} }
// Known good alignment in _LP64 but unknown otherwise // Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) { void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
assert_not_delayed(); assert_not_delayed();
#ifdef _LP64
ldx(r1, offset, rd); ldx(r1, offset, rd);
#else
ld(r1, offset, rd);
ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
} }
// Known good alignment in _LP64 but unknown otherwise // Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) { void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
assert_not_delayed(); assert_not_delayed();
#ifdef _LP64
stx(l, r1, offset); stx(l, r1, offset);
// store something more useful here // store something more useful here
stx(G0, r1, offset+Interpreter::stackElementSize); stx(G0, r1, offset+Interpreter::stackElementSize);
#else
st(l, r1, offset);
st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
} }
void InterpreterMacroAssembler::pop_i(Register r) { void InterpreterMacroAssembler::pop_i(Register r) {
@ -527,9 +507,7 @@ void InterpreterMacroAssembler::empty_expression_stack() {
sub( Lesp, Gframe_size, Gframe_size ); sub( Lesp, Gframe_size, Gframe_size );
and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
debug_only(verify_sp(Gframe_size, G4_scratch)); debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
sub(Gframe_size, STACK_BIAS, Gframe_size ); sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
mov(Gframe_size, SP); mov(Gframe_size, SP);
bind(done); bind(done);
@ -541,28 +519,20 @@ void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
Label Bad, OK; Label Bad, OK;
// Saved SP must be aligned. // Saved SP must be aligned.
#ifdef _LP64
btst(2*BytesPerWord-1, Rsp); btst(2*BytesPerWord-1, Rsp);
#else
btst(LongAlignmentMask, Rsp);
#endif
br(Assembler::notZero, false, Assembler::pn, Bad); br(Assembler::notZero, false, Assembler::pn, Bad);
delayed()->nop(); delayed()->nop();
// Saved SP, plus register window size, must not be above FP. // Saved SP, plus register window size, must not be above FP.
add(Rsp, frame::register_save_words * wordSize, Rtemp); add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP
#endif
cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad); cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);
// Saved SP must not be ridiculously below current SP. // Saved SP must not be ridiculously below current SP.
size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K); size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
set(maxstack, Rtemp); set(maxstack, Rtemp);
sub(SP, Rtemp, Rtemp); sub(SP, Rtemp, Rtemp);
#ifdef _LP64
add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp
#endif
cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad); cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);
ba_short(OK); ba_short(OK);
@ -584,9 +554,7 @@ void InterpreterMacroAssembler::verify_esp(Register Resp) {
delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp); delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
stop("too many pops: Lesp points into monitor area"); stop("too many pops: Lesp points into monitor area");
bind(OK1); bind(OK1);
#ifdef _LP64
sub(Resp, STACK_BIAS, Resp); sub(Resp, STACK_BIAS, Resp);
#endif
cmp(Resp, SP); cmp(Resp, SP);
brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2); brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp); delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
@ -696,21 +664,12 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
} }
br(Assembler::zero, true, Assembler::pn, aligned); br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
delayed()->ldsw(Rtmp, 0, Rdst); delayed()->ldsw(Rtmp, 0, Rdst);
#else
delayed()->ld(Rtmp, 0, Rdst);
#endif
ldub(Lbcp, bcp_offset + 3, Rdst); ldub(Lbcp, bcp_offset + 3, Rdst);
ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst); ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst);
ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst); ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp); ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
#else
// Unsigned load is faster than signed on some implementations
ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
#endif
or3(Rtmp, Rdst, Rdst ); or3(Rtmp, Rdst, Rdst );
bind(aligned); bind(aligned);
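The unaligned path above assembles the 4-byte operand big-endian, one byte at a time, with the top byte loaded signed (ldsb) so the result is sign-extended. A rough C++ equivalent of that byte-assembly sequence, assuming a plain byte pointer rather than the interpreter's real types:

    #include <cstdint>

    // Mirrors the ldub/ldsb + sll + or3 sequence: bytes are combined big-endian
    // and the most significant byte determines the sign of the result.
    static int64_t get_4_byte_integer_unaligned(const uint8_t* bcp) {
      uint32_t u = ((uint32_t)bcp[0] << 24)   // ldsb bcp+0, sll 24 (top, signed byte)
                 | ((uint32_t)bcp[1] << 16)   // ldub bcp+1, sll 16
                 | ((uint32_t)bcp[2] << 8)    // ldub bcp+2, sll 8
                 |  (uint32_t)bcp[3];         // ldub bcp+3
      return (int64_t)(int32_t)u;             // sign-extend the 32-bit value
    }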
@ -910,10 +869,8 @@ void InterpreterMacroAssembler::index_check_without_pop(Register array, Register
assert_not_delayed(); assert_not_delayed();
verify_oop(array); verify_oop(array);
#ifdef _LP64
// sign extend since tos (index) can be a 32bit value // sign extend since tos (index) can be a 32bit value
sra(index, G0, index); sra(index, G0, index);
#endif // _LP64
// check array // check array
Label ptr_ok; Label ptr_ok;
@ -1191,11 +1148,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
// return tos // return tos
assert(Otos_l1 == Otos_i, "adjust code below"); assert(Otos_l1 == Otos_i, "adjust code below");
switch (state) { switch (state) {
#ifdef _LP64
case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0 case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
#else
case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I1
#endif
case btos: // fall through case btos: // fall through
case ztos: // fall through case ztos: // fall through
case ctos: case ctos:
@ -1207,20 +1160,6 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
case vtos: /* nothing to do */ break; case vtos: /* nothing to do */ break;
default : ShouldNotReachHere(); default : ShouldNotReachHere();
} }
#if defined(COMPILER2) && !defined(_LP64)
if (state == ltos) {
// C2 expects long results in G1 we can't tell if we're returning to interpreted
// or compiled so just be safe use G1 and O0/O1
// Shift bits into high (msb) of G1
sllx(Otos_l1->after_save(), 32, G1);
// Zero extend low bits
srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
or3 (Otos_l2->after_save(), G1, G1);
}
#endif /* COMPILER2 */
} }
// Lock object // Lock object
@ -1270,9 +1209,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
// Check if owner is self by comparing the value in the markOop of object // Check if owner is self by comparing the value in the markOop of object
// with the stack pointer // with the stack pointer
sub(temp_reg, SP, temp_reg); sub(temp_reg, SP, temp_reg);
#ifdef _LP64
sub(temp_reg, STACK_BIAS, temp_reg); sub(temp_reg, STACK_BIAS, temp_reg);
#endif
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
// Composite "andcc" test: // Composite "andcc" test:
@ -2711,11 +2648,7 @@ void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) { void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
if (is_native_call) { if (is_native_call) {
stf(FloatRegisterImpl::D, F0, d_tmp); stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
stx(O0, l_tmp); stx(O0, l_tmp);
#else
std(O0, l_tmp);
#endif
} else { } else {
push(state); push(state);
} }
@ -2724,11 +2657,7 @@ void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native
void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) { void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
if (is_native_call) { if (is_native_call) {
ldf(FloatRegisterImpl::D, d_tmp, F0); ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
ldx(l_tmp, O0); ldx(l_tmp, O0);
#else
ldd(l_tmp, O0);
#endif
} else { } else {
pop(state); pop(state);
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -70,9 +70,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool check_exception=true bool check_exception=true
); );
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches // base routine for all dispatches
void dispatch_base(TosState state, address* table); void dispatch_base(TosState state, address* table);
@ -80,6 +77,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
InterpreterMacroAssembler(CodeBuffer* c) InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {} : MacroAssembler(c) {}
virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg);
void jump_to_entry(address entry); void jump_to_entry(address entry);
virtual void load_earlyret_value(TosState state); virtual void load_earlyret_value(TosState state);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -53,47 +53,24 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
Argument jni_arg(jni_offset(), false); Argument jni_arg(jni_offset(), false);
Register Rtmp = O0; Register Rtmp = O0;
#ifdef _LP64
__ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp); __ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_long_argument(Rtmp, jni_arg); __ store_long_argument(Rtmp, jni_arg);
#else
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_argument(Rtmp, jni_arg);
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 0), Rtmp);
Argument successor(jni_arg.successor());
__ store_argument(Rtmp, successor);
#endif
} }
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() { void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
Argument jni_arg(jni_offset(), false); Argument jni_arg(jni_offset(), false);
#ifdef _LP64
FloatRegister Rtmp = F0; FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp); __ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
__ store_float_argument(Rtmp, jni_arg); __ store_float_argument(Rtmp, jni_arg);
#else
Register Rtmp = O0;
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
__ store_argument(Rtmp, jni_arg);
#endif
} }
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() { void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
Argument jni_arg(jni_offset(), false); Argument jni_arg(jni_offset(), false);
#ifdef _LP64
FloatRegister Rtmp = F0; FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::D, Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp); __ ldf(FloatRegisterImpl::D, Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_double_argument(Rtmp, jni_arg); __ store_double_argument(Rtmp, jni_arg);
#else
Register Rtmp = O0;
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
__ store_argument(Rtmp, jni_arg);
__ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
Argument successor(jni_arg.successor());
__ store_argument(Rtmp, successor);
#endif
} }
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() { void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
@ -171,7 +148,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
add_signature( non_float ); add_signature( non_float );
} }
#ifdef _LP64
virtual void pass_float() { virtual void pass_float() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
_from -= Interpreter::stackElementSize; _from -= Interpreter::stackElementSize;
@ -190,23 +166,6 @@ class SlowSignatureHandler: public NativeSignatureIterator {
_from -= 2*Interpreter::stackElementSize; _from -= 2*Interpreter::stackElementSize;
add_signature( long_sig ); add_signature( long_sig );
} }
#else
// pass_double() is pass_long() and pass_float() only _LP64
virtual void pass_long() {
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
_to += 2;
_from -= 2*Interpreter::stackElementSize;
add_signature( non_float );
}
virtual void pass_float() {
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
_from -= Interpreter::stackElementSize;
add_signature( non_float );
}
#endif // _LP64
virtual void add_signature( intptr_t sig_type ) { virtual void add_signature( intptr_t sig_type ) {
if ( _argcount < (sizeof (intptr_t))*4 ) { if ( _argcount < (sizeof (intptr_t))*4 ) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -88,9 +88,7 @@ private:
// _last_Java_sp will always be an unbiased stack pointer // _last_Java_sp will always be an unbiased stack pointer
// if it is biased then some setter screwed up. This is // if it is biased then some setter screwed up. This is
// deadly. // deadly.
#ifdef _LP64
assert(((intptr_t)_last_Java_sp & 0xF) == 0, "Biased last_Java_sp"); assert(((intptr_t)_last_Java_sp & 0xF) == 0, "Biased last_Java_sp");
#endif
return _last_Java_sp; return _last_Java_sp;
} }

View File

@ -152,39 +152,19 @@ address JNI_FastGetField::generate_fast_get_long_field() {
__ ld_ptr (O1, 0, O5); __ ld_ptr (O1, 0, O5);
__ add (O5, O4, O5); __ add (O5, O4, O5);
#ifndef _LP64
assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small");
speculative_load_pclist[count++] = __ pc();
__ ld (O5, 0, G2);
speculative_load_pclist[count] = __ pc();
__ ld (O5, 4, O3);
#else
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc(); speculative_load_pclist[count] = __ pc();
__ ldx (O5, 0, O3); __ ldx (O5, 0, O3);
#endif
__ ld (cnt_addr, G1); __ ld (cnt_addr, G1);
__ cmp (G1, G4); __ cmp (G1, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2); __ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1); __ delayed()->mov (O7, G1);
#ifndef _LP64
__ mov (G2, O0);
__ retl ();
__ delayed()->mov (O3, O1);
#else
__ retl (); __ retl ();
__ delayed()->mov (O3, O0); __ delayed()->mov (O3, O0);
#endif
#ifndef _LP64
slowcase_entry_pclist[count-1] = __ pc();
slowcase_entry_pclist[count++] = __ pc() ;
#else
slowcase_entry_pclist[count++] = __ pc(); slowcase_entry_pclist[count++] = __ pc();
#endif
__ bind (label1); __ bind (label1);
__ mov (O7, G1); __ mov (O7, G1);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -55,18 +55,10 @@ public:
static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; } static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; }
static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; } static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }
#ifdef _LP64
// Longs are stored in native format in one JavaCallArgument slot at *(to+1). // Longs are stored in native format in one JavaCallArgument slot at *(to+1).
static inline void put_long(jlong from, intptr_t *to) { *(jlong *)(to + 1 + 0) = from; } static inline void put_long(jlong from, intptr_t *to) { *(jlong *)(to + 1 + 0) = from; }
static inline void put_long(jlong from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = from; pos += 2; } static inline void put_long(jlong from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = from; pos += 2; }
static inline void put_long(jlong *from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = *from; pos += 2; } static inline void put_long(jlong *from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = *from; pos += 2; }
#else
// Longs are stored in reversed native word format in two JavaCallArgument slots at *to.
// The high half is in *(to+1) and the low half in *to.
static inline void put_long(jlong from, intptr_t *to) { put_int2r((jint *)&from, (jint *)to); }
static inline void put_long(jlong from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
static inline void put_long(jlong *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
#endif
// Oops are stored in native format in one JavaCallArgument slot at *to. // Oops are stored in native format in one JavaCallArgument slot at *to.
static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; } static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; }
@ -78,39 +70,21 @@ public:
static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; } static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; }
static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; } static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }
#ifdef _LP64
// Doubles are stored in native word format in one JavaCallArgument slot at *(to+1). // Doubles are stored in native word format in one JavaCallArgument slot at *(to+1).
static inline void put_double(jdouble from, intptr_t *to) { *(jdouble *)(to + 1 + 0) = from; } static inline void put_double(jdouble from, intptr_t *to) { *(jdouble *)(to + 1 + 0) = from; }
static inline void put_double(jdouble from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = from; pos += 2; } static inline void put_double(jdouble from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = from; pos += 2; }
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = *from; pos += 2; } static inline void put_double(jdouble *from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = *from; pos += 2; }
#else
// Doubles are stored in reversed native word format in two JavaCallArgument slots at *to.
static inline void put_double(jdouble from, intptr_t *to) { put_int2r((jint *)&from, (jint *)to); }
static inline void put_double(jdouble from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
#endif
// The get_xxx routines, on the other hand, actually _do_ fetch // The get_xxx routines, on the other hand, actually _do_ fetch
// java primitive types from the interpreter stack. // java primitive types from the interpreter stack.
static inline jint get_int(intptr_t *from) { return *(jint *)from; } static inline jint get_int(intptr_t *from) { return *(jint *)from; }
#ifdef _LP64
static inline jlong get_long(intptr_t *from) { return *(jlong *)from; } static inline jlong get_long(intptr_t *from) { return *(jlong *)from; }
#else
static inline jlong get_long(intptr_t *from) { return ((jlong)(*( signed int *)((jint *)from )) << 32) |
((jlong)(*(unsigned int *)((jint *)from + 1)) << 0); }
#endif
static inline oop get_obj(intptr_t *from) { return *(oop *)from; } static inline oop get_obj(intptr_t *from) { return *(oop *)from; }
static inline jfloat get_float(intptr_t *from) { return *(jfloat *)from; } static inline jfloat get_float(intptr_t *from) { return *(jfloat *)from; }
#ifdef _LP64
static inline jdouble get_double(intptr_t *from) { return *(jdouble *)from; } static inline jdouble get_double(intptr_t *from) { return *(jdouble *)from; }
#else
static inline jdouble get_double(intptr_t *from) { jlong jl = ((jlong)(*( signed int *)((jint *)from )) << 32) |
((jlong)(*(unsigned int *)((jint *)from + 1)) << 0);
return *(jdouble *)&jl; }
#endif
}; };
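A tiny self-contained illustration of the LP64 slot layout the comments above describe (standalone types assumed, not the real HotSpot headers): a long still consumes two JavaCallArguments slots, but its value is written natively into the second slot, which is also where get_long reads it back.

    #include <cassert>
    #include <cstdint>

    typedef int64_t jlong;

    int main() {
      std::intptr_t slots[4] = {0, 0, 0, 0};
      int pos = 0;
      jlong v = 0x1122334455667788LL;
      *(jlong*)(slots + 1 + pos) = v;        // put_long: value stored at *(to + 1 + pos)
      pos += 2;                              // a long still occupies two slots
      assert(pos == 2);
      assert(*(jlong*)(slots + 1) == v);     // get_long reads the same word back
      return 0;
    }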

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -39,10 +39,6 @@
typedef int jint; typedef int jint;
#ifdef _LP64 typedef long jlong;
typedef long jlong;
#else
typedef long long jlong;
#endif
typedef signed char jbyte; typedef signed char jbyte;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -44,16 +44,12 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) { void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset; address pc = _instructions->start() + pc_offset;
Handle obj = HotSpotObjectConstantImpl::object(constant); Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj()); jobject value = JNIHandles::make_local(obj());
if (HotSpotObjectConstantImpl::compressed(constant)) { if (HotSpotObjectConstantImpl::compressed(constant)) {
#ifdef _LP64
int oop_index = _oop_recorder->find_index(value); int oop_index = _oop_recorder->find_index(value);
RelocationHolder rspec = oop_Relocation::spec(oop_index); RelocationHolder rspec = oop_Relocation::spec(oop_index);
_instructions->relocate(pc, rspec, 1); _instructions->relocate(pc, rspec, 1);
#else
JVMCI_ERROR("compressed oop on 32bit");
#endif
} else { } else {
NativeMovConstReg* move = nativeMovConstReg_at(pc); NativeMovConstReg* move = nativeMovConstReg_at(pc);
move->set_data((intptr_t) value); move->set_data((intptr_t) value);
@ -69,14 +65,10 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) { void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset; address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) { if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
NativeMovConstReg32* move = nativeMovConstReg32_at(pc); NativeMovConstReg32* move = nativeMovConstReg32_at(pc);
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK); narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
move->set_data((intptr_t)narrowOop); move->set_data((intptr_t)narrowOop);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop); TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
#else
JVMCI_ERROR("compressed Klass* on 32bit");
#endif
} else { } else {
NativeMovConstReg* move = nativeMovConstReg_at(pc); NativeMovConstReg* move = nativeMovConstReg_at(pc);
void* reference = record_metadata_reference(_instructions, pc, constant, CHECK); void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -296,11 +296,6 @@ void MacroAssembler::verify_thread() {
mov(G3, L3); // avoid clobbering G3 mov(G3, L3); // avoid clobbering G3
mov(G4, L4); // avoid clobbering G4 mov(G4, L4); // avoid clobbering G4
mov(G5_method, L5); // avoid clobbering G5_method mov(G5_method, L5); // avoid clobbering G5_method
#if defined(COMPILER2) && !defined(_LP64)
// Save & restore possible 64-bit Long arguments in G-regs
srlx(G1,32,L0);
srlx(G4,32,L6);
#endif
call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type); call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
delayed()->mov(G2_thread, O0); delayed()->mov(G2_thread, O0);
@ -309,15 +304,6 @@ void MacroAssembler::verify_thread() {
mov(L3, G3); // restore G3 mov(L3, G3); // restore G3
mov(L4, G4); // restore G4 mov(L4, G4); // restore G4
mov(L5, G5_method); // restore G5_method mov(L5, G5_method); // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
// Save & restore possible 64-bit Long arguments in G-regs
sllx(L0,32,G2); // Move old high G1 bits high in G2
srl(G1, 0,G1); // Clear current high G1 bits
or3 (G1,G2,G1); // Recover 64-bit G1
sllx(L6,32,G2); // Move old high G4 bits high in G2
srl(G4, 0,G4); // Clear current high G4 bits
or3 (G4,G2,G4); // Recover 64-bit G4
#endif
restore(O0, 0, G2_thread); restore(O0, 0, G2_thread);
} }
} }
@ -387,7 +373,6 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
st_ptr(last_Java_pc, pc_addr); st_ptr(last_Java_pc, pc_addr);
} }
#ifdef _LP64
#ifdef ASSERT #ifdef ASSERT
// Make sure that we have an odd stack // Make sure that we have an odd stack
Label StackOk; Label StackOk;
@ -400,9 +385,6 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame"); assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
add( last_java_sp, STACK_BIAS, G4_scratch ); add( last_java_sp, STACK_BIAS, G4_scratch );
st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset()); st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
} }
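
The add of STACK_BIAS before publishing last_Java_sp reflects the SPARC V9 convention that %sp and %fp hold the real stack address minus a fixed bias, while the VM stores the unbiased value. A tiny sketch of the bias arithmetic (2047 is the usual V9 bias, but treat the constant as an assumption here):

#include <cstdint>
#include <cassert>

static const intptr_t STACK_BIAS = 2047;                                    // assumed V9 bias
static intptr_t biased(intptr_t real_sp) { return real_sp - STACK_BIAS; }   // what %sp holds
static intptr_t unbias(intptr_t reg_val) { return reg_val + STACK_BIAS; }   // what the VM publishes

int main() {
  intptr_t real_sp = 0x7ffff000;
  assert(unbias(biased(real_sp)) == real_sp);
  return 0;
}
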
void MacroAssembler::reset_last_Java_frame(void) { void MacroAssembler::reset_last_Java_frame(void) {
@ -658,11 +640,7 @@ void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index)
void MacroAssembler::card_table_write(jbyte* byte_map_base, void MacroAssembler::card_table_write(jbyte* byte_map_base,
Register tmp, Register obj) { Register tmp, Register obj) {
#ifdef _LP64
srlx(obj, CardTableModRefBS::card_shift, obj); srlx(obj, CardTableModRefBS::card_shift, obj);
#else
srl(obj, CardTableModRefBS::card_shift, obj);
#endif
assert(tmp != obj, "need separate temp reg"); assert(tmp != obj, "need separate temp reg");
set((address) byte_map_base, tmp); set((address) byte_map_base, tmp);
stb(G0, tmp, obj); stb(G0, tmp, obj);
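
card_table_write is the generational post-write barrier: shift the object address right by the card shift to get a card index, then store a zero byte (dirty) at byte_map_base plus that index. A stand-alone sketch of the same arithmetic (512-byte cards and zero meaning dirty are assumptions that follow the usual CardTableModRefBS layout):

#include <cstdint>
#include <cstring>
#include <cstdio>

static const int kCardShift = 9;          // assumed: 512-byte cards
static uint8_t   card_table[1 << 16];     // toy table covering a small "heap"

// Dirty the card covering 'addr', mirroring: srlx(obj, card_shift, obj); stb(G0, tmp, obj);
static void post_write_barrier(uintptr_t addr) {
  card_table[addr >> kCardShift] = 0;     // 0 == dirty card
}

int main() {
  memset(card_table, 0xff, sizeof(card_table));   // 0xff == clean
  post_write_barrier(0x4200);                     // a store into some object field
  printf("card %lu is %s\n", (unsigned long)(0x4200 >> kCardShift),
         card_table[0x4200 >> kCardShift] == 0 ? "dirty" : "clean");
  return 0;
}
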
@ -672,7 +650,6 @@ void MacroAssembler::card_table_write(jbyte* byte_map_base,
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) { void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
address save_pc; address save_pc;
int shiftcnt; int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY # ifdef CHECK_DELAY
assert_not_delayed((char*) "cannot put two instructions in delay slot"); assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif # endif
@ -719,9 +696,6 @@ void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, b
while (pc() < (save_pc + (7 * BytesPerInstWord))) while (pc() < (save_pc + (7 * BytesPerInstWord)))
nop(); nop();
} }
#else
Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
} }
@ -736,7 +710,6 @@ void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d)
int MacroAssembler::insts_for_sethi(address a, bool worst_case) { int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
if (worst_case) return 7; if (worst_case) return 7;
intptr_t iaddr = (intptr_t) a; intptr_t iaddr = (intptr_t) a;
int msb32 = (int) (iaddr >> 32); int msb32 = (int) (iaddr >> 32);
@ -756,9 +729,6 @@ int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
} }
} }
return count; return count;
#else
return 1;
#endif
} }
int MacroAssembler::worst_case_insts_for_set() { int MacroAssembler::worst_case_insts_for_set() {
@ -1488,11 +1458,7 @@ void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresul
void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) { void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
#ifdef _LP64
add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult); add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
#else
add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
#endif
bclr(1, Rresult); bclr(1, Rresult);
sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
} }
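
calc_frame_size adds the fixed memory-parameter offset to the caller-supplied extra words, clears bit 0 to force an even word count, and shifts by LogBytesPerWord to convert words to bytes. The same computation in plain C++ (LogBytesPerWord of 3, i.e. 8-byte words, is assumed):

#include <cstdio>

static const int kLogBytesPerWord = 3;     // assumed 64-bit words
static long frame_bytes(long extra_words, long sp_offset_words) {
  long words = extra_words + sp_offset_words;
  words &= ~1L;                            // bclr(1, Rresult): force an even word count
  return words << kLogBytesPerWord;        // sll(..., LogBytesPerWord, ...): words -> bytes
}

int main() {
  printf("%ld\n", frame_bytes(5, 16));     // 5 + 16 = 21 words, clamped to 20, i.e. 160 bytes
  return 0;
}
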
@ -1531,22 +1497,12 @@ void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a,
// Does a test & branch on 32-bit systems and a register-branch on 64-bit. // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) { void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
assert_not_delayed(); assert_not_delayed();
#ifdef _LP64
bpr( rc_z, a, p, s1, L ); bpr( rc_z, a, p, s1, L );
#else
tst(s1);
br ( zero, a, p, L );
#endif
} }
void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) { void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
assert_not_delayed(); assert_not_delayed();
#ifdef _LP64
bpr( rc_nz, a, p, s1, L ); bpr( rc_nz, a, p, s1, L );
#else
tst(s1);
br ( notZero, a, p, L );
#endif
} }
// Compare registers and branch with nop in delay slot or cbcond without delay slot. // Compare registers and branch with nop in delay slot or cbcond without delay slot.
@ -1862,14 +1818,12 @@ void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
bind( done ); bind( done );
} }
#ifdef _LP64
void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
cmp(Ra, Rb); cmp(Ra, Rb);
mov(-1, Rresult); mov(-1, Rresult);
movcc(equal, false, xcc, 0, Rresult); movcc(equal, false, xcc, 0, Rresult);
movcc(greater, false, xcc, 1, Rresult); movcc(greater, false, xcc, 1, Rresult);
} }
#endif
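
lcmp implements the usual three-way long comparison: seed the result with -1, then conditionally overwrite it with 0 on equal and with 1 on greater using the 64-bit condition codes. The equivalent scalar logic:

#include <cassert>

static int lcmp(long a, long b) {
  int r = -1;                // mov(-1, Rresult)
  if (a == b) r = 0;         // movcc(equal,   false, xcc, 0, Rresult)
  if (a >  b) r = 1;         // movcc(greater, false, xcc, 1, Rresult)
  return r;
}

int main() {
  assert(lcmp(1, 2) == -1 && lcmp(2, 2) == 0 && lcmp(3, 2) == 1);
  return 0;
}
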
void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
@ -2668,9 +2622,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// if compare/exchange succeeded we found an unlocked object and we now have locked it // if compare/exchange succeeded we found an unlocked object and we now have locked it
// hence we are done // hence we are done
cmp(Rmark, Rscratch); cmp(Rmark, Rscratch);
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch); sub(Rscratch, STACK_BIAS, Rscratch);
#endif
brx(Assembler::equal, false, Assembler::pt, done); brx(Assembler::equal, false, Assembler::pt, done);
delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
@ -2716,9 +2668,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// Stack-lock attempt failed - check for recursive stack-lock. // Stack-lock attempt failed - check for recursive stack-lock.
// See the comments below about how we might remove this case. // See the comments below about how we might remove this case.
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch); sub(Rscratch, STACK_BIAS, Rscratch);
#endif
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
andcc(Rscratch, 0xfffff003, Rscratch); andcc(Rscratch, 0xfffff003, Rscratch);
br(Assembler::always, false, Assembler::pt, done); br(Assembler::always, false, Assembler::pt, done);
@ -2800,9 +2750,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// control to the "slow" operators in synchronizer.cpp. // control to the "slow" operators in synchronizer.cpp.
// RScratch contains the fetched obj->mark value from the failed CAS. // RScratch contains the fetched obj->mark value from the failed CAS.
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch); sub(Rscratch, STACK_BIAS, Rscratch);
#endif
sub(Rscratch, SP, Rscratch); sub(Rscratch, SP, Rscratch);
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
andcc(Rscratch, 0xfffff003, Rscratch); andcc(Rscratch, 0xfffff003, Rscratch);
@ -3720,11 +3668,7 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
Label not_already_dirty, restart, refill, young_card; Label not_already_dirty, restart, refill, young_card;
#ifdef _LP64
__ srlx(O0, CardTableModRefBS::card_shift, O0); __ srlx(O0, CardTableModRefBS::card_shift, O0);
#else
__ srl(O0, CardTableModRefBS::card_shift, O0);
#endif
AddressLiteral addrlit(byte_map_base); AddressLiteral addrlit(byte_map_base);
__ set(addrlit, O1); // O1 := <card table base> __ set(addrlit, O1); // O1 := <card table base>
__ ldub(O0, O1, O2); // O2 := [O0 + O1] __ ldub(O0, O1, O2); // O2 := [O0 + O1]
@ -3826,11 +3770,7 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
if (G1RSBarrierRegionFilter) { if (G1RSBarrierRegionFilter) {
xor3(store_addr, new_val, tmp); xor3(store_addr, new_val, tmp);
#ifdef _LP64
srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp); srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#else
srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#endif
// XXX Should I predict this taken or not? Does it matter? // XXX Should I predict this taken or not? Does it matter?
cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
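
The G1RSBarrierRegionFilter block is the cross-region filter of the G1 post-barrier: XOR the store address with the new value and shift right by the log of the heap-region size; a zero result means both addresses lie in the same region and the remembered-set update can be skipped. A stand-alone sketch (1 MB regions are an assumption; the real grain size is chosen at startup):

#include <cassert>
#include <cstdint>

static const int kLogRegionBytes = 20;   // assumed 1 MB heap regions

// Mirrors: xor3(store_addr, new_val, tmp); srlx(tmp, LogOfHRGrainBytes, tmp); skip if zero.
static bool same_region(uintptr_t store_addr, uintptr_t new_val) {
  return ((store_addr ^ new_val) >> kLogRegionBytes) == 0;
}

int main() {
  assert(same_region(0x100000, 0x1FFFF8));    // both in the same region: filtered out
  assert(!same_region(0x100000, 0x200000));   // crosses a region boundary: barrier needed
  return 0;
}
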
View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -333,14 +333,12 @@ class AddressLiteral VALUE_OBJ_CLASS_SPEC {
return external_word_Relocation::spec(addr); return external_word_Relocation::spec(addr);
case relocInfo::internal_word_type: case relocInfo::internal_word_type:
return internal_word_Relocation::spec(addr); return internal_word_Relocation::spec(addr);
#ifdef _LP64
case relocInfo::opt_virtual_call_type: case relocInfo::opt_virtual_call_type:
return opt_virtual_call_Relocation::spec(); return opt_virtual_call_Relocation::spec();
case relocInfo::static_call_type: case relocInfo::static_call_type:
return static_call_Relocation::spec(); return static_call_Relocation::spec();
case relocInfo::runtime_call_type: case relocInfo::runtime_call_type:
return runtime_call_Relocation::spec(); return runtime_call_Relocation::spec();
#endif
case relocInfo::none: case relocInfo::none:
return RelocationHolder(); return RelocationHolder();
default: default:
@ -396,12 +394,10 @@ class AddressLiteral VALUE_OBJ_CLASS_SPEC {
: _address((address) addr), : _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {} _rspec(rspec_from_rtype(rtype, (address) addr)) {}
#ifdef _LP64
// 32-bit complains about a multiple declaration for int*. // 32-bit complains about a multiple declaration for int*.
AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none) AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr), : _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {} _rspec(rspec_from_rtype(rtype, (address) addr)) {}
#endif
AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none) AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr), : _address((address) addr),
@ -464,16 +460,10 @@ class Argument VALUE_OBJ_CLASS_SPEC {
bool _is_in; bool _is_in;
public: public:
#ifdef _LP64
enum { enum {
n_register_parameters = 6, // only 6 registers may contain integer parameters n_register_parameters = 6, // only 6 registers may contain integer parameters
n_float_register_parameters = 16 // Can have up to 16 floating registers n_float_register_parameters = 16 // Can have up to 16 floating registers
}; };
#else
enum {
n_register_parameters = 6 // only 6 registers may contain integer parameters
};
#endif
// creation // creation
Argument(int number, bool is_in) : _number(number), _is_in(is_in) {} Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
@ -489,7 +479,6 @@ class Argument VALUE_OBJ_CLASS_SPEC {
// locating register-based arguments: // locating register-based arguments:
bool is_register() const { return _number < n_register_parameters; } bool is_register() const { return _number < n_register_parameters; }
#ifdef _LP64
// locating Floating Point register-based arguments: // locating Floating Point register-based arguments:
bool is_float_register() const { return _number < n_float_register_parameters; } bool is_float_register() const { return _number < n_float_register_parameters; }
@ -501,7 +490,6 @@ class Argument VALUE_OBJ_CLASS_SPEC {
assert(is_float_register(), "must be a register argument"); assert(is_float_register(), "must be a register argument");
return as_FloatRegister(( number() *2 )); return as_FloatRegister(( number() *2 ));
} }
#endif
Register as_register() const { Register as_register() const {
assert(is_register(), "must be a register argument"); assert(is_register(), "must be a register argument");
@ -604,15 +592,15 @@ class MacroAssembler : public Assembler {
bool check_exception=true // flag which indicates if exception should be checked bool check_exception=true // flag which indicates if exception should be checked
); );
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code. // This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler, // The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests. // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register scratch_reg); virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg); virtual void check_and_handle_earlyret(Register scratch_reg);
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// Support for NULL-checks // Support for NULL-checks
// //
// Generates code that causes a NULL OS exception if the content of reg is NULL. // Generates code that causes a NULL OS exception if the content of reg is NULL.
@ -1217,9 +1205,7 @@ public:
void lushr( Register Rin_high, Register Rin_low, Register Rcount, void lushr( Register Rin_high, Register Rin_low, Register Rcount,
Register Rout_high, Register Rout_low, Register Rtemp ); Register Rout_high, Register Rout_low, Register Rtemp );
#ifdef _LP64
void lcmp( Register Ra, Register Rb, Register Rresult); void lcmp( Register Ra, Register Rb, Register Rresult);
#endif
// Load and store values by size and signed-ness // Load and store values by size and signed-ness
void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed); void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -45,19 +45,11 @@ inline void MacroAssembler::pd_patch_instruction(address branch, address target)
// Use the right loads/stores for the platform // Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) { inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d); Assembler::ldx(s1, s2, d);
#else
ld( s1, s2, d);
#endif
} }
inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) { inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d); Assembler::ldx(s1, simm13a, d);
#else
ld( s1, simm13a, d);
#endif
} }
#ifdef ASSERT #ifdef ASSERT
@ -68,35 +60,19 @@ inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d )
#endif #endif
inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) { inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
ldx(s1, s2, d); ldx(s1, s2, d);
#else
ld( s1, s2, d);
#endif
} }
inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) { inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
ldx(a, d, offset); ldx(a, d, offset);
#else
ld( a, d, offset);
#endif
} }
inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) { inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2); Assembler::stx(d, s1, s2);
#else
st( d, s1, s2);
#endif
} }
inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) { inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a); Assembler::stx(d, s1, simm13a);
#else
st( d, s1, simm13a);
#endif
} }
#ifdef ASSERT #ifdef ASSERT
@ -107,84 +83,44 @@ inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a )
#endif #endif
inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) { inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
stx(d, s1, s2); stx(d, s1, s2);
#else
st( d, s1, s2);
#endif
} }
inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) { inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
stx(d, a, offset); stx(d, a, offset);
#else
st( d, a, offset);
#endif
} }
// Use the right loads/stores for the platform // Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) { inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d); Assembler::ldx(s1, s2, d);
#else
Assembler::ldd(s1, s2, d);
#endif
} }
inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) { inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d); Assembler::ldx(s1, simm13a, d);
#else
Assembler::ldd(s1, simm13a, d);
#endif
} }
inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) { inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
ldx(s1, s2, d); ldx(s1, s2, d);
#else
ldd(s1, s2, d);
#endif
} }
inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) { inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
ldx(a, d, offset); ldx(a, d, offset);
#else
ldd(a, d, offset);
#endif
} }
inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) { inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2); Assembler::stx(d, s1, s2);
#else
Assembler::std(d, s1, s2);
#endif
} }
inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) { inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a); Assembler::stx(d, s1, simm13a);
#else
Assembler::std(d, s1, simm13a);
#endif
} }
inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) { inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
stx(d, s1, s2); stx(d, s1, s2);
#else
std(d, s1, s2);
#endif
} }
inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) { inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
stx(d, a, offset); stx(d, a, offset);
#else
std(d, a, offset);
#endif
} }
inline void MacroAssembler::stbool(Register d, const Address& a) { stb(d, a); } inline void MacroAssembler::stbool(Register d, const Address& a) { stb(d, a); }
@ -207,45 +143,25 @@ inline void MacroAssembler::casx( Register s1, Register s2, Register d) { casxa(
// Functions for isolating 64 bit atomic swaps for LP64 // Functions for isolating 64 bit atomic swaps for LP64
// cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
inline void MacroAssembler::cas_ptr( Register s1, Register s2, Register d) { inline void MacroAssembler::cas_ptr( Register s1, Register s2, Register d) {
#ifdef _LP64
casx( s1, s2, d ); casx( s1, s2, d );
#else
cas( s1, s2, d );
#endif
} }
// Functions for isolating 64 bit shifts for LP64 // Functions for isolating 64 bit shifts for LP64
inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) { inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, s2, d); Assembler::sllx(s1, s2, d);
#else
Assembler::sll( s1, s2, d);
#endif
} }
inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) { inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, imm6a, d); Assembler::sllx(s1, imm6a, d);
#else
Assembler::sll( s1, imm6a, d);
#endif
} }
inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) { inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, s2, d); Assembler::srlx(s1, s2, d);
#else
Assembler::srl( s1, s2, d);
#endif
} }
inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) { inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, imm6a, d); Assembler::srlx(s1, imm6a, d);
#else
Assembler::srl( s1, imm6a, d);
#endif
} }
inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) { inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
@ -277,11 +193,7 @@ inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
// Branch that tests either xcc or icc depending on the // Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not) // architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) { inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
Assembler::bp(c, a, xcc, p, d, rt); Assembler::bp(c, a, xcc, p, d, rt);
#else
MacroAssembler::br(c, a, p, d, rt);
#endif
} }
inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) { inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
@ -338,7 +250,6 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
} }
inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) { inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
#ifdef _LP64
intptr_t disp; intptr_t disp;
// NULL is ok because it will be relocated later. // NULL is ok because it will be relocated later.
// Must change NULL to a reachable address in order to // Must change NULL to a reachable address in order to
@ -355,9 +266,6 @@ inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
} else { } else {
Assembler::call(d, rspec); Assembler::call(d, rspec);
} }
#else
Assembler::call( d, rspec );
#endif
} }
inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) { inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
@ -414,12 +322,7 @@ inline void MacroAssembler::cmp( Register s1, int simm13a ) { subcc( s1, simm13
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other. // 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) { inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip; intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
Unimplemented(); Unimplemented();
#else
Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
return thepc; return thepc;
} }
@ -554,7 +457,6 @@ inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
} }
#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) { inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register()) if (a.is_float_register())
// V9 ABI has F1, F3, F5 are used to pass instead of O0, O1, O2 // V9 ABI has F1, F3, F5 are used to pass instead of O0, O1, O2
@ -579,7 +481,6 @@ inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
else else
stx(s, a.as_address()); stx(s, a.as_address());
} }
#endif
inline void MacroAssembler::round_to( Register r, int modulus ) { inline void MacroAssembler::round_to( Register r, int modulus ) {
assert_not_delayed(); assert_not_delayed();
@ -640,22 +541,13 @@ inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm1
inline void MacroAssembler::clruw( Register s, Register d ) { srl( s, G0, d); } inline void MacroAssembler::clruw( Register s, Register d ) { srl( s, G0, d); }
inline void MacroAssembler::clruwu( Register d ) { srl( d, G0, d); } inline void MacroAssembler::clruwu( Register d ) { srl( d, G0, d); }
#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign // Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); } inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); } inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
#ifdef ASSERT #ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int. // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); } inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif #endif
inline void MacroAssembler::ld( const Address& a, Register d, int offset) { inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
View File
@ -1,120 +0,0 @@
/*
* Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtbl_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
Label common_code;
for (int i = 0; i < vtbl_list_size; ++i) {
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
__ save(SP, -256, SP);
int offset = (i << 8) + j;
Register src = G0;
if (!Assembler::is_simm13(offset)) {
__ sethi(offset, L0);
src = L0;
offset = offset & ((1 << 10) - 1);
}
__ brx(Assembler::always, false, Assembler::pt, common_code);
// Load L0 with a value indicating vtable/offset pair.
// -- bits[ 7..0] (8 bits) which virtual method in table?
// -- bits[13..8] (6 bits) which virtual method table?
__ delayed()->or3(src, offset, L0);
}
}
__ bind(common_code);
// Expecting to be called with the "this" pointer in O0/I0 (where
// "this" is a Klass object). In addition, L0 was set (above) to
// identify the method and table.
// Look up the correct vtable pointer.
__ set((intptr_t)vtbl_list, L2); // L2 = address of new vtable list.
__ srl(L0, 8, L3); // Isolate L3 = vtable identifier.
__ sll(L3, LogBytesPerWord, L3);
__ ld_ptr(L2, L3, L3); // L3 = new (correct) vtable pointer.
__ st_ptr(L3, Address(I0, 0)); // Save correct vtable ptr in entry.
// Restore registers and jump to the correct method;
__ and3(L0, 255, L4); // Isolate L4 = method offset.
__ sll(L4, LogBytesPerWord, L4);
__ ld_ptr(L3, L4, L4); // Get address of correct virtual method
__ jmpl(L4, 0, G0); // Jump to correct method.
__ delayed()->restore(); // Restore registers.
__ flush();
*mc_top = (char*)__ pc();
guarantee(*mc_top <= mc_end, "Insufficient space for method wrappers.");
}
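
Each dummy entry above loads L0 with a packed (table, method) pair, the method index in bits [7..0] and the table index in bits [13..8], and branches to common code that unpacks it to locate the real vtable and method. A stand-alone sketch of just the packing scheme (the field widths follow the comments; everything else is illustrative):

#include <cassert>

// Pack/unpack the vtable identifier the generated stub keeps in L0.
static int pack(int table, int method) { return (table << 8) + method; }
static int unpack_table(int packed)    { return packed >> 8;  }   // srl(L0, 8, L3)
static int unpack_method(int packed)   { return packed & 255; }   // and3(L0, 255, L4)

int main() {
  int p = pack(17, 42);
  assert(unpack_table(p) == 17 && unpack_method(p) == 42);
  return 0;
}
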
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -71,7 +71,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register temp_reg, Register temp2_reg, Register temp_reg, Register temp2_reg,
const char* error_message) { const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id); InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id); Klass* klass = SystemDictionary::well_known_klass(klass_id);
bool did_save = false; bool did_save = false;
if (temp_reg == noreg || temp2_reg == noreg) { if (temp_reg == noreg || temp2_reg == noreg) {
temp_reg = L1; temp_reg = L1;
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -236,8 +236,6 @@ void NativeCall::test() {
//------------------------------------------------------------------- //-------------------------------------------------------------------
#ifdef _LP64
void NativeFarCall::set_destination(address dest) { void NativeFarCall::set_destination(address dest) {
// Address materialized in the instruction stream, so nothing to do. // Address materialized in the instruction stream, so nothing to do.
return; return;
@ -290,8 +288,6 @@ void NativeFarCall::test() {
} }
// End code for unit testing implementation of NativeFarCall class // End code for unit testing implementation of NativeFarCall class
#endif // _LP64
//------------------------------------------------------------------- //-------------------------------------------------------------------
@ -304,18 +300,9 @@ void NativeMovConstReg::verify() {
// verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg" // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
Register rd = inv_rd(i0); Register rd = inv_rd(i0);
#ifndef _LP64
if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
rd == inv_rs1(i1) && rd == inv_rd(i1))) {
fatal("not a set_metadata");
}
#else
if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) { if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
fatal("not a set_metadata"); fatal("not a set_metadata");
} }
#endif
} }
@ -324,23 +311,13 @@ void NativeMovConstReg::print() {
} }
#ifdef _LP64
intptr_t NativeMovConstReg::data() const { intptr_t NativeMovConstReg::data() const {
return data64(addr_at(sethi_offset), long_at(add_offset)); return data64(addr_at(sethi_offset), long_at(add_offset));
} }
#else
intptr_t NativeMovConstReg::data() const {
return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif
void NativeMovConstReg::set_data(intptr_t x) { void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
set_data64_sethi(addr_at(sethi_offset), x); set_data64_sethi(addr_at(sethi_offset), x);
#else
set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
#endif
set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x)); set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any // also store the value into an oop_Relocation cell, if any
@ -508,20 +485,12 @@ void NativeMovConstRegPatching::print() {
int NativeMovConstRegPatching::data() const { int NativeMovConstRegPatching::data() const {
#ifdef _LP64
return data64(addr_at(sethi_offset), long_at(add_offset)); return data64(addr_at(sethi_offset), long_at(add_offset));
#else
return data32(long_at(sethi_offset), long_at(add_offset));
#endif
} }
void NativeMovConstRegPatching::set_data(int x) { void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
set_data64_sethi(addr_at(sethi_offset), x); set_data64_sethi(addr_at(sethi_offset), x);
#else
set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x)); set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
// also store the value into an oop_Relocation cell, if any // also store the value into an oop_Relocation cell, if any
@ -758,21 +727,12 @@ void NativeJump::verify() {
assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok"); assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
// verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg" // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
Register rd = inv_rd(i0); Register rd = inv_rd(i0);
#ifndef _LP64
if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
(is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op)) &&
inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
rd == inv_rs1(i1))) {
fatal("not a jump_to instruction");
}
#else
// In LP64, the jump instruction location varies for non-relocatable // In LP64, the jump instruction location varies for non-relocatable
// jumps, for example it could be sethi, xor, jmp instead of the // jumps, for example it could be sethi, xor, jmp instead of the
// 7 instructions for sethi. So let's check sethi only. // 7 instructions for sethi. So let's check sethi only.
if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) { if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
fatal("not a jump_to instruction"); fatal("not a jump_to instruction");
} }
#endif
} }
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -121,11 +121,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
bool is_safepoint_poll() { bool is_safepoint_poll() {
int x = long_at(0); int x = long_at(0);
#ifdef _LP64
return is_op3(x, Assembler::ldx_op3, Assembler::ldst_op) && return is_op3(x, Assembler::ldx_op3, Assembler::ldst_op) &&
#else
return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
#endif
(inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0); (inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
} }
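
is_safepoint_poll accepts exactly the shape "ldx [reg + 0], %g0" (or with %g0 as the index register). A stand-alone sketch of the same field checks on a SPARC format-3 instruction word (the opcode constants are assumptions taken from the V9 encoding tables):

#include <cassert>
#include <cstdint>

static const uint32_t kOpLdst = 3;      // op  field for load/store (assumed)
static const uint32_t kOp3Ldx = 0x0b;   // op3 field for ldx (assumed)

static uint32_t op    (uint32_t x) { return x >> 30; }
static uint32_t rd    (uint32_t x) { return (x >> 25) & 0x1f; }
static uint32_t op3   (uint32_t x) { return (x >> 19) & 0x3f; }
static bool     immed (uint32_t x) { return ((x >> 13) & 1) != 0; }
static int32_t  simm13(uint32_t x) { return ((int32_t)(x << 19)) >> 19; }  // sign-extend low 13 bits
static uint32_t rs2   (uint32_t x) { return x & 0x1f; }

// True for the shape is_safepoint_poll() accepts: an ldx with %g0 as destination and a zero offset.
static bool looks_like_safepoint_poll(uint32_t x) {
  if (op(x) != kOpLdst || op3(x) != kOp3Ldx) return false;
  if (rd(x) != 0) return false;                        // destination must be %g0
  return immed(x) ? simm13(x) == 0 : rs2(x) == 0;      // zero displacement either way
}

int main() {
  // ldx [%g1 + 0], %g0  ==  op=3, rd=0, op3=0x0b, rs1=1, i=1, simm13=0
  uint32_t poll = (3u << 30) | (0u << 25) | (0x0bu << 19) | (1u << 14) | (1u << 13);
  assert(looks_like_safepoint_poll(poll));
  return 0;
}
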
@ -432,22 +428,6 @@ class NativeCallReg: public NativeInstruction {
// instructions in the sparcv9 vm. Used to call native methods which may be loaded // instructions in the sparcv9 vm. Used to call native methods which may be loaded
// anywhere in the address space, possibly out of reach of a call instruction. // anywhere in the address space, possibly out of reach of a call instruction.
#ifndef _LP64
// On 32-bit systems, a far call is the same as a near one.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall : public NativeCall {
public:
friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
{ return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
friend NativeFarCall* nativeFarCall_before(address return_address)
{ return (NativeFarCall*)nativeCall_before(return_address); }
};
#else
// The format of this extended-range call is: // The format of this extended-range call is:
// jumpl_to addr, lreg // jumpl_to addr, lreg
// == sethi %hi54(addr), O7 ; jumpl O7, %lo10(addr), O7 ; <delay> // == sethi %hi54(addr), O7 ; jumpl O7, %lo10(addr), O7 ; <delay>
@ -515,7 +495,6 @@ class NativeFarCall: public NativeInstruction {
static void replace_mt_safe(address instr_addr, address code_buffer); static void replace_mt_safe(address instr_addr, address code_buffer);
}; };
#endif // _LP64
// An interface for accessing/manipulating 32 bit native set_metadata imm, reg instructions // An interface for accessing/manipulating 32 bit native set_metadata imm, reg instructions
// (used to manipulate inlined data references, etc.) // (used to manipulate inlined data references, etc.)
@ -567,13 +546,8 @@ class NativeMovConstReg: public NativeInstruction {
public: public:
enum Sparc_specific_constants { enum Sparc_specific_constants {
sethi_offset = 0, sethi_offset = 0,
#ifdef _LP64
add_offset = 7 * BytesPerInstWord, add_offset = 7 * BytesPerInstWord,
instruction_size = 8 * BytesPerInstWord instruction_size = 8 * BytesPerInstWord
#else
add_offset = 4,
instruction_size = 8
#endif
}; };
address instruction_address() const { return addr_at(0); } address instruction_address() const { return addr_at(0); }
@ -626,11 +600,7 @@ inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
public: public:
enum Sparc_specific_constants { enum Sparc_specific_constants {
sethi_offset = 0, sethi_offset = 0,
#ifdef _LP64
nop_offset = 7 * BytesPerInstWord, nop_offset = 7 * BytesPerInstWord,
#else
nop_offset = sethi_offset + BytesPerInstWord,
#endif
add_offset = nop_offset + BytesPerInstWord, add_offset = nop_offset + BytesPerInstWord,
instruction_size = add_offset + BytesPerInstWord instruction_size = add_offset + BytesPerInstWord
}; };
@ -705,11 +675,7 @@ class NativeMovRegMem: public NativeInstruction {
offset_width = 13, offset_width = 13,
sethi_offset = 0, sethi_offset = 0,
#ifdef _LP64
add_offset = 7 * BytesPerInstWord, add_offset = 7 * BytesPerInstWord,
#else
add_offset = 4,
#endif
ldst_offset = add_offset + BytesPerInstWord ldst_offset = add_offset + BytesPerInstWord
}; };
bool is_immediate() const { bool is_immediate() const {
@ -720,11 +686,7 @@ class NativeMovRegMem: public NativeInstruction {
address instruction_address() const { return addr_at(0); } address instruction_address() const { return addr_at(0); }
address next_instruction_address() const { address next_instruction_address() const {
#ifdef _LP64
return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord)); return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
#else
return addr_at(is_immediate() ? 4 : 12);
#endif
} }
intptr_t offset() const { intptr_t offset() const {
return is_immediate()? inv_simm(long_at(0), offset_width) : return is_immediate()? inv_simm(long_at(0), offset_width) :
@ -777,19 +739,13 @@ class NativeJump: public NativeInstruction {
public: public:
enum Sparc_specific_constants { enum Sparc_specific_constants {
sethi_offset = 0, sethi_offset = 0,
#ifdef _LP64
jmpl_offset = 7 * BytesPerInstWord, jmpl_offset = 7 * BytesPerInstWord,
instruction_size = 9 * BytesPerInstWord // includes delay slot instruction_size = 9 * BytesPerInstWord // includes delay slot
#else
jmpl_offset = 1 * BytesPerInstWord,
instruction_size = 3 * BytesPerInstWord // includes delay slot
#endif
}; };
address instruction_address() const { return addr_at(0); } address instruction_address() const { return addr_at(0); }
address next_instruction_address() const { return addr_at(instruction_size); } address next_instruction_address() const { return addr_at(instruction_size); }
#ifdef _LP64
address jump_destination() const { address jump_destination() const {
return (address) data64(instruction_address(), long_at(jmpl_offset)); return (address) data64(instruction_address(), long_at(jmpl_offset));
} }
@ -797,15 +753,6 @@ class NativeJump: public NativeInstruction {
set_data64_sethi( instruction_address(), (intptr_t)dest); set_data64_sethi( instruction_address(), (intptr_t)dest);
set_long_at(jmpl_offset, set_data32_simm13( long_at(jmpl_offset), (intptr_t)dest)); set_long_at(jmpl_offset, set_data32_simm13( long_at(jmpl_offset), (intptr_t)dest));
} }
#else
address jump_destination() const {
return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
}
void set_jump_destination(address dest) {
set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), (intptr_t)dest));
set_long_at(jmpl_offset, set_data32_simm13( long_at(jmpl_offset), (intptr_t)dest));
}
#endif
// Creation // Creation
friend inline NativeJump* nativeJump_at(address address) { friend inline NativeJump* nativeJump_at(address address) {
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -93,7 +93,6 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
case Assembler::branch_op: case Assembler::branch_op:
{ {
#ifdef _LP64
jint inst2; jint inst2;
guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi"); guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
if (format() != 0) { if (format() != 0) {
@ -121,17 +120,6 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
} else { } else {
ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x ); ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
} }
#else
guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
inst &= ~Assembler::hi22( -1);
inst |= Assembler::hi22((intptr_t)x);
// (ignore offset; it doesn't play into the sethi)
if (verify_only) {
guarantee(ip->long_at(0) == inst, "instructions must match");
} else {
ip->set_long_at(0, inst);
}
#endif
} }
break; break;
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -34,12 +34,8 @@
// There is no need for format bits; the instructions are // There is no need for format bits; the instructions are
// sufficiently self-identifying. // sufficiently self-identifying.
#ifndef _LP64
format_width = 0
#else
// Except narrow oops in 64-bits VM. // Except narrow oops in 64-bits VM.
format_width = 1 format_width = 1
#endif
}; };
View File
@ -127,56 +127,10 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
// OopMap* map = new OopMap(*total_frame_words, 0); // OopMap* map = new OopMap(*total_frame_words, 0);
OopMap* map = new OopMap(frame_size_in_slots, 0); OopMap* map = new OopMap(frame_size_in_slots, 0);
#if !defined(_LP64)
// Save 64-bit O registers; they will get their heads chopped off on a 'save'.
__ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
__ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
__ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
__ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
__ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
__ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */
__ save(SP, -frame_size, SP); __ save(SP, -frame_size, SP);
#ifndef _LP64
// Reload the 64 bit Oregs. Although they are now Iregs we load them
// to Oregs here to avoid interrupts cutting off their heads
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
__ stx(O0, SP, o0_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());
__ stx(O1, SP, o1_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());
__ stx(O2, SP, o2_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());
__ stx(O3, SP, o3_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());
__ stx(O4, SP, o4_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());
__ stx(O5, SP, o5_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */
#ifdef _LP64
int debug_offset = 0; int debug_offset = 0;
#else
int debug_offset = 4;
#endif
// Save the G's // Save the G's
__ stx(G1, SP, g1_offset+STACK_BIAS); __ stx(G1, SP, g1_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg()); map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
@ -192,18 +146,6 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
// This is really a waste but we'll keep things as they were for now // This is really a waste but we'll keep things as they were for now
if (true) { if (true) {
#ifndef _LP64
map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
} }
@ -250,70 +192,22 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
__ ldx(SP, g4_offset+STACK_BIAS, G4); __ ldx(SP, g4_offset+STACK_BIAS, G4);
__ ldx(SP, g5_offset+STACK_BIAS, G5); __ ldx(SP, g5_offset+STACK_BIAS, G5);
#if !defined(_LP64)
// Restore the 64-bit O's.
__ ldx(SP, o0_offset+STACK_BIAS, O0);
__ ldx(SP, o1_offset+STACK_BIAS, O1);
__ ldx(SP, o2_offset+STACK_BIAS, O2);
__ ldx(SP, o3_offset+STACK_BIAS, O3);
__ ldx(SP, o4_offset+STACK_BIAS, O4);
__ ldx(SP, o5_offset+STACK_BIAS, O5);
// And temporarily place them in TLS
__ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
__ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
__ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
__ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
__ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
__ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */
// Restore flags // Restore flags
__ ldxfsr(SP, fsr_offset+STACK_BIAS); __ ldxfsr(SP, fsr_offset+STACK_BIAS);
__ restore(); __ restore();
#if !defined(_LP64)
// Now reload the 64bit Oregs after we've restore the window.
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */
} }
// Pop the current frame and restore the registers that might be holding // Pop the current frame and restore the registers that might be holding
// a result. // a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) { void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
#if !defined(_LP64)
// 32bit build returns longs in G1
__ ldx(SP, g1_offset+STACK_BIAS, G1);
// Retrieve the 64-bit O's.
__ ldx(SP, o0_offset+STACK_BIAS, O0);
__ ldx(SP, o1_offset+STACK_BIAS, O1);
// and save to TLS
__ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
__ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */
__ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0)); __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
__ restore(); __ restore();
#if !defined(_LP64)
// Now reload the 64bit Oregs after we've restore the window.
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
__ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */
} }
// Is vector's size (in bytes) bigger than a size saved by default? // Is vector's size (in bytes) bigger than a size saved by default?
@ -410,11 +304,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
case T_CHAR: case T_CHAR:
case T_BYTE: case T_BYTE:
case T_BOOLEAN: case T_BOOLEAN:
#ifndef _LP64
case T_OBJECT:
case T_ARRAY:
case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
if (int_reg < int_reg_max) { if (int_reg < int_reg_max) {
Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++); Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
regs[i].set1(r->as_VMReg()); regs[i].set1(r->as_VMReg());
@ -423,7 +312,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
} }
break; break;
#ifdef _LP64
case T_LONG: case T_LONG:
assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half"); assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
// fall-through // fall-through
@ -439,15 +327,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
slot += 2; slot += 2;
} }
break; break;
#else
case T_LONG:
assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
// On 32-bit SPARC put longs always on the stack to keep the pressure off
// integer argument registers. They should be used for oops.
slot = round_to(slot, 2); // align
regs[i].set2(VMRegImpl::stack2reg(slot));
slot += 2;
#endif
break; break;
case T_FLOAT: case T_FLOAT:
@ -554,7 +433,6 @@ void AdapterGenerator::patch_callers_callsite() {
// The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops. // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
#ifdef _LP64
// mov(s,d) // mov(s,d)
__ mov(G1, L1); __ mov(G1, L1);
__ mov(G4, L4); __ mov(G4, L4);
@ -571,20 +449,6 @@ void AdapterGenerator::patch_callers_callsite() {
__ mov(L1, G1); __ mov(L1, G1);
__ mov(L4, G4); __ mov(L4, G4);
__ mov(L5, G5_method); __ mov(L5, G5_method);
#else
__ stx(G1, FP, -8 + STACK_BIAS);
__ stx(G4, FP, -16 + STACK_BIAS);
__ mov(G5_method, L5);
__ mov(G5_method, O0); // VM needs target method
__ mov(I7, O1); // VM needs caller's callsite
// Must be a leaf call...
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
__ delayed()->mov(G2_thread, L7_thread_cache);
__ mov(L7_thread_cache, G2_thread);
__ ldx(FP, -8 + STACK_BIAS, G1);
__ ldx(FP, -16 + STACK_BIAS, G4);
__ mov(L5, G5_method);
#endif /* _LP64 */
__ restore(); // Restore args __ restore(); // Restore args
__ bind(L); __ bind(L);
@ -605,28 +469,9 @@ RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
// Stores long into offset pointed to by base // Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base, void AdapterGenerator::store_c2i_long(Register r, Register base,
const int st_off, bool is_stack) { const int st_off, bool is_stack) {
#ifdef _LP64
// In V9, longs are given 2 64-bit slots in the interpreter, but the // In V9, longs are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot. // data is passed in only 1 slot.
__ stx(r, base, next_arg_slot(st_off)); __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
// Misaligned store of 64-bit data
__ stw(r, base, arg_slot(st_off)); // lo bits
__ srlx(r, 32, r);
__ stw(r, base, next_arg_slot(st_off)); // hi bits
#else
if (is_stack) {
// Misaligned store of 64-bit data
__ stw(r, base, arg_slot(st_off)); // lo bits
__ srlx(r, 32, r);
__ stw(r, base, next_arg_slot(st_off)); // hi bits
} else {
__ stw(r->successor(), base, arg_slot(st_off) ); // lo bits
__ stw(r , base, next_arg_slot(st_off)); // hi bits
}
#endif // COMPILER2
#endif // _LP64
} }
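// A sketch of the slot usage described by the comment above (both offsets name one of
// the two 64-bit interpreter slots reserved for the long):
//
//   base + arg_slot(st_off)       companion slot, left unwritten on 64-bit SPARC
//   base + next_arg_slot(st_off)  receives the whole 64-bit value via the single stx
//
// store_c2i_double below and the ldx/ldf paths in gen_i2c_adapter follow the same
// "two slots reserved, one slot carries the data" convention.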
void AdapterGenerator::store_c2i_object(Register r, Register base, void AdapterGenerator::store_c2i_object(Register r, Register base,
@ -642,15 +487,9 @@ void AdapterGenerator::store_c2i_int(Register r, Register base,
// Stores into offset pointed to by base // Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2, void AdapterGenerator::store_c2i_double(VMReg r_2,
VMReg r_1, Register base, const int st_off) { VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
// In V9, doubles are given 2 64-bit slots in the interpreter, but the // In V9, doubles are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot. // data is passed in only 1 slot.
__ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off)); __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
// Need to marshal 64-bit value from misaligned Lesp loads
__ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
__ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
#endif
} }
void AdapterGenerator::store_c2i_float(FloatRegister f, Register base, void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
@ -957,22 +796,17 @@ void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
if (!r_2->is_valid()) { if (!r_2->is_valid()) {
__ ld(Gargs, arg_slot(ld_off), r); __ ld(Gargs, arg_slot(ld_off), r);
} else { } else {
#ifdef _LP64
// In V9, longs are given 2 64-bit slots in the interpreter, but the // In V9, longs are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot. // data is passed in only 1 slot.
RegisterOrConstant slot = (sig_bt[i] == T_LONG) ? RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
next_arg_slot(ld_off) : arg_slot(ld_off); next_arg_slot(ld_off) : arg_slot(ld_off);
__ ldx(Gargs, slot, r); __ ldx(Gargs, slot, r);
#else
fatal("longs should be on stack");
#endif
} }
} else { } else {
assert(r_1->is_FloatRegister(), ""); assert(r_1->is_FloatRegister(), "");
if (!r_2->is_valid()) { if (!r_2->is_valid()) {
__ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister()); __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
} else { } else {
#ifdef _LP64
// In V9, doubles are given 2 64-bit slots in the interpreter, but the // In V9, doubles are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot. This code also handles longs that // data is passed in only 1 slot. This code also handles longs that
// are passed on the stack, but need a stack-to-stack move through a // are passed on the stack, but need a stack-to-stack move through a
@ -980,11 +814,6 @@ void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ? RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
next_arg_slot(ld_off) : arg_slot(ld_off); next_arg_slot(ld_off) : arg_slot(ld_off);
__ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister()); __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
// Need to marshal 64-bit value from misaligned Lesp loads
__ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
__ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
} }
} }
// Was the argument really intended to be on the stack, but was loaded // Was the argument really intended to be on the stack, but was loaded
@ -1157,7 +986,6 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// See int_stk_helper for a further discussion. // See int_stk_helper for a further discussion.
int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots(); int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
#ifdef _LP64
// V9 convention: All things "as-if" on double-wide stack slots. // V9 convention: All things "as-if" on double-wide stack slots.
// Hoist any int/ptr/long's in the first 6 to int regs. // Hoist any int/ptr/long's in the first 6 to int regs.
// Hoist any flt/dbl's in the first 16 dbl regs. // Hoist any flt/dbl's in the first 16 dbl regs.
@ -1241,44 +1069,6 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
if (off > max_stack_slots) max_stack_slots = off; if (off > max_stack_slots) max_stack_slots = off;
} }
} }
#else // _LP64
// V8 convention: first 6 things in O-regs, rest on stack.
// Alignment is willy-nilly.
for (int i = 0; i < total_args_passed; i++) {
switch (sig_bt[i]) {
case T_ADDRESS: // raw pointers, like current thread, for VM calls
case T_ARRAY:
case T_BOOLEAN:
case T_BYTE:
case T_CHAR:
case T_FLOAT:
case T_INT:
case T_OBJECT:
case T_METADATA:
case T_SHORT:
regs[i].set1(int_stk_helper(i));
break;
case T_DOUBLE:
case T_LONG:
assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
break;
case T_VOID: regs[i].set_bad(); break;
default:
ShouldNotReachHere();
}
if (regs[i].first()->is_stack()) {
int off = regs[i].first()->reg2stack();
if (off > max_stack_slots) max_stack_slots = off;
}
if (regs[i].second()->is_stack()) {
int off = regs[i].second()->reg2stack();
if (off > max_stack_slots) max_stack_slots = off;
}
}
#endif // _LP64
return round_to(max_stack_slots + 1, 2); return round_to(max_stack_slots + 1, 2);
} }
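// Rough reading of the return value, assuming VMReg stack slots are 32-bit and
// VMRegImpl::slots_per_word == 2 on this port: max_stack_slots is the highest
// outgoing slot index used, +1 turns it into a slot count, and round_to(count, 2)
// pads to an even number of slots, i.e. a whole number of 8-byte stack words.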
@ -1406,12 +1196,7 @@ static void object_move(MacroAssembler* masm,
Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register(); Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
__ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle); __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
__ ld_ptr(rHandle, 0, L4); __ ld_ptr(rHandle, 0, L4);
#ifdef _LP64
__ movr( Assembler::rc_z, L4, G0, rHandle ); __ movr( Assembler::rc_z, L4, G0, rHandle );
#else
__ tst( L4 );
__ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
if (dst.first()->is_stack()) { if (dst.first()->is_stack()) {
__ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS); __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
} }
@ -1432,12 +1217,7 @@ static void object_move(MacroAssembler* masm,
} }
map->set_oop(VMRegImpl::stack2reg(oop_slot)); map->set_oop(VMRegImpl::stack2reg(oop_slot));
__ add(SP, offset + STACK_BIAS, rHandle); __ add(SP, offset + STACK_BIAS, rHandle);
#ifdef _LP64
__ movr( Assembler::rc_z, rOop, G0, rHandle ); __ movr( Assembler::rc_z, rOop, G0, rHandle );
#else
__ tst( rOop );
__ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
#endif
if (dst.first()->is_stack()) { if (dst.first()->is_stack()) {
__ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS); __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
@ -2068,11 +1848,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask); __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
// Check for a valid (non-zero) hash code and get its value. // Check for a valid (non-zero) hash code and get its value.
#ifdef _LP64
__ srlx(header, markOopDesc::hash_shift, hash); __ srlx(header, markOopDesc::hash_shift, hash);
#else
__ srl(header, markOopDesc::hash_shift, hash);
#endif
__ andcc(hash, mask, hash); __ andcc(hash, mask, hash);
__ br(Assembler::equal, false, Assembler::pn, slowCase); __ br(Assembler::equal, false, Assembler::pn, slowCase);
__ delayed()->nop(); __ delayed()->nop();
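  // In scalar terms the fast path above is just a shift-and-mask of the mark word
  // (a sketch using the markOopDesc constants already named in the code):
  //
  //   hash = (header >> markOopDesc::hash_shift) & markOopDesc::hash_mask;
  //   if (hash == 0) goto slowCase;   // no identity hash installed yet
  //
  // with the mask apparently materialized in two pieces because it does not fit a
  // 13-bit SPARC immediate.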
@ -2408,7 +2184,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// We have all of the arguments setup at this point. We MUST NOT touch any Oregs // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
// except O6/O7. So if we must call out we must push a new frame. We immediately // except O6/O7. So if we must call out we must push a new frame. We immediately
// push a new frame and flush the windows. // push a new frame and flush the windows.
#ifdef _LP64
intptr_t thepc = (intptr_t) __ pc(); intptr_t thepc = (intptr_t) __ pc();
{ {
address here = __ pc(); address here = __ pc();
@ -2416,9 +2191,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ call(here + 8, relocInfo::none); __ call(here + 8, relocInfo::none);
__ delayed()->nop(); __ delayed()->nop();
} }
#else
intptr_t thepc = __ load_pc_address(O7, 0);
#endif /* _LP64 */
// We use the same pc/oopMap repeatedly when we call out // We use the same pc/oopMap repeatedly when we call out
oop_maps->add_gc_map(thepc - start, map); oop_maps->add_gc_map(thepc - start, map);
@ -2553,13 +2325,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Transition from _thread_in_Java to _thread_in_native. // Transition from _thread_in_Java to _thread_in_native.
__ set(_thread_in_native, G3_scratch); __ set(_thread_in_native, G3_scratch);
#ifdef _LP64
AddressLiteral dest(native_func); AddressLiteral dest(native_func);
__ relocate(relocInfo::runtime_call_type); __ relocate(relocInfo::runtime_call_type);
__ jumpl_to(dest, O7, O7); __ jumpl_to(dest, O7, O7);
#else
__ call(native_func, relocInfo::runtime_call_type);
#endif
__ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset()); __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
__ restore_thread(L7_thread_cache); // restore G2_thread __ restore_thread(L7_thread_cache); // restore G2_thread
@ -2574,9 +2342,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
case T_DOUBLE: break; // Got it where we want it (unless slow-path) case T_DOUBLE: break; // Got it where we want it (unless slow-path)
  // In the 64-bit build the result is in O0; in the 32-bit build it is in O0,O1 // In the 64-bit build the result is in O0; in the 32-bit build it is in O0,O1
case T_LONG: case T_LONG:
#ifndef _LP64
__ mov(O1, I1);
#endif
// Fall thru // Fall thru
case T_OBJECT: // Really a handle case T_OBJECT: // Really a handle
case T_ARRAY: case T_ARRAY:
@ -2797,16 +2562,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Return // Return
#ifndef _LP64
if (ret_type == T_LONG) {
// Must leave proper result in O0,O1 and G1 (c2/tiered only)
__ sllx(I0, 32, G1); // Shift bits into high G1
__ srl (I1, 0, I1); // Zero extend O1 (harmless?)
__ or3 (I1, G1, G1); // OR 64 bits into G1
}
#endif
__ ret(); __ ret();
__ delayed()->restore(); __ delayed()->restore();
@ -2868,10 +2623,6 @@ static void gen_new_frame(MacroAssembler* masm, bool deopt) {
#ifdef ASSERT #ifdef ASSERT
// make sure that the frames are aligned properly // make sure that the frames are aligned properly
#ifndef _LP64
__ btst(wordSize*2-1, SP);
__ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
#endif
#endif #endif
// Deopt needs to pass some extra live values from frame to frame // Deopt needs to pass some extra live values from frame to frame
@ -2989,13 +2740,7 @@ void SharedRuntime::generate_deopt_blob() {
pad += 1000; // Increase the buffer size when compiling for JVMCI pad += 1000; // Increase the buffer size when compiling for JVMCI
} }
#endif #endif
#ifdef _LP64
CodeBuffer buffer("deopt_blob", 2100+pad, 512); CodeBuffer buffer("deopt_blob", 2100+pad, 512);
#else
// Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
// Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
CodeBuffer buffer("deopt_blob", 1600+pad, 512);
#endif /* _LP64 */
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
FloatRegister Freturn0 = F0; FloatRegister Freturn0 = F0;
Register Greturn1 = G1; Register Greturn1 = G1;
@ -3006,9 +2751,6 @@ void SharedRuntime::generate_deopt_blob() {
Register G4deopt_mode = G4_scratch; Register G4deopt_mode = G4_scratch;
int frame_size_words; int frame_size_words;
Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS); Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
#if !defined(_LP64) && defined(COMPILER2)
Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
#endif
Label cont; Label cont;
OopMapSet *oop_maps = new OopMapSet(); OopMapSet *oop_maps = new OopMapSet();
@ -3220,30 +2962,13 @@ void SharedRuntime::generate_deopt_blob() {
// to the interpreter entry point // to the interpreter entry point
__ save(SP, -frame_size_words*wordSize, SP); __ save(SP, -frame_size_words*wordSize, SP);
__ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr); __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
#if !defined(_LP64)
#if defined(COMPILER2)
// 32-bit 1-register longs return longs in G1
__ stx(Greturn1, saved_Greturn1_addr);
#endif
__ set_last_Java_frame(SP, noreg);
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
#else
// LP64 uses g4 in set_last_Java_frame // LP64 uses g4 in set_last_Java_frame
__ mov(G4deopt_mode, O1); __ mov(G4deopt_mode, O1);
__ set_last_Java_frame(SP, G0); __ set_last_Java_frame(SP, G0);
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1); __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
__ reset_last_Java_frame(); __ reset_last_Java_frame();
__ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0); __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
#if !defined(_LP64) && defined(COMPILER2)
// In 32 bit, C2 returns longs in G1 so restore the saved G1 into
// I0/I1 if the return value is long.
Label not_long;
__ cmp_and_br_short(O0,T_LONG, Assembler::notEqual, Assembler::pt, not_long);
__ ldd(saved_Greturn1_addr,I0);
__ bind(not_long);
#endif
__ ret(); __ ret();
__ delayed()->restore(); __ delayed()->restore();
@ -3273,13 +2998,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32; pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
} }
#endif #endif
#ifdef _LP64
CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512); CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else
// Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
// Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
#endif
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
Register O2UnrollBlock = O2; Register O2UnrollBlock = O2;
Register O2klass_index = O2; Register O2klass_index = O2;


@ -1,5 +1,5 @@
// //
// Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
@ -311,7 +311,6 @@ reg_class o7_regI(R_O7);
// ---------------------------- // ----------------------------
// Pointer Register Classes // Pointer Register Classes
// ---------------------------- // ----------------------------
#ifdef _LP64
// 64-bit build means 64-bit pointers means hi/lo pairs // 64-bit build means 64-bit pointers means hi/lo pairs
reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
@ -344,40 +343,6 @@ reg_class o1_regP(R_O1H,R_O1);
reg_class o2_regP(R_O2H,R_O2); reg_class o2_regP(R_O2H,R_O2);
reg_class o7_regP(R_O7H,R_O7); reg_class o7_regP(R_O7H,R_O7);
#else // _LP64
// 32-bit build means 32-bit pointers means 1 register.
reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5,
R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
// Lock encodings use G3 and G4 internally
reg_class lock_ptr_reg(R_G1, R_G5,
R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.
reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5,
R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
// R_L7 is the lowest-priority callee-save (i.e., NS) register
// We use it to save R_G2 across calls out of Java.
reg_class l7_regP(R_L7);
// Other special pointer regs
reg_class g1_regP(R_G1);
reg_class g2_regP(R_G2);
reg_class g3_regP(R_G3);
reg_class g4_regP(R_G4);
reg_class g5_regP(R_G5);
reg_class i0_regP(R_I0);
reg_class o0_regP(R_O0);
reg_class o1_regP(R_O1);
reg_class o2_regP(R_O2);
reg_class o7_regP(R_O7);
#endif // _LP64
// ---------------------------- // ----------------------------
// Long Register Classes // Long Register Classes
@ -386,12 +351,10 @@ reg_class o7_regP(R_O7);
// Note: O7 is never in this class; it is sometimes used as an encoding temp. // Note: O7 is never in this class; it is sometimes used as an encoding temp.
reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
#ifdef _LP64
// 64-bit, longs in 1 register: use all 64-bit integer registers // 64-bit, longs in 1 register: use all 64-bit integer registers
// 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's. // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's.
,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
#endif // _LP64
); );
reg_class g1_regL(R_G1H,R_G1); reg_class g1_regL(R_G1H,R_G1);
@ -533,10 +496,8 @@ static Register reg_to_register_object(int register_encoding);
// instructions which either zero-fill or sign-fill). // instructions which either zero-fill or sign-fill).
bool can_branch_register( Node *bol, Node *cmp ) { bool can_branch_register( Node *bol, Node *cmp ) {
if( !BranchOnRegister ) return false; if( !BranchOnRegister ) return false;
#ifdef _LP64
if( cmp->Opcode() == Op_CmpP ) if( cmp->Opcode() == Op_CmpP )
return true; // No problems with pointer compares return true; // No problems with pointer compares
#endif
if( cmp->Opcode() == Op_CmpL ) if( cmp->Opcode() == Op_CmpL )
return true; // No problems with long compares return true; // No problems with long compares
@ -617,15 +578,11 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
} }
int MachCallRuntimeNode::ret_addr_offset() { int MachCallRuntimeNode::ret_addr_offset() {
#ifdef _LP64
if (MacroAssembler::is_far_target(entry_point())) { if (MacroAssembler::is_far_target(entry_point())) {
return NativeFarCall::instruction_size; return NativeFarCall::instruction_size;
} else { } else {
return NativeCall::instruction_size; return NativeCall::instruction_size;
} }
#else
return NativeCall::instruction_size; // call; delay slot
#endif
} }
// Indicate if the safepoint node needs the polling page as an input. // Indicate if the safepoint node needs the polling page as an input.
@ -1024,7 +981,6 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, RelocationHolder co
#ifdef ASSERT #ifdef ASSERT
if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) { if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
// Trash argument dump slots. // Trash argument dump slots.
__ set(0xb0b8ac0db0b8ac0d, G1); __ set(0xb0b8ac0db0b8ac0d, G1);
__ mov(G1, G5); __ mov(G1, G5);
@ -1034,22 +990,6 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, RelocationHolder co
__ stx(G1, SP, STACK_BIAS + 0x98); __ stx(G1, SP, STACK_BIAS + 0x98);
__ stx(G1, SP, STACK_BIAS + 0xA0); __ stx(G1, SP, STACK_BIAS + 0xA0);
__ stx(G1, SP, STACK_BIAS + 0xA8); __ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
// this is also a native call, so smash the first 7 stack locations,
// and the various registers
// Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
// while [SP+0x44..0x58] are the argument dump slots.
__ set((intptr_t)0xbaadf00d, G1);
__ mov(G1, G5);
__ sllx(G1, 32, G1);
__ or3(G1, G5, G1);
__ mov(G1, G5);
__ stx(G1, SP, 0x40);
__ stx(G1, SP, 0x48);
__ stx(G1, SP, 0x50);
__ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
} }
#endif /*ASSERT*/ #endif /*ASSERT*/
} }
@ -1262,11 +1202,7 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
if(do_polling() && ra_->C->is_method_compilation()) { if(do_polling() && ra_->C->is_method_compilation()) {
st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t"); st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
st->print("LDX [L0],G0\t!Poll for Safepointing\n\t"); st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
#else
st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
#endif
} }
if(do_polling()) { if(do_polling()) {
@ -1472,75 +1408,10 @@ static void mach_spill_copy_implementation_helper(const MachNode* mach,
// hardware does the flop for me. Doubles are always aligned, so no problem // hardware does the flop for me. Doubles are always aligned, so no problem
// there. Misaligned sources only come from native-long-returns (handled // there. Misaligned sources only come from native-long-returns (handled
// specially below). // specially below).
#ifndef _LP64
if (src_first_rc == rc_int && // source is already big-endian
src_second_rc != rc_bad && // 64-bit move
((dst_first & 1) != 0 || dst_second != dst_first + 1)) { // misaligned dst
assert((src_first & 1) == 0 && src_second == src_first + 1, "source must be aligned");
// Do the big-endian flop.
OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ;
enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
}
#endif
// -------------------------------------- // --------------------------------------
// Check for integer reg-reg copy // Check for integer reg-reg copy
if (src_first_rc == rc_int && dst_first_rc == rc_int) { if (src_first_rc == rc_int && dst_first_rc == rc_int) {
#ifndef _LP64
if (src_first == R_O0_num && src_second == R_O1_num) { // Check for the evil O0/O1 native long-return case
// Note: The _first and _second suffixes refer to the addresses of the 2 halves of the 64-bit value
// as stored in memory. On a big-endian machine like SPARC, this means that the _second
// operand contains the least significant word of the 64-bit value and vice versa.
OptoReg::Name tmp = OptoReg::Name(R_O7_num);
assert((dst_first & 1) == 0 && dst_second == dst_first + 1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
// Shift O0 left in-place, zero-extend O1, then OR them into the dst
if ( cbuf ) {
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020);
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000);
emit3 (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second]);
#ifndef PRODUCT
} else {
print_helper(st, "SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
print_helper(st, "SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
print_helper(st, "OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
#endif
}
return;
} else if (dst_first == R_I0_num && dst_second == R_I1_num) {
// returning a long value in I0/I1
// a SpillCopy must be able to target a return instruction's reg_class
// Note: The _first and _second suffixes refer to the addresses of the 2 halves of the 64-bit value
// as stored in memory. On a big-endian machine like SPARC, this means that the _second
// operand contains the least significant word of the 64-bit value and vice versa.
OptoReg::Name tdest = dst_first;
if (src_first == dst_first) {
tdest = OptoReg::Name(R_O7_num);
}
if (cbuf) {
assert((src_first & 1) == 0 && (src_first + 1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
// Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
// ShrL_reg_imm6
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000);
// ShrR_reg_imm6 src, 0, dst
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000);
if (tdest != dst_first) {
emit3 (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest]);
}
}
#ifndef PRODUCT
else {
print_helper(st, "SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
print_helper(st, "SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
if (tdest != dst_first) {
print_helper(st, "MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
}
}
#endif // PRODUCT
return size+8;
}
#endif // !_LP64
// Else normal reg-reg copy // Else normal reg-reg copy
assert(src_second != dst_first, "smashed second before evacuating it"); assert(src_second != dst_first, "smashed second before evacuating it");
impl_mov_helper(cbuf, src_first, dst_first, Assembler::or_op3, 0, "MOV ", st); impl_mov_helper(cbuf, src_first, dst_first, Assembler::or_op3, 0, "MOV ", st);
@ -1614,58 +1485,6 @@ static void mach_spill_copy_implementation_helper(const MachNode* mach,
} }
assert(src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad"); assert(src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad");
#ifndef _LP64
// In the LP64 build, all registers can be moved as aligned/adjacent
// pairs, so there's never any need to move the high bits separately.
// The 32-bit builds have to deal with the 32-bit ABI which can force
// all sorts of silly alignment problems.
// Check for integer reg-reg copy. Hi bits are stuck up in the top
// 32-bits of a 64-bit register, but are needed in low bits of another
// register (else it's a hi-bits-to-hi-bits copy which should have
// happened already as part of a 64-bit move)
if (src_second_rc == rc_int && dst_second_rc == rc_int) {
assert((src_second & 1) == 1, "its the evil O0/O1 native return case");
assert((dst_second & 1) == 0, "should have moved with 1 64-bit move");
// Shift src_second down to dst_second's low bits.
if (cbuf) {
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
#ifndef PRODUCT
} else {
print_helper(st, "SRLX R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second - 1), OptoReg::regname(dst_second));
#endif
}
return;
}
// Check for high word integer store. Must down-shift the hi bits
// into a temp register, then fall into the case of storing int bits.
if (src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second & 1) == 1) {
// Shift src_second down to dst_second's low bits.
if (cbuf) {
emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
#ifndef PRODUCT
} else {
print_helper(st, "SRLX R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second-1), OptoReg::regname(R_O7_num));
#endif
}
src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
}
// Check for high word integer load
if (dst_second_rc == rc_int && src_second_rc == rc_stack)
return impl_helper(this, cbuf, ra_, true, ra_->reg2offset(src_second), dst_second, Assembler::lduw_op3, "LDUW", size, st);
// Check for high word integer store
if (src_second_rc == rc_int && dst_second_rc == rc_stack)
return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stw_op3, "STW ", size, st);
// Check for high word float store
if (src_second_rc == rc_float && dst_second_rc == rc_stack)
return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stf_op3, "STF ", size, st);
#endif // !_LP64
Unimplemented(); Unimplemented();
} }
@ -1743,7 +1562,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
#ifndef PRODUCT #ifndef PRODUCT
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:"); st->print_cr("\nUEP:");
#ifdef _LP64
if (UseCompressedClassPointers) { if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized"); assert(Universe::heap() != NULL, "java heap should be initialized");
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass"); st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
@ -1762,11 +1580,6 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
} }
st->print_cr("\tCMP R_G5,R_G3" ); st->print_cr("\tCMP R_G5,R_G3" );
st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2"); st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else // _LP64
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
st->print_cr("\tCMP R_G5,R_G3" );
st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif // _LP64
} }
#endif #endif
@ -1874,9 +1687,7 @@ const bool Matcher::match_rule_supported(int opcode) {
if (!UsePopCountInstruction) if (!UsePopCountInstruction)
return false; return false;
case Op_CompareAndSwapL: case Op_CompareAndSwapL:
#ifdef _LP64
case Op_CompareAndSwapP: case Op_CompareAndSwapP:
#endif
if (!VM_Version::supports_cx8()) if (!VM_Version::supports_cx8())
return false; return false;
break; break;
@ -1919,12 +1730,12 @@ const int Matcher::vector_width_in_bytes(BasicType bt) {
} }
// Vector ideal reg // Vector ideal reg
const int Matcher::vector_ideal_reg(int size) { const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize == 8, ""); assert(MaxVectorSize == 8, "");
return Op_RegD; return Op_RegD;
} }
const int Matcher::vector_shift_count_ideal_reg(int size) { const uint Matcher::vector_shift_count_ideal_reg(int size) {
fatal("vector shift is not supported"); fatal("vector shift is not supported");
return Node::NotAMachineReg; return Node::NotAMachineReg;
} }
@ -1992,13 +1803,11 @@ const bool Matcher::require_postalloc_expand = false;
const bool Matcher::need_masked_shift_count = false; const bool Matcher::need_masked_shift_count = false;
bool Matcher::narrow_oop_use_complex_address() { bool Matcher::narrow_oop_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedOops, "only for compressed oops code"); assert(UseCompressedOops, "only for compressed oops code");
return false; return false;
} }
bool Matcher::narrow_klass_use_complex_address() { bool Matcher::narrow_klass_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedClassPointers, "only for compressed klass code"); assert(UseCompressedClassPointers, "only for compressed klass code");
return false; return false;
} }
@ -2027,11 +1836,7 @@ const bool Matcher::rematerialize_float_constants = false;
// needed. Else we split the double into 2 integer pieces and move it // needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the // piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned. // Java calling convention forces doubles to be aligned.
#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true; const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif
// No-op on SPARC. // No-op on SPARC.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) { void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
@ -2050,11 +1855,7 @@ bool Matcher::float_in_double() { return false; }
// The relevant question is how the int is callee-saved. In _LP64 // The relevant question is how the int is callee-saved. In _LP64
// the whole long is written but de-opt'ing will have to extract // the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written. // the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true; const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif
// Return whether or not this register is ever used as an argument. This // Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub. // function is used on startup to build the trampoline stubs in generateOptoStub.
@ -2068,7 +1869,6 @@ bool Matcher::can_be_java_arg( int reg ) {
reg == R_I3_num || reg == R_I3_num ||
reg == R_I4_num || reg == R_I4_num ||
reg == R_I5_num ) return true; reg == R_I5_num ) return true;
#ifdef _LP64
// 64-bit builds can pass 64-bit pointers and longs in // 64-bit builds can pass 64-bit pointers and longs in
// the high I registers // the high I registers
if( reg == R_I0H_num || if( reg == R_I0H_num ||
@ -2082,14 +1882,6 @@ bool Matcher::can_be_java_arg( int reg ) {
return true; return true;
} }
#else
// 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
// Longs cannot be passed in O regs, because O regs become I regs
// after a 'save' and I regs get their high bits chopped off on
// interrupt.
if( reg == R_G1H_num || reg == R_G1_num ) return true;
if( reg == R_G4H_num || reg == R_G4_num ) return true;
#endif
// A few float args in registers // A few float args in registers
if( reg >= R_F0_num && reg <= R_F7_num ) return true; if( reg >= R_F0_num && reg <= R_F7_num ) return true;
@ -2152,19 +1944,11 @@ void Compile::reshape_address(AddPNode* addp) {
// The intptr_t operand types, defined by textual substitution. // The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.) // (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
#ifdef _LP64
#define immX immL #define immX immL
#define immX13 immL13 #define immX13 immL13
#define immX13m7 immL13m7 #define immX13m7 immL13m7
#define iRegX iRegL #define iRegX iRegL
#define g1RegX g1RegL #define g1RegX g1RegL
#else
#define immX immI
#define immX13 immI13
#define immX13m7 immI13m7
#define iRegX iRegI
#define g1RegX g1RegI
#endif
//----------ENCODING BLOCK----------------------------------------------------- //----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output // This block specifies the encoding classes used by the compiler to output
@ -2326,7 +2110,6 @@ encode %{
emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset ); emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
%} %}
#ifdef _LP64
/* %%% merge with enc_to_bool */ /* %%% merge with enc_to_bool */
enc_class enc_convP2B( iRegI dst, iRegP src ) %{ enc_class enc_convP2B( iRegI dst, iRegP src ) %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
@ -2335,7 +2118,6 @@ encode %{
Register dst_reg = reg_to_register_object($dst$$reg); Register dst_reg = reg_to_register_object($dst$$reg);
__ movr(Assembler::rc_nz, src_reg, 1, dst_reg); __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
%} %}
#endif
enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{ enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
// (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))) // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
@ -2626,16 +2408,6 @@ encode %{
// to G1 so the register allocator will not have to deal with the misaligned register // to G1 so the register allocator will not have to deal with the misaligned register
// pair. // pair.
enc_class adjust_long_from_native_call %{ enc_class adjust_long_from_native_call %{
#ifndef _LP64
if (returns_long()) {
// sllx O0,32,O0
emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
// srl O1,0,O1
emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
// or O0,O1,G1
emit3 ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
}
#endif
%} %}
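// The 32-bit-only body above packed the native O0/O1 long return into G1; in C terms,
// roughly (with O0 holding the high word and O1 the low word of the 64-bit result):
//
//   G1 = ((uint64_t)O0 << 32) | (uint32_t)O1;
//
// which matches the sllx/srl/or sequence spelled out in the raw emit3* calls.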
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime
@ -3102,11 +2874,7 @@ frame %{
cisc_spilling_operand_name(indOffset); cisc_spilling_operand_name(indOffset);
// Number of stack slots consumed by a Monitor enter // Number of stack slots consumed by a Monitor enter
#ifdef _LP64
sync_stack_slots(2); sync_stack_slots(2);
#else
sync_stack_slots(1);
#endif
// Compiled code's Frame Pointer // Compiled code's Frame Pointer
frame_pointer(R_SP); frame_pointer(R_SP);
@ -3124,13 +2892,8 @@ frame %{
// Number of outgoing stack slots killed above the out_preserve_stack_slots // Number of outgoing stack slots killed above the out_preserve_stack_slots
// for calls to C. Supports the var-args backing area for register parms. // for calls to C. Supports the var-args backing area for register parms.
// ADLC doesn't support parsing expressions, so I folded the math by hand. // ADLC doesn't support parsing expressions, so I folded the math by hand.
#ifdef _LP64
// (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
varargs_C_out_slots_killed(12); varargs_C_out_slots_killed(12);
#else
// (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
varargs_C_out_slots_killed( 7);
#endif
// The after-PROLOG location of the return address. Location of // The after-PROLOG location of the return address. Location of
// return address specifies a type (REG or STACK) and a number // return address specifies a type (REG or STACK) and a number
@ -3161,17 +2924,10 @@ frame %{
// opcodes. This simplifies the register allocator. // opcodes. This simplifies the register allocator.
c_return_value %{ c_return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
#endif
return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
(is_outgoing?lo_out:lo_in)[ideal_reg] ); (is_outgoing?lo_out:lo_in)[ideal_reg] );
%} %}
@ -3179,17 +2935,10 @@ frame %{
// Location of compiled Java return values. Same as C // Location of compiled Java return values. Same as C
return_value %{ return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
#endif
return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
(is_outgoing?lo_out:lo_in)[ideal_reg] ); (is_outgoing?lo_out:lo_in)[ideal_reg] );
%} %}
@ -3444,7 +3193,6 @@ operand immP() %{
interface(CONST_INTER); interface(CONST_INTER);
%} %}
#ifdef _LP64
// Pointer Immediate: 64-bit // Pointer Immediate: 64-bit
operand immP_set() %{ operand immP_set() %{
predicate(!VM_Version::is_niagara_plus()); predicate(!VM_Version::is_niagara_plus());
@ -3478,7 +3226,6 @@ operand immP_no_oop_cheap() %{
format %{ %} format %{ %}
interface(CONST_INTER); interface(CONST_INTER);
%} %}
#endif
operand immP13() %{ operand immP13() %{
predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095)); predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
@ -3919,11 +3666,7 @@ operand flagsRegP() %{
constraint(ALLOC_IN_RC(int_flags)); constraint(ALLOC_IN_RC(int_flags));
match(RegFlags); match(RegFlags);
#ifdef _LP64
format %{ "xcc_P" %} format %{ "xcc_P" %}
#else
format %{ "icc_P" %}
#endif
interface(REG_INTER); interface(REG_INTER);
%} %}
@ -4500,7 +4243,6 @@ pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr )
MS : R(2); MS : R(2);
%} %}
#ifdef _LP64
pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{ pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
instruction_count(1); multiple_bundles; instruction_count(1); multiple_bundles;
dst : C(write)+1; dst : C(write)+1;
@ -4509,7 +4251,6 @@ pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
BR : E(2); BR : E(2);
MS : E(2); MS : E(2);
%} %}
#endif
// Integer ALU reg operation // Integer ALU reg operation
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{ pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
@ -4614,13 +4355,8 @@ pipe_class loadConP( iRegP dst, immP src ) %{
// Polling Address // Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{ pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
#ifdef _LP64
instruction_count(0); multiple_bundles; instruction_count(0); multiple_bundles;
fixed_latency(6); fixed_latency(6);
#else
dst : E(write);
IALU : R;
#endif
%} %}
// Long Constant small // Long Constant small
@ -5361,7 +5097,6 @@ instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
ins_pipe(istore_mem_reg); ins_pipe(istore_mem_reg);
%} %}
#ifdef _LP64
// Load pointer from stack slot, 64-bit encoding // Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{ instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
match(Set dst src); match(Set dst src);
@ -5381,27 +5116,6 @@ instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
ins_encode(simple_form3_mem_reg( dst, src ) ); ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg); ins_pipe(istore_mem_reg);
%} %}
#else // _LP64
// Load pointer from stack slot, 32-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
match(Set dst src);
ins_cost(MEMORY_REF_COST);
format %{ "LDUW $src,$dst\t!ptr" %}
opcode(Assembler::lduw_op3, Assembler::ldst_op);
ins_encode(simple_form3_mem_reg( src, dst ) );
ins_pipe(iload_mem);
%}
// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
match(Set dst src);
ins_cost(MEMORY_REF_COST);
format %{ "STW $src,$dst\t!ptr" %}
opcode(Assembler::stw_op3, Assembler::ldst_op);
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg);
%}
#endif // _LP64
//------------Special Nop instructions for bundling - no match rules----------- //------------Special Nop instructions for bundling - no match rules-----------
// Nop using the A0 functional unit // Nop using the A0 functional unit
@ -5858,17 +5572,10 @@ instruct loadP(iRegP dst, memory mem) %{
ins_cost(MEMORY_REF_COST); ins_cost(MEMORY_REF_COST);
size(4); size(4);
#ifndef _LP64
format %{ "LDUW $mem,$dst\t! ptr" %}
ins_encode %{
__ lduw($mem$$Address, $dst$$Register);
%}
#else
format %{ "LDX $mem,$dst\t! ptr" %} format %{ "LDX $mem,$dst\t! ptr" %}
ins_encode %{ ins_encode %{
__ ldx($mem$$Address, $dst$$Register); __ ldx($mem$$Address, $dst$$Register);
%} %}
#endif
ins_pipe(iload_mem); ins_pipe(iload_mem);
%} %}
@ -5891,17 +5598,10 @@ instruct loadKlass(iRegP dst, memory mem) %{
ins_cost(MEMORY_REF_COST); ins_cost(MEMORY_REF_COST);
size(4); size(4);
#ifndef _LP64
format %{ "LDUW $mem,$dst\t! klass ptr" %}
ins_encode %{
__ lduw($mem$$Address, $dst$$Register);
%}
#else
format %{ "LDX $mem,$dst\t! klass ptr" %} format %{ "LDX $mem,$dst\t! klass ptr" %}
ins_encode %{ ins_encode %{
__ ldx($mem$$Address, $dst$$Register); __ ldx($mem$$Address, $dst$$Register);
%} %}
#endif
ins_pipe(iload_mem); ins_pipe(iload_mem);
%} %}
@ -5969,26 +5669,6 @@ instruct loadConI13( iRegI dst, immI13 src ) %{
ins_pipe(ialu_imm); ins_pipe(ialu_imm);
%} %}
#ifndef _LP64
instruct loadConP(iRegP dst, immP con) %{
match(Set dst con);
ins_cost(DEFAULT_COST * 3/2);
format %{ "SET $con,$dst\t!ptr" %}
ins_encode %{
relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
intptr_t val = $con$$constant;
if (constant_reloc == relocInfo::oop_type) {
__ set_oop_constant((jobject) val, $dst$$Register);
} else if (constant_reloc == relocInfo::metadata_type) {
__ set_metadata_constant((Metadata*)val, $dst$$Register);
} else { // non-oop pointers, e.g. card mark base, heap top
assert(constant_reloc == relocInfo::none, "unexpected reloc type");
__ set(val, $dst$$Register);
}
%}
ins_pipe(loadConP);
%}
#else
instruct loadConP_set(iRegP dst, immP_set con) %{ instruct loadConP_set(iRegP dst, immP_set con) %{
match(Set dst con); match(Set dst con);
ins_cost(DEFAULT_COST * 3/2); ins_cost(DEFAULT_COST * 3/2);
@ -6032,7 +5712,6 @@ instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
%} %}
ins_pipe(loadConP); ins_pipe(loadConP);
%} %}
#endif // _LP64
instruct loadConP0(iRegP dst, immP0 src) %{ instruct loadConP0(iRegP dst, immP0 src) %{
match(Set dst src); match(Set dst src);
@ -6186,19 +5865,6 @@ instruct prefetchAlloc_bis( iRegP dst ) %{
%} %}
// Next code is used for finding next cache line address to prefetch. // Next code is used for finding next cache line address to prefetch.
#ifndef _LP64
instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
match(Set dst (CastX2P (AndI (CastP2X src) mask)));
ins_cost(DEFAULT_COST);
size(4);
format %{ "AND $src,$mask,$dst\t! next cache line address" %}
ins_encode %{
__ and3($src$$Register, $mask$$constant, $dst$$Register);
%}
ins_pipe(ialu_reg_imm);
%}
#else
instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{ instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
match(Set dst (CastX2P (AndL (CastP2X src) mask))); match(Set dst (CastX2P (AndL (CastP2X src) mask)));
ins_cost(DEFAULT_COST); ins_cost(DEFAULT_COST);
@ -6210,7 +5876,6 @@ instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
%} %}
ins_pipe(ialu_reg_imm); ins_pipe(ialu_reg_imm);
%} %}
#endif
//----------Store Instructions------------------------------------------------- //----------Store Instructions-------------------------------------------------
// Store Byte // Store Byte
@ -6322,13 +5987,8 @@ instruct storeP(memory dst, sp_ptr_RegP src) %{
match(Set dst (StoreP dst src)); match(Set dst (StoreP dst src));
ins_cost(MEMORY_REF_COST); ins_cost(MEMORY_REF_COST);
#ifndef _LP64
format %{ "STW $src,$dst\t! ptr" %}
opcode(Assembler::stw_op3, 0, REGP_OP);
#else
format %{ "STX $src,$dst\t! ptr" %} format %{ "STX $src,$dst\t! ptr" %}
opcode(Assembler::stx_op3, 0, REGP_OP); opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
ins_encode( form3_mem_reg( dst, src ) ); ins_encode( form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_spORreg); ins_pipe(istore_mem_spORreg);
%} %}
@ -6337,13 +5997,8 @@ instruct storeP0(memory dst, immP0 src) %{
match(Set dst (StoreP dst src)); match(Set dst (StoreP dst src));
ins_cost(MEMORY_REF_COST); ins_cost(MEMORY_REF_COST);
#ifndef _LP64
format %{ "STW $src,$dst\t! ptr" %}
opcode(Assembler::stw_op3, 0, REGP_OP);
#else
format %{ "STX $src,$dst\t! ptr" %} format %{ "STX $src,$dst\t! ptr" %}
opcode(Assembler::stx_op3, 0, REGP_OP); opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
ins_encode( form3_mem_reg( dst, R_G0 ) ); ins_encode( form3_mem_reg( dst, R_G0 ) );
ins_pipe(istore_mem_zero); ins_pipe(istore_mem_zero);
%} %}
@ -7094,13 +6749,8 @@ instruct loadPLocked(iRegP dst, memory mem) %{
match(Set dst (LoadPLocked mem)); match(Set dst (LoadPLocked mem));
ins_cost(MEMORY_REF_COST); ins_cost(MEMORY_REF_COST);
#ifndef _LP64
format %{ "LDUW $mem,$dst\t! ptr" %}
opcode(Assembler::lduw_op3, 0, REGP_OP);
#else
format %{ "LDX $mem,$dst\t! ptr" %} format %{ "LDX $mem,$dst\t! ptr" %}
opcode(Assembler::ldx_op3, 0, REGP_OP); opcode(Assembler::ldx_op3, 0, REGP_OP);
#endif
ins_encode( form3_mem_reg( mem, dst ) ); ins_encode( form3_mem_reg( mem, dst ) );
ins_pipe(iload_mem); ins_pipe(iload_mem);
%} %}
@ -7171,9 +6821,7 @@ instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI r
%} %}
instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
#ifdef _LP64
predicate(VM_Version::supports_cx8()); predicate(VM_Version::supports_cx8());
#endif
match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp1); effect( USE mem_ptr, KILL ccr, KILL tmp1);
@ -7184,13 +6832,8 @@ instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI r
"MOV 1,$res\n\t" "MOV 1,$res\n\t"
"MOVne xcc,R_G0,$res" "MOVne xcc,R_G0,$res"
%} %}
#ifdef _LP64
ins_encode( enc_casx(mem_ptr, oldval, newval), ins_encode( enc_casx(mem_ptr, oldval, newval),
enc_lflags_ne_to_boolean(res) ); enc_lflags_ne_to_boolean(res) );
#else
ins_encode( enc_casi(mem_ptr, oldval, newval),
enc_iflags_ne_to_boolean(res) );
#endif
ins_pipe( long_memory_op ); ins_pipe( long_memory_op );
%} %}
@ -7268,17 +6911,6 @@ instruct xchgI( memory mem, iRegI newval) %{
ins_pipe( long_memory_op ); ins_pipe( long_memory_op );
%} %}
#ifndef _LP64
instruct xchgP( memory mem, iRegP newval) %{
match(Set newval (GetAndSetP mem newval));
format %{ "SWAP [$mem],$newval" %}
size(4);
ins_encode %{
__ swap($mem$$Address, $newval$$Register);
%}
ins_pipe( long_memory_op );
%}
#endif
instruct xchgN( memory mem, iRegN newval) %{ instruct xchgN( memory mem, iRegN newval) %{
match(Set newval (GetAndSetN mem newval)); match(Set newval (GetAndSetN mem newval));
@ -7740,7 +7372,6 @@ instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
%} %}
// Register Shift Right Immediate with a CastP2X // Register Shift Right Immediate with a CastP2X
#ifdef _LP64
instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{ instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
match(Set dst (URShiftL (CastP2X src1) src2)); match(Set dst (URShiftL (CastP2X src1) src2));
size(4); size(4);
@ -7749,16 +7380,6 @@ instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
ins_pipe(ialu_reg_imm); ins_pipe(ialu_reg_imm);
%} %}
#else
instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{
match(Set dst (URShiftI (CastP2X src1) src2));
size(4);
format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %}
opcode(Assembler::srl_op3, Assembler::arith_op);
ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
ins_pipe(ialu_reg_imm);
%}
#endif
//----------Floating Point Arithmetic Instructions----------------------------- //----------Floating Point Arithmetic Instructions-----------------------------
@ -8001,21 +7622,6 @@ instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
ins_pipe(ialu_reg_imm); ins_pipe(ialu_reg_imm);
%} %}
#ifndef _LP64
// Use sp_ptr_RegP to match G2 (TLS register) without spilling.
instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
match(Set dst (OrI src1 (CastP2X src2)));
size(4);
format %{ "OR $src1,$src2,$dst" %}
opcode(Assembler::or_op3, Assembler::arith_op);
ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
ins_pipe(ialu_reg_reg);
%}
#else
instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{ instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
match(Set dst (OrL src1 (CastP2X src2))); match(Set dst (OrL src1 (CastP2X src2)));
@ -8027,8 +7633,6 @@ instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
ins_pipe(ialu_reg_reg); ins_pipe(ialu_reg_reg);
%} %}
#endif
// Xor Instructions // Xor Instructions
// Register Xor // Register Xor
instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
@ -8088,17 +7692,6 @@ instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
ins_pipe(ialu_reg_ialu); ins_pipe(ialu_reg_ialu);
%} %}
#ifndef _LP64
instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
match(Set dst (Conv2B src));
effect( KILL ccr );
ins_cost(DEFAULT_COST*2);
format %{ "CMP R_G0,$src\n\t"
"ADDX R_G0,0,$dst" %}
ins_encode( enc_to_bool( src, dst ) );
ins_pipe(ialu_reg_ialu);
%}
#else
instruct convP2B( iRegI dst, iRegP src ) %{ instruct convP2B( iRegI dst, iRegP src ) %{
match(Set dst (Conv2B src)); match(Set dst (Conv2B src));
ins_cost(DEFAULT_COST*2); ins_cost(DEFAULT_COST*2);
@ -8107,7 +7700,6 @@ instruct convP2B( iRegI dst, iRegP src ) %{
ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) ); ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
ins_pipe(ialu_clr_and_mover); ins_pipe(ialu_clr_and_mover);
%} %}
#endif
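Conv2B on a pointer simply materializes 0 or 1; in plain C++ terms (sketch only):

inline int conv_p2b(const void* p) { return (p != nullptr) ? 1 : 0; }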
instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{ instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
match(Set dst (CmpLTMask src zero)); match(Set dst (CmpLTMask src zero));
@ -8750,16 +8342,10 @@ instruct convL2F_reg(regF dst, iRegL src) %{
instruct convL2I_reg(iRegI dst, iRegL src) %{ instruct convL2I_reg(iRegI dst, iRegL src) %{
match(Set dst (ConvL2I src)); match(Set dst (ConvL2I src));
#ifndef _LP64
format %{ "MOV $src.lo,$dst\t! long->int" %}
ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
ins_pipe(ialu_move_reg_I_to_L);
#else
size(4); size(4);
format %{ "SRA $src,R_G0,$dst\t! long->int" %} format %{ "SRA $src,R_G0,$dst\t! long->int" %}
ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) ); ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
ins_pipe(ialu_reg); ins_pipe(ialu_reg);
#endif
%} %}
// Register Shift Right Immediate // Register Shift Right Immediate
@ -9528,11 +9114,7 @@ instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, fla
size(4); size(4);
ins_cost(BRANCH_COST); ins_cost(BRANCH_COST);
#ifdef _LP64
format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %} format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
#else
format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
#endif
ins_encode %{ ins_encode %{
Label* L = $labl$$label; Label* L = $labl$$label;
assert(__ use_cbcond(*L), "back to back cbcond"); assert(__ use_cbcond(*L), "back to back cbcond");
@ -9550,11 +9132,7 @@ instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, f
size(4); size(4);
ins_cost(BRANCH_COST); ins_cost(BRANCH_COST);
#ifdef _LP64
format %{ "CXB$cmp $op1,0,$labl\t! ptr" %} format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
#else
format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
#endif
ins_encode %{ ins_encode %{
Label* L = $labl$$label; Label* L = $labl$$label;
assert(__ use_cbcond(*L), "back to back cbcond"); assert(__ use_cbcond(*L), "back to back cbcond");
@ -9822,11 +9400,7 @@ instruct safePoint_poll(iRegP poll) %{
effect(USE poll); effect(USE poll);
size(4); size(4);
#ifdef _LP64
format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %} format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
#else
format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
#endif
ins_encode %{ ins_encode %{
__ relocate(relocInfo::poll_type); __ relocate(relocInfo::poll_type);
__ ld_ptr($poll$$Register, 0, G0); __ ld_ptr($poll$$Register, 0, G0);


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -216,9 +216,7 @@ class StubGenerator: public StubCodeGenerator {
__ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words) __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
__ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes __ sll(t, Interpreter::logStackElementSize, t); // compute number of bytes
__ sub(FP, t, Gargs); // setup parameter pointer __ sub(FP, t, Gargs); // setup parameter pointer
#ifdef _LP64
__ add( Gargs, STACK_BIAS, Gargs ); // Account for LP64 stack bias __ add( Gargs, STACK_BIAS, Gargs ); // Account for LP64 stack bias
#endif
__ mov(SP, O5_savedSP); __ mov(SP, O5_savedSP);
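The STACK_BIAS adjustment that is now unconditional reflects the SPARC V9 ABI, where the 64-bit stack and frame pointers are kept biased. A sketch of the idea; the 2047 value is the conventional V9 bias and is stated here only for illustration:

#include <cstdint>

// Illustration: a biased %fp/%sp is turned into a usable address by adding the
// bias back (STACK_BIAS in HotSpot).
constexpr intptr_t kSparcV9StackBias = 2047;  // assumed value, illustrative only

inline intptr_t unbiased(intptr_t biased_fp) {
  return biased_fp + kSparcV9StackBias;
}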
@ -271,27 +269,8 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0); __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
__ BIND(is_long); __ BIND(is_long);
#ifdef _LP64
__ ba(exit); __ ba(exit);
__ delayed()->st_long(O0, addr, G0); // store entire long __ delayed()->st_long(O0, addr, G0); // store entire long
#else
#if defined(COMPILER2)
// All return values are where we want them, except for Longs. C2 returns
// longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
// Since the interpreter will return longs in G1 and O0/O1 in the 32bit
// build we simply always use G1.
// Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
// do this here. Unfortunately if we did a rethrow we'd see an machepilog node
// first which would move g1 -> O0/O1 and destroy the exception we were throwing.
__ ba(exit);
__ delayed()->stx(G1, addr, G0); // store entire long
#else
__ st(O1, addr, BytesPerInt);
__ ba(exit);
__ delayed()->st(O0, addr, G0);
#endif /* COMPILER2 */
#endif /* _LP64 */
} }
return start; return start;
} }
@ -746,22 +725,10 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc(); address start = __ pc();
Label miss; Label miss;
#if defined(COMPILER2) && !defined(_LP64)
// Do not use a 'save' because it blows the 64-bit O registers.
__ add(SP,-4*wordSize,SP); // Make space for 4 temps (stack must be 2 words aligned)
__ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
__ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
__ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
__ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
Register Rret = O0;
Register Rsub = O1;
Register Rsuper = O2;
#else
__ save_frame(0); __ save_frame(0);
Register Rret = I0; Register Rret = I0;
Register Rsub = I1; Register Rsub = I1;
Register Rsuper = I2; Register Rsuper = I2;
#endif
Register L0_ary_len = L0; Register L0_ary_len = L0;
Register L1_ary_ptr = L1; Register L1_ary_ptr = L1;
@ -775,32 +742,14 @@ class StubGenerator: public StubCodeGenerator {
// Match falls through here. // Match falls through here.
__ addcc(G0,0,Rret); // set Z flags, Z result __ addcc(G0,0,Rret); // set Z flags, Z result
#if defined(COMPILER2) && !defined(_LP64)
__ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
__ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
__ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
__ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
__ retl(); // Result in Rret is zero; flags set to Z
__ delayed()->add(SP,4*wordSize,SP);
#else
__ ret(); // Result in Rret is zero; flags set to Z __ ret(); // Result in Rret is zero; flags set to Z
__ delayed()->restore(); __ delayed()->restore();
#endif
__ BIND(miss); __ BIND(miss);
__ addcc(G0,1,Rret); // set NZ flags, NZ result __ addcc(G0,1,Rret); // set NZ flags, NZ result
#if defined(COMPILER2) && !defined(_LP64)
__ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
__ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
__ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
__ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
__ retl(); // Result in Rret is != 0; flags set to NZ
__ delayed()->add(SP,4*wordSize,SP);
#else
__ ret(); // Result in Rret is != 0; flags set to NZ __ ret(); // Result in Rret is != 0; flags set to NZ
__ delayed()->restore(); __ delayed()->restore();
#endif
return start; return start;
} }
@ -828,11 +777,11 @@ class StubGenerator: public StubCodeGenerator {
// Rtmp - scratch // Rtmp - scratch
// //
void assert_clean_int(Register Rint, Register Rtmp) { void assert_clean_int(Register Rint, Register Rtmp) {
#if defined(ASSERT) && defined(_LP64) #if defined(ASSERT)
__ signx(Rint, Rtmp); __ signx(Rint, Rtmp);
__ cmp(Rint, Rtmp); __ cmp(Rint, Rtmp);
__ breakpoint_trap(Assembler::notEqual, Assembler::xcc); __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
#endif #endif
} }
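What assert_clean_int verifies, in plain C++ terms (a sketch, not HotSpot code): the 64-bit register must already hold a properly sign-extended 32-bit value.

#include <cassert>
#include <cstdint>

inline void assert_clean_int_sketch(int64_t reg_value) {
  // signx + cmp + breakpoint_trap above amounts to this check:
  assert(reg_value == static_cast<int64_t>(static_cast<int32_t>(reg_value)));
}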
// //
@ -1269,17 +1218,6 @@ class StubGenerator: public StubCodeGenerator {
// Aligned arrays have 4 bytes alignment in 32-bits VM // Aligned arrays have 4 bytes alignment in 32-bits VM
// and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM // and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM
// //
#ifndef _LP64
// copy a 4-bytes word if necessary to align 'to' to 8 bytes
__ andcc(to, 7, G0);
__ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
__ delayed()->ld(from, 0, O3);
__ inc(from, 4);
__ inc(to, 4);
__ dec(count, 4);
__ st(O3, to, -4);
__ BIND(L_skip_alignment);
#endif
} else { } else {
// copy bytes to align 'to' on 8 byte boundary // copy bytes to align 'to' on 8 byte boundary
__ andcc(to, 7, G1); // misaligned bytes __ andcc(to, 7, G1); // misaligned bytes
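The block deleted above pre-copied one 4-byte word so that 'to' became 8-byte aligned on the 32-bit VM before the 8-byte main loop. Roughly, in C++ (a sketch of the removed logic, not the stub code):

#include <cstdint>
#include <cstring>

// If the destination is not yet 8-byte aligned, copy one 32-bit word first.
inline void align_dest_to_8(const uint8_t*& from, uint8_t*& to, size_t& count) {
  if ((reinterpret_cast<uintptr_t>(to) & 7) != 0) {
    std::memcpy(to, from, 4);
    from += 4; to += 4; count -= 4;
  }
}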
@ -1296,9 +1234,7 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->inc(to); __ delayed()->inc(to);
__ BIND(L_skip_alignment); __ BIND(L_skip_alignment);
} }
#ifdef _LP64
if (!aligned) if (!aligned)
#endif
{ {
// Copy with shift 16 bytes per iteration if arrays do not have // Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise fall through to the next // the same alignment mod 8, otherwise fall through to the next
@ -1395,14 +1331,12 @@ class StubGenerator: public StubCodeGenerator {
__ delayed()->stb(O3, end_to, 0); __ delayed()->stb(O3, end_to, 0);
__ BIND(L_skip_alignment); __ BIND(L_skip_alignment);
} }
#ifdef _LP64
if (aligned) { if (aligned) {
// Both arrays are aligned to 8-bytes in 64-bits VM. // Both arrays are aligned to 8-bytes in 64-bits VM.
// The 'count' is decremented in copy_16_bytes_backward_with_shift() // The 'count' is decremented in copy_16_bytes_backward_with_shift()
// in unaligned case. // in unaligned case.
__ dec(count, 16); __ dec(count, 16);
} else } else
#endif
{ {
// Copy with shift 16 bytes per iteration if arrays do not have // Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise jump to the next // the same alignment mod 8, otherwise jump to the next
@ -1490,17 +1424,6 @@ class StubGenerator: public StubCodeGenerator {
// Aligned arrays have 4 bytes alignment in 32-bits VM // Aligned arrays have 4 bytes alignment in 32-bits VM
// and 8 bytes - in 64-bits VM. // and 8 bytes - in 64-bits VM.
// //
#ifndef _LP64
// copy a 2-elements word if necessary to align 'to' to 8 bytes
__ andcc(to, 7, G0);
__ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
__ delayed()->ld(from, 0, O3);
__ inc(from, 4);
__ inc(to, 4);
__ dec(count, 2);
__ st(O3, to, -4);
__ BIND(L_skip_alignment);
#endif
} else { } else {
// copy 1 element if necessary to align 'to' on an 4 bytes // copy 1 element if necessary to align 'to' on an 4 bytes
__ andcc(to, 3, G0); __ andcc(to, 3, G0);
@ -1524,9 +1447,7 @@ class StubGenerator: public StubCodeGenerator {
__ sth(O4, to, -2); __ sth(O4, to, -2);
__ BIND(L_skip_alignment2); __ BIND(L_skip_alignment2);
} }
#ifdef _LP64
if (!aligned) if (!aligned)
#endif
{ {
// Copy with shift 16 bytes per iteration if arrays do not have // Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise fall through to the next // the same alignment mod 8, otherwise fall through to the next
@ -1643,9 +1564,7 @@ class StubGenerator: public StubCodeGenerator {
__ dec(count, 1 << (shift - 1)); __ dec(count, 1 << (shift - 1));
__ BIND(L_skip_align2); __ BIND(L_skip_align2);
} }
#ifdef _LP64
if (!aligned) { if (!aligned) {
#endif
// align to 8 bytes, we know we are 4 byte aligned to start // align to 8 bytes, we know we are 4 byte aligned to start
__ andcc(to, 7, G0); __ andcc(to, 7, G0);
__ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes); __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
@ -1654,9 +1573,7 @@ class StubGenerator: public StubCodeGenerator {
__ inc(to, 4); __ inc(to, 4);
__ dec(count, 1 << shift); __ dec(count, 1 << shift);
__ BIND(L_fill_32_bytes); __ BIND(L_fill_32_bytes);
#ifdef _LP64
} }
#endif
if (t == T_INT) { if (t == T_INT) {
// Zero extend value // Zero extend value
@ -1857,14 +1774,12 @@ class StubGenerator: public StubCodeGenerator {
__ sth(O4, end_to, 0); __ sth(O4, end_to, 0);
__ BIND(L_skip_alignment2); __ BIND(L_skip_alignment2);
} }
#ifdef _LP64
if (aligned) { if (aligned) {
// Both arrays are aligned to 8-bytes in 64-bits VM. // Both arrays are aligned to 8-bytes in 64-bits VM.
// The 'count' is decremented in copy_16_bytes_backward_with_shift() // The 'count' is decremented in copy_16_bytes_backward_with_shift()
// in unaligned case. // in unaligned case.
__ dec(count, 8); __ dec(count, 8);
} else } else
#endif
{ {
// Copy with shift 16 bytes per iteration if arrays do not have // Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise jump to the next // the same alignment mod 8, otherwise jump to the next
@ -1974,9 +1889,7 @@ class StubGenerator: public StubCodeGenerator {
// Aligned arrays have 4 bytes alignment in 32-bits VM // Aligned arrays have 4 bytes alignment in 32-bits VM
// and 8 bytes - in 64-bits VM. // and 8 bytes - in 64-bits VM.
// //
#ifdef _LP64
if (!aligned) if (!aligned)
#endif
{ {
// The next check could be put under 'ifndef' since the code in // The next check could be put under 'ifndef' since the code in
// generate_disjoint_long_copy_core() has own checks and set 'offset'. // generate_disjoint_long_copy_core() has own checks and set 'offset'.
@ -2463,16 +2376,12 @@ class StubGenerator: public StubCodeGenerator {
__ mov(to, G1); __ mov(to, G1);
__ mov(count, G5); __ mov(count, G5);
gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
#ifdef _LP64
assert_clean_int(count, O3); // Make sure 'count' is clean int. assert_clean_int(count, O3); // Make sure 'count' is clean int.
if (UseCompressedOops) { if (UseCompressedOops) {
generate_disjoint_int_copy_core(aligned); generate_disjoint_int_copy_core(aligned);
} else { } else {
generate_disjoint_long_copy_core(aligned); generate_disjoint_long_copy_core(aligned);
} }
#else
generate_disjoint_int_copy_core(aligned);
#endif
// O0 is used as temp register // O0 is used as temp register
gen_write_ref_array_post_barrier(G1, G5, O0); gen_write_ref_array_post_barrier(G1, G5, O0);
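The UseCompressedOops dispatch above picks the copy core by element width; conceptually (sketch, names illustrative):

#include <cstddef>
#include <cstdint>

// With compressed oops each element is a 32-bit narrowOop, so the jint copier
// has the right width; otherwise oops are full 64-bit words (jlong copier).
inline size_t oop_element_bytes(bool use_compressed_oops) {
  return use_compressed_oops ? sizeof(uint32_t) : sizeof(uint64_t);
}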
@ -2518,15 +2427,11 @@ class StubGenerator: public StubCodeGenerator {
__ mov(count, G5); __ mov(count, G5);
gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
#ifdef _LP64
if (UseCompressedOops) { if (UseCompressedOops) {
generate_conjoint_int_copy_core(aligned); generate_conjoint_int_copy_core(aligned);
} else { } else {
generate_conjoint_long_copy_core(aligned); generate_conjoint_long_copy_core(aligned);
} }
#else
generate_conjoint_int_copy_core(aligned);
#endif
// O0 is used as temp register // O0 is used as temp register
gen_write_ref_array_post_barrier(G1, G5, O0); gen_write_ref_array_post_barrier(G1, G5, O0);
@ -3138,7 +3043,6 @@ class StubGenerator: public StubCodeGenerator {
"arrayof_jint_disjoint_arraycopy"); "arrayof_jint_disjoint_arraycopy");
StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
"arrayof_jint_arraycopy"); "arrayof_jint_arraycopy");
#ifdef _LP64
// In 64 bit we need both aligned and unaligned versions of jint arraycopy. // In 64 bit we need both aligned and unaligned versions of jint arraycopy.
// entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it). // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry,
@ -3146,14 +3050,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry,
&entry_jint_arraycopy, &entry_jint_arraycopy,
"jint_arraycopy"); "jint_arraycopy");
#else
// In 32 bit jints are always HeapWordSize aligned, so always use the aligned version
// (in fact in 32bit we always have a pre-loop part even in the aligned version,
// because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
StubRoutines::_jint_arraycopy = StubRoutines::_arrayof_jint_arraycopy;
#endif
//*** jlong //*** jlong
// It is always aligned // It is always aligned
@ -3178,7 +3074,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, entry, NULL, StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, entry, NULL,
"arrayof_oop_arraycopy_uninit", "arrayof_oop_arraycopy_uninit",
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
#ifdef _LP64
if (UseCompressedOops) { if (UseCompressedOops) {
// With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy. // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy.
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry, StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry,
@ -3193,7 +3088,6 @@ class StubGenerator: public StubCodeGenerator {
"oop_arraycopy_uninit", "oop_arraycopy_uninit",
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
} else } else
#endif
{ {
// oop arraycopy is always aligned on 32bit and 64bit without compressed oops // oop arraycopy is always aligned on 32bit and 64bit without compressed oops
StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy;
@ -5104,17 +4998,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine(); StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine();
StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows(); StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
#if !defined(COMPILER2) && !defined(_LP64)
StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
StubRoutines::_atomic_add_entry = generate_atomic_add();
StubRoutines::_atomic_xchg_ptr_entry = StubRoutines::_atomic_xchg_entry;
StubRoutines::_atomic_cmpxchg_ptr_entry = StubRoutines::_atomic_cmpxchg_entry;
StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry;
#endif // COMPILER2 !=> _LP64
// Build this early so it's available for the interpreter. // Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry = StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception", generate_throw_exception("StackOverflowError throw_exception",
@ -5222,11 +5105,9 @@ class StubGenerator: public StubCodeGenerator {
void stub_prolog(StubCodeDesc* cdesc) { void stub_prolog(StubCodeDesc* cdesc) {
# ifdef ASSERT # ifdef ASSERT
// put extra information in the stub code, to make it more readable // put extra information in the stub code, to make it more readable
#ifdef _LP64
// Write the high part of the address // Write the high part of the address
// [RGV] Check if there is a dependency on the size of this prolog // [RGV] Check if there is a dependency on the size of this prolog
__ emit_data((intptr_t)cdesc >> 32, relocInfo::none); __ emit_data((intptr_t)cdesc >> 32, relocInfo::none);
#endif
__ emit_data((intptr_t)cdesc, relocInfo::none); __ emit_data((intptr_t)cdesc, relocInfo::none);
__ emit_data(++_stub_count, relocInfo::none); __ emit_data(++_stub_count, relocInfo::none);
# endif # endif


@ -57,13 +57,9 @@
// if too small. // if too small.
// Run with +PrintInterpreter to get the VM to print out the size. // Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI // Max size with JVMTI
#ifdef _LP64 // The sethi() instruction generates lots more instructions when shell
// The sethi() instruction generates lots more instructions when shell // stack limit is unlimited, so that's why this is much bigger.
// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K; int TemplateInterpreter::InterpreterCodeSize = 260 * K;
#else
int TemplateInterpreter::InterpreterCodeSize = 230 * K;
#endif
// Generation of Interpreter // Generation of Interpreter
// //
@ -75,41 +71,6 @@ int TemplateInterpreter::InterpreterCodeSize = 230 * K;
//---------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------
#ifndef _LP64
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
Argument argv(0, true);
// We are in the jni transition frame. Save the last_java_frame corresponding to the
// outer interpreter frame
//
__ set_last_Java_frame(FP, noreg);
// make sure the interpreter frame we've pushed has a valid return pc
__ mov(O7, I7);
__ mov(Lmethod, G3_scratch);
__ mov(Llocals, G4_scratch);
__ save_frame(0);
__ mov(G2_thread, L7_thread_cache);
__ add(argv.address_in_frame(), O3);
__ mov(G2_thread, O0);
__ mov(G3_scratch, O1);
__ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
__ delayed()->mov(G4_scratch, O2);
__ mov(L7_thread_cache, G2_thread);
__ reset_last_Java_frame();
// load the register arguments (the C code packed them as varargs)
for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
__ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
}
__ ret();
__ delayed()->
restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
return entry;
}
#else
// LP64 passes floating point arguments in F1, F3, F5, etc. instead of // LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2 etc.. // O0, O1, O2 etc..
// Doubles are passed in D0, D2, D4 // Doubles are passed in D0, D2, D4
@ -206,7 +167,6 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
return entry; return entry;
} }
#endif
void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) { void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
@ -253,11 +213,7 @@ void TemplateInterpreterGenerator::save_native_result(void) {
// save and restore any potential method result value around the unlocking operation // save and restore any potential method result value around the unlocking operation
__ stf(FloatRegisterImpl::D, F0, d_tmp); __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
__ stx(O0, l_tmp); __ stx(O0, l_tmp);
#else
__ std(O0, l_tmp);
#endif
} }
void TemplateInterpreterGenerator::restore_native_result(void) { void TemplateInterpreterGenerator::restore_native_result(void) {
@ -266,11 +222,7 @@ void TemplateInterpreterGenerator::restore_native_result(void) {
// Restore any method result value // Restore any method result value
__ ldf(FloatRegisterImpl::D, d_tmp, F0); __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
__ ldx(l_tmp, O0); __ ldx(l_tmp, O0);
#else
__ ldd(l_tmp, O0);
#endif
} }
address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) { address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
@ -340,22 +292,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ profile_return_type(O0, G3_scratch, G1_scratch); __ profile_return_type(O0, G3_scratch, G1_scratch);
} }
#if !defined(_LP64) && defined(COMPILER2)
// All return values are where we want them, except for Longs. C2 returns
// longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
// Since the interpreter will return longs in G1 and O0/O1 in the 32bit
// build even if we are returning from interpreted we just do a little
// stupid shuffing.
// Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
// do this here. Unfortunately if we did a rethrow we'd see an machepilog node
// first which would move g1 -> O0/O1 and destroy the exception we were throwing.
if (state == ltos) {
__ srl (G1, 0, O1);
__ srlx(G1, 32, O0);
}
#endif // !_LP64 && COMPILER2
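The removed shuffle split a 64-bit long returned in G1 into the O0/O1 pair the 32-bit interpreter expected. In plain C++ (a sketch of the deleted logic):

#include <cstdint>

// srlx(G1, 32, O0) / srl(G1, 0, O1) from the deleted block, spelled out:
inline void split_long(uint64_t g1, uint32_t& o0_hi, uint32_t& o1_lo) {
  o0_hi = static_cast<uint32_t>(g1 >> 32);
  o1_lo = static_cast<uint32_t>(g1);
}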
// The callee returns with the stack possibly adjusted by adapter transition // The callee returns with the stack possibly adjusted by adapter transition
// We remove that possible adjustment here. // We remove that possible adjustment here.
// All interpreter local registers are untouched. Any result is passed back // All interpreter local registers are untouched. Any result is passed back
@ -374,6 +310,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size); // argument size in words __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size); // argument size in words
__ sll(parameter_size, Interpreter::logStackElementSize, parameter_size); // each argument size in bytes __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size); // each argument size in bytes
__ add(Lesp, parameter_size, Lesp); // pop arguments __ add(Lesp, parameter_size, Lesp); // pop arguments
__ check_and_handle_popframe(Gtemp);
__ check_and_handle_earlyret(Gtemp);
__ dispatch_next(state, step); __ dispatch_next(state, step);
return entry; return entry;
@ -438,9 +378,6 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i); break; case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i); break;
case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i); break; case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i); break;
case T_LONG : case T_LONG :
#ifndef _LP64
__ mov(O1, Itos_l2); // move other half of long
#endif // ifdef or no ifdef, fall through to the T_INT case
case T_INT : __ mov(O0, Itos_i); break; case T_INT : __ mov(O0, Itos_i); break;
case T_VOID : /* nothing to do */ break; case T_VOID : /* nothing to do */ break;
case T_FLOAT : assert(F0 == Ftos_f, "fix this code" ); break; case T_FLOAT : assert(F0 == Ftos_f, "fix this code" ); break;
@ -466,12 +403,6 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
} }
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
__ dispatch_next(state);
return entry;
}
// //
// Helpers for commoning out cases in the various type of method entries. // Helpers for commoning out cases in the various type of method entries.
// //
@ -886,9 +817,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS); __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
__ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache __ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache
__ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
__ add( Lmonitors, STACK_BIAS, Lmonitors ); // Account for 64 bit stack bias __ add( Lmonitors, STACK_BIAS, Lmonitors ); // Account for 64 bit stack bias
#endif
__ sub(Lmonitors, BytesPerWord, Lesp); // set Lesp __ sub(Lmonitors, BytesPerWord, Lesp); // set Lesp
// setup interpreter activation registers // setup interpreter activation registers
@ -1483,12 +1412,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Move the result handler address // Move the result handler address
__ mov(Lscratch, G3_scratch); __ mov(Lscratch, G3_scratch);
// return possible result to the outer frame // return possible result to the outer frame
#ifndef __LP64
__ mov(O0, I0);
__ restore(O1, G0, O1);
#else
__ restore(O0, G0, O0); __ restore(O0, G0, O0);
#endif /* __LP64 */
// Move result handler to expected register // Move result handler to expected register
__ mov(G3_scratch, Lscratch); __ mov(G3_scratch, Lscratch);
@ -1568,17 +1492,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
restore_native_result(); restore_native_result();
} }
#if defined(COMPILER2) && !defined(_LP64)
// C2 expects long results in G1 we can't tell if we're returning to interpreted
// or compiled so just be safe.
__ sllx(O0, 32, G1); // Shift bits into high G1
__ srl (O1, 0, O1); // Zero extend O1
__ or3 (O1, G1, G1); // OR 64 bits into G1
#endif /* COMPILER2 && !_LP64 */
// dispose of return address and remove activation // dispose of return address and remove activation
#ifdef ASSERT #ifdef ASSERT
{ {


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -248,12 +248,7 @@ void TemplateTable::iconst(int value) {
void TemplateTable::lconst(int value) { void TemplateTable::lconst(int value) {
transition(vtos, ltos); transition(vtos, ltos);
assert(value >= 0, "check this code"); assert(value >= 0, "check this code");
#ifdef _LP64
__ set(value, Otos_l); __ set(value, Otos_l);
#else
__ set(value, Otos_l2);
__ clr( Otos_l1);
#endif
} }
@ -406,24 +401,12 @@ void TemplateTable::ldc2_w() {
// Check out Conversions.java for an example. // Check out Conversions.java for an example.
// Also ConstantPool::header_size() is 20, which makes it very difficult // Also ConstantPool::header_size() is 20, which makes it very difficult
// to double-align double on the constant pool. SG, 11/7/97 // to double-align double on the constant pool. SG, 11/7/97
#ifdef _LP64
__ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d); __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
FloatRegister f = Ftos_d;
__ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
__ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
f->successor());
#endif
__ push(dtos); __ push(dtos);
__ ba_short(exit); __ ba_short(exit);
__ bind(Long); __ bind(Long);
#ifdef _LP64
__ ldx(G3_scratch, base_offset, Otos_l); __ ldx(G3_scratch, base_offset, Otos_l);
#else
__ ld(G3_scratch, base_offset, Otos_l);
__ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
__ push(ltos); __ push(ltos);
__ bind(exit); __ bind(exit);
@ -1128,19 +1111,11 @@ void TemplateTable::lop2(Operation op) {
transition(ltos, ltos); transition(ltos, ltos);
__ pop_l(O2); __ pop_l(O2);
switch (op) { switch (op) {
#ifdef _LP64
case add: __ add(O2, Otos_l, Otos_l); break; case add: __ add(O2, Otos_l, Otos_l); break;
case sub: __ sub(O2, Otos_l, Otos_l); break; case sub: __ sub(O2, Otos_l, Otos_l); break;
case _and: __ and3(O2, Otos_l, Otos_l); break; case _and: __ and3(O2, Otos_l, Otos_l); break;
case _or: __ or3(O2, Otos_l, Otos_l); break; case _or: __ or3(O2, Otos_l, Otos_l); break;
case _xor: __ xor3(O2, Otos_l, Otos_l); break; case _xor: __ xor3(O2, Otos_l, Otos_l); break;
#else
case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
#endif
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
} }
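The deleted 32-bit 'add' and 'sub' cases built 64-bit arithmetic from 32-bit halves with explicit carry/borrow propagation (addcc/addc, subcc/subc). The add case in plain C++, as an illustration only:

#include <cstdint>

// addcc on the low halves sets the carry flag, addc on the high halves consumes it:
inline void add64_by_halves(uint32_t a_hi, uint32_t a_lo,
                            uint32_t b_hi, uint32_t b_lo,
                            uint32_t& r_hi, uint32_t& r_lo) {
  r_lo = a_lo + b_lo;
  const uint32_t carry = (r_lo < a_lo) ? 1u : 0u;
  r_hi = a_hi + b_hi + carry;
}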
@ -1171,14 +1146,10 @@ void TemplateTable::idiv() {
Label regular; Label regular;
__ cmp(Otos_i, -1); __ cmp(Otos_i, -1);
__ br(Assembler::notEqual, false, Assembler::pt, regular); __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
// Don't put set in delay slot // Don't put set in delay slot
// Set will turn into multiple instructions in 64 bit mode // Set will turn into multiple instructions in 64 bit mode
__ delayed()->nop(); __ delayed()->nop();
__ set(min_int, G4_scratch); __ set(min_int, G4_scratch);
#else
__ delayed()->set(min_int, G4_scratch);
#endif
Label done; Label done;
__ cmp(O1, G4_scratch); __ cmp(O1, G4_scratch);
__ br(Assembler::equal, true, Assembler::pt, done); __ br(Assembler::equal, true, Assembler::pt, done);
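The -1/min_int test above guards the single overflowing case of idiv. The required JVM behaviour, sketched in C++ for illustration:

#include <climits>
#include <cstdint>

// Integer.MIN_VALUE / -1 does not fit in 32 bits; the JVM defines the result
// to wrap back to Integer.MIN_VALUE instead of trapping.
inline int32_t jvm_idiv(int32_t dividend, int32_t divisor) {
  if (divisor == -1 && dividend == INT_MIN) return INT_MIN;
  return dividend / divisor;
}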
@ -1202,11 +1173,7 @@ void TemplateTable::irem() {
void TemplateTable::lmul() { void TemplateTable::lmul() {
transition(ltos, ltos); transition(ltos, ltos);
__ pop_l(O2); __ pop_l(O2);
#ifdef _LP64
__ mulx(Otos_l, O2, Otos_l); __ mulx(Otos_l, O2, Otos_l);
#else
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
} }
@ -1216,15 +1183,9 @@ void TemplateTable::ldiv() {
// check for zero // check for zero
__ pop_l(O2); __ pop_l(O2);
#ifdef _LP64
__ tst(Otos_l); __ tst(Otos_l);
__ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch); __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
__ sdivx(O2, Otos_l, Otos_l); __ sdivx(O2, Otos_l, Otos_l);
#else
__ orcc(Otos_l1, Otos_l2, G0);
__ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
} }
@ -1233,17 +1194,11 @@ void TemplateTable::lrem() {
// check for zero // check for zero
__ pop_l(O2); __ pop_l(O2);
#ifdef _LP64
__ tst(Otos_l); __ tst(Otos_l);
__ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch); __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
__ sdivx(O2, Otos_l, Otos_l2); __ sdivx(O2, Otos_l, Otos_l2);
__ mulx (Otos_l2, Otos_l, Otos_l2); __ mulx (Otos_l2, Otos_l, Otos_l2);
__ sub (O2, Otos_l2, Otos_l); __ sub (O2, Otos_l2, Otos_l);
#else
__ orcc(Otos_l1, Otos_l2, G0);
__ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
} }
@ -1251,11 +1206,7 @@ void TemplateTable::lshl() {
transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
__ pop_l(O2); // shift value in O2, O3 __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
__ sllx(O2, Otos_i, Otos_l); __ sllx(O2, Otos_i, Otos_l);
#else
__ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
} }
@ -1263,11 +1214,7 @@ void TemplateTable::lshr() {
transition(itos, ltos); // %%%% see lshl comment transition(itos, ltos); // %%%% see lshl comment
__ pop_l(O2); // shift value in O2, O3 __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
__ srax(O2, Otos_i, Otos_l); __ srax(O2, Otos_i, Otos_l);
#else
__ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
} }
@ -1276,11 +1223,7 @@ void TemplateTable::lushr() {
transition(itos, ltos); // %%%% see lshl comment transition(itos, ltos); // %%%% see lshl comment
__ pop_l(O2); // shift value in O2, O3 __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
__ srlx(O2, Otos_i, Otos_l); __ srlx(O2, Otos_i, Otos_l);
#else
__ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
} }
@ -1293,15 +1236,9 @@ void TemplateTable::fop2(Operation op) {
case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break; case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
case rem: case rem:
assert(Ftos_f == F0, "just checking"); assert(Ftos_f == F0, "just checking");
#ifdef _LP64
// LP64 calling conventions use F1, F3 for passing 2 floats // LP64 calling conventions use F1, F3 for passing 2 floats
__ pop_f(F1); __ pop_f(F1);
__ fmov(FloatRegisterImpl::S, Ftos_f, F3); __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
__ pop_i(O0);
__ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
__ ld( __ d_tmp, O1 );
#endif
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem)); __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
assert( Ftos_f == F0, "fix this code" ); assert( Ftos_f == F0, "fix this code" );
break; break;
@ -1319,18 +1256,9 @@ void TemplateTable::dop2(Operation op) {
case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break; case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break; case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
case rem: case rem:
#ifdef _LP64
// Pass arguments in D0, D2 // Pass arguments in D0, D2
__ fmov(FloatRegisterImpl::D, Ftos_f, F2 ); __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
__ pop_d( F0 ); __ pop_d( F0 );
#else
// Pass arguments in O0O1, O2O3
__ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
__ ldd( __ d_tmp, O2 );
__ pop_d(Ftos_f);
__ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
__ ldd( __ d_tmp, O0 );
#endif
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem)); __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
assert( Ftos_d == F0, "fix this code" ); assert( Ftos_d == F0, "fix this code" );
break; break;
@ -1348,11 +1276,7 @@ void TemplateTable::ineg() {
void TemplateTable::lneg() { void TemplateTable::lneg() {
transition(ltos, ltos); transition(ltos, ltos);
#ifdef _LP64
__ sub(G0, Otos_l, Otos_l); __ sub(G0, Otos_l, Otos_l);
#else
__ lneg(Otos_l1, Otos_l2);
#endif
} }
@ -1437,15 +1361,8 @@ void TemplateTable::convert() {
Label done; Label done;
switch (bytecode()) { switch (bytecode()) {
case Bytecodes::_i2l: case Bytecodes::_i2l:
#ifdef _LP64
// Sign extend the 32 bits // Sign extend the 32 bits
__ sra ( Otos_i, 0, Otos_l ); __ sra ( Otos_i, 0, Otos_l );
#else
__ addcc(Otos_i, 0, Otos_l2);
__ br(Assembler::greaterEqual, true, Assembler::pt, done);
__ delayed()->clr(Otos_l1);
__ set(~0, Otos_l1);
#endif
break; break;
case Bytecodes::_i2f: case Bytecodes::_i2f:
@ -1476,12 +1393,8 @@ void TemplateTable::convert() {
break; break;
case Bytecodes::_l2i: case Bytecodes::_l2i:
#ifndef _LP64
__ mov(Otos_l2, Otos_i);
#else
// Sign-extend into the high 32 bits // Sign-extend into the high 32 bits
__ sra(Otos_l, 0, Otos_i); __ sra(Otos_l, 0, Otos_i);
#endif
break; break;
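With the 32-bit paths gone, the i2l and l2i conversions reduce to a single sign-extension or truncation; in plain C++ terms (sketch only):

#include <cstdint>

inline int64_t i2l(int32_t i) { return static_cast<int64_t>(i); }  // sra(Otos_i, 0, Otos_l)
inline int32_t l2i(int64_t l) { return static_cast<int32_t>(l); }  // sra(Otos_l, 0, Otos_i)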
case Bytecodes::_l2f: case Bytecodes::_l2f:
@ -1512,11 +1425,7 @@ void TemplateTable::convert() {
case Bytecodes::_f2l: case Bytecodes::_f2l:
// must uncache tos // must uncache tos
__ push_f(); __ push_f();
#ifdef _LP64
__ pop_f(F1); __ pop_f(F1);
#else
__ pop_i(O0);
#endif
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l)); __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
break; break;
@ -1528,13 +1437,8 @@ void TemplateTable::convert() {
case Bytecodes::_d2l: case Bytecodes::_d2l:
// must uncache tos // must uncache tos
__ push_d(); __ push_d();
#ifdef _LP64
// LP64 calling conventions pass first double arg in D0 // LP64 calling conventions pass first double arg in D0
__ pop_d( Ftos_d ); __ pop_d( Ftos_d );
#else
__ pop_i( O0 );
__ pop_i( O1 );
#endif
__ call_VM_leaf(Lscratch, __ call_VM_leaf(Lscratch,
bytecode() == Bytecodes::_d2i bytecode() == Bytecodes::_d2i
? CAST_FROM_FN_PTR(address, SharedRuntime::d2i) ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
@ -1554,13 +1458,8 @@ void TemplateTable::convert() {
void TemplateTable::lcmp() { void TemplateTable::lcmp() {
transition(ltos, itos); transition(ltos, itos);
#ifdef _LP64
__ pop_l(O1); // pop off value 1, value 2 is in O0 __ pop_l(O1); // pop off value 1, value 2 is in O0
__ lcmp( O1, Otos_l, Otos_i ); __ lcmp( O1, Otos_l, Otos_i );
#else
__ pop_l(O2); // cmp O2,3 to O0,1
__ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
} }
@ -1756,7 +1655,6 @@ void TemplateTable::ret() {
__ access_local_returnAddress(G3_scratch, Otos_i); __ access_local_returnAddress(G3_scratch, Otos_i);
// Otos_i contains the bci, compute the bcp from that // Otos_i contains the bci, compute the bcp from that
#ifdef _LP64
#ifdef ASSERT #ifdef ASSERT
// jsr result was labeled as an 'itos' not an 'atos' because we cannot GC // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
// the result. The return address (really a BCI) was stored with an // the result. The return address (really a BCI) was stored with an
@ -1771,7 +1669,6 @@ void TemplateTable::ret() {
__ stop("BCI is in the wrong register half?"); __ stop("BCI is in the wrong register half?");
__ bind (zzz) ; __ bind (zzz) ;
} }
#endif
#endif #endif
__ profile_ret(vtos, Otos_i, G4_scratch); __ profile_ret(vtos, Otos_i, G4_scratch);
@ -1808,10 +1705,8 @@ void TemplateTable::tableswitch() {
// load lo, hi // load lo, hi
__ ld(O1, 1 * BytesPerInt, O2); // Low Byte __ ld(O1, 1 * BytesPerInt, O2); // Low Byte
__ ld(O1, 2 * BytesPerInt, O3); // High Byte __ ld(O1, 2 * BytesPerInt, O3); // High Byte
#ifdef _LP64
// Sign extend the 32 bits // Sign extend the 32 bits
__ sra ( Otos_i, 0, Otos_i ); __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */
// check against lo & hi // check against lo & hi
__ cmp( Otos_i, O2); __ cmp( Otos_i, O2);
@ -3400,11 +3295,7 @@ void TemplateTable::_new() {
// Check if tlab should be discarded (refill_waste_limit >= free) // Check if tlab should be discarded (refill_waste_limit >= free)
__ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue); __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
__ sub(RendValue, RoldTopValue, RfreeValue); __ sub(RendValue, RoldTopValue, RfreeValue);
#ifdef _LP64
__ srlx(RfreeValue, LogHeapWordSize, RfreeValue); __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
#else
__ srl(RfreeValue, LogHeapWordSize, RfreeValue);
#endif
__ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
// increment waste limit to prevent getting stuck on this slow path // increment waste limit to prevent getting stuck on this slow path


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -70,12 +70,10 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(OptoLoopAlignment)) { if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
FLAG_SET_DEFAULT(OptoLoopAlignment, 4); FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
} }
#ifdef _LP64
// 32-bit oops don't make sense for the 64-bit VM on sparc // 32-bit oops don't make sense for the 64-bit VM on sparc
// since the 32-bit VM has the same registers and smaller objects. // since the 32-bit VM has the same registers and smaller objects.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
#endif // _LP64
#ifdef COMPILER2 #ifdef COMPILER2
// Indirect branch is the same cost as direct // Indirect branch is the same cost as direct
if (FLAG_IS_DEFAULT(UseJumpTables)) { if (FLAG_IS_DEFAULT(UseJumpTables)) {

View File

@ -232,7 +232,7 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
MacroAssembler::instr_size_for_decode_klass_not_null() : 0); MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
return basic + slop; return basic + slop;
} else { } else {
const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord + const int basic = 34 * BytesPerInstWord +
// shift;add for load_klass (only shift with zero heap based) // shift;add for load_klass (only shift with zero heap based)
(UseCompressedClassPointers ? (UseCompressedClassPointers ?
MacroAssembler::instr_size_for_decode_klass_not_null() : 0); MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
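The new 34 is simply the old 28 plus the formerly conditional LP64_ONLY(+ 6) term, which always applied on the 64-bit-only VM; an illustrative check of the arithmetic:

// 28 LP64_ONLY(+ 6) collapses to 28 + 6 once _LP64 is unconditional:
static_assert(28 + 6 == 34, "basic instruction-count estimate used above");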
@ -257,7 +257,6 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
// ld [ %g3 + 0xe8 ], %l2 // ld [ %g3 + 0xe8 ], %l2
// sll %l2, 2, %l2 // sll %l2, 2, %l2
// add %l2, 0x134, %l2 // add %l2, 0x134, %l2
// and %l2, -8, %l2 ! NOT_LP64 only
// add %g3, %l2, %l2 // add %g3, %l2, %l2
// add %g3, 4, %g3 // add %g3, 4, %g3
// ld [ %l2 ], %l5 // ld [ %l2 ], %l5

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -21,6 +21,8 @@
* questions. * questions.
*/ */
#include "precompiled.hpp"
#include "aot/compiledIC_aot.hpp" #include "aot/compiledIC_aot.hpp"
#include "code/codeCache.hpp" #include "code/codeCache.hpp"
#include "memory/resourceArea.hpp" #include "memory/resourceArea.hpp"

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -48,9 +48,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
int number_of_arguments, int number_of_arguments,
bool check_exceptions); bool check_exceptions);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// base routine for all dispatches // base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true); void dispatch_base(TosState state, address* table, bool verifyoop = true);
@ -61,6 +58,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void jump_to_entry(address entry); void jump_to_entry(address entry);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
void load_earlyret_value(TosState state); void load_earlyret_value(TosState state);
// Interpreter-specific registers // Interpreter-specific registers

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -65,7 +65,7 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) { void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset; address pc = _instructions->start() + pc_offset;
Handle obj = HotSpotObjectConstantImpl::object(constant); Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
jobject value = JNIHandles::make_local(obj()); jobject value = JNIHandles::make_local(obj());
if (HotSpotObjectConstantImpl::compressed(constant)) { if (HotSpotObjectConstantImpl::compressed(constant)) {
#ifdef _LP64 #ifdef _LP64


@ -71,12 +71,6 @@ class MacroAssembler: public Assembler {
bool check_exceptions // whether to check for pending exceptions after return bool check_exceptions // whether to check for pending exceptions after return
); );
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
// helpers for FPU flag access // helpers for FPU flag access
@ -87,6 +81,12 @@ class MacroAssembler: public Assembler {
public: public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {} MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
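Moving these declarations into the public section keeps the pattern the comment describes: the base-class hooks do nothing, and only the interpreter's subclass overrides them with real JVMTI PopFrame/ForceEarlyReturn handling. A simplified sketch of that shape (illustrative, not the real HotSpot declarations):

struct Register { /* placeholder for the real Register type */ };

class MacroAssemblerSketch {
 public:
  virtual void check_and_handle_popframe(Register java_thread) { /* empty by default */ }
  virtual void check_and_handle_earlyret(Register java_thread) { /* empty by default */ }
  virtual ~MacroAssemblerSketch() {}
};

class InterpreterMacroAssemblerSketch : public MacroAssemblerSketch {
 public:
  void check_and_handle_popframe(Register java_thread) override;  // real handling here
  void check_and_handle_earlyret(Register java_thread) override;
};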
// Support for NULL-checks // Support for NULL-checks
// //
// Generates code that causes a NULL OS exception if the content of reg is NULL. // Generates code that causes a NULL OS exception if the content of reg is NULL.


@ -1,124 +0,0 @@
/*
* Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
Label common_code;
for (int i = 0; i < vtbl_list_size; ++i) {
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
// Load rax, with a value indicating vtable/offset pair.
// -- bits[ 7..0] (8 bits) which virtual method in table?
// -- bits[12..8] (5 bits) which virtual method table?
// -- must fit in 13-bit instruction immediate field.
__ movl(rax, (i << 8) + j);
__ jmp(common_code);
}
}
__ bind(common_code);
#ifdef WIN32
// Expecting to be called with "thiscall" conventions -- the arguments
// are on the stack, except that the "this" pointer is in rcx.
#else
// Expecting to be called with Unix conventions -- the arguments
// are on the stack, including the "this" pointer.
#endif
// In addition, rax was set (above) to the offset of the method in the
// table.
#ifdef WIN32
__ push(rcx); // save "this"
#endif
__ mov(rcx, rax);
__ shrptr(rcx, 8); // isolate vtable identifier.
__ shlptr(rcx, LogBytesPerWord);
Address index(noreg, rcx, Address::times_1);
ExternalAddress vtbl((address)vtbl_list);
__ movptr(rdx, ArrayAddress(vtbl, index)); // get correct vtable address.
#ifdef WIN32
__ pop(rcx); // restore "this"
#else
__ movptr(rcx, Address(rsp, BytesPerWord)); // fetch "this"
#endif
__ movptr(Address(rcx, 0), rdx); // update vtable pointer.
__ andptr(rax, 0x00ff); // isolate vtable method index
__ shlptr(rax, LogBytesPerWord);
__ addptr(rax, rdx); // address of real method pointer.
__ jmp(Address(rax, 0)); // get real method pointer.
__ flush();
*mc_top = (char*)__ pc();
}


@ -1,114 +0,0 @@
/*
* Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtbl_list_size' original Klass objects.
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
Label common_code;
for (int i = 0; i < vtbl_list_size; ++i) {
for (int j = 0; j < num_virtuals; ++j) {
dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
// Load eax with a value indicating vtable/offset pair.
// -- bits[ 7..0] (8 bits) which virtual method in table?
// -- bits[12..8] (5 bits) which virtual method table?
// -- must fit in 13-bit instruction immediate field.
__ movl(rax, (i << 8) + j);
__ jmp(common_code);
}
}
__ bind(common_code);
// Expecting to be called with "thiscall" conventions -- the arguments
// are on the stack and the "this" pointer is in c_rarg0. In addition, rax
// was set (above) to the offset of the method in the table.
__ push(c_rarg1); // save & free register
__ push(c_rarg0); // save "this"
__ mov(c_rarg0, rax);
__ shrptr(c_rarg0, 8); // isolate vtable identifier.
__ shlptr(c_rarg0, LogBytesPerWord);
__ lea(c_rarg1, ExternalAddress((address)vtbl_list)); // ptr to correct vtable list.
__ addptr(c_rarg1, c_rarg0); // ptr to list entry.
__ movptr(c_rarg1, Address(c_rarg1, 0)); // get correct vtable address.
__ pop(c_rarg0); // restore "this"
__ movptr(Address(c_rarg0, 0), c_rarg1); // update vtable pointer.
__ andptr(rax, 0x00ff); // isolate vtable method index
__ shlptr(rax, LogBytesPerWord);
__ addptr(rax, c_rarg1); // address of real method pointer.
__ pop(c_rarg1); // restore register.
__ movptr(rax, Address(rax, 0)); // get real method pointer.
__ jmp(rax); // jump to the real method.
__ flush();
*mc_top = (char*)__ pc();
}

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -65,7 +65,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id, Register obj, SystemDictionary::WKID klass_id,
const char* error_message) { const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id); InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id); Klass* klass = SystemDictionary::well_known_klass(klass_id);
Register temp = rdi; Register temp = rdi;
Register temp2 = noreg; Register temp2 = noreg;
LP64_ONLY(temp2 = rscratch1); // used by MacroAssembler::cmpptr LP64_ONLY(temp2 = rscratch1); // used by MacroAssembler::cmpptr

View File

@ -171,16 +171,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
return entry; return entry;
} }
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
// NULL last_sp until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ dispatch_next(state);
return entry;
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc(); address entry = __ pc();
@ -230,6 +220,17 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
__ andl(flags, ConstantPoolCacheEntry::parameter_size_mask); __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
__ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale())); __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
if (JvmtiExport::can_pop_frame()) {
NOT_LP64(__ get_thread(java_thread));
__ check_and_handle_popframe(java_thread);
}
if (JvmtiExport::can_force_early_return()) {
NOT_LP64(__ get_thread(java_thread));
__ check_and_handle_earlyret(java_thread);
}
__ dispatch_next(state, step); __ dispatch_next(state, step);
return entry; return entry;

View File

@ -1,5 +1,5 @@
// //
// Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
@ -1387,7 +1387,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
} }
// Vector ideal reg corresponding to specified size in bytes // Vector ideal reg corresponding to specified size in bytes
const int Matcher::vector_ideal_reg(int size) { const uint Matcher::vector_ideal_reg(int size) {
assert(MaxVectorSize >= size, ""); assert(MaxVectorSize >= size, "");
switch(size) { switch(size) {
case 4: return Op_VecS; case 4: return Op_VecS;
@ -1401,7 +1401,7 @@ const int Matcher::vector_ideal_reg(int size) {
} }
// Only lowest bits of xmm reg are used for vector shift count. // Only lowest bits of xmm reg are used for vector shift count.
const int Matcher::vector_shift_count_ideal_reg(int size) { const uint Matcher::vector_shift_count_ideal_reg(int size) {
return Op_VecS; return Op_VecS;
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -36,16 +36,19 @@ import java.util.Map;
import jdk.tools.jaotc.binformat.Symbol.Binding; import jdk.tools.jaotc.binformat.Symbol.Binding;
import jdk.tools.jaotc.binformat.Symbol.Kind; import jdk.tools.jaotc.binformat.Symbol.Kind;
import jdk.tools.jaotc.binformat.elf.JELFRelocObject; import jdk.tools.jaotc.binformat.elf.JELFRelocObject;
import jdk.tools.jaotc.binformat.macho.JMachORelocObject;
import jdk.tools.jaotc.binformat.pecoff.JPECoffRelocObject;
import org.graalvm.compiler.hotspot.GraalHotSpotVMConfig; import org.graalvm.compiler.hotspot.GraalHotSpotVMConfig;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration; import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
import org.graalvm.compiler.options.OptionValues;
/** /**
* A format-agnostic container class that holds various components of a binary. * A format-agnostic container class that holds various components of a binary.
* *
* <p> * <p>
* This class holds information necessary to create platform-specific binary containers such as * This class holds information necessary to create platform-specific binary containers such as
* ELFContainer for Linux and Solaris operating systems or yet-to be created MachOContainer for Mac * ELFContainer for Linux and Solaris operating systems or MachOContainer for Mac OS or PEContainer
* OS or PEContainer for MS Windows operating systems. * for MS Windows operating systems.
* *
* <p> * <p>
* Method APIs provided by this class are used to construct and populate platform-independent * Method APIs provided by this class are used to construct and populate platform-independent
@ -56,6 +59,7 @@ import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
* Methods to record and access code section contents, symbols and relocations are provided. * Methods to record and access code section contents, symbols and relocations are provided.
*/ */
public class BinaryContainer implements SymbolTable { public class BinaryContainer implements SymbolTable {
private final OptionValues graalOptions;
private final int codeSegmentSize; private final int codeSegmentSize;
@ -257,36 +261,40 @@ public class BinaryContainer implements SymbolTable {
* Allocates a {@code BinaryContainer} object whose content will be generated in a file with the * Allocates a {@code BinaryContainer} object whose content will be generated in a file with the
* prefix {@code prefix}. It also initializes internal code container, symbol table and * prefix {@code prefix}. It also initializes internal code container, symbol table and
* relocation tables. * relocation tables.
*
* @param graalOptions
*/ */
public BinaryContainer(GraalHotSpotVMConfig graalHotSpotVMConfig, GraphBuilderConfiguration graphBuilderConfig, String jvmVersion) { public BinaryContainer(OptionValues graalOptions, GraalHotSpotVMConfig graalHotSpotVMConfig, GraphBuilderConfiguration graphBuilderConfig, String jvmVersion) {
this.graalOptions = graalOptions;
this.codeSegmentSize = graalHotSpotVMConfig.codeSegmentSize; this.codeSegmentSize = graalHotSpotVMConfig.codeSegmentSize;
this.codeEntryAlignment = graalHotSpotVMConfig.codeEntryAlignment; this.codeEntryAlignment = graalHotSpotVMConfig.codeEntryAlignment;
// read only, code // read only, code
codeContainer = new CodeContainer(".text", this); codeContainer = new CodeContainer(".text", this);
extLinkageContainer = new CodeContainer(".hotspot.linkage.plt", this); extLinkageContainer = new CodeContainer(".hs.plt.linkage", this);
// read only, info // read only, info
configContainer = new ReadOnlyDataContainer(".config", this); configContainer = new ReadOnlyDataContainer(".config", this);
metaspaceNamesContainer = new ReadOnlyDataContainer(".metaspace.names", this); metaspaceNamesContainer = new ReadOnlyDataContainer(".meta.names", this);
methodsOffsetsContainer = new ReadOnlyDataContainer(".methods.offsets", this); methodsOffsetsContainer = new ReadOnlyDataContainer(".methods.offsets", this);
klassesOffsetsContainer = new ReadOnlyDataContainer(".klasses.offsets", this); klassesOffsetsContainer = new ReadOnlyDataContainer(".kls.offsets", this);
klassesDependenciesContainer = new ReadOnlyDataContainer(".klasses.dependencies", this); klassesDependenciesContainer = new ReadOnlyDataContainer(".kls.dependencies", this);
headerContainer = new HeaderContainer(jvmVersion, new ReadOnlyDataContainer(".header", this)); headerContainer = new HeaderContainer(jvmVersion, new ReadOnlyDataContainer(".header", this));
stubsOffsetsContainer = new ReadOnlyDataContainer(".stubs.offsets", this); stubsOffsetsContainer = new ReadOnlyDataContainer(".stubs.offsets", this);
codeSegmentsContainer = new ReadOnlyDataContainer(".code.segments", this); codeSegmentsContainer = new ReadOnlyDataContainer(".code.segments", this);
constantDataContainer = new ReadOnlyDataContainer(".method.constdata", this); constantDataContainer = new ReadOnlyDataContainer(".meth.constdata", this);
// needs relocation patching at load time by the loader // needs relocation patching at load time by the loader
methodMetadataContainer = new ReadOnlyDataContainer(".method.metadata", this); methodMetadataContainer = new ReadOnlyDataContainer(".meth.metadata", this);
// writable sections // writable sections
metaspaceGotContainer = new ByteContainer(".metaspace.got", this); metaspaceGotContainer = new ByteContainer(".meta.got", this);
metadataGotContainer = new ByteContainer(".metadata.got", this); metadataGotContainer = new ByteContainer(".metadata.got", this);
methodStateContainer = new ByteContainer(".method.state", this); methodStateContainer = new ByteContainer(".meth.state", this);
oopGotContainer = new ByteContainer(".oop.got", this); oopGotContainer = new ByteContainer(".oop.got", this);
extLinkageGOTContainer = new ByteContainer(".hotspot.linkage.got", this); extLinkageGOTContainer = new ByteContainer(".hs.got.linkage", this);
addGlobalSymbols(); addGlobalSymbols();
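
For context, a hedged sketch of how a caller would invoke the constructor after this change. Only the constructor signature is taken from the diff above; the four argument objects are placeholders supplied by the jaotc/Graal runtime, not built like this in the real tool:

// All four arguments are assumed to be provided by the surrounding jaotc driver.
BinaryContainer binaryContainer =
        new BinaryContainer(graalOptions,          // OptionValues (new in this change)
                            graalHotSpotVMConfig,  // GraalHotSpotVMConfig
                            graphBuilderConfig,    // GraphBuilderConfiguration
                            jvmVersion);           // version string recorded in the .header section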
@ -303,17 +311,17 @@ public class BinaryContainer implements SymbolTable {
graalHotSpotVMConfig.useCMSGC, graalHotSpotVMConfig.useCMSGC,
graalHotSpotVMConfig.useTLAB, graalHotSpotVMConfig.useTLAB,
graalHotSpotVMConfig.useBiasedLocking, graalHotSpotVMConfig.useBiasedLocking,
TieredAOT.getValue(), TieredAOT.getValue(graalOptions),
graalHotSpotVMConfig.enableContended, graalHotSpotVMConfig.enableContended,
graalHotSpotVMConfig.restrictContended, graalHotSpotVMConfig.restrictContended,
graphBuilderConfig.omitAssertions() graphBuilderConfig.omitAssertions()
}; };
int[] intFlags = { graalHotSpotVMConfig.getOopEncoding().shift, int[] intFlags = { graalHotSpotVMConfig.getOopEncoding().getShift(),
graalHotSpotVMConfig.getKlassEncoding().shift, graalHotSpotVMConfig.getKlassEncoding().getShift(),
graalHotSpotVMConfig.contendedPaddingWidth, graalHotSpotVMConfig.contendedPaddingWidth,
graalHotSpotVMConfig.fieldsAllocationStyle, graalHotSpotVMConfig.fieldsAllocationStyle,
1 << graalHotSpotVMConfig.getOopEncoding().alignment, 1 << graalHotSpotVMConfig.logMinObjAlignment(),
graalHotSpotVMConfig.codeSegmentSize, graalHotSpotVMConfig.codeSegmentSize,
}; };
// @formatter:on // @formatter:on
@ -497,10 +505,19 @@ public class BinaryContainer implements SymbolTable {
switch (osName) { switch (osName) {
case "Linux": case "Linux":
case "SunOS": case "SunOS":
JELFRelocObject elfso = new JELFRelocObject(this, outputFileName, aotVersion); JELFRelocObject elfobj = new JELFRelocObject(this, outputFileName, aotVersion);
elfso.createELFRelocObject(relocationTable, symbolTable.values()); elfobj.createELFRelocObject(relocationTable, symbolTable.values());
break;
case "Mac OS X":
JMachORelocObject machobj = new JMachORelocObject(this, outputFileName);
machobj.createMachORelocObject(relocationTable, symbolTable.values());
break; break;
default: default:
if (osName.startsWith("Windows")) {
JPECoffRelocObject pecoffobj = new JPECoffRelocObject(this, outputFileName, aotVersion);
pecoffobj.createPECoffRelocObject(relocationTable, symbolTable.values());
break;
} else
throw new InternalError("Unsupported platform: " + osName); throw new InternalError("Unsupported platform: " + osName);
} }
} }
@ -742,11 +759,11 @@ public class BinaryContainer implements SymbolTable {
} }
/** /**
* Add constant data as follows. - Adding the data to the method.constdata section * Add constant data as follows. - Adding the data to the meth.constdata section
* *
* @param data * @param data
* @param alignment * @param alignment
* @return the offset in the method.constdata of the data * @return the offset in the meth.constdata of the data
*/ */
public int addConstantData(byte[] data, int alignment) { public int addConstantData(byte[] data, int alignment) {
// Get the current length of the metaspaceNameContainer // Get the current length of the metaspaceNameContainer

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@ package jdk.tools.jaotc.binformat;
import jdk.tools.jaotc.binformat.Symbol.Binding; import jdk.tools.jaotc.binformat.Symbol.Binding;
import jdk.tools.jaotc.binformat.Symbol.Kind; import jdk.tools.jaotc.binformat.Symbol.Kind;
import jdk.tools.jaotc.jnilibelf.ELFContainer; import jdk.tools.jaotc.binformat.Container;
import java.io.ByteArrayOutputStream; import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
@ -41,7 +41,7 @@ import java.util.Arrays;
* The method {@code putIntAt} updates the content of {@code contentBytes}. Changes are not * The method {@code putIntAt} updates the content of {@code contentBytes}. Changes are not
* reflected in {@code contentStream}. * reflected in {@code contentStream}.
*/ */
public class ByteContainer implements ELFContainer { public class ByteContainer implements Container {
/** /**
* {@code ByteBuffer} representation of {@code BinaryContainer}. * {@code ByteBuffer} representation of {@code BinaryContainer}.
*/ */
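
The javadoc's caveat -- putIntAt patches contentBytes but the change is not reflected in contentStream -- can be reproduced with plain JDK types. The sketch below is illustrative only and does not use the jaotc classes:

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;

public class PatchDemo {
    public static void main(String[] args) {
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        for (int i = 0; i < 4; i++) {
            stream.write(0);                               // four zero bytes
        }
        byte[] snapshot = stream.toByteArray();            // analogous to contentBytes
        ByteBuffer.wrap(snapshot).putInt(0, 42);           // analogous to putIntAt

        System.out.println(ByteBuffer.wrap(snapshot).getInt(0));              // 42
        System.out.println(ByteBuffer.wrap(stream.toByteArray()).getInt(0));  // still 0
    }
}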

Some files were not shown because too many files have changed in this diff