8168503: JEP 297: Unified arm32/arm64 Port
Reviewed-by: kvn, enevill, ihse, dholmes, erikj, coleenp, cjplummer
parent 4106c6bc9b
commit 6009cf0793
@@ -114,6 +114,10 @@ ifeq ($(call check-jvm-feature, compiler2), true)
  ADLCFLAGS += -U_LP64
endif

ifeq ($(HOTSPOT_TARGET_CPU_ARCH), arm)
  ADLCFLAGS += -DARM=1
endif

##############################################################################
# Concatenate all ad source files into a single file, which will be fed to
# adlc. Also include a #line directive at the start of every included file
@@ -63,8 +63,8 @@ JVM_CFLAGS_INCLUDES += \
# INCLUDE_SUFFIX_* is only meant for including the proper
# platform files. Don't use it to guard code. Use the value of
# HOTSPOT_TARGET_CPU_DEFINE etc. instead.
# Remaining TARGET_ARCH_* is needed to distinguish closed and open
# 64-bit ARM ports (also called AARCH64).
# Remaining TARGET_ARCH_* is needed to select the cpu specific
# sources for 64-bit ARM ports (arm versus aarch64).
JVM_CFLAGS_TARGET_DEFINES += \
    -DTARGET_ARCH_$(HOTSPOT_TARGET_CPU_ARCH) \
    -DINCLUDE_SUFFIX_OS=_$(HOTSPOT_TARGET_OS) \
@@ -139,6 +139,20 @@ endif
################################################################################
# Platform specific setup

# ARM source selection

ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), linux-arm)
  JVM_EXCLUDE_PATTERNS += arm_64

else ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), linux-aarch64)
  # For 64-bit arm builds, we use the 64 bit hotspot/src/cpu/arm
  # hotspot sources if HOTSPOT_TARGET_CPU_ARCH is set to arm.
  # Exclude the aarch64 and 32 bit arm files for this build.
  ifeq ($(HOTSPOT_TARGET_CPU_ARCH), arm)
    JVM_EXCLUDE_PATTERNS += arm_32 aarch64
  endif
endif

ifneq ($(filter $(OPENJDK_TARGET_OS), linux macosx windows), )
  JVM_PRECOMPILED_HEADER := $(HOTSPOT_TOPDIR)/src/share/vm/precompiled/precompiled.hpp
endif
@@ -154,3 +154,108 @@ else
      compiledIC_aot_x86_64.cpp compilerRuntime.cpp \
      aotCodeHeap.cpp aotCompiledMethod.cpp aotLoader.cpp compiledIC_aot.cpp
endif
################################################################################

ifeq ($(call check-jvm-feature, link-time-opt), true)
  # NOTE: Disable automatic optimization level and let the explicit cflag control
  # optimization level instead. This activates O3 on slowdebug builds, just
  # like the old build, but it's probably not right.
  JVM_OPTIMIZATION :=
  JVM_CFLAGS_FEATURES += -O3 -flto
  JVM_LDFLAGS_FEATURES += -O3 -flto -fwhole-program -fno-strict-aliasing
endif

ifeq ($(call check-jvm-feature, minimal), true)
  ifeq ($(call check-jvm-feature, link-time-opt), false)
    JVM_OPTIMIZATION := SIZE
    OPT_SPEED_SRC := \
        allocation.cpp \
        assembler.cpp \
        assembler_linux_arm.cpp \
        barrierSet.cpp \
        basicLock.cpp \
        biasedLocking.cpp \
        bytecode.cpp \
        bytecodeInterpreter.cpp \
        bytecodeInterpreter_x86.cpp \
        c1_Compilation.cpp \
        c1_Compiler.cpp \
        c1_GraphBuilder.cpp \
        c1_LinearScan.cpp \
        c1_LIR.cpp \
        ciEnv.cpp \
        ciObjectFactory.cpp \
        codeBlob.cpp \
        constantPool.cpp \
        constMethod.cpp \
        classLoader.cpp \
        classLoaderData.cpp \
        classFileParser.cpp \
        classFileStream.cpp \
        cpCache.cpp \
        defNewGeneration.cpp \
        frame_arm.cpp \
        genCollectedHeap.cpp \
        generation.cpp \
        genMarkSweep.cpp \
        growableArray.cpp \
        handles.cpp \
        hashtable.cpp \
        heap.cpp \
        icache.cpp \
        icache_arm.cpp \
        instanceKlass.cpp \
        invocationCounter.cpp \
        iterator.cpp \
        javaCalls.cpp \
        javaClasses.cpp \
        jniFastGetField_arm.cpp \
        jvm.cpp \
        jvm_linux.cpp \
        linkResolver.cpp \
        klass.cpp \
        klassVtable.cpp \
        markSweep.cpp \
        memRegion.cpp \
        memoryPool.cpp \
        method.cpp \
        methodHandles.cpp \
        methodHandles_arm.cpp \
        methodLiveness.cpp \
        metablock.cpp \
        metaspace.cpp \
        mutex.cpp \
        mutex_linux.cpp \
        mutexLocker.cpp \
        nativeLookup.cpp \
        objArrayKlass.cpp \
        os_linux.cpp \
        os_linux_arm.cpp \
        placeHolders.cpp \
        quickSort.cpp \
        resourceArea.cpp \
        rewriter.cpp \
        sharedRuntime.cpp \
        signature.cpp \
        space.cpp \
        stackMapTable.cpp \
        symbolTable.cpp \
        systemDictionary.cpp \
        symbol.cpp \
        synchronizer.cpp \
        threadLS_bsd_x86.cpp \
        threadLS_linux_arm.cpp \
        threadLS_linux_x86.cpp \
        timer.cpp \
        typeArrayKlass.cpp \
        unsafe.cpp \
        utf8.cpp \
        vmSymbols.cpp \
        #

    $(foreach s, $(OPT_SPEED_SRC), \
        $(eval BUILD_LIBJVM_$s_OPTIMIZATION := HIGHEST_JVM))

    BUILD_LIBJVM_systemDictionary.cpp_CXXFLAGS := -fno-optimize-sibling-calls
  endif
endif
270 hotspot/src/cpu/arm/vm/abstractInterpreter_arm.cpp (new file)
@@ -0,0 +1,270 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
#include "oops/method.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
#ifdef AARCH64
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : // fall through
    case T_FLOAT  : // fall through
    case T_DOUBLE : i = 4; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 5; break;
#else
    case T_VOID   : i = 0; break;
    case T_BOOLEAN: i = 1; break;
    case T_CHAR   : i = 2; break;
    case T_BYTE   : i = 3; break;
    case T_SHORT  : i = 4; break;
    case T_INT    : i = 5; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 6; break;
    case T_LONG   : i = 7; break;
    case T_FLOAT  : i = 8; break;
    case T_DOUBLE : i = 9; break;
#endif // AARCH64
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}

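The index computed above selects a per-type result handler; the assert bounds it by number_of_result_handlers, which is why the AArch64 mapping (which folds all word-sized types into one case) needs fewer slots than the 32-bit one. A toy illustration of that lookup, with hypothetical names (not HotSpot code):

// Hypothetical sketch: handlers are indexed by BasicType_as_index().
typedef void (*ResultHandler)();
static ResultHandler result_handlers[10];   // 10 slots cover the 32-bit mapping above

void dispatch_result_handler(BasicType t) {
  int i = AbstractInterpreter::BasicType_as_index(t);  // asserted to be in bounds
  result_handlers[i]();                                // per-type result conversion
}
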
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin  : // fall thru
    case Interpreter::java_lang_math_cos  : // fall thru
    case Interpreter::java_lang_math_tan  : // fall thru
    case Interpreter::java_lang_math_abs  : // fall thru
    case Interpreter::java_lang_math_log  : // fall thru
    case Interpreter::java_lang_math_log10: // fall thru
    case Interpreter::java_lang_math_sqrt :
      return false;
    default:
      return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int stub_code = AARCH64_ONLY(24) NOT_AARCH64(12);  // see generate_call_stub
  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size = method->is_synchronized() ?
                     1*frame::interpreter_frame_monitor_size() : 0;

  // total overhead size: monitor_size + (sender SP, thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = monitor_size +
                            (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset);
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return overhead_size + method_stack + stub_code;
}

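A worked instance of the sizing arithmetic above, as a self-contained sketch; every constant below is a stand-in (the real values come from the frame:: offsets and generate_call_stub), so this only illustrates the shape of the formula:

// Hedged sketch, not HotSpot code: all constants are hypothetical 32-bit stand-ins.
static int size_top_activation_sketch(int max_locals, int max_stack, bool is_synchronized) {
  const int stub_code     = 12;                        // the NOT_AARCH64 value above
  const int monitor_size  = is_synchronized ? 2 : 0;   // hypothetical monitor slots
  const int fixed_slots   = 4;                         // sender SP thru expr stack bottom (stand-in)
  const int overhead_size = monitor_size + fixed_slots;
  const int method_stack  = max_locals + max_stack;    // one word per stack element
  return overhead_size + method_stack + stub_code;
}
// e.g. a synchronized method with 3 locals and max_stack 5:
// 2 + 4 + (3 + 5) + 12 = 26 words in this sketch.
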
// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int tempcount,
                                         int extra_args,
                                         int moncount,
                                         int callee_param_count,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in TemplateInterpreterGenerator::generate_fixed_frame.
  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;

  // Our locals were accounted for by the caller (or last_frame_adjust on the transition)
  // Since the callee parameters already account for the callee's params we only need to account for
  // the extra locals.

  int size = overhead +
             ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
             (moncount*frame::interpreter_frame_monitor_size()) +
             tempcount*Interpreter::stackElementWords + extra_args;

#ifdef AARCH64
  size = round_to(size, StackAlignmentInBytes/BytesPerWord);
#endif // AARCH64

  return size;
}

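The AArch64-only round_to step keeps the frame a whole number of stack-alignment units; with 16-byte alignment and 8-byte words that means rounding the word count up to a multiple of 2. A minimal stand-in for that rounding, assuming a power-of-two alignment as HotSpot does:

// Hedged stand-in for round_to(): round x up to a multiple of align.
static int round_up_sketch(int x, int align) {   // align must be a power of two
  return (x + align - 1) & ~(align - 1);
}
// round_up_sketch(13, 16/8) == 14, so a 13-word frame grows to 14 words.
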
void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {

  // Set up the method, locals, and monitors.
  // The frame interpreter_frame is guaranteed to be the right size,
  // as determined by a previous call to the size_activation() method.
  // It is also guaranteed to be walkable even though it is in a skeletal state
  // NOTE: return size is in words not bytes

  // fixed size of an interpreter frame:
  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) * Interpreter::stackElementWords;

#ifdef ASSERT
  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
#endif

  interpreter_frame->interpreter_frame_set_method(method);
  // NOTE the difference in using sender_sp and interpreter_frame_sender_sp
  // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
  // and sender_sp is (fp + sender_sp_offset*wordSize)

#ifdef AARCH64
  intptr_t* locals;
  if (caller->is_interpreted_frame()) {
    // attach locals to the expression stack of caller interpreter frame
    locals = caller->interpreter_frame_tos_address() + caller_actual_parameters*Interpreter::stackElementWords - 1;
  } else {
    assert(is_bottom_frame, "should be");
    locals = interpreter_frame->fp() + frame::sender_sp_offset + method->max_locals() - 1;
  }

  if (TraceDeoptimization) {
    tty->print_cr("layout_activation:");

    if (caller->is_entry_frame()) {
      tty->print("entry ");
    }
    if (caller->is_compiled_frame()) {
      tty->print("compiled ");
    }
    if (caller->is_interpreted_frame()) {
      tty->print("interpreted ");
    }
    tty->print_cr("caller: sp=%p, unextended_sp=%p, fp=%p, pc=%p", caller->sp(), caller->unextended_sp(), caller->fp(), caller->pc());
    tty->print_cr("interpreter_frame: sp=%p, unextended_sp=%p, fp=%p, pc=%p", interpreter_frame->sp(), interpreter_frame->unextended_sp(), interpreter_frame->fp(), interpreter_frame->pc());
    tty->print_cr("method: max_locals = %d, size_of_parameters = %d", method->max_locals(), method->size_of_parameters());
    tty->print_cr("caller_actual_parameters = %d", caller_actual_parameters);
    tty->print_cr("locals = %p", locals);
  }

#ifdef ASSERT
  if (caller_actual_parameters != method->size_of_parameters()) {
    assert(caller->is_interpreted_frame(), "adjusted caller_actual_parameters, but caller is not interpreter frame");
    Bytecode_invoke inv(caller->interpreter_frame_method(), caller->interpreter_frame_bci());

    if (is_bottom_frame) {
      assert(caller_actual_parameters == 0, "invalid adjusted caller_actual_parameters value for bottom frame");
      assert(inv.is_invokedynamic() || inv.is_invokehandle(), "adjusted caller_actual_parameters for bottom frame, but not invokedynamic/invokehandle");
    } else {
      assert(caller_actual_parameters == method->size_of_parameters()+1, "invalid adjusted caller_actual_parameters value");
      assert(!inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name()), "adjusted caller_actual_parameters, but no member arg");
    }
  }
  if (caller->is_interpreted_frame()) {
    intptr_t* locals_base = (locals - method->max_locals()*Interpreter::stackElementWords + 1);
    locals_base = (intptr_t*)round_down((intptr_t)locals_base, StackAlignmentInBytes);
    assert(interpreter_frame->sender_sp() <= locals_base, "interpreter-to-interpreter frame chaining");

  } else if (caller->is_compiled_frame()) {
    assert(locals + 1 <= caller->unextended_sp(), "compiled-to-interpreter frame chaining");

  } else {
    assert(caller->is_entry_frame(), "should be");
    assert(locals + 1 <= caller->fp(), "entry-to-interpreter frame chaining");
  }
#endif // ASSERT

#else
  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
#endif // AARCH64

  interpreter_frame->interpreter_frame_set_locals(locals);
  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
  BasicObjectLock* monbot = montop - moncount;
  interpreter_frame->interpreter_frame_set_monitor_end(monbot);

  // Set last_sp
  intptr_t* stack_top = (intptr_t*) monbot -
                        tempcount*Interpreter::stackElementWords -
                        popframe_extra_args;
#ifdef AARCH64
  interpreter_frame->interpreter_frame_set_stack_top(stack_top);

  intptr_t* extended_sp = (intptr_t*) monbot -
                          (method->max_stack() + 1) * Interpreter::stackElementWords - // +1 is reserved slot for exception handler
                          popframe_extra_args;
  extended_sp = (intptr_t*)round_down((intptr_t)extended_sp, StackAlignmentInBytes);
  interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);
#else
  interpreter_frame->interpreter_frame_set_last_sp(stack_top);
#endif // AARCH64

  // All frames but the initial (oldest) interpreter frame we fill in have a
  // value for sender_sp that allows walking the stack but isn't
  // truly correct.  Correct the value here.

#ifdef AARCH64
  if (caller->is_interpreted_frame()) {
    intptr_t* sender_sp = (intptr_t*)round_down((intptr_t)caller->interpreter_frame_tos_address(), StackAlignmentInBytes);
    interpreter_frame->set_interpreter_frame_sender_sp(sender_sp);

  } else {
    // in case of non-interpreter caller sender_sp of the oldest frame is already
    // set to valid value
  }
#else
  if (extra_locals != 0 &&
      interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp()) {
    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
  }
#endif // AARCH64

  *interpreter_frame->interpreter_frame_cache_addr() =
      method->constants()->cache();
  *interpreter_frame->interpreter_frame_mirror_addr() =
      method->method_holder()->java_mirror();
}
14428 hotspot/src/cpu/arm/vm/arm.ad (new file; diff suppressed because it is too large)

586 hotspot/src/cpu/arm/vm/arm_32.ad (new file)
@@ -0,0 +1,586 @@
//
// Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

// ARM Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def" name ( register save type, C convention save type,
//                  ideal register type, encoding, vm name );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.


// ----------------------------
// Integer/Long Registers
// ----------------------------

reg_def R_R0 (SOC, SOC, Op_RegI,  0, R(0)->as_VMReg());
reg_def R_R1 (SOC, SOC, Op_RegI,  1, R(1)->as_VMReg());
reg_def R_R2 (SOC, SOC, Op_RegI,  2, R(2)->as_VMReg());
reg_def R_R3 (SOC, SOC, Op_RegI,  3, R(3)->as_VMReg());
reg_def R_R4 (SOC, SOE, Op_RegI,  4, R(4)->as_VMReg());
reg_def R_R5 (SOC, SOE, Op_RegI,  5, R(5)->as_VMReg());
reg_def R_R6 (SOC, SOE, Op_RegI,  6, R(6)->as_VMReg());
reg_def R_R7 (SOC, SOE, Op_RegI,  7, R(7)->as_VMReg());
reg_def R_R8 (SOC, SOE, Op_RegI,  8, R(8)->as_VMReg());
reg_def R_R9 (SOC, SOE, Op_RegI,  9, R(9)->as_VMReg());
reg_def R_R10(NS,  SOE, Op_RegI, 10, R(10)->as_VMReg());
reg_def R_R11(NS,  SOE, Op_RegI, 11, R(11)->as_VMReg());
reg_def R_R12(SOC, SOC, Op_RegI, 12, R(12)->as_VMReg());
reg_def R_R13(NS,  NS,  Op_RegI, 13, R(13)->as_VMReg());
reg_def R_R14(SOC, SOC, Op_RegI, 14, R(14)->as_VMReg());
reg_def R_R15(NS,  NS,  Op_RegI, 15, R(15)->as_VMReg());

// ----------------------------
// Float/Double Registers
// ----------------------------

// Float Registers

reg_def R_S0 ( SOC, SOC, Op_RegF,  0, S0->as_VMReg());
reg_def R_S1 ( SOC, SOC, Op_RegF,  1, S1_reg->as_VMReg());
reg_def R_S2 ( SOC, SOC, Op_RegF,  2, S2_reg->as_VMReg());
reg_def R_S3 ( SOC, SOC, Op_RegF,  3, S3_reg->as_VMReg());
reg_def R_S4 ( SOC, SOC, Op_RegF,  4, S4_reg->as_VMReg());
reg_def R_S5 ( SOC, SOC, Op_RegF,  5, S5_reg->as_VMReg());
reg_def R_S6 ( SOC, SOC, Op_RegF,  6, S6_reg->as_VMReg());
reg_def R_S7 ( SOC, SOC, Op_RegF,  7, S7->as_VMReg());
reg_def R_S8 ( SOC, SOC, Op_RegF,  8, S8->as_VMReg());
reg_def R_S9 ( SOC, SOC, Op_RegF,  9, S9->as_VMReg());
reg_def R_S10( SOC, SOC, Op_RegF, 10, S10->as_VMReg());
reg_def R_S11( SOC, SOC, Op_RegF, 11, S11->as_VMReg());
reg_def R_S12( SOC, SOC, Op_RegF, 12, S12->as_VMReg());
reg_def R_S13( SOC, SOC, Op_RegF, 13, S13->as_VMReg());
reg_def R_S14( SOC, SOC, Op_RegF, 14, S14->as_VMReg());
reg_def R_S15( SOC, SOC, Op_RegF, 15, S15->as_VMReg());
reg_def R_S16( SOC, SOE, Op_RegF, 16, S16->as_VMReg());
reg_def R_S17( SOC, SOE, Op_RegF, 17, S17->as_VMReg());
reg_def R_S18( SOC, SOE, Op_RegF, 18, S18->as_VMReg());
reg_def R_S19( SOC, SOE, Op_RegF, 19, S19->as_VMReg());
reg_def R_S20( SOC, SOE, Op_RegF, 20, S20->as_VMReg());
reg_def R_S21( SOC, SOE, Op_RegF, 21, S21->as_VMReg());
reg_def R_S22( SOC, SOE, Op_RegF, 22, S22->as_VMReg());
reg_def R_S23( SOC, SOE, Op_RegF, 23, S23->as_VMReg());
reg_def R_S24( SOC, SOE, Op_RegF, 24, S24->as_VMReg());
reg_def R_S25( SOC, SOE, Op_RegF, 25, S25->as_VMReg());
reg_def R_S26( SOC, SOE, Op_RegF, 26, S26->as_VMReg());
reg_def R_S27( SOC, SOE, Op_RegF, 27, S27->as_VMReg());
reg_def R_S28( SOC, SOE, Op_RegF, 28, S28->as_VMReg());
reg_def R_S29( SOC, SOE, Op_RegF, 29, S29->as_VMReg());
reg_def R_S30( SOC, SOE, Op_RegF, 30, S30->as_VMReg());
reg_def R_S31( SOC, SOE, Op_RegF, 31, S31->as_VMReg());

// Double Registers
// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers.  In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even.  Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

reg_def R_D16 (SOC, SOC, Op_RegD,  32, D16->as_VMReg());
reg_def R_D16x(SOC, SOC, Op_RegD, 255, D16->as_VMReg()->next());
reg_def R_D17 (SOC, SOC, Op_RegD,  34, D17->as_VMReg());
reg_def R_D17x(SOC, SOC, Op_RegD, 255, D17->as_VMReg()->next());
reg_def R_D18 (SOC, SOC, Op_RegD,  36, D18->as_VMReg());
reg_def R_D18x(SOC, SOC, Op_RegD, 255, D18->as_VMReg()->next());
reg_def R_D19 (SOC, SOC, Op_RegD,  38, D19->as_VMReg());
reg_def R_D19x(SOC, SOC, Op_RegD, 255, D19->as_VMReg()->next());
reg_def R_D20 (SOC, SOC, Op_RegD,  40, D20->as_VMReg());
reg_def R_D20x(SOC, SOC, Op_RegD, 255, D20->as_VMReg()->next());
reg_def R_D21 (SOC, SOC, Op_RegD,  42, D21->as_VMReg());
reg_def R_D21x(SOC, SOC, Op_RegD, 255, D21->as_VMReg()->next());
reg_def R_D22 (SOC, SOC, Op_RegD,  44, D22->as_VMReg());
reg_def R_D22x(SOC, SOC, Op_RegD, 255, D22->as_VMReg()->next());
reg_def R_D23 (SOC, SOC, Op_RegD,  46, D23->as_VMReg());
reg_def R_D23x(SOC, SOC, Op_RegD, 255, D23->as_VMReg()->next());
reg_def R_D24 (SOC, SOC, Op_RegD,  48, D24->as_VMReg());
reg_def R_D24x(SOC, SOC, Op_RegD, 255, D24->as_VMReg()->next());
reg_def R_D25 (SOC, SOC, Op_RegD,  50, D25->as_VMReg());
reg_def R_D25x(SOC, SOC, Op_RegD, 255, D25->as_VMReg()->next());
reg_def R_D26 (SOC, SOC, Op_RegD,  52, D26->as_VMReg());
reg_def R_D26x(SOC, SOC, Op_RegD, 255, D26->as_VMReg()->next());
reg_def R_D27 (SOC, SOC, Op_RegD,  54, D27->as_VMReg());
reg_def R_D27x(SOC, SOC, Op_RegD, 255, D27->as_VMReg()->next());
reg_def R_D28 (SOC, SOC, Op_RegD,  56, D28->as_VMReg());
reg_def R_D28x(SOC, SOC, Op_RegD, 255, D28->as_VMReg()->next());
reg_def R_D29 (SOC, SOC, Op_RegD,  58, D29->as_VMReg());
reg_def R_D29x(SOC, SOC, Op_RegD, 255, D29->as_VMReg()->next());
reg_def R_D30 (SOC, SOC, Op_RegD,  60, D30->as_VMReg());
reg_def R_D30x(SOC, SOC, Op_RegD, 255, D30->as_VMReg()->next());
reg_def R_D31 (SOC, SOC, Op_RegD,  62, D31->as_VMReg());
reg_def R_D31x(SOC, SOC, Op_RegD, 255, D31->as_VMReg()->next());

// ----------------------------
// Special Registers
// Condition Codes Flag Registers
reg_def APSR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());
reg_def FPSCR(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());

// ----------------------------
// Specify the enum values for the registers.  These enums are only used by the
// OptoReg "class".  We can convert these enum values at will to VMReg when needed
// for visibility to the rest of the vm.  The order of this enum influences the
// register allocator so having the freedom to set this order and not be stuck
// with the order that is natural for the rest of the vm is worth it.

// registers in that order so that R11/R12 is an aligned pair that can be used for longs
alloc_class chunk0(
    R_R4, R_R5, R_R6, R_R7, R_R8, R_R9, R_R11, R_R12, R_R10, R_R13, R_R14, R_R15, R_R0, R_R1, R_R2, R_R3);

// Note that a register is not allocatable unless it is also mentioned
// in a widely-used reg_class below.

alloc_class chunk1(
    R_S16, R_S17, R_S18, R_S19, R_S20, R_S21, R_S22, R_S23,
    R_S24, R_S25, R_S26, R_S27, R_S28, R_S29, R_S30, R_S31,
    R_S0,  R_S1,  R_S2,  R_S3,  R_S4,  R_S5,  R_S6,  R_S7,
    R_S8,  R_S9,  R_S10, R_S11, R_S12, R_S13, R_S14, R_S15,
    R_D16, R_D16x, R_D17, R_D17x, R_D18, R_D18x, R_D19, R_D19x,
    R_D20, R_D20x, R_D21, R_D21x, R_D22, R_D22x, R_D23, R_D23x,
    R_D24, R_D24x, R_D25, R_D25x, R_D26, R_D26x, R_D27, R_D27x,
    R_D28, R_D28x, R_D29, R_D29x, R_D30, R_D30x, R_D31, R_D31x
);

alloc_class chunk2(APSR, FPSCR);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( as defined in frame section )
// 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// ----------------------------
// Integer Register Classes
// ----------------------------
// Exclusions from i_reg:
// SP (R13), PC (R15)
// R10: reserved by HotSpot as the TLS register (invariant within Java)
reg_class int_reg(R_R0, R_R1, R_R2, R_R3, R_R4, R_R5, R_R6, R_R7, R_R8, R_R9, R_R11, R_R12, R_R14);

reg_class R0_regI(R_R0);
reg_class R1_regI(R_R1);
reg_class R2_regI(R_R2);
reg_class R3_regI(R_R3);
reg_class R12_regI(R_R12);

// ----------------------------
// Pointer Register Classes
// ----------------------------
reg_class ptr_reg(R_R0, R_R1, R_R2, R_R3, R_R4, R_R5, R_R6, R_R7, R_R8, R_R9, R_R11, R_R12, R_R14);
// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.
reg_class sp_ptr_reg(R_R0, R_R1, R_R2, R_R3, R_R4, R_R5, R_R6, R_R7, R_R8, R_R9, R_R11, R_R12, R_R14, R_R10 /* TLS */, R_R13 /* SP */);

#define R_Ricklass R_R8
#define R_Rmethod  R_R9
#define R_Rthread  R_R10
#define R_Rexception_obj R_R4

// Other special pointer regs
reg_class R0_regP(R_R0);
reg_class R1_regP(R_R1);
reg_class R2_regP(R_R2);
reg_class R4_regP(R_R4);
reg_class Rexception_regP(R_Rexception_obj);
reg_class Ricklass_regP(R_Ricklass);
reg_class Rmethod_regP(R_Rmethod);
reg_class Rthread_regP(R_Rthread);
reg_class IP_regP(R_R12);
reg_class LR_regP(R_R14);

reg_class FP_regP(R_R11);

// ----------------------------
// Long Register Classes
// ----------------------------
reg_class long_reg ( R_R0,R_R1, R_R2,R_R3, R_R4,R_R5, R_R6,R_R7, R_R8,R_R9, R_R11,R_R12);
// for ldrexd, strexd: first reg of pair must be even
reg_class long_reg_align ( R_R0,R_R1, R_R2,R_R3, R_R4,R_R5, R_R6,R_R7, R_R8,R_R9);

reg_class R0R1_regL(R_R0,R_R1);
reg_class R2R3_regL(R_R2,R_R3);

// ----------------------------
// Special Class for Condition Code Flags Register
reg_class int_flags(APSR);
reg_class float_flags(FPSCR);


// ----------------------------
// Floating Point Register Classes
// ----------------------------
// Skip S14/S15, they are reserved for mem-mem copies
reg_class sflt_reg(R_S0, R_S1, R_S2, R_S3, R_S4, R_S5, R_S6, R_S7, R_S8, R_S9, R_S10, R_S11, R_S12, R_S13,
                   R_S16, R_S17, R_S18, R_S19, R_S20, R_S21, R_S22, R_S23, R_S24, R_S25, R_S26, R_S27, R_S28, R_S29, R_S30, R_S31);

// Paired floating point registers--they show up in the same order as the floats,
// but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
reg_class dflt_reg(R_S0,R_S1, R_S2,R_S3, R_S4,R_S5, R_S6,R_S7, R_S8,R_S9, R_S10,R_S11, R_S12,R_S13,
                   R_S16,R_S17, R_S18,R_S19, R_S20,R_S21, R_S22,R_S23, R_S24,R_S25, R_S26,R_S27, R_S28,R_S29, R_S30,R_S31,
                   R_D16,R_D16x, R_D17,R_D17x, R_D18,R_D18x, R_D19,R_D19x, R_D20,R_D20x, R_D21,R_D21x, R_D22,R_D22x,
                   R_D23,R_D23x, R_D24,R_D24x, R_D25,R_D25x, R_D26,R_D26x, R_D27,R_D27x, R_D28,R_D28x, R_D29,R_D29x,
                   R_D30,R_D30x, R_D31,R_D31x);

reg_class dflt_low_reg(R_S0,R_S1, R_S2,R_S3, R_S4,R_S5, R_S6,R_S7, R_S8,R_S9, R_S10,R_S11, R_S12,R_S13,
                       R_S16,R_S17, R_S18,R_S19, R_S20,R_S21, R_S22,R_S23, R_S24,R_S25, R_S26,R_S27, R_S28,R_S29, R_S30,R_S31);


reg_class actual_dflt_reg %{
  if (VM_Version::has_vfp3_32()) {
    return DFLT_REG_mask();
  } else {
    return DFLT_LOW_REG_mask();
  }
%}

reg_class S0_regF(R_S0);
reg_class D0_regD(R_S0,R_S1);
reg_class D1_regD(R_S2,R_S3);
reg_class D2_regD(R_S4,R_S5);
reg_class D3_regD(R_S6,R_S7);
reg_class D4_regD(R_S8,R_S9);
reg_class D5_regD(R_S10,R_S11);
reg_class D6_regD(R_S12,R_S13);
reg_class D7_regD(R_S14,R_S15);

reg_class D16_regD(R_D16,R_D16x);
reg_class D17_regD(R_D17,R_D17x);
reg_class D18_regD(R_D18,R_D18x);
reg_class D19_regD(R_D19,R_D19x);
reg_class D20_regD(R_D20,R_D20x);
reg_class D21_regD(R_D21,R_D21x);
reg_class D22_regD(R_D22,R_D22x);
reg_class D23_regD(R_D23,R_D23x);
reg_class D24_regD(R_D24,R_D24x);
reg_class D25_regD(R_D25,R_D25x);
reg_class D26_regD(R_D26,R_D26x);
reg_class D27_regD(R_D27,R_D27x);
reg_class D28_regD(R_D28,R_D28x);
reg_class D29_regD(R_D29,R_D29x);
reg_class D30_regD(R_D30,R_D30x);
reg_class D31_regD(R_D31,R_D31x);

reg_class vectorx_reg(R_S0,R_S1,R_S2,R_S3, R_S4,R_S5,R_S6,R_S7,
                      R_S8,R_S9,R_S10,R_S11, /* skip S14/S15 */
                      R_S16,R_S17,R_S18,R_S19, R_S20,R_S21,R_S22,R_S23,
                      R_S24,R_S25,R_S26,R_S27, R_S28,R_S29,R_S30,R_S31,
                      R_D16,R_D16x,R_D17,R_D17x, R_D18,R_D18x,R_D19,R_D19x,
                      R_D20,R_D20x,R_D21,R_D21x, R_D22,R_D22x,R_D23,R_D23x,
                      R_D24,R_D24x,R_D25,R_D25x, R_D26,R_D26x,R_D27,R_D27x,
                      R_D28,R_D28x,R_D29,R_D29x, R_D30,R_D30x,R_D31,R_D31x);

%}

source_hpp %{
// FIXME
const MachRegisterNumbers R_mem_copy_lo_num = R_S14_num;
const MachRegisterNumbers R_mem_copy_hi_num = R_S15_num;
const FloatRegister Rmemcopy = S14;
const MachRegisterNumbers R_hf_ret_lo_num = R_S0_num;
const MachRegisterNumbers R_hf_ret_hi_num = R_S1_num;

const MachRegisterNumbers R_Ricklass_num = R_R8_num;
const MachRegisterNumbers R_Rmethod_num  = R_R9_num;

#define LDR_DOUBLE "FLDD"
#define LDR_FLOAT  "FLDS"
#define STR_DOUBLE "FSTD"
#define STR_FLOAT  "FSTS"
#define LDR_64     "LDRD"
#define STR_64     "STRD"
#define LDR_32     "LDR"
#define STR_32     "STR"
#define MOV_DOUBLE "FCPYD"
#define MOV_FLOAT  "FCPYS"
#define FMSR       "FMSR"
#define FMRS       "FMRS"
#define LDREX      "ldrex "
#define STREX      "strex "

#define str_64 strd
#define ldr_64 ldrd
#define ldr_32 ldr
#define ldrex ldrex
#define strex strex

static inline bool is_memoryD(int offset) {
  return offset < 1024 && offset > -1024;
}

static inline bool is_memoryfp(int offset) {
  return offset < 1024 && offset > -1024;
}

static inline bool is_memoryI(int offset) {
  return offset < 4096 && offset > -4096;
}

static inline bool is_memoryP(int offset) {
  return offset < 4096 && offset > -4096;
}

static inline bool is_memoryHD(int offset) {
  return offset < 256 && offset > -256;
}

static inline bool is_aimm(int imm) {
  return AsmOperand::is_rotated_imm(imm);
}

static inline bool is_limmI(jint imm) {
  return AsmOperand::is_rotated_imm(imm);
}

static inline bool is_limmI_low(jint imm, int n) {
  int imml = imm & right_n_bits(n);
  return is_limmI(imml) || is_limmI(imm);
}

static inline int limmI_low(jint imm, int n) {
  int imml = imm & right_n_bits(n);
  return is_limmI(imml) ? imml : imm;
}

%}

source %{

// Given a register encoding, produce an Integer Register object
static Register reg_to_register_object(int register_encoding) {
  assert(R0->encoding() == R_R0_enc && R15->encoding() == R_R15_enc, "right coding");
  return as_Register(register_encoding);
}

// Given a register encoding, produce a single-precision Float Register object
static FloatRegister reg_to_FloatRegister_object(int register_encoding) {
  assert(S0->encoding() == R_S0_enc && S31->encoding() == R_S31_enc, "right coding");
  return as_FloatRegister(register_encoding);
}

void Compile::pd_compiler2_init() {
  // Unimplemented
}

// Location of compiled Java return values.  Same as C
OptoRegPair c2::return_value(int ideal_reg) {
  assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifndef __ABI_HARD__
  static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, R_R0_num, R_R0_num, R_R0_num, R_R0_num, R_R0_num };
  static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_R1_num, R_R1_num };
#else
  static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, R_R0_num, R_R0_num, R_hf_ret_lo_num, R_hf_ret_lo_num, R_R0_num };
  static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_hf_ret_hi_num, R_R1_num };
#endif
  return OptoRegPair( hi[ideal_reg], lo[ideal_reg]);
}

// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.

int MachCallStaticJavaNode::ret_addr_offset() {
  bool far = (_method == NULL) ? maybe_far_call(this) : !cache_reachable();
  return ((far ? 3 : 1) + (_method_handle_invoke ? 1 : 0)) *
         NativeInstruction::instruction_size;
}

int MachCallDynamicJavaNode::ret_addr_offset() {
  bool far = !cache_reachable();
  // mov_oop is always 2 words
  return (2 + (far ? 3 : 1)) * NativeInstruction::instruction_size;
}

int MachCallRuntimeNode::ret_addr_offset() {
  // bl or movw; movt; blx
  bool far = maybe_far_call(this);
  return (far ? 3 : 1) * NativeInstruction::instruction_size;
}
%}

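The three ret_addr_offset() variants above are just instruction counts times the fixed ARM instruction size; the following hedged restatement (assuming the usual 4-byte instruction_size) spells out the arithmetic:

// Restatement of the static-call formula above, not additional HotSpot code.
static int static_call_ret_addr_offset_sketch(bool far_call, bool method_handle_invoke) {
  const int instruction_size = 4;   // assumed A32 instruction width
  return ((far_call ? 3 : 1) + (method_handle_invoke ? 1 : 0)) * instruction_size;
}
// far call (movw; movt; blx):  3 * 4 = 12 bytes; near call (bl): 1 * 4 = 4 bytes.
// The dynamic variant adds mov_oop (2 words): (2 + 3) * 4 = 20 bytes when far.
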
// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp.  This lets us avoid many, many other ifdefs.)
#define immX      immI
#define immXRot   immIRot
#define iRegX     iRegI
#define aimmX     aimmI
#define limmX     limmI
#define immX10x2  immI10x2
#define LShiftX   LShiftI
#define shimmX    immU5

// Compatibility interface
#define aimmP     immPRot
#define immIMov   immIRot

#define store_RegL     iRegL
#define store_RegLd    iRegLd
#define store_RegI     iRegI
#define store_ptr_RegP iRegP

//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------
// Immediate Operands

operand immIRot() %{
  predicate(AsmOperand::is_rotated_imm(n->get_int()));
  match(ConI);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

operand immIRotn() %{
  predicate(n->get_int() != 0 && AsmOperand::is_rotated_imm(~n->get_int()));
  match(ConI);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

operand immIRotneg() %{
  // if AsmOperand::is_rotated_imm() is true for this constant, it is
  // an immIRot and an optimal instruction combination exists to handle the
  // constant as an immIRot
  predicate(!AsmOperand::is_rotated_imm(n->get_int()) && AsmOperand::is_rotated_imm(-n->get_int()));
  match(ConI);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}
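All three rotated-immediate operands hinge on AsmOperand::is_rotated_imm(), which presumably implements the classic ARM A32 immediate test: the value must be an 8-bit constant rotated right by an even amount. A self-contained sketch of that test (an assumption about what the HotSpot helper checks, not its implementation):

#include <cstdint>

// Sketch: v is encodable iff some even rotation brings it into 8 bits.
static bool is_rotated_imm_sketch(uint32_t v) {
  for (int rot = 0; rot < 32; rot += 2) {
    // rotate v left by rot, i.e. undo a rotate-right of an 8-bit immediate
    uint32_t r = (rot == 0) ? v : ((v << rot) | (v >> (32 - rot)));
    if (r <= 0xFFu) return true;
  }
  return false;
}
// is_rotated_imm_sketch(0xFF000000) == true  (0xFF rotated right by 8)
// is_rotated_imm_sketch(0x00000101) == false (set bits span more than 8 positions)
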
// Non-negative integer immediate that is encodable using the rotation scheme,
// and that when expanded fits in 31 bits.
operand immU31Rot() %{
  predicate((0 <= n->get_int()) && AsmOperand::is_rotated_imm(n->get_int()));
  match(ConI);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

operand immPRot() %{
  predicate(n->get_ptr() == 0 || (AsmOperand::is_rotated_imm(n->get_ptr()) && ((ConPNode*)n)->type()->reloc() == relocInfo::none));

  match(ConP);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

operand immLlowRot() %{
  predicate(n->get_long() >> 32 == 0 && AsmOperand::is_rotated_imm((int)n->get_long()));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immLRot2() %{
  predicate(AsmOperand::is_rotated_imm((int)(n->get_long() >> 32)) &&
            AsmOperand::is_rotated_imm((int)(n->get_long())));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 12-bit - for addressing mode
operand immI12() %{
  predicate((-4096 < n->get_int()) && (n->get_int() < 4096));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 10-bit disp and disp+4 - for addressing float pair
operand immI10x2() %{
  predicate((-1024 < n->get_int()) && (n->get_int() < 1024 - 4));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 12-bit disp and disp+4 - for addressing word pair
operand immI12x2() %{
  predicate((-4096 < n->get_int()) && (n->get_int() < 4096 - 4));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
998 hotspot/src/cpu/arm/vm/arm_64.ad (new file)
@@ -0,0 +1,998 @@
//
// Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

// ARM Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def" name ( register save type, C convention save type,
//                  ideal register type, encoding, vm name );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
// FIXME: above comment seems wrong.  Spill done through MachSpillCopyNode
//
// The encoding number is the actual bit-pattern placed into the opcodes.


// ----------------------------
// Integer/Long Registers
// ----------------------------

// TODO: would be nice to keep track of high-word state:
// zeroRegI   --> RegL
// signedRegI --> RegL
// junkRegI   --> RegL
// how to tell C2 to treat RegI as RegL, or RegL as RegI?
reg_def R_R0  (SOC, SOC, Op_RegI,   0, R0->as_VMReg());
reg_def R_R0x (SOC, SOC, Op_RegI, 255, R0->as_VMReg()->next());
reg_def R_R1  (SOC, SOC, Op_RegI,   1, R1->as_VMReg());
reg_def R_R1x (SOC, SOC, Op_RegI, 255, R1->as_VMReg()->next());
reg_def R_R2  (SOC, SOC, Op_RegI,   2, R2->as_VMReg());
reg_def R_R2x (SOC, SOC, Op_RegI, 255, R2->as_VMReg()->next());
reg_def R_R3  (SOC, SOC, Op_RegI,   3, R3->as_VMReg());
reg_def R_R3x (SOC, SOC, Op_RegI, 255, R3->as_VMReg()->next());
reg_def R_R4  (SOC, SOC, Op_RegI,   4, R4->as_VMReg());
reg_def R_R4x (SOC, SOC, Op_RegI, 255, R4->as_VMReg()->next());
reg_def R_R5  (SOC, SOC, Op_RegI,   5, R5->as_VMReg());
reg_def R_R5x (SOC, SOC, Op_RegI, 255, R5->as_VMReg()->next());
reg_def R_R6  (SOC, SOC, Op_RegI,   6, R6->as_VMReg());
reg_def R_R6x (SOC, SOC, Op_RegI, 255, R6->as_VMReg()->next());
reg_def R_R7  (SOC, SOC, Op_RegI,   7, R7->as_VMReg());
reg_def R_R7x (SOC, SOC, Op_RegI, 255, R7->as_VMReg()->next());

reg_def R_R8  (SOC, SOC, Op_RegI,   8, R8->as_VMReg());
reg_def R_R8x (SOC, SOC, Op_RegI, 255, R8->as_VMReg()->next());
reg_def R_R9  (SOC, SOC, Op_RegI,   9, R9->as_VMReg());
reg_def R_R9x (SOC, SOC, Op_RegI, 255, R9->as_VMReg()->next());
reg_def R_R10 (SOC, SOC, Op_RegI,  10, R10->as_VMReg());
reg_def R_R10x(SOC, SOC, Op_RegI, 255, R10->as_VMReg()->next());
reg_def R_R11 (SOC, SOC, Op_RegI,  11, R11->as_VMReg());
reg_def R_R11x(SOC, SOC, Op_RegI, 255, R11->as_VMReg()->next());
reg_def R_R12 (SOC, SOC, Op_RegI,  12, R12->as_VMReg());
reg_def R_R12x(SOC, SOC, Op_RegI, 255, R12->as_VMReg()->next());
reg_def R_R13 (SOC, SOC, Op_RegI,  13, R13->as_VMReg());
reg_def R_R13x(SOC, SOC, Op_RegI, 255, R13->as_VMReg()->next());
reg_def R_R14 (SOC, SOC, Op_RegI,  14, R14->as_VMReg());
reg_def R_R14x(SOC, SOC, Op_RegI, 255, R14->as_VMReg()->next());
reg_def R_R15 (SOC, SOC, Op_RegI,  15, R15->as_VMReg());
reg_def R_R15x(SOC, SOC, Op_RegI, 255, R15->as_VMReg()->next());

reg_def R_R16 (SOC, SOC, Op_RegI,  16, R16->as_VMReg()); // IP0
reg_def R_R16x(SOC, SOC, Op_RegI, 255, R16->as_VMReg()->next());
reg_def R_R17 (SOC, SOC, Op_RegI,  17, R17->as_VMReg()); // IP1
reg_def R_R17x(SOC, SOC, Op_RegI, 255, R17->as_VMReg()->next());
reg_def R_R18 (SOC, SOC, Op_RegI,  18, R18->as_VMReg()); // Platform Register
reg_def R_R18x(SOC, SOC, Op_RegI, 255, R18->as_VMReg()->next());

reg_def R_R19 (SOC, SOE, Op_RegI,  19, R19->as_VMReg());
reg_def R_R19x(SOC, SOE, Op_RegI, 255, R19->as_VMReg()->next());
reg_def R_R20 (SOC, SOE, Op_RegI,  20, R20->as_VMReg());
reg_def R_R20x(SOC, SOE, Op_RegI, 255, R20->as_VMReg()->next());
reg_def R_R21 (SOC, SOE, Op_RegI,  21, R21->as_VMReg());
reg_def R_R21x(SOC, SOE, Op_RegI, 255, R21->as_VMReg()->next());
reg_def R_R22 (SOC, SOE, Op_RegI,  22, R22->as_VMReg());
reg_def R_R22x(SOC, SOE, Op_RegI, 255, R22->as_VMReg()->next());
reg_def R_R23 (SOC, SOE, Op_RegI,  23, R23->as_VMReg());
reg_def R_R23x(SOC, SOE, Op_RegI, 255, R23->as_VMReg()->next());
reg_def R_R24 (SOC, SOE, Op_RegI,  24, R24->as_VMReg());
reg_def R_R24x(SOC, SOE, Op_RegI, 255, R24->as_VMReg()->next());
reg_def R_R25 (SOC, SOE, Op_RegI,  25, R25->as_VMReg());
reg_def R_R25x(SOC, SOE, Op_RegI, 255, R25->as_VMReg()->next());
reg_def R_R26 (SOC, SOE, Op_RegI,  26, R26->as_VMReg());
reg_def R_R26x(SOC, SOE, Op_RegI, 255, R26->as_VMReg()->next());
reg_def R_R27 (SOC, SOE, Op_RegI,  27, R27->as_VMReg());         // Rheap_base
reg_def R_R27x(SOC, SOE, Op_RegI, 255, R27->as_VMReg()->next()); // Rheap_base
reg_def R_R28 ( NS, SOE, Op_RegI,  28, R28->as_VMReg());         // TLS
reg_def R_R28x( NS, SOE, Op_RegI, 255, R28->as_VMReg()->next()); // TLS

reg_def R_R29 ( NS, SOE, Op_RegI,  29, R29->as_VMReg());         // FP
reg_def R_R29x( NS, SOE, Op_RegI, 255, R29->as_VMReg()->next()); // FP
reg_def R_R30 (SOC, SOC, Op_RegI,  30, R30->as_VMReg());         // LR
reg_def R_R30x(SOC, SOC, Op_RegI, 255, R30->as_VMReg()->next()); // LR

reg_def R_ZR ( NS, NS, Op_RegI,  31, ZR->as_VMReg());         // ZR
reg_def R_ZRx( NS, NS, Op_RegI, 255, ZR->as_VMReg()->next()); // ZR

// FIXME
//reg_def R_SP ( NS, NS, Op_RegP, 32, SP->as_VMReg());
reg_def R_SP ( NS, NS, Op_RegI, 32, SP->as_VMReg());
//reg_def R_SPx( NS, NS, Op_RegP, 255, SP->as_VMReg()->next());
reg_def R_SPx( NS, NS, Op_RegI, 255, SP->as_VMReg()->next());

// ----------------------------
// Float/Double/Vector Registers
// ----------------------------

reg_def R_V0 (SOC, SOC, Op_RegF,  0, V0->as_VMReg());
reg_def R_V1 (SOC, SOC, Op_RegF,  1, V1->as_VMReg());
reg_def R_V2 (SOC, SOC, Op_RegF,  2, V2->as_VMReg());
reg_def R_V3 (SOC, SOC, Op_RegF,  3, V3->as_VMReg());
reg_def R_V4 (SOC, SOC, Op_RegF,  4, V4->as_VMReg());
reg_def R_V5 (SOC, SOC, Op_RegF,  5, V5->as_VMReg());
reg_def R_V6 (SOC, SOC, Op_RegF,  6, V6->as_VMReg());
reg_def R_V7 (SOC, SOC, Op_RegF,  7, V7->as_VMReg());
reg_def R_V8 (SOC, SOC, Op_RegF,  8, V8->as_VMReg());
reg_def R_V9 (SOC, SOC, Op_RegF,  9, V9->as_VMReg());
reg_def R_V10(SOC, SOC, Op_RegF, 10, V10->as_VMReg());
reg_def R_V11(SOC, SOC, Op_RegF, 11, V11->as_VMReg());
reg_def R_V12(SOC, SOC, Op_RegF, 12, V12->as_VMReg());
reg_def R_V13(SOC, SOC, Op_RegF, 13, V13->as_VMReg());
reg_def R_V14(SOC, SOC, Op_RegF, 14, V14->as_VMReg());
reg_def R_V15(SOC, SOC, Op_RegF, 15, V15->as_VMReg());
reg_def R_V16(SOC, SOC, Op_RegF, 16, V16->as_VMReg());
reg_def R_V17(SOC, SOC, Op_RegF, 17, V17->as_VMReg());
reg_def R_V18(SOC, SOC, Op_RegF, 18, V18->as_VMReg());
reg_def R_V19(SOC, SOC, Op_RegF, 19, V19->as_VMReg());
reg_def R_V20(SOC, SOC, Op_RegF, 20, V20->as_VMReg());
reg_def R_V21(SOC, SOC, Op_RegF, 21, V21->as_VMReg());
reg_def R_V22(SOC, SOC, Op_RegF, 22, V22->as_VMReg());
reg_def R_V23(SOC, SOC, Op_RegF, 23, V23->as_VMReg());
reg_def R_V24(SOC, SOC, Op_RegF, 24, V24->as_VMReg());
reg_def R_V25(SOC, SOC, Op_RegF, 25, V25->as_VMReg());
reg_def R_V26(SOC, SOC, Op_RegF, 26, V26->as_VMReg());
reg_def R_V27(SOC, SOC, Op_RegF, 27, V27->as_VMReg());
reg_def R_V28(SOC, SOC, Op_RegF, 28, V28->as_VMReg());
reg_def R_V29(SOC, SOC, Op_RegF, 29, V29->as_VMReg());
reg_def R_V30(SOC, SOC, Op_RegF, 30, V30->as_VMReg());
reg_def R_V31(SOC, SOC, Op_RegF, 31, V31->as_VMReg());

reg_def R_V0b (SOC, SOC, Op_RegF, 255, V0->as_VMReg()->next(1));
reg_def R_V1b (SOC, SOC, Op_RegF, 255, V1->as_VMReg()->next(1));
reg_def R_V2b (SOC, SOC, Op_RegF, 255, V2->as_VMReg()->next(1));
reg_def R_V3b (SOC, SOC, Op_RegF,   3, V3->as_VMReg()->next(1));
reg_def R_V4b (SOC, SOC, Op_RegF,   4, V4->as_VMReg()->next(1));
reg_def R_V5b (SOC, SOC, Op_RegF,   5, V5->as_VMReg()->next(1));
reg_def R_V6b (SOC, SOC, Op_RegF,   6, V6->as_VMReg()->next(1));
reg_def R_V7b (SOC, SOC, Op_RegF,   7, V7->as_VMReg()->next(1));
reg_def R_V8b (SOC, SOC, Op_RegF, 255, V8->as_VMReg()->next(1));
reg_def R_V9b (SOC, SOC, Op_RegF,   9, V9->as_VMReg()->next(1));
reg_def R_V10b(SOC, SOC, Op_RegF,  10, V10->as_VMReg()->next(1));
reg_def R_V11b(SOC, SOC, Op_RegF,  11, V11->as_VMReg()->next(1));
reg_def R_V12b(SOC, SOC, Op_RegF,  12, V12->as_VMReg()->next(1));
reg_def R_V13b(SOC, SOC, Op_RegF,  13, V13->as_VMReg()->next(1));
reg_def R_V14b(SOC, SOC, Op_RegF,  14, V14->as_VMReg()->next(1));
reg_def R_V15b(SOC, SOC, Op_RegF,  15, V15->as_VMReg()->next(1));
reg_def R_V16b(SOC, SOC, Op_RegF,  16, V16->as_VMReg()->next(1));
reg_def R_V17b(SOC, SOC, Op_RegF,  17, V17->as_VMReg()->next(1));
reg_def R_V18b(SOC, SOC, Op_RegF,  18, V18->as_VMReg()->next(1));
reg_def R_V19b(SOC, SOC, Op_RegF,  19, V19->as_VMReg()->next(1));
reg_def R_V20b(SOC, SOC, Op_RegF,  20, V20->as_VMReg()->next(1));
reg_def R_V21b(SOC, SOC, Op_RegF,  21, V21->as_VMReg()->next(1));
reg_def R_V22b(SOC, SOC, Op_RegF,  22, V22->as_VMReg()->next(1));
reg_def R_V23b(SOC, SOC, Op_RegF,  23, V23->as_VMReg()->next(1));
reg_def R_V24b(SOC, SOC, Op_RegF,  24, V24->as_VMReg()->next(1));
reg_def R_V25b(SOC, SOC, Op_RegF,  25, V25->as_VMReg()->next(1));
reg_def R_V26b(SOC, SOC, Op_RegF,  26, V26->as_VMReg()->next(1));
reg_def R_V27b(SOC, SOC, Op_RegF,  27, V27->as_VMReg()->next(1));
reg_def R_V28b(SOC, SOC, Op_RegF,  28, V28->as_VMReg()->next(1));
reg_def R_V29b(SOC, SOC, Op_RegF,  29, V29->as_VMReg()->next(1));
reg_def R_V30b(SOC, SOC, Op_RegD,  30, V30->as_VMReg()->next(1));
reg_def R_V31b(SOC, SOC, Op_RegF,  31, V31->as_VMReg()->next(1));

reg_def R_V0c (SOC, SOC, Op_RegF,   0, V0->as_VMReg()->next(2));
reg_def R_V1c (SOC, SOC, Op_RegF,   1, V1->as_VMReg()->next(2));
reg_def R_V2c (SOC, SOC, Op_RegF,   2, V2->as_VMReg()->next(2));
reg_def R_V3c (SOC, SOC, Op_RegF,   3, V3->as_VMReg()->next(2));
reg_def R_V4c (SOC, SOC, Op_RegF,   4, V4->as_VMReg()->next(2));
reg_def R_V5c (SOC, SOC, Op_RegF,   5, V5->as_VMReg()->next(2));
reg_def R_V6c (SOC, SOC, Op_RegF,   6, V6->as_VMReg()->next(2));
reg_def R_V7c (SOC, SOC, Op_RegF,   7, V7->as_VMReg()->next(2));
reg_def R_V8c (SOC, SOC, Op_RegF,   8, V8->as_VMReg()->next(2));
reg_def R_V9c (SOC, SOC, Op_RegF,   9, V9->as_VMReg()->next(2));
reg_def R_V10c(SOC, SOC, Op_RegF,  10, V10->as_VMReg()->next(2));
reg_def R_V11c(SOC, SOC, Op_RegF,  11, V11->as_VMReg()->next(2));
reg_def R_V12c(SOC, SOC, Op_RegF,  12, V12->as_VMReg()->next(2));
reg_def R_V13c(SOC, SOC, Op_RegF,  13, V13->as_VMReg()->next(2));
reg_def R_V14c(SOC, SOC, Op_RegF,  14, V14->as_VMReg()->next(2));
reg_def R_V15c(SOC, SOC, Op_RegF,  15, V15->as_VMReg()->next(2));
reg_def R_V16c(SOC, SOC, Op_RegF,  16, V16->as_VMReg()->next(2));
reg_def R_V17c(SOC, SOC, Op_RegF,  17, V17->as_VMReg()->next(2));
reg_def R_V18c(SOC, SOC, Op_RegF,  18, V18->as_VMReg()->next(2));
reg_def R_V19c(SOC, SOC, Op_RegF,  19, V19->as_VMReg()->next(2));
reg_def R_V20c(SOC, SOC, Op_RegF,  20, V20->as_VMReg()->next(2));
reg_def R_V21c(SOC, SOC, Op_RegF,  21, V21->as_VMReg()->next(2));
reg_def R_V22c(SOC, SOC, Op_RegF,  22, V22->as_VMReg()->next(2));
reg_def R_V23c(SOC, SOC, Op_RegF,  23, V23->as_VMReg()->next(2));
reg_def R_V24c(SOC, SOC, Op_RegF,  24, V24->as_VMReg()->next(2));
reg_def R_V25c(SOC, SOC, Op_RegF,  25, V25->as_VMReg()->next(2));
reg_def R_V26c(SOC, SOC, Op_RegF,  26, V26->as_VMReg()->next(2));
reg_def R_V27c(SOC, SOC, Op_RegF,  27, V27->as_VMReg()->next(2));
reg_def R_V28c(SOC, SOC, Op_RegF,  28, V28->as_VMReg()->next(2));
reg_def R_V29c(SOC, SOC, Op_RegF,  29, V29->as_VMReg()->next(2));
reg_def R_V30c(SOC, SOC, Op_RegF,  30, V30->as_VMReg()->next(2));
reg_def R_V31c(SOC, SOC, Op_RegF,  31, V31->as_VMReg()->next(2));

reg_def R_V0d (SOC, SOC, Op_RegF,   0, V0->as_VMReg()->next(3));
reg_def R_V1d (SOC, SOC, Op_RegF,   1, V1->as_VMReg()->next(3));
reg_def R_V2d (SOC, SOC, Op_RegF,   2, V2->as_VMReg()->next(3));
reg_def R_V3d (SOC, SOC, Op_RegF,   3, V3->as_VMReg()->next(3));
reg_def R_V4d (SOC, SOC, Op_RegF,   4, V4->as_VMReg()->next(3));
reg_def R_V5d (SOC, SOC, Op_RegF,   5, V5->as_VMReg()->next(3));
reg_def R_V6d (SOC, SOC, Op_RegF,   6, V6->as_VMReg()->next(3));
reg_def R_V7d (SOC, SOC, Op_RegF,   7, V7->as_VMReg()->next(3));
reg_def R_V8d (SOC, SOC, Op_RegF,   8, V8->as_VMReg()->next(3));
reg_def R_V9d (SOC, SOC, Op_RegF,   9, V9->as_VMReg()->next(3));
reg_def R_V10d(SOC, SOC, Op_RegF,  10, V10->as_VMReg()->next(3));
reg_def R_V11d(SOC, SOC, Op_RegF,  11, V11->as_VMReg()->next(3));
reg_def R_V12d(SOC, SOC, Op_RegF,  12, V12->as_VMReg()->next(3));
reg_def R_V13d(SOC, SOC, Op_RegF,  13, V13->as_VMReg()->next(3));
reg_def R_V14d(SOC, SOC, Op_RegF,  14, V14->as_VMReg()->next(3));
reg_def R_V15d(SOC, SOC, Op_RegF,  15, V15->as_VMReg()->next(3));
reg_def R_V16d(SOC, SOC, Op_RegF,  16, V16->as_VMReg()->next(3));
reg_def R_V17d(SOC, SOC, Op_RegF,  17, V17->as_VMReg()->next(3));
reg_def R_V18d(SOC, SOC, Op_RegF,  18, V18->as_VMReg()->next(3));
reg_def R_V19d(SOC, SOC, Op_RegF,  19, V19->as_VMReg()->next(3));
reg_def R_V20d(SOC, SOC, Op_RegF,  20, V20->as_VMReg()->next(3));
reg_def R_V21d(SOC, SOC, Op_RegF,  21, V21->as_VMReg()->next(3));
reg_def R_V22d(SOC, SOC, Op_RegF,  22, V22->as_VMReg()->next(3));
reg_def R_V23d(SOC, SOC, Op_RegF,  23, V23->as_VMReg()->next(3));
reg_def R_V24d(SOC, SOC, Op_RegF,  24, V24->as_VMReg()->next(3));
reg_def R_V25d(SOC, SOC, Op_RegF,  25, V25->as_VMReg()->next(3));
reg_def R_V26d(SOC, SOC, Op_RegF,  26, V26->as_VMReg()->next(3));
reg_def R_V27d(SOC, SOC, Op_RegF,  27, V27->as_VMReg()->next(3));
|
||||
reg_def R_V28d(SOC, SOC, Op_RegF, 28, V28->as_VMReg()->next(3));
|
||||
reg_def R_V29d(SOC, SOC, Op_RegF, 29, V29->as_VMReg()->next(3));
|
||||
reg_def R_V30d(SOC, SOC, Op_RegF, 30, V30->as_VMReg()->next(3));
|
||||
reg_def R_V31d(SOC, SOC, Op_RegF, 31, V31->as_VMReg()->next(3));
|
||||
|
||||
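// Illustrative note (not part of the committed file): an ADLC reg_def takes
// (call-save status, C-call-save status, ideal type, encoding, VMReg), where
// SOC means save-on-call (caller-saved). The b/c/d variants above name the
// second, third and fourth 32-bit slices of each 128-bit SIMD register via
// as_VMReg()->next(1..3), which is what lets a quad vector be allocated as
// four adjacent OptoRegs.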
// ----------------------------
// Special Registers
// Condition Codes Flag Registers
reg_def APSR (SOC, SOC, Op_RegFlags, 255, VMRegImpl::Bad());
reg_def FPSCR(SOC, SOC, Op_RegFlags, 255, VMRegImpl::Bad());

// ----------------------------
// Specify the enum values for the registers. These enums are only used by the
// OptoReg "class". We can convert these enum values at will to VMReg when needed
// for visibility to the rest of the vm. The order of this enum influences the
// register allocator so having the freedom to set this order and not be stuck
// with the order that is natural for the rest of the vm is worth it.

// Quad vector must be aligned here, so list them first.
alloc_class fprs(
    R_V8, R_V8b, R_V8c, R_V8d, R_V9, R_V9b, R_V9c, R_V9d,
    R_V10, R_V10b, R_V10c, R_V10d, R_V11, R_V11b, R_V11c, R_V11d,
    R_V12, R_V12b, R_V12c, R_V12d, R_V13, R_V13b, R_V13c, R_V13d,
    R_V14, R_V14b, R_V14c, R_V14d, R_V15, R_V15b, R_V15c, R_V15d,
    R_V16, R_V16b, R_V16c, R_V16d, R_V17, R_V17b, R_V17c, R_V17d,
    R_V18, R_V18b, R_V18c, R_V18d, R_V19, R_V19b, R_V19c, R_V19d,
    R_V20, R_V20b, R_V20c, R_V20d, R_V21, R_V21b, R_V21c, R_V21d,
    R_V22, R_V22b, R_V22c, R_V22d, R_V23, R_V23b, R_V23c, R_V23d,
    R_V24, R_V24b, R_V24c, R_V24d, R_V25, R_V25b, R_V25c, R_V25d,
    R_V26, R_V26b, R_V26c, R_V26d, R_V27, R_V27b, R_V27c, R_V27d,
    R_V28, R_V28b, R_V28c, R_V28d, R_V29, R_V29b, R_V29c, R_V29d,
    R_V30, R_V30b, R_V30c, R_V30d, R_V31, R_V31b, R_V31c, R_V31d,
    R_V0, R_V0b, R_V0c, R_V0d, R_V1, R_V1b, R_V1c, R_V1d,
    R_V2, R_V2b, R_V2c, R_V2d, R_V3, R_V3b, R_V3c, R_V3d,
    R_V4, R_V4b, R_V4c, R_V4d, R_V5, R_V5b, R_V5c, R_V5d,
    R_V6, R_V6b, R_V6c, R_V6d, R_V7, R_V7b, R_V7c, R_V7d
);

// Need double-register alignment here.
// We are already quad-register aligned because of vectors above.
alloc_class gprs(
    R_R0, R_R0x, R_R1, R_R1x, R_R2, R_R2x, R_R3, R_R3x,
    R_R4, R_R4x, R_R5, R_R5x, R_R6, R_R6x, R_R7, R_R7x,
    R_R8, R_R8x, R_R9, R_R9x, R_R10, R_R10x, R_R11, R_R11x,
    R_R12, R_R12x, R_R13, R_R13x, R_R14, R_R14x, R_R15, R_R15x,
    R_R16, R_R16x, R_R17, R_R17x, R_R18, R_R18x, R_R19, R_R19x,
    R_R20, R_R20x, R_R21, R_R21x, R_R22, R_R22x, R_R23, R_R23x,
    R_R24, R_R24x, R_R25, R_R25x, R_R26, R_R26x, R_R27, R_R27x,
    R_R28, R_R28x, R_R29, R_R29x, R_R30, R_R30x
);
// Continuing with double-register alignment...
alloc_class chunk2(APSR, FPSCR);
alloc_class chunk3(R_SP, R_SPx);
alloc_class chunk4(R_ZR, R_ZRx);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg ( as defined in frame section )
// 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// ----------------------------
// Integer Register Classes
// ----------------------------
reg_class int_reg_all(R_R0, R_R1, R_R2, R_R3, R_R4, R_R5, R_R6, R_R7,
                      R_R8, R_R9, R_R10, R_R11, R_R12, R_R13, R_R14, R_R15,
                      R_R16, R_R17, R_R18, R_R19, R_R20, R_R21, R_R22, R_R23,
                      R_R24, R_R25, R_R26, R_R27, R_R28, R_R29, R_R30
);

// Exclusions from i_reg:
// SP (R31)
// Rthread/R28: reserved by HotSpot to the TLS register (invariant within Java)
reg_class int_reg %{
  return _INT_REG_mask;
%}
reg_class ptr_reg %{
  return _PTR_REG_mask;
%}
reg_class vectorx_reg %{
  return _VECTORX_REG_mask;
%}

reg_class R0_regI(R_R0);
reg_class R1_regI(R_R1);
reg_class R2_regI(R_R2);
reg_class R3_regI(R_R3);
//reg_class R12_regI(R_R12);

// ----------------------------
// Pointer Register Classes
// ----------------------------

// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.

reg_class sp_ptr_reg %{
  return _SP_PTR_REG_mask;
%}

reg_class store_reg %{
  return _STR_REG_mask;
%}

reg_class store_ptr_reg %{
  return _STR_PTR_REG_mask;
%}

reg_class spillP_reg %{
  return _SPILLP_REG_mask;
%}

// Other special pointer regs
reg_class R0_regP(R_R0, R_R0x);
reg_class R1_regP(R_R1, R_R1x);
reg_class R2_regP(R_R2, R_R2x);
reg_class Rexception_regP(R_R19, R_R19x);
reg_class Ricklass_regP(R_R8, R_R8x);
reg_class Rmethod_regP(R_R27, R_R27x);

reg_class Rthread_regP(R_R28, R_R28x);
reg_class IP_regP(R_R16, R_R16x);
#define RtempRegP IPRegP
reg_class LR_regP(R_R30, R_R30x);

reg_class SP_regP(R_SP, R_SPx);
reg_class FP_regP(R_R29, R_R29x);

reg_class ZR_regP(R_ZR, R_ZRx);
reg_class ZR_regI(R_ZR);

// ----------------------------
// Long Register Classes
// ----------------------------
reg_class long_reg %{ return _PTR_REG_mask; %}
// for ldrexd, strexd: first reg of pair must be even
reg_class long_reg_align %{ return LONG_REG_mask(); %}

reg_class R0_regL(R_R0,R_R0x); // arg 1 or return value

// ----------------------------
// Special Class for Condition Code Flags Register
reg_class int_flags(APSR);
reg_class float_flags(FPSCR);


// ----------------------------
// Floating Point Register Classes
// ----------------------------
reg_class sflt_reg_0(
  R_V0, R_V1, R_V2, R_V3, R_V4, R_V5, R_V6, R_V7,
  R_V8, R_V9, R_V10, R_V11, R_V12, R_V13, R_V14, R_V15,
  R_V16, R_V17, R_V18, R_V19, R_V20, R_V21, R_V22, R_V23,
  R_V24, R_V25, R_V26, R_V27, R_V28, R_V29, R_V30, R_V31);

reg_class sflt_reg %{
  return _SFLT_REG_mask;
%}

reg_class dflt_low_reg %{
  return _DFLT_REG_mask;
%}

reg_class actual_dflt_reg %{
  return _DFLT_REG_mask;
%}

reg_class vectorx_reg_0(
  R_V0, R_V1, R_V2, R_V3, R_V4, R_V5, R_V6, R_V7,
  R_V8, R_V9, R_V10, R_V11, R_V12, R_V13, R_V14, R_V15,
  R_V16, R_V17, R_V18, R_V19, R_V20, R_V21, R_V22, R_V23,
  R_V24, R_V25, R_V26, R_V27, R_V28, R_V29, R_V30, /*R_V31,*/
  R_V0b, R_V1b, R_V2b, R_V3b, R_V4b, R_V5b, R_V6b, R_V7b,
  R_V8b, R_V9b, R_V10b, R_V11b, R_V12b, R_V13b, R_V14b, R_V15b,
  R_V16b, R_V17b, R_V18b, R_V19b, R_V20b, R_V21b, R_V22b, R_V23b,
  R_V24b, R_V25b, R_V26b, R_V27b, R_V28b, R_V29b, R_V30b, /*R_V31b,*/
  R_V0c, R_V1c, R_V2c, R_V3c, R_V4c, R_V5c, R_V6c, R_V7c,
  R_V8c, R_V9c, R_V10c, R_V11c, R_V12c, R_V13c, R_V14c, R_V15c,
  R_V16c, R_V17c, R_V18c, R_V19c, R_V20c, R_V21c, R_V22c, R_V23c,
  R_V24c, R_V25c, R_V26c, R_V27c, R_V28c, R_V29c, R_V30c, /*R_V31c,*/
  R_V0d, R_V1d, R_V2d, R_V3d, R_V4d, R_V5d, R_V6d, R_V7d,
  R_V8d, R_V9d, R_V10d, R_V11d, R_V12d, R_V13d, R_V14d, R_V15d,
  R_V16d, R_V17d, R_V18d, R_V19d, R_V20d, R_V21d, R_V22d, R_V23d,
  R_V24d, R_V25d, R_V26d, R_V27d, R_V28d, R_V29d, R_V30d, /*R_V31d*/);

reg_class Rmemcopy_reg %{
  return _RMEMCOPY_REG_mask;
%}

%}

source_hpp %{

const MachRegisterNumbers R_mem_copy_lo_num = R_V31_num;
const MachRegisterNumbers R_mem_copy_hi_num = R_V31b_num;
const FloatRegister Rmemcopy = V31;

const MachRegisterNumbers R_hf_ret_lo_num = R_V0_num;
const MachRegisterNumbers R_hf_ret_hi_num = R_V0b_num;
const FloatRegister Rhfret = V0;

extern OptoReg::Name R_Ricklass_num;
extern OptoReg::Name R_Rmethod_num;
extern OptoReg::Name R_tls_num;
extern OptoReg::Name R_Rheap_base_num;

extern RegMask _INT_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _SFLT_REG_mask;
extern RegMask _DFLT_REG_mask;
extern RegMask _VECTORX_REG_mask;
extern RegMask _RMEMCOPY_REG_mask;
extern RegMask _SP_PTR_REG_mask;
extern RegMask _SPILLP_REG_mask;
extern RegMask _STR_REG_mask;
extern RegMask _STR_PTR_REG_mask;

#define LDR_DOUBLE "LDR_D"
#define LDR_FLOAT "LDR_S"
#define STR_DOUBLE "STR_D"
#define STR_FLOAT "STR_S"
#define STR_64 "STR"
#define LDR_64 "LDR"
#define STR_32 "STR_W"
#define LDR_32 "LDR_W"
#define MOV_DOUBLE "FMOV_D"
#define MOV_FLOAT "FMOV_S"
#define FMSR "FMOV_SW"
#define FMRS "FMOV_WS"
#define LDREX "ldxr "
#define STREX "stxr "

#define str_64 str
#define ldr_64 ldr
#define ldr_32 ldr_w
#define ldrex ldxr
#define strex stxr

#define fmsr fmov_sw
#define fmrs fmov_ws
#define fconsts fmov_s
#define fconstd fmov_d

static inline bool is_uimm12(jlong imm, int shift) {
  return Assembler::is_unsigned_imm_in_range(imm, 12, shift);
}

static inline bool is_memoryD(int offset) {
  int scale = 3; // LogBytesPerDouble
  return is_uimm12(offset, scale);
}

static inline bool is_memoryfp(int offset) {
  int scale = LogBytesPerInt; // include 32-bit word accesses
  return is_uimm12(offset, scale);
}

static inline bool is_memoryI(int offset) {
  int scale = LogBytesPerInt;
  return is_uimm12(offset, scale);
}

static inline bool is_memoryP(int offset) {
  int scale = LogBytesPerWord;
  return is_uimm12(offset, scale);
}

static inline bool is_memoryHD(int offset) {
  int scale = LogBytesPerInt; // include 32-bit word accesses
  return is_uimm12(offset, scale);
}
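// Illustrative example (not part of the committed file): the is_memory*
// helpers above mirror the AArch64 LDR/STR unsigned-offset form, a 12-bit
// immediate scaled by the access size. For instance is_memoryD(0x7ff8)
// holds because 0x7ff8 >> 3 == 0xfff still fits in 12 bits, while
// is_memoryD(0x8000) fails because 0x8000 >> 3 == 0x1000 needs 13 bits.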

uintx limmL_low(uintx imm, int n);

static inline bool Xis_aimm(int imm) {
  return Assembler::ArithmeticImmediate(imm).is_encoded();
}

static inline bool is_aimm(intptr_t imm) {
  return Assembler::ArithmeticImmediate(imm).is_encoded();
}

static inline bool is_limmL(uintptr_t imm) {
  return Assembler::LogicalImmediate(imm).is_encoded();
}

static inline bool is_limmL_low(intptr_t imm, int n) {
  return is_limmL(limmL_low(imm, n));
}

static inline bool is_limmI(jint imm) {
  return Assembler::LogicalImmediate(imm, true).is_encoded();
}

static inline uintx limmI_low(jint imm, int n) {
  return limmL_low(imm, n);
}

static inline bool is_limmI_low(jint imm, int n) {
  return is_limmL_low(imm, n);
}

%}

source %{

// Given a register encoding, produce an Integer Register object
static Register reg_to_register_object(int register_encoding) {
  assert(R0->encoding() == R_R0_enc && R30->encoding() == R_R30_enc, "right coding");
  assert(Rthread->encoding() == R_R28_enc, "right coding");
  assert(SP->encoding() == R_SP_enc, "right coding");
  return as_Register(register_encoding);
}

// Given a register encoding, produce a single-precision Float Register object
static FloatRegister reg_to_FloatRegister_object(int register_encoding) {
  assert(V0->encoding() == R_V0_enc && V31->encoding() == R_V31_enc, "right coding");
  return as_FloatRegister(register_encoding);
}

RegMask _INT_REG_mask;
RegMask _PTR_REG_mask;
RegMask _SFLT_REG_mask;
RegMask _DFLT_REG_mask;
RegMask _VECTORX_REG_mask;
RegMask _RMEMCOPY_REG_mask;
RegMask _SP_PTR_REG_mask;
RegMask _SPILLP_REG_mask;
RegMask _STR_REG_mask;
RegMask _STR_PTR_REG_mask;

OptoReg::Name R_Ricklass_num = -1;
OptoReg::Name R_Rmethod_num = -1;
OptoReg::Name R_tls_num = -1;
OptoReg::Name R_Rtemp_num = -1;
OptoReg::Name R_Rheap_base_num = -1;

static int mov_oop_size = -1;

#ifdef ASSERT
static bool same_mask(const RegMask &a, const RegMask &b) {
  RegMask a_sub_b = a; a_sub_b.SUBTRACT(b);
  RegMask b_sub_a = b; b_sub_a.SUBTRACT(a);
  return a_sub_b.Size() == 0 && b_sub_a.Size() == 0;
}
#endif

void Compile::pd_compiler2_init() {

  R_Ricklass_num = OptoReg::as_OptoReg(Ricklass->as_VMReg());
  R_Rmethod_num = OptoReg::as_OptoReg(Rmethod->as_VMReg());
  R_tls_num = OptoReg::as_OptoReg(Rthread->as_VMReg());
  R_Rtemp_num = OptoReg::as_OptoReg(Rtemp->as_VMReg());
  R_Rheap_base_num = OptoReg::as_OptoReg(Rheap_base->as_VMReg());

  _INT_REG_mask = _INT_REG_ALL_mask;
  _INT_REG_mask.Remove(R_tls_num);
  _INT_REG_mask.Remove(R_SP_num);
  if (UseCompressedOops) {
    _INT_REG_mask.Remove(R_Rheap_base_num);
  }
  // Remove Rtemp because safepoint poll can trash it
  // (see SharedRuntime::generate_handler_blob)
  _INT_REG_mask.Remove(R_Rtemp_num);

  _PTR_REG_mask = _INT_REG_mask;
  _PTR_REG_mask.smear_to_sets(2);

  // STR_REG = INT_REG+ZR
  // SPILLP_REG = INT_REG+SP
  // SP_PTR_REG = INT_REG+SP+TLS
  _STR_REG_mask = _INT_REG_mask;
  _SP_PTR_REG_mask = _STR_REG_mask;
  _STR_REG_mask.Insert(R_ZR_num);
  _SP_PTR_REG_mask.Insert(R_SP_num);
  _SPILLP_REG_mask = _SP_PTR_REG_mask;
  _SP_PTR_REG_mask.Insert(R_tls_num);
  _STR_PTR_REG_mask = _STR_REG_mask;
  _STR_PTR_REG_mask.smear_to_sets(2);
  _SP_PTR_REG_mask.smear_to_sets(2);
  _SPILLP_REG_mask.smear_to_sets(2);

  _RMEMCOPY_REG_mask = RegMask(R_mem_copy_lo_num);
  assert(OptoReg::as_OptoReg(Rmemcopy->as_VMReg()) == R_mem_copy_lo_num, "!");

  _SFLT_REG_mask = _SFLT_REG_0_mask;
  _SFLT_REG_mask.SUBTRACT(_RMEMCOPY_REG_mask);
  _DFLT_REG_mask = _SFLT_REG_mask;
  _DFLT_REG_mask.smear_to_sets(2);
  _VECTORX_REG_mask = _SFLT_REG_mask;
  _VECTORX_REG_mask.smear_to_sets(4);
  assert(same_mask(_VECTORX_REG_mask, _VECTORX_REG_0_mask), "!");

#ifdef ASSERT
  RegMask r((RegMask *)&SFLT_REG_mask());
  r.smear_to_sets(2);
  assert(same_mask(r, _DFLT_REG_mask), "!");
#endif

  if (VM_Version::prefer_moves_over_load_literal()) {
    mov_oop_size = 4;
  } else {
    mov_oop_size = 1;
  }

  assert(Matcher::interpreter_method_oop_reg_encode() == Rmethod->encoding(), "should be");
}
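// Illustrative note (not part of the committed file): the smear_to_sets(n)
// calls above coarsen a mask to aligned groups of n adjacent OptoRegs, so
// _PTR_REG_mask, for example, only ever offers the allocator complete lo/hi
// pairs for 64-bit pointer values, and _VECTORX_REG_mask only aligned quads.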

uintx limmL_low(uintx imm, int n) {
  // 1: try as is
  if (is_limmL(imm)) {
    return imm;
  }
  // 2: try low bits + all 0's
  uintx imm0 = imm & right_n_bits(n);
  if (is_limmL(imm0)) {
    return imm0;
  }
  // 3: try low bits + all 1's
  uintx imm1 = imm0 | left_n_bits(BitsPerWord - n);
  if (is_limmL(imm1)) {
    return imm1;
  }
#if 0
  // 4: try low bits replicated
  int field = 1 << log2_intptr(n + n - 1);
  assert(field >= n, "!");
  assert(field / n == 1, "!");
  intptr_t immr = imm;
  while (field < BitsPerWord) {
    intptr_t bits = immr & right_n_bits(field);
    immr = bits | (bits << field);
    field = field << 1;
  }
  // replicate at power-of-2 boundary
  if (is_limmL(immr)) {
    return immr;
  }
#endif
  return imm;
}
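// Illustrative example (not part of the committed file): for
// imm = 0x1234ffffffff and n = 32, step 2 masks off the high bits and
// yields imm0 = 0xffffffff, a valid AArch64 logical immediate (32
// contiguous ones), so the caller can materialize the low half with one
// logical instruction and patch in the rest separately.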

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rbase = (base == 0xff) ? SP : as_Register(base);
  if (index != 0xff) {
    Register rindex = as_Register(index);
    if (disp == 0x7fffffff) { // special value to indicate sign-extend
      Address madr(rbase, rindex, ex_sxtw, scale);
      madr._rspec = rspec;
      return madr;
    } else {
      assert(disp == 0, "unsupported");
      Address madr(rbase, rindex, ex_lsl, scale);
      madr._rspec = rspec;
      return madr;
    }
  } else {
    assert(scale == 0, "not supported");
    Address madr(rbase, disp);
    madr._rspec = rspec;
    return madr;
  }
}

// Location of compiled Java return values. Same as C
OptoRegPair c2::return_value(int ideal_reg) {
  assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
  static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, R_R0_num, R_R0_num, R_hf_ret_lo_num, R_hf_ret_lo_num, R_R0_num };
  static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, R_R0x_num, OptoReg::Bad, R_hf_ret_hi_num, R_R0x_num };
  return OptoRegPair( hi[ideal_reg], lo[ideal_reg]);
}

// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.

int MachCallStaticJavaNode::ret_addr_offset() {
  bool far = (_method == NULL) ? maybe_far_call(this) : !cache_reachable();
  bool patchable = _method != NULL;
  int call_size = MacroAssembler::call_size(entry_point(), far, patchable);
  return (call_size + (_method_handle_invoke ? 1 : 0)) * NativeInstruction::instruction_size;
}

int MachCallDynamicJavaNode::ret_addr_offset() {
  bool far = !cache_reachable();
  int call_size = MacroAssembler::call_size(entry_point(), far, true);
  return (mov_oop_size + call_size) * NativeInstruction::instruction_size;
}

int MachCallRuntimeNode::ret_addr_offset() {
  int call_size = 0;
  // TODO: check if Leaf nodes also need this
  if (!is_MachCallLeaf()) {
    // adr $temp, ret_addr
    // str $temp, [SP + last_java_pc]
    call_size += 2;
  }
  // bl or mov_slow; blr
  bool far = maybe_far_call(this);
  call_size += MacroAssembler::call_size(entry_point(), far, false);
  return call_size * NativeInstruction::instruction_size;
}

%}

// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
#define immX immL
#define iRegX iRegL
#define aimmX aimmL
#define limmX limmL
#define immX9 immL9
#define LShiftX LShiftL
#define shimmX immU6

#define store_RegLd store_RegL

//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1); // Required cost attribute

//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------
// Immediate Operands

// Integer Immediate: 9-bit (including sign bit), so same as immI8?
// FIXME: simm9 allows -256, but immI8 doesn't...
operand simm9() %{
  predicate(Assembler::is_imm_in_range(n->get_int(), 9, 0));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}


operand uimm12() %{
  predicate(Assembler::is_unsigned_imm_in_range(n->get_int(), 12, 0));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand aimmP() %{
  predicate(n->get_ptr() == 0 || (is_aimm(n->get_ptr()) && ((ConPNode*)n)->type()->reloc() == relocInfo::none));
  match(ConP);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 12-bit - for addressing mode
operand immL12() %{
  predicate((-4096 < n->get_long()) && (n->get_long() < 4096));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 9-bit - for addressing mode
operand immL9() %{
  predicate((-256 <= n->get_long()) && (n->get_long() < 256));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immIMov() %{
  predicate(n->get_int() >> 16 == 0);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
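// Illustrative note (not part of the committed file): immIMov/immLMov accept
// exactly the constants with all upper bits zero (value >> 16 == 0), i.e.
// 0x0000..0xffff, which a single 16-bit move-wide (e.g. MOVZ) materializes;
// negative values fail the predicate because the arithmetic shift keeps the
// sign bits set.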

operand immLMov() %{
  predicate(n->get_long() >> 16 == 0);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immUL12() %{
  predicate(is_uimm12(n->get_long(), 0));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immUL12x2() %{
  predicate(is_uimm12(n->get_long(), 1));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immUL12x4() %{
  predicate(is_uimm12(n->get_long(), 2));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immUL12x8() %{
  predicate(is_uimm12(n->get_long(), 3));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immUL12x16() %{
  predicate(is_uimm12(n->get_long(), 4));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Used for long shift
operand immU6() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 63));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Used for register extended shift
operand immI_0_4() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 4));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Compressed Pointer Register
operand iRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);
  match(ZRRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand SPRegP() %{
  constraint(ALLOC_IN_RC(SP_regP));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand ZRRegP() %{
  constraint(ALLOC_IN_RC(ZR_regP));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand ZRRegL() %{
  constraint(ALLOC_IN_RC(ZR_regP));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

operand ZRRegI() %{
  constraint(ALLOC_IN_RC(ZR_regI));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

operand ZRRegN() %{
  constraint(ALLOC_IN_RC(ZR_regI));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}
57
hotspot/src/cpu/arm/vm/assembler_arm.cpp
Normal file
@ -0,0 +1,57 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvm_misc.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

int AbstractAssembler::code_fill_byte() {
  return 0xff; // illegal instruction 0xffffffff
}

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif
404
hotspot/src/cpu/arm/vm/assembler_arm.hpp
Normal file
@ -0,0 +1,404 @@
/*
 * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_ASSEMBLER_ARM_HPP
#define CPU_ARM_VM_ASSEMBLER_ARM_HPP

#include "utilities/macros.hpp"

enum AsmCondition {
  eq, ne, cs, cc, mi, pl, vs, vc,
  hi, ls, ge, lt, gt, le, al, nv,
  number_of_conditions,
  // alternative names
  hs = cs,
  lo = cc
};

enum AsmShift {
  lsl, lsr, asr, ror
};

#ifdef AARCH64
enum AsmExtendOp {
  ex_uxtb, ex_uxth, ex_uxtw, ex_uxtx,
  ex_sxtb, ex_sxth, ex_sxtw, ex_sxtx,

  ex_lsl = ex_uxtx
};
#endif

enum AsmOffset {
#ifdef AARCH64
  basic_offset = 0b00,
  pre_indexed  = 0b11,
  post_indexed = 0b01
#else
  basic_offset = 1 << 24,
  pre_indexed  = 1 << 24 | 1 << 21,
  post_indexed = 0
#endif
};


#ifndef AARCH64
enum AsmWriteback {
  no_writeback,
  writeback
};

enum AsmOffsetOp {
  sub_offset = 0,
  add_offset = 1
};
#endif


// ARM Addressing Modes 2 and 3 - Load and store
class Address VALUE_OBJ_CLASS_SPEC {
 private:
  Register _base;
  Register _index;
  int _disp;
  AsmOffset _mode;
  RelocationHolder _rspec;
  int _shift_imm;
#ifdef AARCH64
  AsmExtendOp _extend;
#else
  AsmShift _shift;
  AsmOffsetOp _offset_op;

  static inline int abs(int x) { return x < 0 ? -x : x; }
  static inline int up (int x) { return x < 0 ? 0 : 1; }
#endif

#ifdef AARCH64
  static const AsmExtendOp LSL = ex_lsl;
#else
  static const AsmShift LSL = lsl;
#endif

 public:
  Address() : _base(noreg) {}

  Address(Register rn, int offset = 0, AsmOffset mode = basic_offset) {
    _base = rn;
    _index = noreg;
    _disp = offset;
    _mode = mode;
    _shift_imm = 0;
#ifdef AARCH64
    _extend = ex_lsl;
#else
    _shift = lsl;
    _offset_op = add_offset;
#endif
  }

#ifdef ASSERT
  Address(Register rn, ByteSize offset, AsmOffset mode = basic_offset) {
    _base = rn;
    _index = noreg;
    _disp = in_bytes(offset);
    _mode = mode;
    _shift_imm = 0;
#ifdef AARCH64
    _extend = ex_lsl;
#else
    _shift = lsl;
    _offset_op = add_offset;
#endif
  }
#endif

#ifdef AARCH64
  Address(Register rn, Register rm, AsmExtendOp extend = ex_lsl, int shift_imm = 0) {
    assert ((extend == ex_uxtw) || (extend == ex_lsl) || (extend == ex_sxtw) || (extend == ex_sxtx), "invalid extend for address mode");
    assert ((0 <= shift_imm) && (shift_imm <= 4), "shift amount is out of range");
    _base = rn;
    _index = rm;
    _disp = 0;
    _mode = basic_offset;
    _extend = extend;
    _shift_imm = shift_imm;
  }
#else
  Address(Register rn, Register rm, AsmShift shift = lsl,
          int shift_imm = 0, AsmOffset mode = basic_offset,
          AsmOffsetOp offset_op = add_offset) {
    _base = rn;
    _index = rm;
    _disp = 0;
    _shift = shift;
    _shift_imm = shift_imm;
    _mode = mode;
    _offset_op = offset_op;
  }

  Address(Register rn, RegisterOrConstant offset, AsmShift shift = lsl,
          int shift_imm = 0) {
    _base = rn;
    if (offset.is_constant()) {
      _index = noreg;
      {
        int off = (int) offset.as_constant();
        if (shift_imm != 0) {
          assert(shift == lsl,"shift not yet encoded");
          off = off << shift_imm;
        }
        _disp = off;
      }
      _shift = lsl;
      _shift_imm = 0;
    } else {
      _index = offset.as_register();
      _disp = 0;
      _shift = shift;
      _shift_imm = shift_imm;
    }
    _mode = basic_offset;
    _offset_op = add_offset;
  }
#endif // AARCH64

  // [base + index * wordSize]
  static Address indexed_ptr(Register base, Register index) {
    return Address(base, index, LSL, LogBytesPerWord);
  }

  // [base + index * BytesPerInt]
  static Address indexed_32(Register base, Register index) {
    return Address(base, index, LSL, LogBytesPerInt);
  }

  // [base + index * BytesPerHeapOop]
  static Address indexed_oop(Register base, Register index) {
    return Address(base, index, LSL, LogBytesPerHeapOop);
  }

  Address plus_disp(int disp) const {
    assert((disp == 0) || (_index == noreg),"can't apply an offset to a register indexed address");
    Address a = (*this);
    a._disp += disp;
    return a;
  }

  Address rebase(Register new_base) const {
    Address a = (*this);
    a._base = new_base;
    return a;
  }

#ifdef AARCH64
  int encoding_simd() const {
    assert(_index != SP, "encoding constraint");
    assert(_disp == 0 || _mode == post_indexed, "encoding constraint");
    assert(_index == noreg || _mode == basic_offset, "encoding constraint");
    assert(_mode == basic_offset || _mode == post_indexed, "encoding constraint");
    assert(_extend == ex_lsl, "encoding constraint");
    int index;
    if (_index == noreg) {
      if (_mode == post_indexed)
        index = 0b100 << 5 | 31;
      else
        index = 0;
    } else {
      index = 0b100 << 5 | _index->encoding();
    }
    return index << 16 | _base->encoding_with_sp() << 5;
  }
#else /* !AARCH64 */
  int encoding2() const {
    assert(_mode == basic_offset || _base != PC, "unpredictable instruction");
    if (_index == noreg) {
      assert(-4096 < _disp && _disp < 4096, "encoding constraint");
      return _mode | up(_disp) << 23 | _base->encoding() << 16 | abs(_disp);
    } else {
      assert(_index != PC && (_mode == basic_offset || _index != _base), "unpredictable instruction");
      assert(_disp == 0 && (_shift_imm >> 5) == 0, "encoding constraint");
      return 1 << 25 | _offset_op << 23 | _mode | _base->encoding() << 16 |
             _shift_imm << 7 | _shift << 5 | _index->encoding();
    }
  }

  int encoding3() const {
    assert(_mode == basic_offset || _base != PC, "unpredictable instruction");
    if (_index == noreg) {
      assert(-256 < _disp && _disp < 256, "encoding constraint");
      return _mode | up(_disp) << 23 | 1 << 22 | _base->encoding() << 16 |
             (abs(_disp) & 0xf0) << 4 | abs(_disp) & 0x0f;
    } else {
      assert(_index != PC && (_mode == basic_offset || _index != _base), "unpredictable instruction");
      assert(_disp == 0 && _shift == lsl && _shift_imm == 0, "encoding constraint");
      return _mode | _offset_op << 23 | _base->encoding() << 16 | _index->encoding();
    }
  }
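  // Illustrative note (not part of the committed file): encoding2() and
  // encoding3() above emit ARM addressing mode 2/3 bit layouts. E.g. for
  // [R1, #-4] in mode 2, _disp == -4 gives up(-4) == 0 (subtract), the base
  // field R1 << 16, and the magnitude abs(-4) == 4 in the low bits; mode 3
  // instead splits that magnitude into high and low nibbles (imm4H/imm4L).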

  int encoding_ex() const {
    assert(_index == noreg && _disp == 0 && _mode == basic_offset &&
           _base != PC, "encoding constraint");
    return _base->encoding() << 16;
  }

  int encoding_vfp() const {
    assert(_index == noreg && _mode == basic_offset, "encoding constraint");
    assert(-1024 < _disp && _disp < 1024 && (_disp & 3) == 0, "encoding constraint");
    return _base->encoding() << 16 | up(_disp) << 23 | abs(_disp) >> 2;
  }

  int encoding_simd() const {
    assert(_base != PC, "encoding constraint");
    assert(_index != PC && _index != SP, "encoding constraint");
    assert(_disp == 0, "encoding constraint");
    assert(_shift == 0, "encoding constraint");
    assert(_index == noreg || _mode == basic_offset, "encoding constraint");
    assert(_mode == basic_offset || _mode == post_indexed, "encoding constraint");
    int index;
    if (_index == noreg) {
      if (_mode == post_indexed)
        index = 13;
      else
        index = 15;
    } else {
      index = _index->encoding();
    }

    return _base->encoding() << 16 | index;
  }
#endif // !AARCH64

  Register base() const {
    return _base;
  }

  Register index() const {
    return _index;
  }

  int disp() const {
    return _disp;
  }

  AsmOffset mode() const {
    return _mode;
  }

  int shift_imm() const {
    return _shift_imm;
  }

#ifdef AARCH64
  AsmExtendOp extend() const {
    return _extend;
  }
#else
  AsmShift shift() const {
    return _shift;
  }

  AsmOffsetOp offset_op() const {
    return _offset_op;
  }
#endif

  bool uses(Register reg) const { return _base == reg || _index == reg; }

  const relocInfo::relocType rtype() { return _rspec.type(); }
  const RelocationHolder& rspec() { return _rspec; }

  // Convert the raw encoding form into the form expected by the
  // constructor for Address.
  static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
};

#ifdef COMPILER2
class VFP VALUE_OBJ_CLASS_SPEC {
  // Helper classes to detect whether a floating point constant can be
  // encoded in a fconstd or fconsts instruction
  // The conversion from the imm8, 8 bit constant, to the floating
  // point value encoding is done with either:
  // for single precision: imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19)
  // or
  // for double precision: imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,8):imm8<5:0>:Zeros(48)

 private:
  class fpnum {
   public:
    virtual unsigned int f_hi4() const = 0;
    virtual bool f_lo_is_null() const = 0;
    virtual int e() const = 0;
    virtual unsigned int s() const = 0;

    inline bool can_be_imm8() const { return e() >= -3 && e() <= 4 && f_lo_is_null(); }
    inline unsigned char imm8() const { int v = (s() << 7) | (((e() - 1) & 0x7) << 4) | f_hi4(); assert((v >> 8) == 0, "overflow"); return v; }
  };

 public:
  class float_num : public fpnum {
   public:
    float_num(float v) {
      _num.val = v;
    }

    virtual unsigned int f_hi4() const { return (_num.bits << 9) >> (19+9); }
    virtual bool f_lo_is_null() const { return (_num.bits & ((1 << 19) - 1)) == 0; }
    virtual int e() const { return ((_num.bits << 1) >> (23+1)) - 127; }
    virtual unsigned int s() const { return _num.bits >> 31; }

   private:
    union {
      float val;
      unsigned int bits;
    } _num;
  };

  class double_num : public fpnum {
   public:
    double_num(double v) {
      _num.val = v;
    }

    virtual unsigned int f_hi4() const { return (_num.bits << 12) >> (48+12); }
    virtual bool f_lo_is_null() const { return (_num.bits & ((1LL << 48) - 1)) == 0; }
    virtual int e() const { return ((_num.bits << 1) >> (52+1)) - 1023; }
    virtual unsigned int s() const { return _num.bits >> 63; }

   private:
    union {
      double val;
      unsigned long long bits;
    } _num;
  };
};
#endif
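// Illustrative note (not part of the committed file): can_be_imm8() above
// accepts exactly the VFP "modified immediate" floats: sign, a 4-bit
// mantissa and an exponent in [-3, 4], with all lower fraction bits zero.
// So values like 0.5, 1.0, 2.5 or 31.0 encode into fconsts/fconstd, while
// 0.1 or 1.0e10 must be materialized some other way.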

#ifdef AARCH64
#include "assembler_arm_64.hpp"
#else
#include "assembler_arm_32.hpp"
#endif


#endif // CPU_ARM_VM_ASSEMBLER_ARM_HPP
29
hotspot/src/cpu/arm/vm/assembler_arm.inline.hpp
Normal file
@ -0,0 +1,29 @@
/*
 * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_ASSEMBLER_ARM_INLINE_HPP
#define CPU_ARM_VM_ASSEMBLER_ARM_INLINE_HPP


#endif // CPU_ARM_VM_ASSEMBLER_ARM_INLINE_HPP
99
hotspot/src/cpu/arm/vm/assembler_arm_32.cpp
Normal file
@ -0,0 +1,99 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvm_misc.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef COMPILER2
// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != PC) {
    assert(disp == 0, "unsupported");
    Address madr(as_Register(base), rindex, lsl, scale);
    madr._rspec = rspec;
    return madr;
  } else {
    assert(scale == 0, "not supported");
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}
#endif

void AsmOperand::initialize_rotated_imm(unsigned int imm) {
  for (int shift = 2; shift <= 24; shift += 2) {
    if ((imm & ~(0xff << shift)) == 0) {
      _encoding = 1 << 25 | (32 - shift) << 7 | imm >> shift;
      return;
    }
  }
  assert((imm & 0x0ffffff0) == 0, "too complicated constant: %d (%x)", imm, imm);
  _encoding = 1 << 25 | 4 << 7 | imm >> 28 | imm << 4;
}
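// Illustrative example (not part of the committed file): ARM data-processing
// immediates are an 8-bit value rotated right by an even amount. For
// imm = 0x3fc00 the loop above stops at shift == 10, since
// (0x3fc00 & ~(0xff << 10)) == 0; the operand then encodes imm8 == 0xff with
// rotation 32 - 10 == 22, because 0xff ror 22 == 0xff << 10 == 0x3fc00.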

bool AsmOperand::is_rotated_imm(unsigned int imm) {
  if ((imm >> 8) == 0) {
    return true;
  }
  for (int shift = 2; shift <= 24; shift += 2) {
    if ((imm & ~(0xff << shift)) == 0) {
      return true;
    }
  }
  if ((imm & 0x0ffffff0) == 0) {
    return true;
  }
  return false;
}
1245
hotspot/src/cpu/arm/vm/assembler_arm_32.hpp
Normal file
File diff suppressed because it is too large
191
hotspot/src/cpu/arm/vm/assembler_arm_64.cpp
Normal file
@ -0,0 +1,191 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvm_misc.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

// Returns whether given imm has equal bit fields <0:size-1> and <size:2*size-1>.
inline bool Assembler::LogicalImmediate::has_equal_subpatterns(uintx imm, int size) {
  uintx mask = right_n_bits(size);
  uintx subpattern1 = mask_bits(imm, mask);
  uintx subpattern2 = mask_bits(imm >> size, mask);
  return subpattern1 == subpattern2;
}

// Returns least size that is a power of two from 2 to 64 with the proviso that given
// imm is composed of repeating patterns of this size.
inline int Assembler::LogicalImmediate::least_pattern_size(uintx imm) {
  int size = BitsPerWord;
  while (size > 2 && has_equal_subpatterns(imm, size >> 1)) {
    size >>= 1;
  }
  return size;
}

// Returns count of set bits in given imm. Based on variable-precision SWAR algorithm.
inline int Assembler::LogicalImmediate::population_count(uintx x) {
  x -= ((x >> 1) & 0x5555555555555555L);
  x = (((x >> 2) & 0x3333333333333333L) + (x & 0x3333333333333333L));
  x = (((x >> 4) + x) & 0x0f0f0f0f0f0f0f0fL);
  x += (x >> 8);
  x += (x >> 16);
  x += (x >> 32);
  return(x & 0x7f);
}
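// Illustrative example (not part of the committed file): a worked pass of
// the SWAR popcount for x = 0xff: the first step leaves each 2-bit field
// holding its own bit count (0xaa), the second leaves each nibble holding 4
// (0x44), the third folds that into a byte count (0x08), and the final
// shifts plus the & 0x7f mask return 8.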

// Let given x be <A:B> where B = 0 and least bit of A = 1. Returns <A:C>, where C is B-size set bits.
inline uintx Assembler::LogicalImmediate::set_least_zeroes(uintx x) {
  return x | (x - 1);
}
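// Illustrative example (not part of the committed file):
// set_least_zeroes(0b0110000) == 0b0111111; construct() below uses this to
// check that the set bits of a candidate pattern form one contiguous run.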
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
|
||||
// Restores immediate by encoded bit masks.
|
||||
uintx Assembler::LogicalImmediate::decode() {
  assert (_encoded, "should be");

  int len_code = (_immN << 6) | ((~_imms) & 0x3f);
  assert (len_code != 0, "should be");

  int len = 6;
  while (!is_set_nth_bit(len_code, len)) len--;
  int esize = 1 << len;
  assert (len > 0, "should be");
  assert ((_is32bit ? 32 : 64) >= esize, "should be");

  int levels = right_n_bits(len);
  int S = _imms & levels;
  int R = _immr & levels;

  assert (S != levels, "should be");

  uintx welem = right_n_bits(S + 1);
  uintx wmask = (R == 0) ? welem : ((welem >> R) | (welem << (esize - R)));

  for (int size = esize; size < 64; size <<= 1) {
    wmask |= (wmask << size);
  }

  return wmask;
}

#endif


// Constructs a LogicalImmediate from the given imm. Figures out whether the given imm can be
// used in AArch64 logical instructions (AND, ANDS, EOR, ORR) and saves its encoding.
void Assembler::LogicalImmediate::construct(uintx imm, bool is32) {
  _is32bit = is32;

  if (is32) {
    assert(((imm >> 32) == 0) || (((intx)imm >> 31) == -1), "32-bit immediate is out of range");

    // Replicate low 32 bits.
    imm &= 0xffffffff;
    imm |= imm << 32;
  }

  // All-zeroes and all-ones cannot be encoded.
  if (imm != 0 && (~imm != 0)) {

    // Let LPS (least pattern size) be the least size (a power of two from 2 to 64) of the
    // repeating patterns in the immediate. If the immediate value can be encoded, it is
    // encoded by a pattern of exactly LPS size (due to the structure of valid patterns).
    // To verify that the immediate value can be encoded, LPS is calculated and the
    // <LPS-1:0> bits of the immediate are checked to form a valid pattern.
    int lps = least_pattern_size(imm);
    uintx lps_mask = right_n_bits(lps);

    // A valid pattern has one of the following forms:
    //  | 0 x A | 1 x B | 0 x C |, where B > 0 and C > 0, or
    //  | 1 x A | 0 x B | 1 x C |, where B > 0 and C > 0.
    // For simplicity, the second form of the pattern is inverted into the first form.
    bool inverted = imm & 0x1;
    uintx pattern = (inverted ? ~imm : imm) & lps_mask;

    // | 0 x A | 1 x (B + C) |
    uintx without_least_zeroes = set_least_zeroes(pattern);

    // The pattern is valid iff, without its least zeroes, it is a power of two minus 1.
    if ((without_least_zeroes & (without_least_zeroes + 1)) == 0) {

      // Count B as the population count of the pattern.
      int bits_count = population_count(pattern);

      // Count B+C as the population count of the pattern without its least zeroes.
      int left_range = population_count(without_least_zeroes);

      // The S-prefix is the part of the imms field which encodes LPS.
      //  LPS | S prefix
      //   64 | not defined
      //   32 | 0b0
      //   16 | 0b10
      //    8 | 0b110
      //    4 | 0b1110
      //    2 | 0b11110
      int s_prefix = (lps == 64) ? 0 : ~set_least_zeroes(lps) & 0x3f;

      // The immN bit is set iff LPS == 64.
      _immN = (lps == 64) ? 1 : 0;
      assert (!is32 || (_immN == 0), "32-bit immediate should be encoded with zero N-bit");

      // immr is the rotation size.
      _immr = lps + (inverted ? 0 : bits_count) - left_range;

      // imms is the field that encodes the bits count and the S-prefix.
      _imms = ((inverted ? (lps - bits_count) : bits_count) - 1) | s_prefix;

      _encoded = true;
      assert (decode() == imm, "illegal encoding");

      return;
    }
  }

  _encoded = false;
}
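
For reference, a minimal standalone sketch of the same validity check (not part of the commit; the HotSpot helpers right_n_bits(), least_pattern_size(), set_least_zeroes() and population_count() are replaced by illustrative local equivalents):

#include <cassert>
#include <cstdint>

// Rotate right by n (n == 0 is returned as-is to avoid a 64-bit shift).
static inline uint64_t rotr64(uint64_t x, unsigned n) {
  return (n == 0) ? x : ((x >> n) | (x << (64 - n)));
}

// Smallest power-of-two period (2..64) of the repeating pattern in imm.
static int least_pattern_size(uint64_t imm) {
  int size = 64;
  while (size > 2 && rotr64(imm, size / 2) == imm) {
    size /= 2;
  }
  return size;
}

// | 0 x A | 1 x B | 0 x C |  ->  | 0 x A | 1 x (B + C) |
static inline uint64_t set_least_zeroes(uint64_t x) {
  return x | (x - 1);
}

// Mirrors the acceptance test in LogicalImmediate::construct() above,
// without producing the immN/immr/imms fields.
static bool is_encodable_logical_immediate(uint64_t imm) {
  if (imm == 0 || ~imm == 0) return false;  // all-zeroes / all-ones

  int lps = least_pattern_size(imm);
  uint64_t lps_mask = (lps == 64) ? ~(uint64_t)0 : (((uint64_t)1 << lps) - 1);

  bool inverted = imm & 1;
  uint64_t pattern = (inverted ? ~imm : imm) & lps_mask;

  uint64_t filled = set_least_zeroes(pattern);
  return (filled & (filled + 1)) == 0;  // 2^n - 1 once trailing zeroes are filled
}

int main() {
  assert(is_encodable_logical_immediate(0x00ff00ff00ff00ffULL));  // period-16 pattern
  assert(is_encodable_logical_immediate(0x0000000000fff000ULL));  // single run of ones
  assert(!is_encodable_logical_immediate(0x123456789abcdef0ULL)); // not a rotated run
  return 0;
}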

hotspot/src/cpu/arm/vm/assembler_arm_64.hpp (new file, 1717 lines)
File diff suppressed because it is too large

hotspot/src/cpu/arm/vm/bytes_arm.hpp (new file, 195 lines)
@ -0,0 +1,195 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_BYTES_ARM_HPP
#define CPU_ARM_VM_BYTES_ARM_HPP

#include "memory/allocation.hpp"
#include "utilities/macros.hpp"

#ifndef VM_LITTLE_ENDIAN
#define VM_LITTLE_ENDIAN 1
#endif

class Bytes: AllStatic {

 public:
  // Returns true if the byte ordering used by Java is different from the
  // native byte ordering of the underlying machine.
  static inline bool is_Java_byte_ordering_different() {
    return VM_LITTLE_ENDIAN != 0;
  }

  static inline u2 get_Java_u2(address p) {
    return (u2(p[0]) << 8) | u2(p[1]);
  }

  static inline u4 get_Java_u4(address p) {
    return u4(p[0]) << 24 |
           u4(p[1]) << 16 |
           u4(p[2]) <<  8 |
           u4(p[3]);
  }

  static inline u8 get_Java_u8(address p) {
    return u8(p[0]) << 56 |
           u8(p[1]) << 48 |
           u8(p[2]) << 40 |
           u8(p[3]) << 32 |
           u8(p[4]) << 24 |
           u8(p[5]) << 16 |
           u8(p[6]) <<  8 |
           u8(p[7]);
  }

  static inline void put_Java_u2(address p, u2 x) {
    p[0] = x >> 8;
    p[1] = x;
  }

  static inline void put_Java_u4(address p, u4 x) {
    ((u1*)p)[0] = x >> 24;
    ((u1*)p)[1] = x >> 16;
    ((u1*)p)[2] = x >>  8;
    ((u1*)p)[3] = x;
  }

  static inline void put_Java_u8(address p, u8 x) {
    ((u1*)p)[0] = x >> 56;
    ((u1*)p)[1] = x >> 48;
    ((u1*)p)[2] = x >> 40;
    ((u1*)p)[3] = x >> 32;
    ((u1*)p)[4] = x >> 24;
    ((u1*)p)[5] = x >> 16;
    ((u1*)p)[6] = x >>  8;
    ((u1*)p)[7] = x;
  }

#ifdef VM_LITTLE_ENDIAN

  static inline u2 get_native_u2(address p) {
    return (intptr_t(p) & 1) == 0 ? *(u2*)p : u2(p[0]) | (u2(p[1]) << 8);
  }

  static inline u4 get_native_u4(address p) {
    switch (intptr_t(p) & 3) {
      case 0:  return *(u4*)p;
      case 2:  return u4(((u2*)p)[0]) |
                      u4(((u2*)p)[1]) << 16;
      default: return u4(p[0])       |
                      u4(p[1]) <<  8 |
                      u4(p[2]) << 16 |
                      u4(p[3]) << 24;
    }
  }

  static inline u8 get_native_u8(address p) {
    switch (intptr_t(p) & 7) {
      case 0:  return *(u8*)p;
      case 4:  return u8(((u4*)p)[0]) |
                      u8(((u4*)p)[1]) << 32;
      case 2:  return u8(((u2*)p)[0])       |
                      u8(((u2*)p)[1]) << 16 |
                      u8(((u2*)p)[2]) << 32 |
                      u8(((u2*)p)[3]) << 48;
      default: return u8(p[0])       |
                      u8(p[1]) <<  8 |
                      u8(p[2]) << 16 |
                      u8(p[3]) << 24 |
                      u8(p[4]) << 32 |
                      u8(p[5]) << 40 |
                      u8(p[6]) << 48 |
                      u8(p[7]) << 56;
    }
  }
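
  // The alignment dispatch in the getters above exists because some ARM
  // cores targeted by the VM do not support unaligned ldr/ldrh accesses,
  // so the low bits of the address select the widest access that is
  // guaranteed to be naturally aligned.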

  static inline void put_native_u2(address p, u2 x) {
    if ((intptr_t(p) & 1) == 0) {
      *(u2*)p = x;
    } else {
      p[0] = x;
      p[1] = x >> 8;
    }
  }

  static inline void put_native_u4(address p, u4 x) {
    switch (intptr_t(p) & 3) {
      case 0:  *(u4*)p = x;
               break;
      case 2:  ((u2*)p)[0] = x;
               ((u2*)p)[1] = x >> 16;
               break;
      default: ((u1*)p)[0] = x;
               ((u1*)p)[1] = x >>  8;
               ((u1*)p)[2] = x >> 16;
               ((u1*)p)[3] = x >> 24;
               break;
    }
  }

  static inline void put_native_u8(address p, u8 x) {
    switch (intptr_t(p) & 7) {
      case 0:  *(u8*)p = x;
               break;
      case 4:  ((u4*)p)[0] = x;
               ((u4*)p)[1] = x >> 32;
               break;
      case 2:  ((u2*)p)[0] = x;
               ((u2*)p)[1] = x >> 16;
               ((u2*)p)[2] = x >> 32;
               ((u2*)p)[3] = x >> 48;
               break;
      default: ((u1*)p)[0] = x;
               ((u1*)p)[1] = x >>  8;
               ((u1*)p)[2] = x >> 16;
               ((u1*)p)[3] = x >> 24;
               ((u1*)p)[4] = x >> 32;
               ((u1*)p)[5] = x >> 40;
               ((u1*)p)[6] = x >> 48;
               ((u1*)p)[7] = x >> 56;
    }
  }

#else

  static inline u2   get_native_u2(address p) { return get_Java_u2(p); }
  static inline u4   get_native_u4(address p) { return get_Java_u4(p); }
  static inline u8   get_native_u8(address p) { return get_Java_u8(p); }
  static inline void put_native_u2(address p, u2 x) { put_Java_u2(p, x); }
  static inline void put_native_u4(address p, u4 x) { put_Java_u4(p, x); }
  static inline void put_native_u8(address p, u8 x) { put_Java_u8(p, x); }

#endif // VM_LITTLE_ENDIAN

  // Efficient swapping of byte ordering
  static inline u2 swap_u2(u2 x);
  static inline u4 swap_u4(u4 x);
  static inline u8 swap_u8(u8 x);
};


// The following header contains the implementations of swap_u2, swap_u4, and swap_u8.
#include OS_CPU_HEADER_INLINE(bytes)

#endif // CPU_ARM_VM_BYTES_ARM_HPP
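
A short standalone sketch of why the Java accessors differ from the native ones (not from the commit; u1/u2 and address are modeled with plain typedefs so it compiles on its own):

#include <cassert>
#include <cstdint>

typedef uint8_t  u1;
typedef uint16_t u2;
typedef u1*      address;

// Classfile data is big-endian; on little-endian ARM the bytes must be
// assembled explicitly, exactly as Bytes::get_Java_u2() does above.
static inline u2 get_java_u2(address p) {
  return (u2)((u2(p[0]) << 8) | u2(p[1]));
}

int main() {
  u1 buf[2] = { 0x12, 0x34 };
  assert(get_java_u2(buf) == 0x1234);  // holds on any host endianness
  return 0;
}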

hotspot/src/cpu/arm/vm/c1_CodeStubs_arm.cpp (new file, 510 lines)
@ -0,0 +1,510 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS

#define __ ce->masm()->

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


// TODO: ARM - is it possible to inline these stubs into the main code stream?

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = info == NULL ? NULL : new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
  // Pass the array index on the stack because all registers must be preserved.
  ce->verify_reserved_argument_area_size(1);
  if (_index->is_cpu_register()) {
    __ str_32(_index->as_register(), Address(SP));
  } else {
    __ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
    __ str_32(Rtemp, Address(SP));
  }

  if (_throw_index_out_of_bounds_exception) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("RangeCheck");)
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
          relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("DivByZero");)
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  const Register obj_reg = _obj_reg->as_pointer_register();
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(2);
#ifdef AARCH64
  __ stp(obj_reg, lock_reg, Address(SP));
#else
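  // Note: stmia stores the register set in ascending register-number order
  // regardless of operand order, so the single-instruction form is only
  // usable when obj_reg is numbered below lock_reg; otherwise two plain
  // stores are emitted.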
  if (obj_reg < lock_reg) {
    __ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
  } else {
    __ str(obj_reg, Address(SP));
    __ str(lock_reg, Address(SP, BytesPerWord));
  }
#endif // AARCH64

  Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
                              Runtime1::monitorenter_id :
                              Runtime1::monitorenter_nofpu_id;
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}


// Call return is directly after patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether we are required to implement this
  ShouldNotReachHere();
#endif
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = AARCH64_ONLY(NativeInstruction::instruction_size) NOT_AARCH64(0);

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);

#ifdef AARCH64
  assert(nativeInstruction_at(_pc_start)->is_nop(), "required for MT safe patching");

  // Same alignment of reg2mem code and PatchingStub code. Required to make copied bind_literal() code properly aligned.
  __ align(wordSize);
#endif // AARCH64

  if (is_load NOT_AARCH64(&& !VM_Version::supports_movw())) {
    address start = __ pc();

    // The following sequence duplicates code provided in MacroAssembler::patchable_mov_oop()
    // without creating a relocation info entry.
#ifdef AARCH64
    // Extra nop for MT safe patching
    __ nop();
#endif // AARCH64

    assert((__ pc() - start) == patchable_instruction_offset, "should be");
#ifdef AARCH64
    __ ldr(_obj, __ pc());
#else
    __ ldr(_obj, Address(PC));
    // Extra nop to handle the case of a large offset of the oop placeholder (see NativeMovConstReg::set_data).
    __ nop();
#endif // AARCH64

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  }

  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (is_load) {
    address start = __ pc();
    if (_id == load_mirror_id || _id == load_appendix_id) {
      __ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
    } else {
      __ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  } else {
    int* start = (int*)_pc_start;
    int* end = start + (_bytes_to_copy / BytesPerInt);
    while (start < end) {
      __ emit_int32(*start++);
    }
  }
  address end_of_patch = __ pc();

  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    assert(_obj != noreg, "must be a valid register");
    // Rtemp should be OK in C1
    __ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
    __ cmp(Rtemp, Rthread);
    __ b(call_patch, ne);
    __ b(_patch_site_continuation);

    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data - 3 high bytes of the word");
  }
  const int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
  __ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);
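  // For readability, the layout of the patch-record word just emitted:
  //   bits  0..7  : 0xff marker
  //   bits  8..15 : being_initialized_entry_offset
  //   bits 16..23 : bytes_to_skip
  //   bits 24..31 : _bytes_to_copy
  // Each field must fit in one byte; _bytes_to_copy <= 0xFF is asserted at
  // the top of this method.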

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // runtime call will return here
  Label call_return;
  __ bind(call_return);
  ce->add_call_info_here(_info);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  __ b(_patch_site_entry);

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }

  // arrange for the call to return just after the patch word
  __ adr(LR, call_return);
  __ jump(target, relocInfo::runtime_call_type, Rtemp);

  if (is_load) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov_slow(Rtemp, _trap_request);
  ce->verify_reserved_argument_area_size(1);
  __ str(Rtemp, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is
    // probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the object on the stack because all registers must be preserved.
  if (_obj->is_cpu_register()) {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  } else {
    assert(_obj->is_illegal(), "should be");
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("SimpleException");)
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  Register r[5];
  r[0] = src()->as_pointer_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_pointer_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  for (int i = 0; i < 5; i++) {
    VMReg arg = args[i].first();
    if (arg->is_stack()) {
      __ str(r[i], Address(SP, arg->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      assert(r[i] == arg->as_Register(), "Calling conventions must match");
    }
  }

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  int ret_addr_offset = __ patchable_call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
  __ b(_continuation);
}

/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  __ cbz(pre_val_reg, _continuation);
  ce->verify_reserved_argument_area_size(1);
  __ str(pre_val_reg, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id), relocInfo::runtime_call_type);

  __ b(_continuation);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cbz(new_val_reg, _continuation);
  ce->verify_reserved_argument_area_size(1);
  __ str(addr()->as_pointer_register(), Address(SP));
  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __

hotspot/src/cpu/arm/vm/c1_Defs_arm.hpp (new file, 85 lines)
@ -0,0 +1,85 @@
/*
 * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_C1_DEFS_ARM_HPP
#define CPU_ARM_VM_C1_DEFS_ARM_HPP

// native word offsets from memory address (little endian)
enum {
  pd_lo_word_offset_in_bytes = 0,
  pd_hi_word_offset_in_bytes = BytesPerWord
};

// explicit rounding operations are required to implement the strictFP mode
enum {
  pd_strict_fp_requires_explicit_rounding = false
};

#ifdef __SOFTFP__
#define SOFT(n) n
#define VFP(n)
#else  // __SOFTFP__
#define SOFT(n)
#define VFP(n)  n
#endif // __SOFTFP__
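
// For example, the pd_nof_fpu_regs_frame_map entry below is written
// VFP(32) SOFT(0): it expands to 32 on a VFP build and to 0 on a
// soft-float build.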

// registers
enum {
  pd_nof_cpu_regs_frame_map             = AARCH64_ONLY(33) NOT_AARCH64(16), // number of registers used during code emission
  pd_nof_caller_save_cpu_regs_frame_map = AARCH64_ONLY(27) NOT_AARCH64(10), // number of registers killed by calls
  pd_nof_cpu_regs_reg_alloc             = AARCH64_ONLY(27) NOT_AARCH64(10), // number of registers that are visible to the register allocator (including Rheap_base, which is visible only if compressed pointers are not enabled)
  pd_nof_cpu_regs_linearscan            = pd_nof_cpu_regs_frame_map,        // number of registers visible to linear scan
  pd_nof_cpu_regs_processed_in_linearscan = pd_nof_cpu_regs_reg_alloc + 1,  // number of registers processed in linear scan; includes LR, as it is used as a temporary register in c1_LIRGenerator_arm
  pd_first_cpu_reg = 0,
  pd_last_cpu_reg  = pd_nof_cpu_regs_frame_map - 1,

  pd_nof_fpu_regs_frame_map             = VFP(32) SOFT(0), // number of float registers used during code emission
  pd_nof_caller_save_fpu_regs_frame_map = VFP(32) SOFT(0), // number of float registers killed by calls
  pd_nof_fpu_regs_reg_alloc             = AARCH64_ONLY(32) NOT_AARCH64(VFP(30) SOFT(0)), // number of float registers that are visible to the register allocator
  pd_nof_fpu_regs_linearscan            = pd_nof_fpu_regs_frame_map, // number of float registers visible to linear scan
  pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
  pd_last_fpu_reg  = pd_first_fpu_reg + pd_nof_fpu_regs_frame_map - 1,

  pd_nof_xmm_regs_linearscan  = 0,
  pd_nof_caller_save_xmm_regs = 0,
  pd_first_xmm_reg = -1,
  pd_last_xmm_reg  = -1
};


// encoding of float value in debug info:
enum {
  pd_float_saved_as_double = false
};

#ifdef AARCH64
#define PATCHED_ADDR 0xff8
#else
#define PATCHED_ADDR (204)
#endif
#define CARDTABLEMODREF_POST_BARRIER_HELPER
#define GENERATE_ADDRESS_IS_PREFERRED

#endif // CPU_ARM_VM_C1_DEFS_ARM_HPP

hotspot/src/cpu/arm/vm/c1_FpuStackSim_arm.cpp (new file, 31 lines)
@ -0,0 +1,31 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FpuStackSim.hpp"
#include "c1/c1_FrameMap.hpp"
#include "utilities/array.hpp"
#include "utilities/ostream.hpp"

// Nothing needed here

hotspot/src/cpu/arm/vm/c1_FpuStackSim_arm.hpp (new file, 30 lines)
@ -0,0 +1,30 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_C1_FPUSTACKSIM_ARM_HPP
#define CPU_ARM_VM_C1_FPUSTACKSIM_ARM_HPP

// Nothing needed here

#endif // CPU_ARM_VM_C1_FPUSTACKSIM_ARM_HPP

hotspot/src/cpu/arm/vm/c1_FrameMap_arm.cpp (new file, 230 lines)
@ -0,0 +1,230 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_arm.inline.hpp"

LIR_Opr FrameMap::R0_opr;
LIR_Opr FrameMap::R1_opr;
LIR_Opr FrameMap::R2_opr;
LIR_Opr FrameMap::R3_opr;
LIR_Opr FrameMap::R4_opr;
LIR_Opr FrameMap::R5_opr;

LIR_Opr FrameMap::R0_oop_opr;
LIR_Opr FrameMap::R1_oop_opr;
LIR_Opr FrameMap::R2_oop_opr;
LIR_Opr FrameMap::R3_oop_opr;
LIR_Opr FrameMap::R4_oop_opr;
LIR_Opr FrameMap::R5_oop_opr;

LIR_Opr FrameMap::R0_metadata_opr;
LIR_Opr FrameMap::R1_metadata_opr;
LIR_Opr FrameMap::R2_metadata_opr;
LIR_Opr FrameMap::R3_metadata_opr;
LIR_Opr FrameMap::R4_metadata_opr;
LIR_Opr FrameMap::R5_metadata_opr;

#ifdef AARCH64
LIR_Opr FrameMap::ZR_opr;
#endif // AARCH64

LIR_Opr FrameMap::LR_opr;
LIR_Opr FrameMap::LR_oop_opr;
LIR_Opr FrameMap::LR_ptr_opr;
LIR_Opr FrameMap::FP_opr;
LIR_Opr FrameMap::SP_opr;
LIR_Opr FrameMap::Rthread_opr;

LIR_Opr FrameMap::Int_result_opr;
LIR_Opr FrameMap::Long_result_opr;
LIR_Opr FrameMap::Object_result_opr;
LIR_Opr FrameMap::Float_result_opr;
LIR_Opr FrameMap::Double_result_opr;

LIR_Opr FrameMap::Exception_oop_opr;
LIR_Opr FrameMap::Exception_pc_opr;

LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0 };
LIR_Opr FrameMap::_caller_save_fpu_regs[]; // initialized to zero

LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
  LIR_Opr opr = LIR_OprFact::illegalOpr;
  VMReg r_1 = reg->first();
  VMReg r_2 = reg->second();
  if (r_1->is_stack()) {
    int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
    opr = LIR_OprFact::address(new LIR_Address(SP_opr, st_off, type));
  } else if (r_1->is_Register()) {
    Register reg = r_1->as_Register();
    if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
#ifdef AARCH64
      assert(r_1->next() == r_2, "should be the same");
      opr = as_long_opr(reg);
#else
      opr = as_long_opr(reg, r_2->as_Register());
#endif
    } else if (type == T_OBJECT || type == T_ARRAY) {
      opr = as_oop_opr(reg);
    } else if (type == T_METADATA) {
      opr = as_metadata_opr(reg);
    } else {
      // PreferInterpreterNativeStubs should ensure we never need to
      // handle a long opr passed as R3+stack_slot
      assert(!r_2->is_stack(), "missing support for ALIGN_WIDE_ARGUMENTS==0");
      opr = as_opr(reg);
    }
  } else if (r_1->is_FloatRegister()) {
    FloatRegister reg = r_1->as_FloatRegister();
    opr = type == T_FLOAT ? as_float_opr(reg) : as_double_opr(reg);
  } else {
    ShouldNotReachHere();
  }
  return opr;
}


void FrameMap::initialize() {
  if (_init_done) return;

  int i;
  int rnum = 0;

  // Registers used for allocation
#ifdef AARCH64
  assert(Rthread == R28 && Rheap_base == R27 && Rtemp == R16, "change the code here");
  for (i = 0; i < 16; i++) {
    map_register(rnum++, as_Register(i));
  }
  for (i = 17; i < 28; i++) {
    map_register(rnum++, as_Register(i));
  }
#else
  assert(Rthread == R10 && Rtemp == R12, "change the code here");
  for (i = 0; i < 10; i++) {
    map_register(rnum++, as_Register(i));
  }
#endif // AARCH64
  assert(rnum == pd_nof_cpu_regs_reg_alloc, "should be");

  // Registers not used for allocation
  map_register(rnum++, LR); // LR register should be listed first, see c1_LinearScan_arm.hpp::is_processed_reg_num.
  assert(rnum == pd_nof_cpu_regs_processed_in_linearscan, "should be");

  map_register(rnum++, Rtemp);
  map_register(rnum++, Rthread);
  map_register(rnum++, FP); // ARM32: R7 or R11
  map_register(rnum++, SP);
#ifdef AARCH64
  map_register(rnum++, ZR);
#else
  map_register(rnum++, PC);
#endif
  assert(rnum == pd_nof_cpu_regs_frame_map, "should be");

  _init_done = true;

  R0_opr = as_opr(R0);  R0_oop_opr = as_oop_opr(R0);  R0_metadata_opr = as_metadata_opr(R0);
  R1_opr = as_opr(R1);  R1_oop_opr = as_oop_opr(R1);  R1_metadata_opr = as_metadata_opr(R1);
  R2_opr = as_opr(R2);  R2_oop_opr = as_oop_opr(R2);  R2_metadata_opr = as_metadata_opr(R2);
  R3_opr = as_opr(R3);  R3_oop_opr = as_oop_opr(R3);  R3_metadata_opr = as_metadata_opr(R3);
  R4_opr = as_opr(R4);  R4_oop_opr = as_oop_opr(R4);  R4_metadata_opr = as_metadata_opr(R4);
  R5_opr = as_opr(R5);  R5_oop_opr = as_oop_opr(R5);  R5_metadata_opr = as_metadata_opr(R5);

#ifdef AARCH64
  ZR_opr = as_opr(ZR);
#endif // AARCH64

  LR_opr      = as_opr(LR);
  LR_oop_opr  = as_oop_opr(LR);
  LR_ptr_opr  = as_pointer_opr(LR);
  FP_opr      = as_pointer_opr(FP);
  SP_opr      = as_pointer_opr(SP);
  Rthread_opr = as_pointer_opr(Rthread);

  // LIR operands for result
  Int_result_opr = R0_opr;
  Object_result_opr = R0_oop_opr;
#ifdef AARCH64
  Long_result_opr   = as_long_opr(R0);
  Float_result_opr  = as_float_opr(S0);
  Double_result_opr = as_double_opr(D0);
#else
  Long_result_opr = as_long_opr(R0, R1);
#ifdef __ABI_HARD__
  Float_result_opr  = as_float_opr(S0);
  Double_result_opr = as_double_opr(D0);
#else
  Float_result_opr  = LIR_OprFact::single_softfp(0);
  Double_result_opr = LIR_OprFact::double_softfp(0, 1);
#endif // __ABI_HARD__
#endif // AARCH64

  Exception_oop_opr = as_oop_opr(Rexception_obj);
  Exception_pc_opr  = as_opr(Rexception_pc);

  for (i = 0; i < nof_caller_save_cpu_regs(); i++) {
    _caller_save_cpu_regs[i] = LIR_OprFact::single_cpu(i);
  }
  for (i = 0; i < nof_caller_save_fpu_regs; i++) {
    _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
  }
}


Address FrameMap::make_new_address(ByteSize sp_offset) const {
  return Address(SP, sp_offset);
}

LIR_Opr FrameMap::stack_pointer() {
  return FrameMap::SP_opr;
}

LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
  assert(Rmh_SP_save == FP, "Fix register used for saving SP for MethodHandle calls");
  return FP_opr;
}

bool FrameMap::validate_frame() {
  int max_offset = in_bytes(framesize_in_bytes());
  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_stack()) {
      int arg_offset = _argument_locations->at(java_index);
      if (arg_offset > max_offset) {
        max_offset = arg_offset;
      }
    }
    java_index += type2size[opr->type()];
  }
  return max_offset < AARCH64_ONLY(16384) NOT_AARCH64(4096); // TODO-AARCH64: check that LIRAssembler does not generate load/store of byte and half-word with SP as the address base
}

VMReg FrameMap::fpu_regname(int n) {
  return as_FloatRegister(n)->as_VMReg();
}

hotspot/src/cpu/arm/vm/c1_FrameMap_arm.hpp (new file, 128 lines)
@ -0,0 +1,128 @@
/*
 * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_C1_FRAMEMAP_ARM_HPP
#define CPU_ARM_VM_C1_FRAMEMAP_ARM_HPP

 public:

  enum {
    first_available_sp_in_frame = 0,
    frame_pad_in_bytes = 2*wordSize // Account for FP/LR saved at build_frame().
  };

  static LIR_Opr R0_opr;
  static LIR_Opr R1_opr;
  static LIR_Opr R2_opr;
  static LIR_Opr R3_opr;
  static LIR_Opr R4_opr;
  static LIR_Opr R5_opr;
  // add more predefined register oprs as needed

  static LIR_Opr R0_oop_opr;
  static LIR_Opr R1_oop_opr;
  static LIR_Opr R2_oop_opr;
  static LIR_Opr R3_oop_opr;
  static LIR_Opr R4_oop_opr;
  static LIR_Opr R5_oop_opr;

  static LIR_Opr R0_metadata_opr;
  static LIR_Opr R1_metadata_opr;
  static LIR_Opr R2_metadata_opr;
  static LIR_Opr R3_metadata_opr;
  static LIR_Opr R4_metadata_opr;
  static LIR_Opr R5_metadata_opr;

#ifdef AARCH64
  static LIR_Opr ZR_opr;
#endif // AARCH64

  static LIR_Opr LR_opr;
  static LIR_Opr LR_oop_opr;
  static LIR_Opr LR_ptr_opr;

  static LIR_Opr FP_opr;
  static LIR_Opr SP_opr;
  static LIR_Opr Rthread_opr;

  static LIR_Opr Int_result_opr;
  static LIR_Opr Long_result_opr;
  static LIR_Opr Object_result_opr;
  static LIR_Opr Float_result_opr;
  static LIR_Opr Double_result_opr;

  static LIR_Opr Exception_oop_opr;
  static LIR_Opr Exception_pc_opr;

#ifdef AARCH64
  static LIR_Opr as_long_opr(Register r) {
    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
  }

  static LIR_Opr as_pointer_opr(Register r) {
    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
  }

  static LIR_Opr as_double_opr(FloatRegister r) {
    return LIR_OprFact::double_fpu(r->encoding());
  }
#else
  static LIR_Opr as_long_opr(Register r, Register r2) {
    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r2));
  }

  static LIR_Opr as_pointer_opr(Register r) {
    return LIR_OprFact::single_cpu(cpu_reg2rnr(r));
  }

  static LIR_Opr as_double_opr(FloatRegister r) {
    return LIR_OprFact::double_fpu(r->encoding(), r->successor()->encoding());
  }
#endif
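
// For example, a jlong result is returned in the R0:R1 pair on ARM32, so
// FrameMap::initialize() builds Long_result_opr as as_long_opr(R0, R1);
// on AArch64 the value fits in R0 alone and both halves of the operand
// name the same register.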

  static LIR_Opr as_float_opr(FloatRegister r) {
    return LIR_OprFact::single_fpu(r->encoding());
  }

  static VMReg fpu_regname(int n);

  static bool is_caller_save_register(LIR_Opr opr) {
    return true;
  }

  static int adjust_reg_range(int range) {
    // Reduce the number of available regs (to free Rheap_base) in case of compressed oops
    if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
    return range;
  }

  static int nof_caller_save_cpu_regs() {
    return adjust_reg_range(pd_nof_caller_save_cpu_regs_frame_map);
  }

  static int last_cpu_reg() {
    return pd_last_cpu_reg;
  }

#endif // CPU_ARM_VM_C1_FRAMEMAP_ARM_HPP

hotspot/src/cpu/arm/vm/c1_LIRAssembler_arm.cpp (new file, 3608 lines)
File diff suppressed because it is too large

hotspot/src/cpu/arm/vm/c1_LIRAssembler_arm.hpp (new file, 71 lines)
@ -0,0 +1,71 @@
/*
 * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_C1_LIRASSEMBLER_ARM_HPP
#define CPU_ARM_VM_C1_LIRASSEMBLER_ARM_HPP

 private:

  // Record the type of the receiver in ReceiverTypeData
  void type_profile_helper(Register mdo, int mdo_offset_bias,
                           ciMethodData *md, ciProfileData *data,
                           Register recv, Register tmp1, Label* update_done);
  // Set up pointers to the MDO and the MDO slot; also compute the offset bias needed to access the slot.
  void setup_md_access(ciMethod* method, int bci,
                       ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);

  void typecheck_profile_helper1(ciMethod* method, int bci,
                                 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
                                 Register obj, Register mdo, Register data_val, Label* obj_is_null);

  void typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
                                 Register mdo, Register recv, Register value, Register tmp1,
                                 Label* profile_cast_success, Label* profile_cast_failure,
                                 Label* success, Label* failure);

#ifdef AARCH64
  void long_compare_helper(LIR_Opr opr1, LIR_Opr opr2);
#endif // AARCH64

  // Saves the 4 given registers in the reserved argument area.
  void save_in_reserved_area(Register r1, Register r2, Register r3, Register r4);

  // Restores the 4 given registers from the reserved argument area.
  void restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4);

  enum {
    _call_stub_size = AARCH64_ONLY(32) NOT_AARCH64(16),
    _call_aot_stub_size = 0,
    _exception_handler_size = PRODUCT_ONLY(AARCH64_ONLY(256) NOT_AARCH64(68)) NOT_PRODUCT(AARCH64_ONLY(256+216) NOT_AARCH64(68+60)),
    _deopt_handler_size = AARCH64_ONLY(32) NOT_AARCH64(16)
  };

 public:

  void verify_reserved_argument_area_size(int args_count) PRODUCT_RETURN;

  void store_parameter(jint c, int offset_from_sp_in_words);
  void store_parameter(Metadata* m, int offset_from_sp_in_words);

#endif // CPU_ARM_VM_C1_LIRASSEMBLER_ARM_HPP

hotspot/src/cpu/arm/vm/c1_LIRGenerator_arm.cpp (new file, 1767 lines)
File diff suppressed because it is too large

hotspot/src/cpu/arm/vm/c1_LIRGenerator_arm.hpp (new file, 33 lines)
@ -0,0 +1,33 @@
/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

  // Helper to set the card at the given address to the given value.
  void set_card(LIR_Opr value, LIR_Address* card_addr);

  void make_div_by_zero_check(LIR_Opr right_arg, BasicType type, CodeEmitInfo* info);

#ifdef AARCH64
  // helper for arithmetic
  void add_constant(LIR_Opr src, jlong c, LIR_Opr dest);
#endif // AARCH64

hotspot/src/cpu/arm/vm/c1_LIR_arm.cpp (new file, 86 lines)
@ -0,0 +1,86 @@
/*
 * Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_LIR.hpp"

FloatRegister LIR_OprDesc::as_float_reg() const {
  return as_FloatRegister(fpu_regnr());
}

FloatRegister LIR_OprDesc::as_double_reg() const {
  return as_FloatRegister(fpu_regnrLo());
}

#ifdef AARCH64
// reg2 is unused.
LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
  assert(as_FloatRegister(reg2) == fnoreg, "Not used on this platform");
  return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
                             (reg1 << LIR_OprDesc::reg2_shift) |
                             LIR_OprDesc::double_type          |
                             LIR_OprDesc::fpu_register         |
                             LIR_OprDesc::double_size);
}
#else
LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
  assert(as_FloatRegister(reg2) != fnoreg, "Arm32 holds a double in two regs.");
  return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
                             (reg2 << LIR_OprDesc::reg2_shift) |
                             LIR_OprDesc::double_type          |
                             LIR_OprDesc::fpu_register         |
                             LIR_OprDesc::double_size);
}
#endif
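
// Note on the two variants above: AArch64 keeps a double in a single V
// register, so the same register number fills both operand fields; ARM32
// VFP spreads a double over two consecutive S registers, so the two
// fields differ.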

#ifndef PRODUCT
void LIR_Address::verify() const {
#ifdef _LP64
  assert(base()->is_cpu_register(), "wrong base operand");
#endif
#ifdef AARCH64
  if (base()->type() == T_INT) {
    assert(index()->is_single_cpu() && (index()->type() == T_INT), "wrong index operand");
  } else {
    assert(index()->is_illegal() || index()->is_double_cpu() ||
           (index()->is_single_cpu() && (index()->is_oop_register() || index()->type() == T_INT)), "wrong index operand");
    assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA, "wrong type for addresses");
  }
#else
  assert(disp() == 0 || index()->is_illegal(), "can't have both");
  // Note: offsets higher than 4096 must not be rejected here. They can
  // be handled by the back-end or will be rejected if not.
#ifdef _LP64
  assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
  assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
         "wrong type for addresses");
#else
  assert(base()->is_single_cpu(), "wrong base operand");
  assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
  assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
         "wrong type for addresses");
#endif
#endif // AARCH64
}
#endif // PRODUCT

hotspot/src/cpu/arm/vm/c1_LinearScan_arm.cpp (new file, 32 lines)
@ -0,0 +1,32 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LinearScan.hpp"
#include "utilities/bitMap.inline.hpp"

void LinearScan::allocate_fpu_stack() {
  // No FPU stack on ARM
}
78  hotspot/src/cpu/arm/vm/c1_LinearScan_arm.hpp  Normal file
@ -0,0 +1,78 @@
/*
 * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_C1_LINEARSCAN_ARM_HPP
#define CPU_ARM_VM_C1_LINEARSCAN_ARM_HPP

inline bool LinearScan::is_processed_reg_num(int reg_num) {
  return reg_num < pd_nof_cpu_regs_processed_in_linearscan ||
         reg_num >= pd_nof_cpu_regs_frame_map;
}

inline int LinearScan::num_physical_regs(BasicType type) {
#ifndef AARCH64
  if (type == T_LONG || type == T_DOUBLE) return 2;
#endif // !AARCH64
  return 1;
}

inline bool LinearScan::requires_adjacent_regs(BasicType type) {
#ifdef AARCH64
  return false;
#else
  return type == T_DOUBLE || type == T_LONG;
#endif // AARCH64
}
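
// [Annotation, not from the original sources] Taken together: a T_LONG or
// T_DOUBLE needs two physical registers on 32-bit ARM and they must be an
// adjacent pair, while on AArch64 every value fits in a single register,
// so neither constraint applies.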

inline bool LinearScan::is_caller_save(int assigned_reg) {
  assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers");
  // TODO-AARCH64 try to add callee-saved registers
  return true;
}

inline void LinearScan::pd_add_temps(LIR_Op* op) {
  // No extra temporaries on ARM
}

// Implementation of LinearScanWalker

inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
#ifndef __SOFTFP__
  if (cur->type() == T_FLOAT || cur->type() == T_DOUBLE) {
    _first_reg = pd_first_fpu_reg;
    _last_reg = pd_first_fpu_reg + pd_nof_fpu_regs_reg_alloc - 1;
    return true;
  }
#endif // !__SOFTFP__

  // Use allocatable CPU registers otherwise
  _first_reg = pd_first_cpu_reg;
  _last_reg = pd_first_cpu_reg + FrameMap::adjust_reg_range(pd_nof_cpu_regs_reg_alloc) - 1;
  return true;
}

#endif // CPU_ARM_VM_C1_LINEARSCAN_ARM_HPP
408  hotspot/src/cpu/arm/vm/c1_MacroAssembler_arm.cpp  Normal file
@ -0,0 +1,408 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  Label verified;
  load_klass(Rtemp, receiver);
  cmp(Rtemp, iCache);
  b(verified, eq); // jump over alignment no-ops
#ifdef AARCH64
  jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, Rtemp);
#else
  jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
#endif
  align(CodeEntryAlignment);
  bind(verified);
}
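
// [Annotation, not from the original sources] The inline cache check loads
// the receiver's klass and compares it with the klass the call site was
// compiled for; on a mismatch it jumps to the shared IC miss stub. The
// AArch64 variant passes Rtemp as a scratch register, presumably because
// the stub can be out of direct branch range.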

void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");

#ifdef AARCH64
  // Extra nop for MT-safe patching in NativeJump::patch_verified_entry
  nop();
#endif // AARCH64

  arm_stack_overflow_check(bang_size_in_bytes, Rtemp);

  // FP can no longer be used to memorize SP. It may be modified
  // if this method contains a methodHandle call site
  raw_push(FP, LR);
  sub_slow(SP, SP, frame_size_in_bytes);
}

void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  add_slow(SP, SP, frame_size_in_bytes);
  raw_pop(FP, LR);
}
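
// [Annotation, not from the original sources] Frame setup is: bang the
// stack to check for overflow, push the FP/LR pair, then drop SP by the
// frame size; remove_frame undoes the last two steps in reverse order.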

void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint) {
    breakpoint();
  }
}

// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
void C1_MacroAssembler::try_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                                     RegisterOrConstant size_expression, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, obj_end, tmp1, size_expression, slow_case);
  } else {
    eden_allocate(obj, obj_end, tmp1, tmp2, size_expression, slow_case);
    incr_allocated_bytes(size_expression, tmp1);
  }
}
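
// [Annotation, not from the original sources] Allocation prefers the
// thread-local allocation buffer (TLAB) bump-pointer fast path; without a
// TLAB it allocates from eden and must account the allocated bytes itself.
// Both paths branch to `slow_case` on failure.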

void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp) {
  assert_different_registers(obj, klass, len, tmp);

  if (UseBiasedLocking && !len->is_valid()) {
    ldr(tmp, Address(klass, Klass::prototype_header_offset()));
  } else {
    mov(tmp, (intptr_t)markOopDesc::prototype());
  }

#ifdef AARCH64
  if (UseCompressedClassPointers) {
    str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
    encode_klass_not_null(tmp, klass); // Take care not to kill klass
    str_w(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
  } else {
    assert(oopDesc::mark_offset_in_bytes() + wordSize == oopDesc::klass_offset_in_bytes(), "adjust this code");
    stp(tmp, klass, Address(obj, oopDesc::mark_offset_in_bytes()));
  }
#else
  str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
#endif // AARCH64

  if (len->is_valid()) {
    str_32(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  }
#ifdef AARCH64
  else if (UseCompressedClassPointers) {
    store_klass_gap(obj);
  }
#endif // AARCH64
}

// Cleans object body [base..obj_end]. Clobbers `base` and `tmp` registers.
void C1_MacroAssembler::initialize_body(Register base, Register obj_end, Register tmp) {
  zero_memory(base, obj_end, tmp);
}

void C1_MacroAssembler::initialize_object(Register obj, Register obj_end, Register klass,
                                          Register len, Register tmp1, Register tmp2,
                                          RegisterOrConstant header_size, int obj_size_in_bytes,
                                          bool is_tlab_allocated)
{
  assert_different_registers(obj, obj_end, klass, len, tmp1, tmp2);
  initialize_header(obj, klass, len, tmp1);

  const Register ptr = tmp2;

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
#ifdef AARCH64
    if (obj_size_in_bytes < 0) {
      add_rc(ptr, obj, header_size);
      initialize_body(ptr, obj_end, tmp1);

    } else {
      int base = instanceOopDesc::header_size() * HeapWordSize;
      assert(obj_size_in_bytes >= base, "should be");

      const int zero_bytes = obj_size_in_bytes - base;
      assert((zero_bytes % wordSize) == 0, "should be");

      if ((zero_bytes % (2*wordSize)) != 0) {
        str(ZR, Address(obj, base));
        base += wordSize;
      }

      const int stp_count = zero_bytes / (2*wordSize);

      if (zero_bytes > 8 * wordSize) {
        Label loop;
        add(ptr, obj, base);
        mov(tmp1, stp_count);
        bind(loop);
        subs(tmp1, tmp1, 1);
        stp(ZR, ZR, Address(ptr, 2*wordSize, post_indexed));
        b(loop, gt);
      } else {
        for (int i = 0; i < stp_count; i++) {
          stp(ZR, ZR, Address(obj, base + i * 2 * wordSize));
        }
      }
    }
#else
    if (obj_size_in_bytes >= 0 && obj_size_in_bytes <= 8 * BytesPerWord) {
      mov(tmp1, 0);
      const int base = instanceOopDesc::header_size() * HeapWordSize;
      for (int i = base; i < obj_size_in_bytes; i += wordSize) {
        str(tmp1, Address(obj, i));
      }
    } else {
      assert(header_size.is_constant() || header_size.as_register() == ptr, "code assumption");
      add(ptr, obj, header_size);
      initialize_body(ptr, obj_end, tmp1);
    }
#endif // AARCH64
  }

  // StoreStore barrier required after complete initialization
  // (headers + content zeroing), before the object may escape.
  membar(MacroAssembler::StoreStore, tmp1);
}
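
// [Annotation, not from the original sources] When the object size is a
// compile-time constant, the AArch64 path zeroes the body with paired
// stp ZR, ZR stores: fully unrolled up to 8 words, a counted stp loop
// beyond that; unknown sizes fall back to the generic zero_memory() path
// via initialize_body().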

void C1_MacroAssembler::allocate_object(Register obj, Register tmp1, Register tmp2, Register tmp3,
                                        int header_size, int object_size,
                                        Register klass, Label& slow_case) {
  assert_different_registers(obj, tmp1, tmp2, tmp3, klass, Rtemp);
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
  const int object_size_in_bytes = object_size * BytesPerWord;

  const Register obj_end = tmp1;
  const Register len = noreg;

  if (Assembler::is_arith_imm_in_range(object_size_in_bytes)) {
    try_allocate(obj, obj_end, tmp2, tmp3, object_size_in_bytes, slow_case);
  } else {
    // Rtemp should be free at c1 LIR level
    mov_slow(Rtemp, object_size_in_bytes);
    try_allocate(obj, obj_end, tmp2, tmp3, Rtemp, slow_case);
  }
  initialize_object(obj, obj_end, klass, len, tmp2, tmp3, instanceOopDesc::header_size() * HeapWordSize, object_size_in_bytes, /* is_tlab_allocated */ UseTLAB);
}

void C1_MacroAssembler::allocate_array(Register obj, Register len,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       int header_size, int element_size,
                                       Register klass, Label& slow_case) {
  assert_different_registers(obj, len, tmp1, tmp2, tmp3, klass, Rtemp);
  const int header_size_in_bytes = header_size * BytesPerWord;
  const int scale_shift = exact_log2(element_size);
  const Register obj_size = Rtemp; // Rtemp should be free at c1 LIR level

#ifdef AARCH64
  mov_slow(Rtemp, max_array_allocation_length);
  cmp_32(len, Rtemp);
#else
  cmp_32(len, max_array_allocation_length);
#endif // AARCH64
  b(slow_case, hs);

  bool align_header = ((header_size_in_bytes | element_size) & MinObjAlignmentInBytesMask) != 0;
  assert(align_header || ((header_size_in_bytes & MinObjAlignmentInBytesMask) == 0), "must be");
  assert(align_header || ((element_size & MinObjAlignmentInBytesMask) == 0), "must be");

  mov(obj_size, header_size_in_bytes + (align_header ? (MinObjAlignmentInBytes - 1) : 0));
  add_ptr_scaled_int32(obj_size, obj_size, len, scale_shift);

  if (align_header) {
    align_reg(obj_size, obj_size, MinObjAlignmentInBytes);
  }

  try_allocate(obj, tmp1, tmp2, tmp3, obj_size, slow_case);
  initialize_object(obj, tmp1, klass, len, tmp2, tmp3, header_size_in_bytes, -1, /* is_tlab_allocated */ UseTLAB);
}

int C1_MacroAssembler::lock_object(Register hdr, Register obj,
                                   Register disp_hdr, Register tmp1,
                                   Label& slow_case) {
  Label done, fast_lock, fast_lock_done;
  int null_check_offset = 0;

  const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
  assert_different_registers(hdr, obj, disp_hdr, tmp1, tmp2);

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  if (UseBiasedLocking) {
    // load object
    str(obj, Address(disp_hdr, obj_offset));
    null_check_offset = biased_locking_enter(obj, hdr/*scratched*/, tmp1, false, tmp2, done, slow_case);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

#ifdef AARCH64

  str(obj, Address(disp_hdr, obj_offset));

  if (!UseBiasedLocking) {
    null_check_offset = offset();
  }
  ldr(hdr, obj);

  // Test if object is already locked
  assert(markOopDesc::unlocked_value == 1, "adjust this code");
  tbnz(hdr, exact_log2(markOopDesc::unlocked_value), fast_lock);

  // Check for recursive locking
  // See comments in InterpreterMacroAssembler::lock_object for
  // explanations on the fast recursive locking check.
  intptr_t mask = ((intptr_t)3) - ((intptr_t)os::vm_page_size());
  Assembler::LogicalImmediate imm(mask, false);
  mov(tmp2, SP);
  sub(tmp2, hdr, tmp2);
  ands(tmp2, tmp2, imm);
  b(slow_case, ne);

  // Recursive locking: store 0 into a lock record
  str(ZR, Address(disp_hdr, mark_offset));
  b(fast_lock_done);

#else // AARCH64

  if (!UseBiasedLocking) {
    null_check_offset = offset();
  }

  // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
  // That would be acceptable as either CAS or slow case path is taken in that case.

  // Must be the first instruction here, because implicit null check relies on it
  ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));

  str(obj, Address(disp_hdr, obj_offset));
  tst(hdr, markOopDesc::unlocked_value);
  b(fast_lock, ne);

  // Check for recursive locking
  // See comments in InterpreterMacroAssembler::lock_object for
  // explanations on the fast recursive locking check.
  // -1- test low 2 bits
  movs(tmp2, AsmOperand(hdr, lsl, 30));
  // -2- test (hdr - SP) if the low two bits are 0
  sub(tmp2, hdr, SP, eq);
  movs(tmp2, AsmOperand(tmp2, lsr, exact_log2(os::vm_page_size())), eq);
  // If 'eq' then OK for recursive fast locking: store 0 into a lock record.
  str(tmp2, Address(disp_hdr, mark_offset), eq);
  b(fast_lock_done, eq);
  // else need slow case
  b(slow_case);

#endif // AARCH64

  bind(fast_lock);
  // Save previous object header in BasicLock structure and update the header
  str(hdr, Address(disp_hdr, mark_offset));

  cas_for_lock_acquire(hdr, disp_hdr, obj, tmp2, slow_case);

  bind(fast_lock_done);

#ifndef PRODUCT
  if (PrintBiasedLockingStatistics) {
    cond_atomic_inc32(al, BiasedLocking::fast_path_entry_count_addr());
  }
#endif // !PRODUCT

  bind(done);

  return null_check_offset;
}
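
// [Annotation, not from the original sources] This is the usual
// displaced-header stack lock: an unlocked mark word is saved into the
// on-stack BasicLock and replaced by a pointer to that record with a CAS;
// a mark word that already points into the current thread's stack (the
// page-distance test against SP above) indicates a recursive lock, which
// is recorded by storing 0 into the lock record.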

void C1_MacroAssembler::unlock_object(Register hdr, Register obj,
                                      Register disp_hdr, Register tmp,
                                      Label& slow_case) {
  // Note: this method is not using its 'tmp' argument

  assert_different_registers(hdr, obj, disp_hdr, Rtemp);
  Register tmp2 = Rtemp;

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  Label done;
  if (UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, obj_offset));
    biased_locking_exit(obj, hdr, done);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
  Label retry;

  // Load displaced header and object from the lock
  ldr(hdr, Address(disp_hdr, mark_offset));
  // If hdr is NULL, we've got recursive locking and there's nothing more to do
  cbz(hdr, done);

  if (!UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, obj_offset));
  }

  // Restore the object header
  cas_for_lock_release(disp_hdr, hdr, obj, tmp2, slow_case);

  bind(done);
}

#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(SP, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  Label not_null;
  cbnz(r, not_null);
  stop("non-null oop required");
  bind(not_null);
  if (!VerifyOops) return;
  verify_oop(r);
}

#endif // !PRODUCT
69  hotspot/src/cpu/arm/vm/c1_MacroAssembler_arm.hpp  Normal file
@ -0,0 +1,69 @@
/*
 * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_C1_MACROASSEMBLER_ARM_HPP
#define CPU_ARM_VM_C1_MACROASSEMBLER_ARM_HPP

 private:

  void pd_init() { /* not used */ }

 public:

  // Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
  // `size_expression` should be a register or constant which can be used as immediate in "add" instruction.
  void try_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                    RegisterOrConstant size_expression, Label& slow_case);

  void initialize_header(Register obj, Register klass, Register len, Register tmp);

  // Cleans object body [base..obj_end]. Clobbers `base` and `tmp` registers.
  void initialize_body(Register base, Register obj_end, Register tmp);

  void initialize_object(Register obj, Register obj_end, Register klass,
                         Register len, Register tmp1, Register tmp2,
                         RegisterOrConstant header_size_expression, int obj_size_in_bytes,
                         bool is_tlab_allocated);

  void allocate_object(Register obj, Register tmp1, Register tmp2, Register tmp3,
                       int header_size, int object_size,
                       Register klass, Label& slow_case);

  void allocate_array(Register obj, Register len,
                      Register tmp1, Register tmp2, Register tmp3,
                      int header_size, int element_size,
                      Register klass, Label& slow_case);

  enum {
    max_array_allocation_length = 0x01000000
  };
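
  // [Annotation, not from the original sources] Array lengths at or above
  // max_array_allocation_length (0x01000000 elements) are sent straight to
  // the slow path by allocate_array(), which presumably keeps the scaled
  // size computation safely within a 32-bit range.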

  int lock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case);

  void unlock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case);

  // This platform only uses signal-based null checks. The Label is not needed.
  void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }

#endif // CPU_ARM_VM_C1_MACROASSEMBLER_ARM_HPP
1226  hotspot/src/cpu/arm/vm/c1_Runtime1_arm.cpp  Normal file
(File diff suppressed because it is too large.)
73  hotspot/src/cpu/arm/vm/c1_globals_arm.hpp  Normal file
@ -0,0 +1,73 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_C1_GLOBALS_ARM_HPP
#define CPU_ARM_VM_C1_GLOBALS_ARM_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

//
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)
//

#ifndef COMPILER2 // avoid duplicated definitions, favoring C2 version
define_pd_global(bool,   BackgroundCompilation,        true );
define_pd_global(bool,   UseTLAB,                      true );
define_pd_global(bool,   ResizeTLAB,                   true );
define_pd_global(bool,   InlineIntrinsics,             false); // TODO: ARM
define_pd_global(bool,   PreferInterpreterNativeStubs, false);
define_pd_global(bool,   ProfileTraps,                 false);
define_pd_global(bool,   UseOnStackReplacement,        true );
define_pd_global(bool,   TieredCompilation,            false);
define_pd_global(intx,   CompileThreshold,             1500 );

define_pd_global(intx,   OnStackReplacePercentage,     933  );
define_pd_global(intx,   FreqInlineSize,               325  );
define_pd_global(size_t, NewSizeThreadIncrease,        4*K  );
define_pd_global(size_t, InitialCodeCacheSize,         160*K);
define_pd_global(size_t, ReservedCodeCacheSize,        32*M );
define_pd_global(size_t, NonProfiledCodeHeapSize,      13*M );
define_pd_global(size_t, ProfiledCodeHeapSize,         14*M );
define_pd_global(size_t, NonNMethodCodeHeapSize,       5*M  );
define_pd_global(bool,   ProfileInterpreter,           false);
define_pd_global(size_t, CodeCacheExpansionSize,       32*K );
define_pd_global(uintx,  CodeCacheMinBlockLength,      1);
define_pd_global(size_t, CodeCacheMinimumUseSpace,     400*K);
define_pd_global(size_t, MetaspaceSize,                12*M );
define_pd_global(bool,   NeverActAsServerClassMachine, true);
define_pd_global(uint64_t, MaxRAM,                     1ULL*G);
define_pd_global(bool,   CICompileOSR,                 true );
#endif // COMPILER2
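
// [Annotation, not from the original sources] The block above supplies the
// client-compiler defaults only when C2 is absent; a COMPILER2 build takes
// the equivalent definitions from c2_globals_arm.hpp instead, which is what
// the "avoid duplicated definitions" guard refers to.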

define_pd_global(bool, UseTypeProfile,          false);
define_pd_global(bool, RoundFPResults,          false);

define_pd_global(bool, LIRFillDelaySlots,       false);
define_pd_global(bool, OptimizeSinglePrecision, true);
define_pd_global(bool, CSEArrayLength,          true);
define_pd_global(bool, TwoOperandLIRForm,       false);

#endif // CPU_ARM_VM_C1_GLOBALS_ARM_HPP
124  hotspot/src/cpu/arm/vm/c2_globals_arm.hpp  Normal file
@ -0,0 +1,124 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_C2_GLOBALS_ARM_HPP
#define CPU_ARM_VM_C2_GLOBALS_ARM_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

//
// Sets the default values for platform dependent flags used by the server compiler.
// (see c2_globals.hpp). Alpha-sorted.

define_pd_global(bool, BackgroundCompilation,        true);
define_pd_global(bool, CICompileOSR,                 true);
define_pd_global(bool, InlineIntrinsics,             false);
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps,                 true);
define_pd_global(bool, UseOnStackReplacement,        true);
define_pd_global(bool, ProfileInterpreter,           true);
#ifdef AARCH64
define_pd_global(bool, TieredCompilation,            trueInTiered);
#else
define_pd_global(bool, TieredCompilation,            false);
#endif
define_pd_global(intx, CompileThreshold,             10000);

define_pd_global(intx, OnStackReplacePercentage,     140);
define_pd_global(intx, ConditionalMoveLimit,         4);
// C2 gets to use all the float/double registers
#ifdef AARCH64
define_pd_global(intx, FLOATPRESSURE,                31);
#else
define_pd_global(intx, FLOATPRESSURE,                30);
#endif
define_pd_global(intx, FreqInlineSize,               175);
#ifdef AARCH64
define_pd_global(intx, INTPRESSURE,                  27);
#else
define_pd_global(intx, INTPRESSURE,                  12);
#endif
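// [Annotation, not from the original sources] FLOATPRESSURE and INTPRESSURE
// tell C2's allocator how many float and integer registers it may consider
// before spilling; the AArch64 values are higher, presumably because the
// 64-bit ISA exposes more allocatable registers than 32-bit ARM.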
define_pd_global(intx, InteriorEntryAlignment,       16); // = CodeEntryAlignment
define_pd_global(size_t, NewSizeThreadIncrease,      ScaleForWordSize(4*K));
// The default setting 16/16 seems to work best.
// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
//define_pd_global(intx, OptoLoopAlignment,          16); // = 4*wordSize
define_pd_global(intx, RegisterCostAreaRatio,        16000);
define_pd_global(bool, UseTLAB,                      true);
define_pd_global(bool, ResizeTLAB,                   true);
define_pd_global(intx, LoopUnrollLimit,              60); // Design center runs on 1.3.1
define_pd_global(intx, LoopPercentProfileLimit,      10);
define_pd_global(intx, PostLoopMultiversioning,      false);
define_pd_global(intx, MinJumpTableSize,             16);

// Peephole and CISC spilling both break the graph, and so make the
// scheduler sick.
define_pd_global(bool, OptoPeephole,                 false);
define_pd_global(bool, UseCISCSpill,                 false);
define_pd_global(bool, OptoBundling,                 false);
define_pd_global(bool, OptoScheduling,               true);
define_pd_global(bool, OptoRegScheduling,            false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis,  false);
define_pd_global(bool, IdealizeClearArrayNode,       true);

#ifdef _LP64
// We need to make sure that all generated code is within
// 2 gigs of the libjvm.so runtime routines so we can use
// the faster "call" instruction rather than the expensive
// sequence of instructions to load a 64 bit pointer.
//
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(size_t, InitialCodeCacheSize,       2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(size_t, ReservedCodeCacheSize,      48*M);
define_pd_global(size_t, NonProfiledCodeHeapSize,    21*M);
define_pd_global(size_t, ProfiledCodeHeapSize,       22*M);
define_pd_global(size_t, NonNMethodCodeHeapSize,     5*M );
define_pd_global(size_t, CodeCacheExpansionSize,     64*K);

// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM,                   128ULL*G);
#else
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(size_t, InitialCodeCacheSize,       1536*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(size_t, ReservedCodeCacheSize,      32*M);
define_pd_global(size_t, NonProfiledCodeHeapSize,    13*M);
define_pd_global(size_t, ProfiledCodeHeapSize,       14*M);
define_pd_global(size_t, NonNMethodCodeHeapSize,     5*M );
define_pd_global(size_t, CodeCacheExpansionSize,     32*K);
// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM,                   4ULL*G);
#endif
define_pd_global(uintx, CodeCacheMinBlockLength,     4);
define_pd_global(size_t, CodeCacheMinimumUseSpace,   400*K);

define_pd_global(bool, TrapBasedRangeChecks,         false); // Not needed

// Heap related flags
define_pd_global(size_t, MetaspaceSize,              ScaleForWordSize(16*M));

// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);

#endif // CPU_ARM_VM_C2_GLOBALS_ARM_HPP
34  hotspot/src/cpu/arm/vm/codeBuffer_arm.hpp  Normal file
@ -0,0 +1,34 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_CODEBUFFER_ARM_HPP
#define CPU_ARM_VM_CODEBUFFER_ARM_HPP

private:
  void pd_initialize() {}

public:
  void flush_bundle(bool start_new_bundle) {}

#endif // CPU_ARM_VM_CODEBUFFER_ARM_HPP
166  hotspot/src/cpu/arm/vm/compiledIC_arm.cpp  Normal file
@ -0,0 +1,166 @@
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

// ----------------------------------------------------------------------------
#if defined(COMPILER2) || INCLUDE_JVMCI
#define __ _masm.
// emit call stub, compiled java to interpreter
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // set (empty), R9
  // b -1

  if (mark == NULL) {
    mark = cbuf.insts_mark(); // get mark within main instrs section
  }

  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(to_interp_stub_size());
  if (base == NULL) {
    return NULL; // CodeBuffer::expand failed
  }

  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark));

  InlinedMetadata object_literal(NULL);
  // single instruction, see NativeMovConstReg::next_instruction_address() in
  // CompiledStaticCall::set_to_interpreted()
  __ ldr_literal(Rmethod, object_literal);

  __ set_inst_mark(); // Who uses this?

  bool near_range = __ cache_fully_reachable();
  InlinedAddress dest((address)-1);
  address branch_site = __ pc();
  if (near_range) {
    __ b(branch_site); // special NativeJump -1 destination
  } else {
    // Can't trash LR, FP, or argument registers
    __ indirect_jump(dest, Rtemp);
  }
  __ bind_literal(object_literal); // includes spec_for_immediate reloc
  if (!near_range) {
    __ bind_literal(dest); // special NativeJump -1 destination
  }

  assert(__ pc() - base <= to_interp_stub_size(), "wrong stub size");

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
  return base;
}
#undef __
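
// [Annotation, not from the original sources] The stub emitted above is a
// two-part trampoline: an ldr_literal that is later patched to hold the
// callee Method* (loaded into Rmethod), followed by a branch whose
// destination starts out as -1 and is patched when the call is resolved;
// the far-range variant jumps indirectly through a bound literal instead.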

// size of C2 call stub, compiled java to interpreter
int CompiledStaticCall::to_interp_stub_size() {
  return 8 * NativeInstruction::instruction_size;
}

// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
  return 10; // 4 in emit_to_interp_stub + 1 in Java_Static_Call
}
#endif // COMPILER2 || JVMCI

void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
  address stub = find_stub(/*is_aot*/ false);
  guarantee(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  p2i(instruction_address()),
                  callee->name_and_sig_as_C_string());
  }

  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

#ifdef ASSERT
  // read the value once
  volatile intptr_t data = method_holder->data();
  volatile address destination = jump->jump_destination();
  assert(data == 0 || data == (intptr_t)callee(),
         "a) MT-unsafe modification of inline cache");
  assert(destination == (address)-1 || destination == entry,
         "b) MT-unsafe modification of inline cache");
#endif

  // Update stub.
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call.
  set_destination_mt_safe(stub);
}

void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset stub.
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledDirectStaticCall::verify() {
  // Verify call.
  _call->verify();
  if (os::is_MP()) {
    _call->verify_alignment();
  }

  // Verify stub.
  address stub = find_stub(/*is_aot*/ false);
  assert(stub != NULL, "no stub found for static call");
  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // Verify state.
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif // !PRODUCT
59  hotspot/src/cpu/arm/vm/copy_arm.hpp  Normal file
@ -0,0 +1,59 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_COPY_ARM_HPP
#define CPU_ARM_VM_COPY_ARM_HPP

#include "utilities/macros.hpp"

// Inline functions for memory copy and fill.

// Contains inline asm implementations
#include OS_CPU_HEADER_INLINE(copy)

static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
  juint* to = (juint*)tohw;
  count *= HeapWordSize / BytesPerInt;
  while (count-- > 0) {
    *to++ = value;
  }
}
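
// [Annotation, not from the original sources] HeapWordSize / BytesPerInt
// rescales a count of heap words into a count of 32-bit stores, so the same
// juint pattern fills the range on both 32-bit and 64-bit targets.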

static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
  pd_fill_to_words(tohw, count, value);
}

static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
  memset(to, value, count);
}

static void pd_zero_to_words(HeapWord* tohw, size_t count) {
  pd_fill_to_words(tohw, count, 0);
}

static void pd_zero_to_bytes(void* to, size_t count) {
  memset(to, 0, count);
}

#endif // CPU_ARM_VM_COPY_ARM_HPP
33  hotspot/src/cpu/arm/vm/debug_arm.cpp  Normal file
@ -0,0 +1,33 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "runtime/frame.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"

void pd_ps(frame f) {}
29  hotspot/src/cpu/arm/vm/depChecker_arm.cpp  Normal file
@ -0,0 +1,29 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/disassembler.hpp"
#include "depChecker_arm.hpp"

// Nothing to do
30  hotspot/src/cpu/arm/vm/depChecker_arm.hpp  Normal file
@ -0,0 +1,30 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_DEPCHECKER_ARM_HPP
#define CPU_ARM_VM_DEPCHECKER_ARM_HPP

// Nothing to do

#endif // CPU_ARM_VM_DEPCHECKER_ARM_HPP
36  hotspot/src/cpu/arm/vm/disassembler_arm.hpp  Normal file
@ -0,0 +1,36 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_DISASSEMBLER_ARM_HPP
#define CPU_ARM_VM_DISASSEMBLER_ARM_HPP

  static int pd_instruction_alignment() {
    return sizeof(int);
  }

  static const char* pd_cpu_opts() {
    return "";
  }

#endif // CPU_ARM_VM_DISASSEMBLER_ARM_HPP
655  hotspot/src/cpu/arm/vm/frame_arm.cpp  Normal file
@ -0,0 +1,655 @@
/*
|
||||
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/markOop.hpp"
|
||||
#include "oops/method.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/javaCalls.hpp"
|
||||
#include "runtime/monitorChunk.hpp"
|
||||
#include "runtime/signature.hpp"
|
||||
#include "runtime/stubCodeGenerator.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "vmreg_arm.inline.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_Runtime1.hpp"
|
||||
#include "runtime/vframeArray.hpp"
|
||||
#endif
|
||||
#include "prims/methodHandles.hpp"
|
||||
|
||||
#ifdef ASSERT
|
||||
void RegisterMap::check_location_valid() {
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
// Profiling/safepoint support
|
||||
|
||||
bool frame::safe_for_sender(JavaThread *thread) {
|
||||
address sp = (address)_sp;
|
||||
address fp = (address)_fp;
|
||||
address unextended_sp = (address)_unextended_sp;
|
||||
|
||||
static size_t stack_guard_size = os::uses_stack_guard_pages() ?
|
||||
(JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size()) : 0;
|
||||
size_t usable_stack_size = thread->stack_size() - stack_guard_size;
|
||||
|
||||
// sp must be within the usable part of the stack (not in guards)
|
||||
bool sp_safe = (sp != NULL &&
|
||||
(sp <= thread->stack_base()) &&
|
||||
(sp >= thread->stack_base() - usable_stack_size));
|
||||
|
||||
if (!sp_safe) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool unextended_sp_safe = (unextended_sp != NULL &&
|
||||
(unextended_sp <= thread->stack_base()) &&
|
||||
(unextended_sp >= sp));
|
||||
if (!unextended_sp_safe) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// We know sp/unextended_sp are safe. Only fp is questionable here.
|
||||
|
||||
bool fp_safe = (fp != NULL &&
|
||||
(fp <= thread->stack_base()) &&
|
||||
fp >= sp);
|
||||
|
||||
if (_cb != NULL ) {
|
||||
|
||||
// First check if frame is complete and tester is reliable
|
||||
// Unfortunately we can only check frame complete for runtime stubs and nmethod
|
||||
// other generic buffer blobs are more problematic so we just assume they are
|
||||
// ok. adapter blobs never have a frame complete and are never ok.
|
||||
|
||||
if (!_cb->is_frame_complete_at(_pc)) {
|
||||
if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Could just be some random pointer within the codeBlob
|
||||
if (!_cb->code_contains(_pc)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Entry frame checks
|
||||
if (is_entry_frame()) {
|
||||
// an entry frame must have a valid fp.
|
||||
return fp_safe && is_entry_frame_valid(thread);
|
||||
}
|
||||
|
||||
intptr_t* sender_sp = NULL;
|
||||
address sender_pc = NULL;
|
||||
|
||||
if (is_interpreted_frame()) {
|
||||
// fp must be safe
|
||||
if (!fp_safe) {
|
||||
return false;
|
||||
}
|
||||
|
||||
sender_pc = (address) this->fp()[return_addr_offset];
|
||||
sender_sp = (intptr_t*) addr_at(sender_sp_offset);
|
||||
|
||||
} else {
|
||||
// must be some sort of compiled/runtime frame
|
||||
// fp does not have to be safe (although it could be check for c1?)
|
||||
|
||||
sender_sp = _unextended_sp + _cb->frame_size();
|
||||
// Is sender_sp safe?
|
||||
if ((address)sender_sp >= thread->stack_base()) {
|
||||
return false;
|
||||
}
|
||||
// With our calling conventions, the return_address should
|
||||
// end up being the word on the stack
|
||||
sender_pc = (address) *(sender_sp - sender_sp_offset + return_addr_offset);
|
||||
}
|
||||
|
||||
// We must always be able to find a recognizable pc
|
||||
CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
|
||||
if (sender_pc == NULL || sender_blob == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // FP is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved FP
      // is really a frame pointer.

      intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset + link_offset);
      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender
      frame sender(sender_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);
    }

    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }

    // Could just be some random pointer within the codeBlob
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset + link_offset);
      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp >= sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender
      frame sender(sender_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      bool jcw_safe = (jcw <= thread->stack_base()) && (jcw > (address)sender.fp());

      return jcw_safe;
    }

    // If the frame size is 0 (or less) something is bad, because every nmethod
    // has a non-zero frame size: the return address counts against the callee's frame.

    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_compiled(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.

    if (!sender_blob->is_compiled()) {
      return false;
    }

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be native-compiled frame. Since sender will try and use fp to find
  // linkages it must be safe

  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)

  if ((address) this->fp()[return_addr_offset] == NULL) return false;

  // could try and do some more potential verification of native frame if we could think of some...

  return true;
}

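// Patch the return-address slot of this frame with a new pc. If the new pc
// lands in a CodeBlob that has recorded an original (pre-deoptimization) pc,
// the frame is marked deoptimized and _pc is left pointing at the original code.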
void frame::patch_pc(Thread* thread, address pc) {
  address* pc_addr = &((address*)sp())[-sender_sp_offset+return_addr_offset];
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "] ",
                  p2i(pc_addr), p2i(*pc_addr), p2i(pc));
  }
  *pc_addr = pc;
  _cb = CodeCache::find_blob(pc);
  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    assert(original_pc == _pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}

bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

int frame::frame_size(RegisterMap* map) const {
  frame sender = this->sender(map);
  return sender.sp() - sp();
}

intptr_t* frame::entry_frame_argument_at(int offset) const {
  assert(is_entry_frame(), "entry frame expected");
  // convert offset to index to deal with tsi
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  // Entry frame's arguments are always in relation to unextended_sp()
  return &unextended_sp()[index];
}

// sender_sp
intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return (intptr_t*) at(interpreter_frame_sender_sp_offset);
}

void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}


// monitor elements

BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}

BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
  // make sure the pointer points inside the frame
  assert((intptr_t) fp() > (intptr_t) result, "result must be below the frame pointer");
  assert((intptr_t) sp() <= (intptr_t) result, "result must be at or above the stack pointer");
  return result;
}

void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
}

#ifdef AARCH64

// Used by template based interpreter deoptimization
void frame::interpreter_frame_set_stack_top(intptr_t* stack_top) {
  *((intptr_t**)addr_at(interpreter_frame_stack_top_offset)) = stack_top;
}

// Used by template based interpreter deoptimization
void frame::interpreter_frame_set_extended_sp(intptr_t* sp) {
  *((intptr_t**)addr_at(interpreter_frame_extended_sp_offset)) = sp;
}

#else

// Used by template based interpreter deoptimization
void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
  *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
}

#endif // AARCH64

frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
#ifdef AARCH64
  assert(jfa->last_Java_pc() != NULL, "pc should be stored");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
  return fr;
#else
  if (jfa->last_Java_pc() != NULL) {
    frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
    return fr;
  }
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp());
  return fr;
#endif // AARCH64
}

//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP. The unextended SP might also be the saved SP
// for MethodHandle call sites.
#ifdef ASSERT
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
  frame fr;

  // This is ugly but it's better than to change {get,set}_original_pc
  // to take an SP value as argument. And it's only a debugging
  // method anyway.
  fr._unextended_sp = unextended_sp;

  address original_pc = nm->get_original_pc(&fr);
  assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
  assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
}
#endif

//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
void frame::adjust_unextended_sp() {
  // same as on x86

  // If we are returning to a compiled MethodHandle call site, the
  // saved_fp will in fact be a saved value of the unextended SP. The
  // simplest way to tell whether we are returning to such a call site
  // is as follows:

  CompiledMethod* sender_cm = (_cb == NULL) ? NULL : _cb->as_compiled_method_or_null();
  if (sender_cm != NULL) {
    // If the sender PC is a deoptimization point, get the original
    // PC. For MethodHandle call site the unextended_sp is stored in
    // saved_fp.
    if (sender_cm->is_deopt_mh_entry(_pc)) {
      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_cm, _fp));
      _unextended_sp = _fp;
    } else if (sender_cm->is_deopt_entry(_pc)) {
      DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
    } else if (sender_cm->is_method_handle_return(_pc)) {
      _unextended_sp = _fp;
    }
  }
}

//------------------------------------------------------------------------------
// frame::update_map_with_saved_link
void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
  // see x86 for comments
  map->set_location(FP->as_VMReg(), (address) link_addr);
#ifdef AARCH64
  // also adjust a high part of register
  map->set_location(FP->as_VMReg()->next(), (address) link_addr);
#endif // AARCH64
}

frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  // SP is the raw SP from the sender after adapter or interpreter
  // extension.
  intptr_t* sender_sp = this->sender_sp();

  // This is the sp before any possible extension (adapter/locals).
  intptr_t* unextended_sp = interpreter_frame_sender_sp();

#ifdef COMPILER2
  if (map->update_map()) {
    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
  }
#endif // COMPILER2

  return frame(sender_sp, unextended_sp, link(), sender_pc());
}

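// Compute the sender of a compiled frame. The sender's SP is this frame's
// unextended SP plus the fixed frame size recorded in the CodeBlob; the
// return address and the saved FP live in the two-word link area just below it.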
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");

  // frame owned by optimizing compiler
  assert(_cb->frame_size() >= 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  intptr_t* unextended_sp = sender_sp;

  address sender_pc = (address) *(sender_sp - sender_sp_offset + return_addr_offset);

  // This is the saved value of FP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - sender_sp_offset + link_offset);

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
    if (_cb->oop_maps() != NULL) {
      OopMapSet::update_register_map(this, map);
    }

    // Since the prolog does the save and restore of FP there is no oopmap
    // for it so we must fill in its location as if there was an oopmap entry
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}

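// Dispatch to the appropriate sender_for_xxx variant based on the kind of
// this frame: entry (call stub), interpreted, or compiled/runtime stub.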
frame frame::sender(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly
  map->set_include_argument_oops(false);

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");

  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }

  assert(false, "should not be called for a C frame");
  return frame();
}

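// Heuristic validity check for a (potential) interpreted frame: verifies
// alignment and ordering of sp/fp and sanity-checks the method, bcp,
// constant pool cache and locals slots stored in the frame.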
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) { // this attempts to deal with unsigned comparison above
    return false;
  }
  // do some validation of frame elements

  // first the method

  Method* m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!m->is_valid_method()) return false;

  // stack frames shouldn't be much larger than max_stack elements

  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcp

  address bcp = interpreter_frame_bcp();
  if (m->validate_bci_from_bcp(bcp) < 0) {
    return false;
  }

  // validate ConstantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (cp == NULL || !cp->is_metaspace_object()) return false;

  // validate locals

  address locals = (address) *interpreter_frame_locals_addr();

  if (locals > thread->stack_base() || locals < (address) fp()) return false;

  // We'd have to be pretty unlucky to be misled at this point

  return true;
}

BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* res_addr;
  if (method->is_native()) {
    // Prior to calling into the runtime to report the method_exit both of
    // the possible return value registers are saved.
#ifdef AARCH64
    // Return value registers are saved into the frame
    if (type == T_FLOAT || type == T_DOUBLE) {
      res_addr = addr_at(interpreter_frame_fp_saved_result_offset);
    } else {
      res_addr = addr_at(interpreter_frame_gp_saved_result_offset);
    }
#else
    // Return value registers are pushed to the native stack
    res_addr = (intptr_t*)sp();
#ifdef __ABI_HARD__
    // FP result is pushed onto a stack along with integer result registers
    if (type == T_FLOAT || type == T_DOUBLE) {
      res_addr += 2;
    }
#endif // __ABI_HARD__
#endif // AARCH64
  } else {
    res_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
      } else {
        obj = *(oop*)res_addr;
      }
      assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)res_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)res_addr;    break;
    case T_CHAR    : value_result->c = *(jchar*)res_addr;    break;
    case T_SHORT   : value_result->s = *(jshort*)res_addr;   break;
    case T_INT     : value_result->i = *(jint*)res_addr;     break;
    case T_LONG    : value_result->j = *(jlong*)res_addr;    break;
    case T_FLOAT   : value_result->f = *(jfloat*)res_addr;   break;
    case T_DOUBLE  : value_result->d = *(jdouble*)res_addr;  break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}


intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  return &interpreter_frame_tos_address()[index];
}

#ifndef PRODUCT

#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
#ifdef AARCH64
    DESCRIBE_FP_OFFSET(interpreter_frame_stack_top);
    DESCRIBE_FP_OFFSET(interpreter_frame_extended_sp);
#else
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
#endif // AARCH64
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }
}

// This is a generic constructor which is only used by pns() in debug.cpp.
frame::frame(void* sp, void* fp, void* pc) {
  init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
#endif

intptr_t* frame::initial_deoptimization_info() {
  // used to reset the saved FP
  return fp();
}

intptr_t* frame::real_fp() const {
#ifndef AARCH64
  if (is_entry_frame()) {
    // Work-around: FP (currently) does not conform to the ABI for entry
    // frames (see generate_call_stub). Might be worth fixing as another CR.
    // Following code assumes (and asserts) this has not yet been fixed.
    assert(frame::entry_frame_call_wrapper_offset == 0, "adjust this code");
    intptr_t* new_fp = fp();
    new_fp += 5; // saved R0,R1,R2,R4,R10
#ifndef __SOFTFP__
    new_fp += 8*2; // saved D8..D15
#endif
    return new_fp;
  }
#endif // !AARCH64
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(!is_compiled_frame(), "unknown compiled frame size");
  return fp();
}
hotspot/src/cpu/arm/vm/frame_arm.hpp (new file, 138 lines)
@ -0,0 +1,138 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_FRAME_ARM_HPP
#define CPU_ARM_VM_FRAME_ARM_HPP

#include "runtime/synchronizer.hpp"

 public:
  enum {
    pc_return_offset = 0,
    // All frames
    link_offset = 0,
    return_addr_offset = 1,
    // non-interpreter frames
    sender_sp_offset = 2,

    // Interpreter frames
#ifdef AARCH64
    interpreter_frame_gp_saved_result_offset = 4, // for native calls only
    interpreter_frame_fp_saved_result_offset = 3, // for native calls only
#endif
    interpreter_frame_oop_temp_offset = 2, // for native calls only

    interpreter_frame_sender_sp_offset = -1,
#ifdef AARCH64
    interpreter_frame_stack_top_offset = interpreter_frame_sender_sp_offset - 1,
    interpreter_frame_extended_sp_offset = interpreter_frame_stack_top_offset - 1,
    interpreter_frame_method_offset = interpreter_frame_extended_sp_offset - 1,
#else
    // outgoing sp before a call to an invoked method
    interpreter_frame_last_sp_offset = interpreter_frame_sender_sp_offset - 1,
    interpreter_frame_method_offset = interpreter_frame_last_sp_offset - 1,
#endif // AARCH64
    interpreter_frame_mirror_offset = interpreter_frame_method_offset - 1,
    interpreter_frame_mdp_offset = interpreter_frame_mirror_offset - 1,
    interpreter_frame_cache_offset = interpreter_frame_mdp_offset - 1,
    interpreter_frame_locals_offset = interpreter_frame_cache_offset - 1,
    interpreter_frame_bcp_offset = interpreter_frame_locals_offset - 1,
    interpreter_frame_initial_sp_offset = interpreter_frame_bcp_offset - 1,

    interpreter_frame_monitor_block_top_offset = interpreter_frame_initial_sp_offset,
    interpreter_frame_monitor_block_bottom_offset = interpreter_frame_initial_sp_offset,

    // Entry frames
    entry_frame_call_wrapper_offset = AARCH64_ONLY(2) NOT_AARCH64(0)
  };

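  // Illustration of the resulting interpreter frame layout for the 32-bit
  // ARM case (word offsets relative to fp, derived from the enum above):
  //
  //   fp[ 1]  return address
  //   fp[ 0]  saved fp (link)
  //   fp[-1]  sender sp
  //   fp[-2]  last sp
  //   fp[-3]  method
  //   fp[-4]  mirror
  //   fp[-5]  mdp
  //   fp[-6]  constant pool cache
  //   fp[-7]  locals
  //   fp[-8]  bcp
  //   fp[-9]  initial sp (= monitor block top/bottom while no monitors are allocated)
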
  intptr_t ptr_at(int offset) const {
    return *ptr_at_addr(offset);
  }

  void ptr_at_put(int offset, intptr_t value) {
    *ptr_at_addr(offset) = value;
  }

 private:
  // an additional field beyond _sp and _pc:
  intptr_t* _fp; // frame pointer

  // The interpreter and adapters will extend the frame of the caller.
  // Since oopMaps are based on the sp of the caller before extension
  // we need to know that value. However in order to compute the address
  // of the return address we need the real "raw" sp. Since sparc already
  // uses sp() to mean "raw" sp and unextended_sp() to mean the caller's
  // original sp we use that convention.

  intptr_t* _unextended_sp;
  void adjust_unextended_sp();

  intptr_t* ptr_at_addr(int offset) const {
    return (intptr_t*) addr_at(offset);
  }

#ifdef ASSERT
  // Used in frame::sender_for_{interpreter,compiled}_frame
  static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
  static void verify_deopt_mh_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
    verify_deopt_original_pc(nm, unextended_sp, true);
  }
#endif

 public:
  // Constructors

  frame(intptr_t* sp, intptr_t* fp, address pc);

  frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc);

#ifndef AARCH64
  frame(intptr_t* sp, intptr_t* fp);
#endif // !AARCH64

  void init(intptr_t* sp, intptr_t* fp, address pc);

  // accessors for the instance variables
  // Note: not necessarily the real 'frame pointer' (see real_fp)
  intptr_t* fp() const { return _fp; }

  inline address* sender_pc_addr() const;

#ifdef AARCH64
  // Used by template based interpreter deoptimization
  void interpreter_frame_set_stack_top(intptr_t* stack_top);
  void interpreter_frame_set_extended_sp(intptr_t* sp);
#else
  // expression stack tos if we are nested in a java call
  intptr_t* interpreter_frame_last_sp() const;

  // deoptimization support
  void interpreter_frame_set_last_sp(intptr_t* sp);
#endif // AARCH64

  // helper to update a map with callee-saved FP
  static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);

#endif // CPU_ARM_VM_FRAME_ARM_HPP
hotspot/src/cpu/arm/vm/frame_arm.inline.hpp (new file, 248 lines)
@ -0,0 +1,248 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (Standard GPLv2 license header, as in frame_arm.hpp above.)
 */

#ifndef CPU_ARM_VM_FRAME_ARM_INLINE_HPP
#define CPU_ARM_VM_FRAME_ARM_INLINE_HPP

#include "code/codeCache.hpp"
#include "code/vmreg.inline.hpp"

// Inline functions for ARM frames:

// Constructors:

inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _unextended_sp = NULL;
  _fp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
}

inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

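  // If this pc is a deoptimization point, the real (pre-deoptimization) pc
  // was recorded by the deopt machinery; report that one so callers see the
  // original code location.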
  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}

inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    assert(_cb->as_compiled_method()->insts_contains(_pc), "original PC must be in CompiledMethod");
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

#ifndef AARCH64

inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  assert(sp != NULL, "null SP?");
  _pc = (address)(sp[-1]);
  // assert(_pc != NULL, "no pc?"); // see comments in x86
  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();

  address original_pc = CompiledMethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}

#endif // !AARCH64

// Accessors

inline bool frame::equal(frame other) const {
  bool ret = sp() == other.sp() &&
             unextended_sp() == other.unextended_sp() &&
             fp() == other.fp() &&
             pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Relationals on frames based on frame ids.
// Return true if the frame is younger (more recent activation) than the frame represented by id
inline bool frame::is_younger(intptr_t* id) const {
  assert(this->id() != NULL && id != NULL, "NULL frame id");
  return this->id() < id;
}

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const {
  assert(this->id() != NULL && id != NULL, "NULL frame id");
  return this->id() > id;
}

inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t**)addr_at(link_offset); }

inline intptr_t* frame::unextended_sp() const { return _unextended_sp; }

// Return address:

inline address* frame::sender_pc_addr() const { return (address*) addr_at(return_addr_offset); }
inline address  frame::sender_pc() const      { return *sender_pc_addr(); }

inline intptr_t* frame::sender_sp() const { return addr_at(sender_sp_offset); }

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}

#ifndef AARCH64
inline intptr_t* frame::interpreter_frame_last_sp() const {
  return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset);
}
#endif // !AARCH64

inline intptr_t* frame::interpreter_frame_bcp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcp_offset);
}

inline intptr_t* frame::interpreter_frame_mdp_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdp_offset);
}


// Constant pool cache

inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}

// Method

inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}

inline oop* frame::interpreter_frame_mirror_addr() const {
  return (oop*)addr_at(interpreter_frame_mirror_offset);
}

// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
#ifdef AARCH64
  intptr_t* stack_top = (intptr_t*)*addr_at(interpreter_frame_stack_top_offset);
  assert(stack_top != NULL, "should be stored before call");
  assert(stack_top <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
  return stack_top;
#else
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == NULL) {
    return sp();
  } else {
    // sp() may have been extended or shrunk by an adapter. At least
    // check that we don't fall behind the legal region.
    // For top deoptimized frame last_sp == interpreter_frame_monitor_end.
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
#endif // AARCH64
}

inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop*)(fp() + interpreter_frame_oop_temp_offset);
}

inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}


// expression stack
// (the max_stack arguments are used by the GC; see class FrameClosure)

inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end - 1;
}


inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }


// Entry frames

inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
  return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}


// Compiled frames

inline bool frame::volatile_across_calls(Register reg) {
  return true;
}

inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop*) map->location(R0->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");
  return (*result_adr);
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop*) map->location(R0->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");
  *result_adr = obj;
}

#endif // CPU_ARM_VM_FRAME_ARM_INLINE_HPP
hotspot/src/cpu/arm/vm/globalDefinitions_arm.hpp (new file, 79 lines)
@ -0,0 +1,79 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (Standard GPLv2 license header, as in frame_arm.hpp above.)
 */

#ifndef CPU_ARM_VM_GLOBALDEFINITIONS_ARM_HPP
#define CPU_ARM_VM_GLOBALDEFINITIONS_ARM_HPP

#ifdef AARCH64
#define AARCH64_ONLY(code) code
#define AARCH64_ONLY_ARG(arg) , arg
#define NOT_AARCH64(code)
#define NOT_AARCH64_ARG(arg)
#else
#define AARCH64_ONLY(code)
#define AARCH64_ONLY_ARG(arg)
#define NOT_AARCH64(code) code
#define NOT_AARCH64_ARG(arg) , arg
#endif

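// For example, AARCH64_ONLY(16) NOT_AARCH64(8) expands to 16 when building
// the 64-bit port and to 8 when building the 32-bit port, as used for
// StackAlignmentInBytes below.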
const int StackAlignmentInBytes = AARCH64_ONLY(16) NOT_AARCH64(8);

// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
const bool CCallingConventionRequiresIntsAsLongs = false;

#ifdef __SOFTFP__
const bool HaveVFP = false;
#else
const bool HaveVFP = true;
#endif

#if defined(__ARM_PCS_VFP) || defined(AARCH64)
#define __ABI_HARD__
#endif

#if defined(__ARM_ARCH_7A__) || defined(AARCH64)
#define SUPPORTS_NATIVE_CX8
#endif

#define STUBROUTINES_MD_HPP    "stubRoutines_arm.hpp"
#define INTERP_MASM_MD_HPP     "interp_masm_arm.hpp"
#define TEMPLATETABLE_MD_HPP   "templateTable_arm.hpp"
#ifdef AARCH64
#define ADGLOBALS_MD_HPP       "adfiles/adGlobals_arm_64.hpp"
#define AD_MD_HPP              "adfiles/ad_arm_64.hpp"
#else
#define ADGLOBALS_MD_HPP       "adfiles/adGlobals_arm_32.hpp"
#define AD_MD_HPP              "adfiles/ad_arm_32.hpp"
#endif
#define C1_LIRGENERATOR_MD_HPP "c1_LIRGenerator_arm.hpp"

#ifdef TARGET_COMPILER_gcc
#ifdef ARM32
#undef BREAKPOINT
#define BREAKPOINT __asm__ volatile ("bkpt")
#endif
#endif

#endif // CPU_ARM_VM_GLOBALDEFINITIONS_ARM_HPP
hotspot/src/cpu/arm/vm/globals_arm.hpp (new file, 98 lines)
@ -0,0 +1,98 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (Standard GPLv2 license header, as in frame_arm.hpp above.)
 */

#ifndef CPU_ARM_VM_GLOBALS_ARM_HPP
#define CPU_ARM_VM_GLOBALS_ARM_HPP

//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
//

define_pd_global(bool,  ShareVtableStubs,     true);

define_pd_global(bool,  ImplicitNullChecks,   true);  // Generate code for implicit null checks
define_pd_global(bool,  UncommonNullCast,     true);  // Uncommon-trap NULLs passed to check cast
define_pd_global(bool,  TrapBasedNullChecks,  false); // Not needed

define_pd_global(uintx, CodeCacheSegmentSize, 64 TIERED_ONLY(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(intx,  CodeEntryAlignment,   16);
define_pd_global(intx,  OptoLoopAlignment,    16);

define_pd_global(bool,  NeedsDeoptSuspend,    false); // only register window machines need this

#define DEFAULT_STACK_YELLOW_PAGES   (2)
#define DEFAULT_STACK_RED_PAGES      (1)
#define DEFAULT_STACK_SHADOW_PAGES   (5 DEBUG_ONLY(+1))
#define DEFAULT_STACK_RESERVED_PAGES (0)

#define MIN_STACK_YELLOW_PAGES   DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES      DEFAULT_STACK_RED_PAGES
#define MIN_STACK_SHADOW_PAGES   DEFAULT_STACK_SHADOW_PAGES
#define MIN_STACK_RESERVED_PAGES (0)

define_pd_global(intx, StackYellowPages,   DEFAULT_STACK_YELLOW_PAGES);
define_pd_global(intx, StackRedPages,      DEFAULT_STACK_RED_PAGES);
define_pd_global(intx, StackShadowPages,   DEFAULT_STACK_SHADOW_PAGES);
define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);

define_pd_global(intx, InlineFrequencyCount, 50);
#if defined(COMPILER1) || defined(COMPILER2)
define_pd_global(intx, InlineSmallCode, 1500);
#endif

define_pd_global(bool, RewriteBytecodes,     true);
define_pd_global(bool, RewriteFrequentPairs, true);

define_pd_global(bool, UseMembar,            true);

define_pd_global(bool, PreserveFramePointer, false);

// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread

define_pd_global(uintx, TypeProfileLevel, 0);

// No performance work done here yet.
define_pd_global(bool, CompactStrings, false);

define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);

#define ARCH_FLAGS(develop, \
                   product, \
                   diagnostic, \
                   experimental, \
                   notproduct, \
                   range, \
                   constraint, \
                   writeable) \
                                                                              \
  develop(bool, VerifyInterpreterStackTop, false,                             \
          "Verify interpreter stack top at every stack expansion (AArch64 only)") \
                                                                              \
  develop(bool, ZapHighNonSignificantBits, false,                             \
          "Zap high non-significant bits of values (AArch64 only)")           \
                                                                              \

#endif // CPU_ARM_VM_GLOBALS_ARM_HPP
hotspot/src/cpu/arm/vm/icBuffer_arm.cpp (new file, 66 lines)
@ -0,0 +1,66 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (Standard GPLv2 license header, as in frame_arm.hpp above.)
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_arm.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_arm.hpp"
#include "oops/oop.inline.hpp"

#define __ masm->

int InlineCacheBuffer::ic_stub_code_size() {
  return (AARCH64_ONLY(8) NOT_AARCH64(4)) * Assembler::InstructionSize;
}

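// The IC stub loads the cached value into Ricklass from an inlined literal
// and then jumps to the target entry point; the literal is bound at the end
// of the stub.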
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
  ResourceMark rm;
  CodeBuffer code(code_begin, ic_stub_code_size());
  MacroAssembler* masm = new MacroAssembler(&code);

  InlinedAddress oop_literal((address) cached_value);
  __ ldr_literal(Ricklass, oop_literal);
  // FIXME: OK to remove reloc here?
  __ patchable_jump(entry_point, relocInfo::runtime_call_type, Rtemp);
  __ bind_literal(oop_literal);
  __ flush();
}

address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
  address jump_address;
  jump_address = code_begin + NativeInstruction::instruction_size;
  NativeJump* jump = nativeJump_at(jump_address);
  return jump->jump_destination();
}

void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
  return (void*)move->data();
}

#undef __
hotspot/src/cpu/arm/vm/icache_arm.cpp (new file, 94 lines)
@ -0,0 +1,94 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (Standard GPLv2 license header, as in frame_arm.hpp above.)
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "assembler_arm.inline.hpp"
#include "runtime/icache.hpp"

#define __ _masm->

#ifdef AARCH64

static int icache_flush(address addr, int lines, int magic) {
  // TODO-AARCH64 Figure out actual cache line size (mrs Xt, CTR_EL0)

  address p = addr;
  for (int i = 0; i < lines; i++, p += ICache::line_size) {
    __asm__ volatile(
      " dc cvau, %[p]"
      :
      : [p] "r" (p)
      : "memory");
  }

  __asm__ volatile(
    " dsb ish"
    : : : "memory");

  p = addr;
  for (int i = 0; i < lines; i++, p += ICache::line_size) {
    __asm__ volatile(
      " ic ivau, %[p]"
      :
      : [p] "r" (p)
      : "memory");
  }

  __asm__ volatile(
    " dsb ish\n\t"
    " isb\n\t"
    : : : "memory");

  return magic;
}

#else

static int icache_flush(address addr, int lines, int magic) {
  __builtin___clear_cache(addr, addr + (lines << ICache::log2_line_size));
  return magic;
}

#endif // AARCH64

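// Note: the AArch64 flush above cleans each data cache line to the point of
// unification (dc cvau) and then invalidates the corresponding instruction
// cache lines (ic ivau), with dsb/isb barriers in between; the magic value is
// returned so the shared icache code can verify that the stub actually ran.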
void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
  address start = (address)icache_flush;

  *flush_icache_stub = (ICache::flush_icache_stub_t)start;

  // ICache::invalidate_range() contains explicit condition that the first
  // call is invoked on the generated icache flush stub code range.
  ICache::invalidate_range(start, 0);

  {
    // dummy code mark to make the shared code happy
    // (fields that would need to be modified to emulate the correct
    // mark are not accessible)
    StubCodeMark mark(this, "ICache", "fake_stub_for_inlined_icache_flush");
    __ ret();
  }
}

#undef __
hotspot/src/cpu/arm/vm/icache_arm.hpp (new file, 40 lines)
@ -0,0 +1,40 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (Standard GPLv2 license header, as in frame_arm.hpp above.)
 */

#ifndef CPU_ARM_VM_ICACHE_ARM_HPP
#define CPU_ARM_VM_ICACHE_ARM_HPP

// Interface for updating the instruction cache. Whenever the VM modifies
// code, part of the processor instruction cache potentially has to be flushed.

class ICache : public AbstractICache {
 public:
  enum {
    stub_size      = 32,             // Size of the icache flush stub in bytes
    line_size      = BytesPerWord,   // conservative
    log2_line_size = LogBytesPerWord // log2(line_size)
  };
};

#endif // CPU_ARM_VM_ICACHE_ARM_HPP
hotspot/src/cpu/arm/vm/interp_masm_arm.cpp (new file, 2272 lines; diff suppressed because it is too large)

hotspot/src/cpu/arm/vm/interp_masm_arm.hpp (new file, 355 lines)
@ -0,0 +1,355 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (Standard GPLv2 license header, as in frame_arm.hpp above.)
 */

#ifndef CPU_ARM_VM_INTERP_MASM_ARM_HPP
#define CPU_ARM_VM_INTERP_MASM_ARM_HPP

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/frame.hpp"
#include "prims/jvmtiExport.hpp"

// This file specializes the assembler with interpreter-specific macros


class InterpreterMacroAssembler: public MacroAssembler {

 public:

  // allow JvmtiExport checks to be extended
  bool can_force_early_return()       { return JvmtiExport::can_force_early_return(); }
  bool can_post_interpreter_events()  { return JvmtiExport::can_post_interpreter_events(); }
  bool can_pop_frame()                { return JvmtiExport::can_pop_frame(); }
  bool can_post_breakpoint()          { return JvmtiExport::can_post_breakpoint(); }
  bool can_post_field_access()        { return JvmtiExport::can_post_field_access(); }
  bool can_post_field_modification()  { return JvmtiExport::can_post_field_modification(); }
  // flags controlled by JVMTI settings
  bool rewrite_frequent_pairs()       { return RewriteFrequentPairs; }

 protected:

  // Template interpreter specific version of call_VM_helper
  virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);

  virtual void check_and_handle_popframe();
  virtual void check_and_handle_earlyret();

  // base routine for all dispatches
  typedef enum { DispatchDefault, DispatchNormal } DispatchTableMode;
  void dispatch_base(TosState state, DispatchTableMode table_mode, bool verifyoop = true);

 public:
  InterpreterMacroAssembler(CodeBuffer* code);

  // Interpreter-specific registers
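  // Debug-only consistency checks: on AArch64 the interpreter caches the
  // expression stack top in Rstack_top and the extended SP in the frame;
  // these helpers assert that the cached values stay in sync with SP.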
#if defined(AARCH64) && defined(ASSERT)

#define check_stack_top()              _check_stack_top("invalid Rstack_top at " __FILE__ ":" XSTR(__LINE__))
#define check_stack_top_on_expansion() _check_stack_top("invalid Rstack_top at " __FILE__ ":" XSTR(__LINE__), VerifyInterpreterStackTop)
#define check_extended_sp(tmp)         _check_extended_sp(tmp, "SP does not match extended SP in frame at " __FILE__ ":" XSTR(__LINE__))
#define check_no_cached_stack_top(tmp) _check_no_cached_stack_top(tmp, "stack_top is already cached in frame at " __FILE__ ":" XSTR(__LINE__))

  void _check_stack_top(const char* msg, bool enabled = true) {
    if (enabled) {
      Label L;
      cmp(SP, Rstack_top);
      b(L, ls);
      stop(msg);
      bind(L);
    }
  }

  void _check_extended_sp(Register tmp, const char* msg) {
    Label L;
    ldr(tmp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
    cmp(SP, tmp);
    b(L, eq);
    stop(msg);
    bind(L);
  }

  void _check_no_cached_stack_top(Register tmp, const char* msg) {
    Label L;
    ldr(tmp, Address(FP, frame::interpreter_frame_stack_top_offset * wordSize));
    cbz(tmp, L);
    stop(msg);
    bind(L);
  }

#else

  inline void check_stack_top() {}
  inline void check_stack_top_on_expansion() {}
  inline void check_extended_sp(Register tmp) {}
  inline void check_no_cached_stack_top(Register tmp) {}

#endif // AARCH64 && ASSERT

  void save_bcp()       { str(Rbcp, Address(FP, frame::interpreter_frame_bcp_offset * wordSize)); }
  void restore_bcp()    { ldr(Rbcp, Address(FP, frame::interpreter_frame_bcp_offset * wordSize)); }
  void restore_locals() { ldr(Rlocals, Address(FP, frame::interpreter_frame_locals_offset * wordSize)); }
  void restore_method() { ldr(Rmethod, Address(FP, frame::interpreter_frame_method_offset * wordSize)); }
  void restore_dispatch();

#ifdef AARCH64
  void save_stack_top()         { check_stack_top(); str(Rstack_top, Address(FP, frame::interpreter_frame_stack_top_offset * wordSize)); }
  void clear_cached_stack_top() { str(ZR, Address(FP, frame::interpreter_frame_stack_top_offset * wordSize)); }
  void restore_stack_top()      { ldr(Rstack_top, Address(FP, frame::interpreter_frame_stack_top_offset * wordSize)); clear_cached_stack_top(); check_stack_top(); }
  void cut_sp_before_call()     { align_reg(SP, Rstack_top, StackAlignmentInBytes); }
  void restore_sp_after_call(Register tmp) { ldr(tmp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize)); mov(SP, tmp); }
#endif

  // Helpers for runtime call arguments/results
  void get_const(Register reg)               { ldr(reg, Address(Rmethod, Method::const_offset())); }
  void get_constant_pool(Register reg)       { get_const(reg); ldr(reg, Address(reg, ConstMethod::constants_offset())); }
  void get_constant_pool_cache(Register reg) { get_constant_pool(reg); ldr(reg, Address(reg, ConstantPool::cache_offset_in_bytes())); }
  void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); ldr(tags, Address(cpool, ConstantPool::tags_offset_in_bytes())); }

  // Sets reg. Blows Rtemp.
  void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);

  // Sets index. Blows reg_tmp.
  void get_index_at_bcp(Register index, int bcp_offset, Register reg_tmp, size_t index_size = sizeof(u2));
  // Sets cache, index.
  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
  // Sets cache. Blows reg_tmp.
  void get_cache_entry_pointer_at_bcp(Register cache, Register reg_tmp, int bcp_offset, size_t index_size = sizeof(u2));

  // Load object from cpool->resolved_references(*bcp+1)
  void load_resolved_reference_at_index(Register result, Register tmp);

  void store_check_part1(Register card_table_base); // Sets card_table_base register.
  void store_check_part2(Register obj, Register card_table_base, Register tmp);

  void set_card(Register card_table_base, Address card_table_addr, Register tmp);

#if INCLUDE_ALL_GCS
  // G1 pre-barrier.
  // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
  // If store_addr != noreg, then previous value is loaded from [store_addr];
  // in such case store_addr and new_val registers are preserved;
  // otherwise pre_val register is preserved.
  void g1_write_barrier_pre(Register store_addr,
                            Register new_val,
                            Register pre_val,
                            Register tmp1,
                            Register tmp2);

  // G1 post-barrier.
  // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register tmp1,
                             Register tmp2,
                             Register tmp3);
#endif // INCLUDE_ALL_GCS

  void pop_ptr(Register r);
  void pop_i(Register r = R0_tos);
#ifdef AARCH64
  void pop_l(Register r = R0_tos);
#else
  void pop_l(Register lo = R0_tos_lo, Register hi = R1_tos_hi);
#endif
  void pop_f(FloatRegister fd);
  void pop_d(FloatRegister fd);

  void push_ptr(Register r);
  void push_i(Register r = R0_tos);
#ifdef AARCH64
  void push_l(Register r = R0_tos);
#else
  void push_l(Register lo = R0_tos_lo, Register hi = R1_tos_hi);
#endif
  void push_f();
  void push_d();

  // Transition vtos -> state. Blows R0, R1. Sets TOS cached value.
  void pop(TosState state);
  // Transition state -> vtos. Blows Rtemp.
  void push(TosState state);

#ifndef AARCH64
  // The following methods are overridden to allow overloaded calls to
  //   MacroAssembler::push/pop(Register)
  //   MacroAssembler::push/pop(RegisterSet)
  //   InterpreterMacroAssembler::push/pop(TosState)
  void push(Register rd, AsmCondition cond = al) { MacroAssembler::push(rd, cond); }
  void pop(Register rd, AsmCondition cond = al)  { MacroAssembler::pop(rd, cond); }

  void push(RegisterSet reg_set, AsmCondition cond = al) { MacroAssembler::push(reg_set, cond); }
  void pop(RegisterSet reg_set, AsmCondition cond = al)  { MacroAssembler::pop(reg_set, cond); }

  // Converts return value in R0/R1 (interpreter calling conventions) to TOS cached value.
  void convert_retval_to_tos(TosState state);
  // Converts TOS cached value to return value in R0/R1 (according to interpreter calling conventions).
  void convert_tos_to_retval(TosState state);
#endif

  // JVMTI ForceEarlyReturn support
  void load_earlyret_value(TosState state);

  void jump_to_entry(address entry);

  // Blows Rtemp.
  void empty_expression_stack() {
    ldr(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    check_stack_top();
#ifdef AARCH64
    clear_cached_stack_top();
#else
    // NULL last_sp until next java call
    str(zero_register(Rtemp), Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64
  }

  // Helpers for swap and dup
  void load_ptr(int n, Register val);
  void store_ptr(int n, Register val);
|
||||
|
||||
// Generate a subtype check: branch to not_subtype if sub_klass is
|
||||
// not a subtype of super_klass.
|
||||
// Profiling code for the subtype check failure (profile_typecheck_failed)
|
||||
// should be explicitly generated by the caller in the not_subtype case.
|
||||
// Blows Rtemp, tmp1, tmp2.
|
||||
void gen_subtype_check(Register Rsub_klass, Register Rsuper_klass,
|
||||
Label ¬_subtype, Register tmp1, Register tmp2);
|
||||
|
||||
// Dispatching
|
||||
void dispatch_prolog(TosState state, int step = 0);
|
||||
void dispatch_epilog(TosState state, int step = 0);
|
||||
void dispatch_only(TosState state); // dispatch by R3_bytecode
|
||||
void dispatch_only_normal(TosState state); // dispatch normal table by R3_bytecode
|
||||
void dispatch_only_noverify(TosState state);
|
||||
void dispatch_next(TosState state, int step = 0); // load R3_bytecode from [Rbcp + step] and dispatch by R3_bytecode
|
||||
|
||||
// jump to an invoked target
|
||||
void prepare_to_jump_from_interpreted();
|
||||
void jump_from_interpreted(Register method);
|
||||
|
||||
void narrow(Register result);
|
||||
|
||||
// Returning from interpreted functions
|
||||
//
|
||||
// Removes the current activation (incl. unlocking of monitors)
|
||||
// and sets up the return address. This code is also used for
|
||||
// exception unwindwing. In that case, we do not want to throw
|
||||
// IllegalMonitorStateExceptions, since that might get us into an
|
||||
// infinite rethrow exception loop.
|
||||
// Additionally this code is used for popFrame and earlyReturn.
|
||||
// In popFrame case we want to skip throwing an exception,
|
||||
// installing an exception, and notifying jvmdi.
|
||||
// In earlyReturn case we only want to skip throwing an exception
|
||||
// and installing an exception.
|
||||
void remove_activation(TosState state, Register ret_addr,
|
||||
bool throw_monitor_exception = true,
|
||||
bool install_monitor_exception = true,
|
||||
bool notify_jvmdi = true);
|
||||
|
||||
// At certain points in the method invocation the monitor of
|
||||
// synchronized methods hasn't been entered yet.
|
||||
// To correctly handle exceptions at these points, we set the thread local
|
||||
// variable _do_not_unlock_if_synchronized to true. The remove_activation will
|
||||
// check this flag.
|
||||
void set_do_not_unlock_if_synchronized(bool flag, Register tmp);
|
||||
|
||||
// Debugging
|
||||
void interp_verify_oop(Register reg, TosState state, const char* file, int line); // only if +VerifyOops && state == atos
|
||||
|
||||
void verify_FPU(int stack_depth, TosState state = ftos) {
|
||||
// No VFP state verification is required for ARM
|
||||
}
|
||||
|
||||
// Object locking
|
||||
void lock_object (Register lock_reg);
|
||||
void unlock_object(Register lock_reg);
|
||||
|
||||
// Interpreter profiling operations
|
||||
void set_method_data_pointer_for_bcp(); // Blows R0-R3/R0-R18, Rtemp, LR
|
||||
void test_method_data_pointer(Register mdp, Label& zero_continue);
|
||||
void verify_method_data_pointer();
|
||||
|
||||
void set_mdp_data_at(Register mdp_in, int offset, Register value);
|
||||
|
||||
// Increments mdp data. Sets bumped_count register to adjusted counter.
|
||||
void increment_mdp_data_at(Address data, Register bumped_count, bool decrement = false);
|
||||
// Increments mdp data. Sets bumped_count register to adjusted counter.
|
||||
void increment_mdp_data_at(Register mdp_in, int offset, Register bumped_count, bool decrement = false);
|
||||
void increment_mask_and_jump(Address counter_addr,
|
||||
int increment, Address mask_addr,
|
||||
Register scratch, Register scratch2,
|
||||
AsmCondition cond, Label* where);
|
||||
void set_mdp_flag_at(Register mdp_in, int flag_constant);
|
||||
|
||||
void test_mdp_data_at(Register mdp_in, int offset, Register value,
|
||||
Register test_value_out,
|
||||
Label& not_equal_continue);
|
||||
|
||||
void record_klass_in_profile(Register receiver, Register mdp,
|
||||
Register reg_tmp, bool is_virtual_call);
|
||||
void record_klass_in_profile_helper(Register receiver, Register mdp,
|
||||
Register reg_tmp,
|
||||
int start_row, Label& done, bool is_virtual_call);
|
||||
|
||||
void update_mdp_by_offset(Register mdp_in, int offset_of_offset, Register reg_tmp);
|
||||
void update_mdp_by_offset(Register mdp_in, Register reg_offset, Register reg_tmp);
|
||||
void update_mdp_by_constant(Register mdp_in, int constant);
|
||||
void update_mdp_for_ret(Register return_bci); // Blows R0-R3/R0-R18, Rtemp, LR
|
||||
|
||||
void profile_taken_branch(Register mdp, Register bumped_count); // Sets mdp, bumped_count registers, blows Rtemp.
|
||||
void profile_not_taken_branch(Register mdp); // Sets mdp, blows Rtemp.
|
||||
|
||||
void profile_call(Register mdp); // Sets mdp, blows Rtemp.
|
||||
void profile_final_call(Register mdp); // Sets mdp, blows Rtemp.
|
||||
void profile_virtual_call(Register mdp, Register receiver, // Sets mdp, blows Rtemp.
|
||||
bool receiver_can_be_null = false);
|
||||
void profile_ret(Register mdp, Register return_bci); // Sets mdp, blows R0-R3/R0-R18, Rtemp, LR
|
||||
void profile_null_seen(Register mdp); // Sets mdp.
|
||||
void profile_typecheck(Register mdp, Register klass); // Sets mdp, blows Rtemp.
|
||||
|
||||
void profile_typecheck_failed(Register mdp); // Sets mdp, blows Rtemp.
|
||||
void profile_switch_default(Register mdp); // Sets mdp, blows Rtemp.
|
||||
|
||||
// Sets mdp. Blows reg_tmp1, reg_tmp2. Index could be the same as reg_tmp2.
|
||||
void profile_switch_case(Register mdp, Register index, Register reg_tmp1, Register reg_tmp2);
|
||||
|
||||
void byteswap_u32(Register r, Register rtmp1, Register rtmp2);
|
||||
|
||||
void inc_global_counter(address address_of_counter, int offset_in_bytes, Register tmp1, Register tmp2, bool avoid_overflow);
|
||||
|
||||
typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
|
||||
|
||||
// support for jvmti
|
||||
void notify_method_entry();
|
||||
void notify_method_exit(TosState state, NotifyMethodExitMode mode,
|
||||
bool native = false, Register result_lo = noreg, Register result_hi = noreg, FloatRegister result_fp = fnoreg);
|
||||
|
||||
void trace_state(const char* msg) PRODUCT_RETURN;
|
||||
|
||||
void get_method_counters(Register method, Register Rcounters, Label& skip);
|
||||
};
|
||||
|
||||
#endif // CPU_ARM_VM_INTERP_MASM_ARM_HPP
|
449
hotspot/src/cpu/arm/vm/interpreterRT_arm.cpp
Normal file
449
hotspot/src/cpu/arm/vm/interpreterRT_arm.cpp
Normal file
@ -0,0 +1,449 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "interpreter/interpreterRuntime.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/universe.inline.hpp"
|
||||
#include "oops/method.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/icache.hpp"
|
||||
#include "runtime/interfaceSupport.hpp"
|
||||
#include "runtime/signature.hpp"
|
||||
|
||||
#define __ _masm->
|
||||
|
||||
#ifdef SHARING_FAST_NATIVE_FINGERPRINTS
|
||||
// mapping from SignatureIterator param to (common) type of parsing
|
||||
static const u1 shared_type[] = {
|
||||
(u1) SignatureIterator::int_parm, // bool
|
||||
(u1) SignatureIterator::int_parm, // byte
|
||||
(u1) SignatureIterator::int_parm, // char
|
||||
(u1) SignatureIterator::int_parm, // short
|
||||
(u1) SignatureIterator::int_parm, // int
|
||||
(u1) SignatureIterator::long_parm, // long
|
||||
#ifndef __ABI_HARD__
|
||||
(u1) SignatureIterator::int_parm, // float, passed as int
|
||||
(u1) SignatureIterator::long_parm, // double, passed as long
|
||||
#else
|
||||
(u1) SignatureIterator::float_parm, // float
|
||||
(u1) SignatureIterator::double_parm, // double
|
||||
#endif
|
||||
(u1) SignatureIterator::obj_parm, // obj
|
||||
(u1) SignatureIterator::done_parm // done
|
||||
};
|
||||
|
||||
uint64_t InterpreterRuntime::normalize_fast_native_fingerprint(uint64_t fingerprint) {
|
||||
if (fingerprint == UCONST64(-1)) {
|
||||
// special signature used when the argument list cannot be encoded in a 64 bits value
|
||||
return fingerprint;
|
||||
}
|
||||
int shift = SignatureIterator::static_feature_size;
|
||||
uint64_t result = fingerprint & ((1 << shift) - 1);
|
||||
fingerprint >>= shift;
|
||||
|
||||
BasicType ret_type = (BasicType) (fingerprint & SignatureIterator::result_feature_mask);
|
||||
// For ARM, the fast signature handler only needs to know whether
|
||||
// the return value must be unboxed. T_OBJECT and T_ARRAY need not
|
||||
// be distinguished from each other and all other return values
|
||||
// behave like integers with respect to the handler.
|
||||
bool unbox = (ret_type == T_OBJECT) || (ret_type == T_ARRAY);
|
||||
if (unbox) {
|
||||
ret_type = T_OBJECT;
|
||||
} else {
|
||||
ret_type = T_INT;
|
||||
}
|
||||
result |= ((uint64_t) ret_type) << shift;
|
||||
shift += SignatureIterator::result_feature_size;
|
||||
fingerprint >>= SignatureIterator::result_feature_size;
|
||||
|
||||
while (true) {
|
||||
uint32_t type = (uint32_t) (fingerprint & SignatureIterator::parameter_feature_mask);
|
||||
if (type == SignatureIterator::done_parm) {
|
||||
result |= ((uint64_t) SignatureIterator::done_parm) << shift;
|
||||
return result;
|
||||
}
|
||||
assert((type >= SignatureIterator::bool_parm) && (type <= SignatureIterator::obj_parm), "check fingerprint encoding");
|
||||
int shared = shared_type[type - SignatureIterator::bool_parm];
|
||||
result |= ((uint64_t) shared) << shift;
|
||||
shift += SignatureIterator::parameter_feature_size;
|
||||
fingerprint >>= SignatureIterator::parameter_feature_size;
|
||||
}
|
||||
}
|
||||
#endif // SHARING_FAST_NATIVE_FINGERPRINTS
|
||||
|
||||
// Implementation of SignatureHandlerGenerator
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
|
||||
if (_ireg < GPR_PARAMS) {
|
||||
Register dst = as_Register(_ireg);
|
||||
__ ldr_s32(dst, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
_ireg++;
|
||||
} else {
|
||||
__ ldr_s32(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ str_32(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
|
||||
#ifdef AARCH64
|
||||
if (_ireg < GPR_PARAMS) {
|
||||
Register dst = as_Register(_ireg);
|
||||
__ ldr(dst, Address(Rlocals, Interpreter::local_offset_in_bytes(offset() + 1)));
|
||||
_ireg++;
|
||||
} else {
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset() + 1)));
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#else
|
||||
if (_ireg <= 2) {
|
||||
#if (ALIGN_WIDE_ARGUMENTS == 1)
|
||||
if ((_ireg & 1) != 0) {
|
||||
// 64-bit values should be 8-byte aligned
|
||||
_ireg++;
|
||||
}
|
||||
#endif
|
||||
Register dst1 = as_Register(_ireg);
|
||||
Register dst2 = as_Register(_ireg+1);
|
||||
__ ldr(dst1, Address(Rlocals, Interpreter::local_offset_in_bytes(offset()+1)));
|
||||
__ ldr(dst2, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
_ireg += 2;
|
||||
#if (ALIGN_WIDE_ARGUMENTS == 0)
|
||||
} else if (_ireg == 3) {
|
||||
// uses R3 + one stack slot
|
||||
Register dst1 = as_Register(_ireg);
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ ldr(dst1, Address(Rlocals, Interpreter::local_offset_in_bytes(offset()+1)));
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_ireg += 1;
|
||||
_abi_offset += 1;
|
||||
#endif
|
||||
} else {
|
||||
#if (ALIGN_WIDE_ARGUMENTS == 1)
|
||||
if(_abi_offset & 1) _abi_offset++;
|
||||
#endif
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset()+1)));
|
||||
__ str(Rtemp, Address(SP, (_abi_offset) * wordSize));
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ str(Rtemp, Address(SP, (_abi_offset+1) * wordSize));
|
||||
_abi_offset += 2;
|
||||
_ireg = 4;
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
|
||||
#ifdef AARCH64
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ cmp(Rtemp, 0);
|
||||
__ sub(Rtemp, Rlocals, -Interpreter::local_offset_in_bytes(offset()));
|
||||
if (_ireg < GPR_PARAMS) {
|
||||
Register dst = as_Register(_ireg);
|
||||
__ csel(dst, ZR, Rtemp, eq);
|
||||
_ireg++;
|
||||
} else {
|
||||
__ csel(Rtemp, ZR, Rtemp, eq);
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#else
|
||||
if (_ireg < 4) {
|
||||
Register dst = as_Register(_ireg);
|
||||
__ ldr(dst, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ cmp(dst, 0);
|
||||
__ sub(dst, Rlocals, -Interpreter::local_offset_in_bytes(offset()), ne);
|
||||
_ireg++;
|
||||
} else {
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ cmp(Rtemp, 0);
|
||||
__ sub(Rtemp, Rlocals, -Interpreter::local_offset_in_bytes(offset()), ne);
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
#ifndef __ABI_HARD__
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
|
||||
if (_ireg < 4) {
|
||||
Register dst = as_Register(_ireg);
|
||||
__ ldr(dst, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
_ireg++;
|
||||
} else {
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
#ifndef __SOFTFP__
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
|
||||
#ifdef AARCH64
|
||||
if (_freg < FPR_PARAMS) {
|
||||
FloatRegister dst = as_FloatRegister(_freg);
|
||||
__ ldr_s(dst, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
_freg++;
|
||||
} else {
|
||||
__ ldr_u32(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ str_32(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#else
|
||||
if((_fp_slot < 16) || (_single_fpr_slot & 1)) {
|
||||
if ((_single_fpr_slot & 1) == 0) {
|
||||
_single_fpr_slot = _fp_slot;
|
||||
_fp_slot += 2;
|
||||
}
|
||||
__ flds(as_FloatRegister(_single_fpr_slot), Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
_single_fpr_slot++;
|
||||
} else {
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
|
||||
#ifdef AARCH64
|
||||
if (_freg < FPR_PARAMS) {
|
||||
FloatRegister dst = as_FloatRegister(_freg);
|
||||
__ ldr_d(dst, Address(Rlocals, Interpreter::local_offset_in_bytes(offset() + 1)));
|
||||
_freg++;
|
||||
} else {
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset() + 1)));
|
||||
__ str(Rtemp, Address(SP, _abi_offset * wordSize));
|
||||
_abi_offset++;
|
||||
}
|
||||
#else
|
||||
if(_fp_slot <= 14) {
|
||||
__ fldd(as_FloatRegister(_fp_slot), Address(Rlocals, Interpreter::local_offset_in_bytes(offset()+1)));
|
||||
_fp_slot += 2;
|
||||
} else {
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset()+1)));
|
||||
__ str(Rtemp, Address(SP, (_abi_offset) * wordSize));
|
||||
__ ldr(Rtemp, Address(Rlocals, Interpreter::local_offset_in_bytes(offset())));
|
||||
__ str(Rtemp, Address(SP, (_abi_offset+1) * wordSize));
|
||||
_abi_offset += 2;
|
||||
_single_fpr_slot = 16;
|
||||
}
|
||||
#endif // AARCH64
|
||||
}
|
||||
#endif // __SOFTFP__
|
||||
#endif // __ABI_HARD__
|
||||
|
||||
void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
|
||||
iterate(fingerprint);
|
||||
|
||||
BasicType result_type = SignatureIterator::return_type(fingerprint);
|
||||
|
||||
address result_handler = Interpreter::result_handler(result_type);
|
||||
|
||||
#ifdef AARCH64
|
||||
__ mov_slow(R0, (address)result_handler);
|
||||
#else
|
||||
// Check that result handlers are not real handler on ARM (0 or -1).
|
||||
// This ensures the signature handlers do not need symbolic information.
|
||||
assert((result_handler == NULL)||(result_handler==(address)0xffffffff),"");
|
||||
__ mov_slow(R0, (intptr_t)result_handler);
|
||||
#endif
|
||||
|
||||
__ ret();
|
||||
}
|
||||
|
||||
|
||||
// Implementation of SignatureHandlerLibrary
|
||||
|
||||
void SignatureHandlerLibrary::pd_set_handler(address handler) {}
|
||||
|
||||
class SlowSignatureHandler: public NativeSignatureIterator {
|
||||
private:
|
||||
address _from;
|
||||
intptr_t* _to;
|
||||
|
||||
#ifndef __ABI_HARD__
|
||||
virtual void pass_int() {
|
||||
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
_from -= Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
virtual void pass_float() {
|
||||
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
_from -= Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
virtual void pass_long() {
|
||||
#if (ALIGN_WIDE_ARGUMENTS == 1)
|
||||
if (((intptr_t)_to & 7) != 0) {
|
||||
// 64-bit values should be 8-byte aligned
|
||||
_to++;
|
||||
}
|
||||
#endif
|
||||
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
_to += 2;
|
||||
_from -= 2*Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
virtual void pass_object() {
|
||||
intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
|
||||
*_to++ = (*(intptr_t*)from_addr == 0) ? (intptr_t)NULL : from_addr;
|
||||
_from -= Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
intptr_t* _toFP;
|
||||
intptr_t* _toGP;
|
||||
int _last_gp;
|
||||
int _last_fp;
|
||||
#ifndef AARCH64
|
||||
int _last_single_fp;
|
||||
#endif // !AARCH64
|
||||
|
||||
virtual void pass_int() {
|
||||
if(_last_gp < GPR_PARAMS) {
|
||||
_toGP[_last_gp++] = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
} else {
|
||||
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
}
|
||||
_from -= Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
virtual void pass_long() {
|
||||
#ifdef AARCH64
|
||||
if(_last_gp < GPR_PARAMS) {
|
||||
_toGP[_last_gp++] = *(jlong *)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
} else {
|
||||
*_to++ = *(jlong *)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
}
|
||||
#else
|
||||
assert(ALIGN_WIDE_ARGUMENTS == 1, "ABI_HARD not supported with unaligned wide arguments");
|
||||
if (_last_gp <= 2) {
|
||||
if(_last_gp & 1) _last_gp++;
|
||||
_toGP[_last_gp++] = *(jint *)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
_toGP[_last_gp++] = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
} else {
|
||||
if (((intptr_t)_to & 7) != 0) {
|
||||
// 64-bit values should be 8-byte aligned
|
||||
_to++;
|
||||
}
|
||||
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
_to += 2;
|
||||
_last_gp = 4;
|
||||
}
|
||||
#endif // AARCH64
|
||||
_from -= 2*Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
virtual void pass_object() {
|
||||
intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
|
||||
if(_last_gp < GPR_PARAMS) {
|
||||
_toGP[_last_gp++] = (*(intptr_t*)from_addr == 0) ? NULL : from_addr;
|
||||
} else {
|
||||
*_to++ = (*(intptr_t*)from_addr == 0) ? NULL : from_addr;
|
||||
}
|
||||
_from -= Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
virtual void pass_float() {
|
||||
#ifdef AARCH64
|
||||
if(_last_fp < FPR_PARAMS) {
|
||||
_toFP[_last_fp++] = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
} else {
|
||||
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
}
|
||||
#else
|
||||
if((_last_fp < 16) || (_last_single_fp & 1)) {
|
||||
if ((_last_single_fp & 1) == 0) {
|
||||
_last_single_fp = _last_fp;
|
||||
_last_fp += 2;
|
||||
}
|
||||
|
||||
_toFP[_last_single_fp++] = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
} else {
|
||||
*_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
}
|
||||
#endif // AARCH64
|
||||
_from -= Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
virtual void pass_double() {
|
||||
#ifdef AARCH64
|
||||
if(_last_fp < FPR_PARAMS) {
|
||||
_toFP[_last_fp++] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
} else {
|
||||
*_to++ = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
}
|
||||
#else
|
||||
assert(ALIGN_WIDE_ARGUMENTS == 1, "ABI_HARD not supported with unaligned wide arguments");
|
||||
if(_last_fp <= 14) {
|
||||
_toFP[_last_fp++] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
_toFP[_last_fp++] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
} else {
|
||||
if (((intptr_t)_to & 7) != 0) { // 64-bit values should be 8-byte aligned
|
||||
_to++;
|
||||
}
|
||||
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
|
||||
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
|
||||
_to += 2;
|
||||
_last_single_fp = 16;
|
||||
}
|
||||
#endif // AARCH64
|
||||
_from -= 2*Interpreter::stackElementSize;
|
||||
}
|
||||
|
||||
#endif // !__ABI_HARD__
|
||||
|
||||
public:
|
||||
SlowSignatureHandler(methodHandle method, address from, intptr_t* to) :
|
||||
NativeSignatureIterator(method) {
|
||||
_from = from;
|
||||
|
||||
#ifdef __ABI_HARD__
|
||||
_toGP = to;
|
||||
_toFP = _toGP + GPR_PARAMS;
|
||||
_to = _toFP + AARCH64_ONLY(FPR_PARAMS) NOT_AARCH64(8*2);
|
||||
_last_gp = (is_static() ? 2 : 1);
|
||||
_last_fp = 0;
|
||||
#ifndef AARCH64
|
||||
_last_single_fp = 0;
|
||||
#endif // !AARCH64
|
||||
#else
|
||||
_to = to + (is_static() ? 2 : 1);
|
||||
#endif // __ABI_HARD__
|
||||
}
|
||||
};
|
||||
|
||||
IRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(JavaThread* thread, Method* method, intptr_t* from, intptr_t* to))
|
||||
methodHandle m(thread, (Method*)method);
|
||||
assert(m->is_native(), "sanity check");
|
||||
SlowSignatureHandler(m, (address)from, to).iterate(UCONST64(-1));
|
||||
return Interpreter::result_handler(m->result_type());
|
||||
IRT_END
|
84
hotspot/src/cpu/arm/vm/interpreterRT_arm.hpp
Normal file
84
hotspot/src/cpu/arm/vm/interpreterRT_arm.hpp
Normal file
@ -0,0 +1,84 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_ARM_VM_INTERPRETERRT_ARM_HPP
|
||||
#define CPU_ARM_VM_INTERPRETERRT_ARM_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
// native method calls
|
||||
|
||||
class SignatureHandlerGenerator: public NativeSignatureIterator {
|
||||
private:
|
||||
MacroAssembler* _masm;
|
||||
int _abi_offset;
|
||||
int _ireg;
|
||||
|
||||
#ifdef __ABI_HARD__
|
||||
#ifdef AARCH64
|
||||
int _freg;
|
||||
#else
|
||||
int _fp_slot; // number of FPR's with arguments loaded
|
||||
int _single_fpr_slot;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
void move(int from_offset, int to_offset);
|
||||
void box(int from_offset, int to_offset);
|
||||
|
||||
void pass_int();
|
||||
void pass_long();
|
||||
void pass_float();
|
||||
void pass_object();
|
||||
#ifdef __ABI_HARD__
|
||||
void pass_double();
|
||||
#endif
|
||||
public:
|
||||
// Creation
|
||||
SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
|
||||
_masm = new MacroAssembler(buffer);
|
||||
_abi_offset = 0;
|
||||
_ireg = is_static() ? 2 : 1;
|
||||
#ifdef __ABI_HARD__
|
||||
#ifdef AARCH64
|
||||
_freg = 0;
|
||||
#else
|
||||
_fp_slot = 0;
|
||||
_single_fpr_slot = 0;
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
// Code generation
|
||||
void generate(uint64_t fingerprint);
|
||||
|
||||
};
|
||||
|
||||
#ifndef AARCH64
|
||||
// ARM provides a normalized fingerprint for native calls (to increase
|
||||
// sharing). See normalize_fast_native_fingerprint
|
||||
#define SHARING_FAST_NATIVE_FINGERPRINTS
|
||||
#endif
|
||||
|
||||
#endif // CPU_ARM_VM_INTERPRETERRT_ARM_HPP
|
86
hotspot/src/cpu/arm/vm/javaFrameAnchor_arm.hpp
Normal file
86
hotspot/src/cpu/arm/vm/javaFrameAnchor_arm.hpp
Normal file
@ -0,0 +1,86 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_ARM_VM_JAVAFRAMEANCHOR_ARM_HPP
|
||||
#define CPU_ARM_VM_JAVAFRAMEANCHOR_ARM_HPP
|
||||
|
||||
private:
|
||||
|
||||
// FP value associated with _last_Java_sp:
|
||||
intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to
|
||||
|
||||
public:
|
||||
// Each arch must define reset, save, restore
|
||||
// These are used by objects that only care about:
|
||||
// 1 - initializing a new state (thread creation, javaCalls)
|
||||
// 2 - saving a current state (javaCalls)
|
||||
// 3 - restoring an old state (javaCalls)
|
||||
|
||||
void clear(void) {
|
||||
// clearing _last_Java_sp must be first
|
||||
_last_Java_sp = NULL;
|
||||
// fence?
|
||||
_last_Java_fp = NULL;
|
||||
_last_Java_pc = NULL;
|
||||
}
|
||||
|
||||
void copy(JavaFrameAnchor* src) {
|
||||
// In order to make sure the transition state is valid for "this"
|
||||
// We must clear _last_Java_sp before copying the rest of the new data
|
||||
//
|
||||
// Hack Alert: Temporary bugfix for 4717480/4721647
|
||||
// To act like previous version (pd_cache_state) don't NULL _last_Java_sp
|
||||
// unless the value is changing
|
||||
//
|
||||
if (_last_Java_sp != src->_last_Java_sp)
|
||||
_last_Java_sp = NULL;
|
||||
|
||||
_last_Java_fp = src->_last_Java_fp;
|
||||
_last_Java_pc = src->_last_Java_pc;
|
||||
// Must be last so profiler will always see valid frame if has_last_frame() is true
|
||||
_last_Java_sp = src->_last_Java_sp;
|
||||
}
|
||||
|
||||
// Always walkable
|
||||
bool walkable(void) { return true; }
|
||||
// Never any thing to do since we are always walkable and can find address of return addresses
|
||||
void make_walkable(JavaThread* thread) { }
|
||||
|
||||
intptr_t* last_Java_sp(void) const { return _last_Java_sp; }
|
||||
|
||||
address last_Java_pc(void) { return _last_Java_pc; }
|
||||
|
||||
private:
|
||||
|
||||
static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }
|
||||
|
||||
public:
|
||||
|
||||
void set_last_Java_sp(intptr_t* sp) { _last_Java_sp = sp; }
|
||||
|
||||
intptr_t* last_Java_fp(void) { return _last_Java_fp; }
|
||||
// Assert (last_Java_sp == NULL || fp == NULL)
|
||||
void set_last_Java_fp(intptr_t* fp) { _last_Java_fp = fp; }
|
||||
|
||||
#endif // CPU_ARM_VM_JAVAFRAMEANCHOR_ARM_HPP
|
277
hotspot/src/cpu/arm/vm/jniFastGetField_arm.cpp
Normal file
277
hotspot/src/cpu/arm/vm/jniFastGetField_arm.cpp
Normal file
@ -0,0 +1,277 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.hpp"
|
||||
#include "assembler_arm.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/jniFastGetField.hpp"
|
||||
#include "prims/jvm_misc.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
|
||||
#define __ masm->
|
||||
|
||||
#define BUFFER_SIZE 96
|
||||
|
||||
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
const char* name = NULL;
|
||||
address slow_case_addr = NULL;
|
||||
switch (type) {
|
||||
case T_BOOLEAN:
|
||||
name = "jni_fast_GetBooleanField";
|
||||
slow_case_addr = jni_GetBooleanField_addr();
|
||||
break;
|
||||
case T_BYTE:
|
||||
name = "jni_fast_GetByteField";
|
||||
slow_case_addr = jni_GetByteField_addr();
|
||||
break;
|
||||
case T_CHAR:
|
||||
name = "jni_fast_GetCharField";
|
||||
slow_case_addr = jni_GetCharField_addr();
|
||||
break;
|
||||
case T_SHORT:
|
||||
name = "jni_fast_GetShortField";
|
||||
slow_case_addr = jni_GetShortField_addr();
|
||||
break;
|
||||
case T_INT:
|
||||
name = "jni_fast_GetIntField";
|
||||
slow_case_addr = jni_GetIntField_addr();
|
||||
break;
|
||||
case T_LONG:
|
||||
name = "jni_fast_GetLongField";
|
||||
slow_case_addr = jni_GetLongField_addr();
|
||||
break;
|
||||
case T_FLOAT:
|
||||
name = "jni_fast_GetFloatField";
|
||||
slow_case_addr = jni_GetFloatField_addr();
|
||||
break;
|
||||
case T_DOUBLE:
|
||||
name = "jni_fast_GetDoubleField";
|
||||
slow_case_addr = jni_GetDoubleField_addr();
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
// R0 - jni env
|
||||
// R1 - object handle
|
||||
// R2 - jfieldID
|
||||
|
||||
const Register Rsafepoint_counter_addr = AARCH64_ONLY(R4) NOT_AARCH64(R3);
|
||||
const Register Robj = AARCH64_ONLY(R5) NOT_AARCH64(R1);
|
||||
const Register Rres = AARCH64_ONLY(R6) NOT_AARCH64(R0);
|
||||
#ifndef AARCH64
|
||||
const Register Rres_hi = R1;
|
||||
#endif // !AARCH64
|
||||
const Register Rsafept_cnt = Rtemp;
|
||||
const Register Rsafept_cnt2 = Rsafepoint_counter_addr;
|
||||
const Register Rtmp1 = AARCH64_ONLY(R7) NOT_AARCH64(R3); // same as Rsafepoint_counter_addr on 32-bit ARM
|
||||
const Register Rtmp2 = AARCH64_ONLY(R8) NOT_AARCH64(R2); // same as jfieldID on 32-bit ARM
|
||||
|
||||
#ifdef AARCH64
|
||||
assert_different_registers(Rsafepoint_counter_addr, Rsafept_cnt, Robj, Rres, Rtmp1, Rtmp2, R0, R1, R2, LR);
|
||||
assert_different_registers(Rsafept_cnt2, Rsafept_cnt, Rres, R0, R1, R2, LR);
|
||||
#else
|
||||
assert_different_registers(Rsafepoint_counter_addr, Rsafept_cnt, Robj, Rres, LR);
|
||||
assert_different_registers(Rsafept_cnt, R1, R2, Rtmp1, LR);
|
||||
assert_different_registers(Rsafepoint_counter_addr, Rsafept_cnt, Rres, Rres_hi, Rtmp2, LR);
|
||||
assert_different_registers(Rsafept_cnt2, Rsafept_cnt, Rres, Rres_hi, LR);
|
||||
#endif // AARCH64
|
||||
|
||||
address fast_entry;
|
||||
|
||||
ResourceMark rm;
|
||||
BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
|
||||
CodeBuffer cbuf(blob);
|
||||
MacroAssembler* masm = new MacroAssembler(&cbuf);
|
||||
fast_entry = __ pc();
|
||||
|
||||
// Safepoint check
|
||||
InlinedAddress safepoint_counter_addr(SafepointSynchronize::safepoint_counter_addr());
|
||||
Label slow_case;
|
||||
__ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
|
||||
|
||||
#ifndef AARCH64
|
||||
__ push(RegisterSet(R0, R3)); // save incoming arguments for slow case
|
||||
#endif // !AARCH64
|
||||
|
||||
__ ldr_s32(Rsafept_cnt, Address(Rsafepoint_counter_addr));
|
||||
__ tbnz(Rsafept_cnt, 0, slow_case);
|
||||
|
||||
if (os::is_MP()) {
|
||||
// Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
|
||||
__ andr(Rtmp1, Rsafept_cnt, (unsigned)1);
|
||||
__ ldr(Robj, Address(R1, Rtmp1));
|
||||
} else {
|
||||
__ ldr(Robj, Address(R1));
|
||||
}
|
||||
|
||||
#ifdef AARCH64
|
||||
__ add(Robj, Robj, AsmOperand(R2, lsr, 2));
|
||||
Address field_addr = Address(Robj);
|
||||
#else
|
||||
Address field_addr;
|
||||
if (type != T_BOOLEAN
|
||||
&& type != T_INT
|
||||
#ifndef __ABI_HARD__
|
||||
&& type != T_FLOAT
|
||||
#endif // !__ABI_HARD__
|
||||
) {
|
||||
// Only ldr and ldrb support embedded shift, other loads do not
|
||||
__ add(Robj, Robj, AsmOperand(R2, lsr, 2));
|
||||
field_addr = Address(Robj);
|
||||
} else {
|
||||
field_addr = Address(Robj, R2, lsr, 2);
|
||||
}
|
||||
#endif // AARCH64
|
||||
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
|
||||
speculative_load_pclist[count] = __ pc();
|
||||
|
||||
switch (type) {
|
||||
case T_BOOLEAN:
|
||||
__ ldrb(Rres, field_addr);
|
||||
break;
|
||||
case T_BYTE:
|
||||
__ ldrsb(Rres, field_addr);
|
||||
break;
|
||||
case T_CHAR:
|
||||
__ ldrh(Rres, field_addr);
|
||||
break;
|
||||
case T_SHORT:
|
||||
__ ldrsh(Rres, field_addr);
|
||||
break;
|
||||
case T_INT:
|
||||
#ifndef __ABI_HARD__
|
||||
case T_FLOAT:
|
||||
#endif
|
||||
__ ldr_s32(Rres, field_addr);
|
||||
break;
|
||||
case T_LONG:
|
||||
#ifndef __ABI_HARD__
|
||||
case T_DOUBLE:
|
||||
#endif
|
||||
#ifdef AARCH64
|
||||
__ ldr(Rres, field_addr);
|
||||
#else
|
||||
// Safe to use ldrd since long and double fields are 8-byte aligned
|
||||
__ ldrd(Rres, field_addr);
|
||||
#endif // AARCH64
|
||||
break;
|
||||
#ifdef __ABI_HARD__
|
||||
case T_FLOAT:
|
||||
__ ldr_float(S0, field_addr);
|
||||
break;
|
||||
case T_DOUBLE:
|
||||
__ ldr_double(D0, field_addr);
|
||||
break;
|
||||
#endif // __ABI_HARD__
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
if(os::is_MP()) {
|
||||
// Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
|
||||
#if defined(__ABI_HARD__) && !defined(AARCH64)
|
||||
if (type == T_FLOAT || type == T_DOUBLE) {
|
||||
__ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
|
||||
__ fmrrd(Rres, Rres_hi, D0);
|
||||
__ eor(Rtmp2, Rres, Rres);
|
||||
__ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
|
||||
} else
|
||||
#endif // __ABI_HARD__ && !AARCH64
|
||||
{
|
||||
#ifndef AARCH64
|
||||
__ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
|
||||
#endif // !AARCH64
|
||||
__ eor(Rtmp2, Rres, Rres);
|
||||
__ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
|
||||
}
|
||||
} else {
|
||||
__ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr));
|
||||
}
|
||||
__ cmp(Rsafept_cnt2, Rsafept_cnt);
|
||||
#ifdef AARCH64
|
||||
__ b(slow_case, ne);
|
||||
__ mov(R0, Rres);
|
||||
__ ret();
|
||||
#else
|
||||
// discards saved R0 R1 R2 R3
|
||||
__ add(SP, SP, 4 * wordSize, eq);
|
||||
__ bx(LR, eq);
|
||||
#endif // AARCH64
|
||||
|
||||
slowcase_entry_pclist[count++] = __ pc();
|
||||
|
||||
__ bind(slow_case);
|
||||
#ifndef AARCH64
|
||||
__ pop(RegisterSet(R0, R3));
|
||||
#endif // !AARCH64
|
||||
// thumb mode switch handled by MacroAssembler::jump if needed
|
||||
__ jump(slow_case_addr, relocInfo::none, Rtemp);
|
||||
|
||||
__ bind_literal(safepoint_counter_addr);
|
||||
|
||||
__ flush();
|
||||
|
||||
guarantee((__ pc() - fast_entry) <= BUFFER_SIZE, "BUFFER_SIZE too small");
|
||||
|
||||
return fast_entry;
|
||||
}
|
||||
|
||||
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
address JNI_FastGetField::generate_fast_get_boolean_field() {
|
||||
return generate_fast_get_int_field0(T_BOOLEAN);
|
||||
}
|
||||
|
||||
address JNI_FastGetField::generate_fast_get_byte_field() {
|
||||
return generate_fast_get_int_field0(T_BYTE);
|
||||
}
|
||||
|
||||
address JNI_FastGetField::generate_fast_get_char_field() {
|
||||
return generate_fast_get_int_field0(T_CHAR);
|
||||
}
|
||||
|
||||
address JNI_FastGetField::generate_fast_get_short_field() {
|
||||
return generate_fast_get_int_field0(T_SHORT);
|
||||
}
|
||||
|
||||
address JNI_FastGetField::generate_fast_get_int_field() {
|
||||
return generate_fast_get_int_field0(T_INT);
|
||||
}
|
||||
|
||||
address JNI_FastGetField::generate_fast_get_long_field() {
|
||||
return generate_fast_get_int_field0(T_LONG);
|
||||
}
|
||||
|
||||
address JNI_FastGetField::generate_fast_get_float_field() {
|
||||
return generate_fast_get_int_field0(T_FLOAT);
|
||||
}
|
||||
|
||||
address JNI_FastGetField::generate_fast_get_double_field() {
|
||||
return generate_fast_get_int_field0(T_DOUBLE);
|
||||
}
|
98
hotspot/src/cpu/arm/vm/jniTypes_arm.hpp
Normal file
98
hotspot/src/cpu/arm/vm/jniTypes_arm.hpp
Normal file
@ -0,0 +1,98 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_ARM_VM_JNITYPES_ARM_HPP
|
||||
#define CPU_ARM_VM_JNITYPES_ARM_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "prims/jni.h"
|
||||
|
||||
// This file holds platform-dependent routines used to write primitive jni
|
||||
// types to the array of arguments passed into JavaCalls::call
|
||||
|
||||
class JNITypes : AllStatic {
|
||||
// These functions write a java primitive type (in native format)
|
||||
// to a java stack slot array to be passed as an argument to JavaCalls:calls.
|
||||
// I.e., they are functionally 'push' operations if they have a 'pos'
|
||||
// formal parameter. Note that jlong's and jdouble's are written
|
||||
// _in reverse_ of the order in which they appear in the interpreter
|
||||
// stack. This is because call stubs (see stubGenerator_arm.cpp)
|
||||
// reverse the argument list constructed by JavaCallArguments (see
|
||||
// javaCalls.hpp).
|
||||
|
||||
private:
|
||||
|
||||
#ifndef AARCH64
|
||||
// 32bit Helper routines.
|
||||
static inline void put_int2r(jint *from, intptr_t *to) { *(jint *)(to++) = from[1];
|
||||
*(jint *)(to ) = from[0]; }
|
||||
static inline void put_int2r(jint *from, intptr_t *to, int& pos) { put_int2r(from, to + pos); pos += 2; }
|
||||
#endif
|
||||
|
||||
public:
|
||||
// Ints are stored in native format in one JavaCallArgument slot at *to.
|
||||
static inline void put_int(jint from, intptr_t *to) { *(jint *)(to + 0 ) = from; }
|
||||
static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; }
|
||||
static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }
|
||||
|
||||
#ifdef AARCH64
|
||||
// Longs are stored in native format in one JavaCallArgument slot at *(to+1).
|
||||
static inline void put_long(jlong from, intptr_t *to) { *(jlong *)(to + 1 + 0) = from; }
|
||||
static inline void put_long(jlong from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = from; pos += 2; }
|
||||
static inline void put_long(jlong *from, intptr_t *to, int& pos) { *(jlong *)(to + 1 + pos) = *from; pos += 2; }
|
||||
#else
|
||||
// Longs are stored in big-endian word format in two JavaCallArgument slots at *to.
|
||||
// The high half is in *to and the low half in *(to+1).
|
||||
static inline void put_long(jlong from, intptr_t *to) { put_int2r((jint *)&from, to); }
|
||||
static inline void put_long(jlong from, intptr_t *to, int& pos) { put_int2r((jint *)&from, to, pos); }
|
||||
static inline void put_long(jlong *from, intptr_t *to, int& pos) { put_int2r((jint *) from, to, pos); }
|
||||
#endif
|
||||
|
||||
// Oops are stored in native format in one JavaCallArgument slot at *to.
|
||||
static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; }
|
||||
static inline void put_obj(oop from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = from; }
|
||||
static inline void put_obj(oop *from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = *from; }
|
||||
|
||||
// Floats are stored in native format in one JavaCallArgument slot at *to.
|
||||
static inline void put_float(jfloat from, intptr_t *to) { *(jfloat *)(to + 0 ) = from; }
|
||||
static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; }
|
||||
static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }
|
||||
|
||||
#ifdef AARCH64
|
||||
// Doubles are stored in native word format in one JavaCallArgument slot at *(to+1).
|
||||
static inline void put_double(jdouble from, intptr_t *to) { *(jdouble *)(to + 1 + 0) = from; }
|
||||
static inline void put_double(jdouble from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = from; pos += 2; }
|
||||
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = *from; pos += 2; }
|
||||
#else
|
||||
// Doubles are stored in big-endian word format in two JavaCallArgument slots at *to.
|
||||
// The high half is in *to and the low half in *(to+1).
|
||||
static inline void put_double(jdouble from, intptr_t *to) { put_int2r((jint *)&from, to); }
|
||||
static inline void put_double(jdouble from, intptr_t *to, int& pos) { put_int2r((jint *)&from, to, pos); }
|
||||
static inline void put_double(jdouble *from, intptr_t *to, int& pos) { put_int2r((jint *) from, to, pos); }
|
||||
#endif
|
||||
|
||||
};
|
||||
|
||||
#endif // CPU_ARM_VM_JNITYPES_ARM_HPP
|
52
hotspot/src/cpu/arm/vm/jni_arm.h
Normal file
52
hotspot/src/cpu/arm/vm/jni_arm.h
Normal file
@ -0,0 +1,52 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef _JAVASOFT_JNI_MD_H_
|
||||
#define _JAVASOFT_JNI_MD_H_
|
||||
|
||||
// Note: please do not change these without also changing jni_md.h in the JDK
|
||||
// repository
|
||||
#ifndef __has_attribute
|
||||
#define __has_attribute(x) 0
|
||||
#endif
|
||||
#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
|
||||
#define JNIEXPORT __attribute__((externally_visible,visibility("default")))
|
||||
#define JNIIMPORT __attribute__((externally_visible,visibility("default")))
|
||||
#else
|
||||
#define JNIEXPORT
|
||||
#define JNIIMPORT
|
||||
#endif
|
||||
|
||||
#define JNICALL
|
||||
|
||||
typedef int jint;
|
||||
#if defined(_LP64)
|
||||
typedef long jlong;
|
||||
#else
|
||||
typedef long long jlong;
|
||||
#endif
|
||||
typedef signed char jbyte;
|
||||
|
||||
#endif /* !_JAVASOFT_JNI_MD_H_ */
|
69
hotspot/src/cpu/arm/vm/jvmciCodeInstaller_arm.cpp
Normal file
69
hotspot/src/cpu/arm/vm/jvmciCodeInstaller_arm.cpp
Normal file
@ -0,0 +1,69 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "jvmci/jvmciCodeInstaller.hpp"
|
||||
#include "jvmci/jvmciRuntime.hpp"
|
||||
#include "jvmci/jvmciCompilerToVM.hpp"
|
||||
#include "jvmci/jvmciJavaClasses.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "vmreg_arm.inline.hpp"
|
||||
|
||||
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
|
||||
Unimplemented();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
|
||||
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bool CodeInstaller::is_general_purpose_reg(VMReg hotspotRegister) {
|
||||
return false;
|
||||
}
|
3120
hotspot/src/cpu/arm/vm/macroAssembler_arm.cpp
Normal file
3120
hotspot/src/cpu/arm/vm/macroAssembler_arm.cpp
Normal file
File diff suppressed because it is too large
Load Diff
1390
hotspot/src/cpu/arm/vm/macroAssembler_arm.hpp
Normal file
1390
hotspot/src/cpu/arm/vm/macroAssembler_arm.hpp
Normal file
File diff suppressed because it is too large
Load Diff
104
hotspot/src/cpu/arm/vm/macroAssembler_arm.inline.hpp
Normal file
104
hotspot/src/cpu/arm/vm/macroAssembler_arm.inline.hpp
Normal file
@ -0,0 +1,104 @@
/*
 * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_MACROASSEMBLER_ARM_INLINE_HPP
#define CPU_ARM_VM_MACROASSEMBLER_ARM_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "runtime/handles.inline.hpp"

inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  int instr = *(int*)branch;
  int new_offset = (int)(target - branch NOT_AARCH64(- 8));
  assert((new_offset & 3) == 0, "bad alignment");

#ifdef AARCH64
  if ((instr & (0x1f << 26)) == (0b00101 << 26)) {
    // Unconditional B or BL
    assert (is_offset_in_range(new_offset, 26), "offset is too large");
    *(int*)branch = (instr & ~right_n_bits(26)) | encode_offset(new_offset, 26, 0);
  } else if ((instr & (0xff << 24)) == (0b01010100 << 24) && (instr & (1 << 4)) == 0) {
    // Conditional B
    assert (is_offset_in_range(new_offset, 19), "offset is too large");
    *(int*)branch = (instr & ~(right_n_bits(19) << 5)) | encode_offset(new_offset, 19, 5);
  } else if ((instr & (0b111111 << 25)) == (0b011010 << 25)) {
    // Compare & branch CBZ/CBNZ
    assert (is_offset_in_range(new_offset, 19), "offset is too large");
    *(int*)branch = (instr & ~(right_n_bits(19) << 5)) | encode_offset(new_offset, 19, 5);
  } else if ((instr & (0b111111 << 25)) == (0b011011 << 25)) {
    // Test & branch TBZ/TBNZ
    assert (is_offset_in_range(new_offset, 14), "offset is too large");
    *(int*)branch = (instr & ~(right_n_bits(14) << 5)) | encode_offset(new_offset, 14, 5);
  } else if ((instr & (0b111011 << 24)) == (0b011000 << 24)) {
    // LDR (literal)
    unsigned opc = ((unsigned)instr >> 30);
    assert (opc != 0b01 || ((uintx)target & 7) == 0, "ldr target should be aligned");
    assert (is_offset_in_range(new_offset, 19), "offset is too large");
    *(int*)branch = (instr & ~(right_n_bits(19) << 5)) | encode_offset(new_offset, 19, 5);
  } else if (((instr & (1 << 31)) == 0) && ((instr & (0b11111 << 24)) == (0b10000 << 24))) {
    // ADR
    assert (is_imm_in_range(new_offset, 21, 0), "offset is too large");
    instr = (instr & ~(right_n_bits(2) << 29)) | (new_offset & 3) << 29;
    *(int*)branch = (instr & ~(right_n_bits(19) << 5)) | encode_imm(new_offset >> 2, 19, 0, 5);
  } else if ((unsigned int)instr == address_placeholder_instruction) {
    // address
    assert (*(unsigned int *)(branch + InstructionSize) == address_placeholder_instruction, "address placeholder occupies two instructions");
    *(intx*)branch = (intx)target;
  } else {
    ::tty->print_cr("=============== instruction: 0x%x ================\n", instr);
    Unimplemented(); // TODO-AARCH64
  }
#else
  if ((instr & 0x0e000000) == 0x0a000000) {
    // B or BL instruction
    assert(new_offset < 0x2000000 && new_offset > -0x2000000, "encoding constraint");
    *(int*)branch = (instr & 0xff000000) | ((unsigned int)new_offset << 6 >> 8);
  } else if ((unsigned int)instr == address_placeholder_instruction) {
    // address
    *(int*)branch = (int)target;
  } else if ((instr & 0x0fff0000) == 0x028f0000 || ((instr & 0x0fff0000) == 0x024f0000)) {
    // ADR
    int encoding = 0x8 << 20; // ADD
    if (new_offset < 0) {
      encoding = 0x4 << 20; // SUB
      new_offset = -new_offset;
    }
    AsmOperand o(new_offset);
    *(int*)branch = (instr & 0xff0ff000) | encoding | o.encoding();
  } else {
    // LDR Rd, [PC, offset] instruction
    assert((instr & 0x0f7f0000) == 0x051f0000, "Must be ldr_literal");
    assert(new_offset < 4096 && new_offset > -4096, "encoding constraint");
    if (new_offset >= 0) {
      *(int*)branch = (instr & 0xff0ff000) | 9 << 20 | new_offset;
    } else {
      *(int*)branch = (instr & 0xff0ff000) | 1 << 20 | -new_offset;
    }
  }
#endif // AARCH64
}

#endif // CPU_ARM_VM_MACROASSEMBLER_ARM_INLINE_HPP
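[Editorial note] The 32-bit B/BL patching above packs a signed byte offset into the instruction's 24-bit word-offset field with the shift pair "<< 6 >> 8"; RawNativeBranch::destination() in nativeInst_arm_32.hpp later reverses it with "<< 8 >> 6". A minimal standalone sketch of that arithmetic (illustrative only, not part of the port):

#include <cassert>
#include <cstdint>

// Encode a byte offset into the imm24 field of an ARM B/BL instruction,
// mirroring "(unsigned int)new_offset << 6 >> 8" in pd_patch_instruction.
static uint32_t encode_b_offset(int32_t byte_offset) {
  assert(((byte_offset & 3) == 0) && "branch offsets are word-aligned");
  return ((uint32_t)byte_offset << 6) >> 8;  // == (byte_offset >> 2) & 0xffffff
}

// Decode it back, mirroring "encoding() << 8 >> 6": the left shift drops the
// condition/opcode bits, the arithmetic right shift sign-extends imm24.
static int32_t decode_b_offset(uint32_t instr) {
  return ((int32_t)(instr << 8)) >> 6;
}

int main() {
  const int32_t offsets[] = { 0, 4, -4, 0x1fffffc, -0x1fffffc };
  for (int32_t off : offsets) {
    uint32_t instr = 0xea000000 | encode_b_offset(off);  // unconditional B
    assert(decode_b_offset(instr) == off);
  }
  return 0;
}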
hotspot/src/cpu/arm/vm/metaspaceShared_arm.cpp (new file, 99 lines)
@ -0,0 +1,99 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "assembler_arm.inline.hpp"
#include "memory/metaspaceShared.hpp"

// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
//      oop obj;
//      int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//

//=====================================================================

// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtbl_list_size' original Klass objects.

#define __ masm->

void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
                                              void** vtable,
                                              char** md_top,
                                              char* md_end,
                                              char** mc_top,
                                              char* mc_end) {
  intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
  *(intptr_t *)(*md_top) = vtable_bytes;
  *md_top += sizeof(intptr_t);
  void** dummy_vtable = (void**)*md_top;
  *vtable = dummy_vtable;
  *md_top += vtable_bytes;

  CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
  MacroAssembler* masm = new MacroAssembler(&cb);

  for (int i = 0; i < vtbl_list_size; ++i) {
    Label common_code;
    for (int j = 0; j < num_virtuals; ++j) {
      dummy_vtable[num_virtuals * i + j] = (void*) __ pc();
      __ mov(Rtemp, j); // Rtemp contains an index of a virtual method in the table
      __ b(common_code);
    }

    InlinedAddress vtable_address((address)&vtbl_list[i]);
    __ bind(common_code);
    const Register tmp2 = AARCH64_ONLY(Rtemp2) NOT_AARCH64(R4);
    assert_different_registers(Rtemp, tmp2);
#ifndef AARCH64
    __ push(tmp2);
#endif // !AARCH64
    // Do not use ldr_global since the code must be portable across all ARM architectures
    __ ldr_literal(tmp2, vtable_address);
    __ ldr(tmp2, Address(tmp2));                        // get correct vtable address
    __ ldr(Rtemp, Address::indexed_ptr(tmp2, Rtemp));   // get real method pointer
    __ str(tmp2, Address(R0));                          // update vtable. R0 = "this"
#ifndef AARCH64
    __ pop(tmp2);
#endif // !AARCH64
    __ jump(Rtemp);
    __ bind_literal(vtable_address);
  }

  __ flush();
  *mc_top = (char*) __ pc();
}
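[Editorial note] The comments above describe dispatch through dummy vtable slots that differ only by an ordinal constant and funnel into shared lookup code. A hypothetical plain-C++ sketch of that shape (names and structure are illustrative, not HotSpot code):

#include <cassert>

// Every dummy slot records only its index ("mov Rtemp, j") and branches to
// shared code that fetches the real method from the saved table.
typedef int (*method_fn)(void* self);

static int real_oop_size(void* self) { (void)self; return 42; }
static method_fn real_vtable[] = { real_oop_size };

// The "common_code" block: index -> real method, then call it.
static int common_code(int index, void* self) {
  return real_vtable[index](self);
}

// One dummy stub per slot, differing only by its ordinal constant.
static int dummy_slot_0(void* self) { return common_code(0, self); }

int main() {
  method_fn dummy_vtable[] = { dummy_slot_0 };
  assert(dummy_vtable[0](nullptr) == 42);
  return 0;
}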
hotspot/src/cpu/arm/vm/methodHandles_arm.cpp (new file, 587 lines)
@ -0,0 +1,587 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// This file mirrors as much as possible methodHandles_x86.cpp to ease
// cross platform development for JSR292.
// Last synchronization: changeset f8c9417e3571

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp1, Register temp2) {
  if (VerifyMethodHandles) {
    verify_klass(_masm, klass_reg, temp1, temp2, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
                 "MH argument is a Class");
  }
  __ ldr(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, "%s should be nonzero", xname);
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, Register temp1, Register temp2, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ cbz(obj, L_bad);
  __ load_klass(temp1, obj);
  __ lea(temp2, ExternalAddress((address) klass_addr));
  __ ldr(temp2, temp2); // the cmpptr on x86 dereferences the AddressLiteral (not lea)
  __ cmp(temp1, temp2);
  __ b(L_ok, eq);
  intptr_t super_check_offset = klass->super_check_offset();
  __ ldr(temp1, Address(temp1, super_check_offset));
  __ cmp(temp1, temp2);
  __ b(L_ok, eq);

  __ bind(L_bad);
  __ stop(error_message);
  __ BIND(L_ok);
  BLOCK_COMMENT("} verify_klass");
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
  __ ldr_u32(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
  __ logical_shift_right(temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
  __ andr(temp, temp, (unsigned)java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
  __ cmp(temp, ref_kind);
  __ b(L, eq);
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ stop(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ bind(L);
}

#endif //ASSERT
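[Editorial note] verify_ref_kind() above checks the reference kind packed into the MemberName flags word. A small sketch of that shift/mask extraction; the constant values are assumptions mirroring java.lang.invoke.MemberName and the JVM_REF_* constants, not taken from this file:

#include <cassert>

// Assumed constants (illustrative) mirroring java_lang_invoke_MemberName.
static const unsigned MN_REFERENCE_KIND_SHIFT = 24;
static const unsigned MN_REFERENCE_KIND_MASK  = 0x0F;

// Extract the ref_kind the same way verify_ref_kind() does:
// logical shift right, then mask.
static int ref_kind_of(unsigned flags) {
  return (flags >> MN_REFERENCE_KIND_SHIFT) & MN_REFERENCE_KIND_MASK;
}

int main() {
  const int JVM_REF_invokeVirtual = 5;  // assumed JVM constant
  unsigned flags = (unsigned)JVM_REF_invokeVirtual << MN_REFERENCE_KIND_SHIFT;
  assert(ref_kind_of(flags) == JVM_REF_invokeVirtual);
  return 0;
}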

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, bool for_compiler_entry) {
  Label L_no_such_method;
  __ cbz(Rmethod, L_no_such_method);

  // Note: JVMTI overhead seems small enough compared to invocation
  // cost and is not worth the complexity or code size overhead of
  // supporting several variants of each adapter.
  if (!for_compiler_entry && (JvmtiExport::can_post_interpreter_events())) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    __ ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
#ifdef AARCH64
    Label L;
    __ cbz(Rtemp, L);
    __ indirect_jump(Address(Rmethod, Method::interpreter_entry_offset()), Rtemp);
    __ bind(L);
#else
    __ cmp(Rtemp, 0);
    __ ldr(PC, Address(Rmethod, Method::interpreter_entry_offset()), ne);
#endif // AARCH64
  }
  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();

  __ indirect_jump(Address(Rmethod, entry_offset), Rtemp);

  __ bind(L_no_such_method);
  // throw exception
  __ jump(StubRoutines::throw_AbstractMethodError_entry(), relocInfo::runtime_call_type, Rtemp);
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register tmp,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, tmp, Rmethod);

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ load_heap_oop(tmp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
  __ verify_oop(tmp);

  __ load_heap_oop(tmp, Address(tmp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(tmp);

  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ ldr(Rmethod, Address(tmp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ ldr(tmp, Address(Rmethod, Method::const_offset()));
    __ load_sized_value(tmp,
                        Address(tmp, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ ldr(tmp, __ receiver_argument_address(Rparams, tmp, tmp));
    __ cmp(tmp, recv);
    __ b(L, eq);
    __ stop("receiver not on stack");
    __ bind(L);
  }

  jump_from_method_handle(_masm, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the user-visible names (such as linkToCallSite) are not directly used.
// They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
|
||||
// They all require an extra argument.
|
||||
__ should_not_reach_here(); // empty stubs make SG sick
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Rmethod: Method*
|
||||
// Rparams (SP on 32-bit ARM): pointer to parameters
|
||||
// Rsender_sp (R4/R19): sender SP (must preserve; see prepare_to_jump_from_interpreted)
|
||||
// R5_mh: receiver method handle (must load from sp[MethodTypeForm.vmslots])
|
||||
// R1, R2, Rtemp: garbage temp, blown away
|
||||
|
||||
// Use same name as x86 to ease future merges
|
||||
Register rdx_temp = R2_tmp;
|
||||
Register rdx_param_size = rdx_temp; // size of parameters
|
||||
Register rax_temp = R1_tmp;
|
||||
Register rcx_mh = R5_mh; // MH receiver; dies quickly and is recycled
|
||||
Register rbx_method = Rmethod; // eventual target of this invocation
|
||||
Register rdi_temp = Rtemp;
|
||||
|
||||
// here's where control starts out:
|
||||
__ align(CodeEntryAlignment);
|
||||
address entry_point = __ pc();
|
||||
|
||||
if (VerifyMethodHandles) {
|
||||
Label L;
|
||||
BLOCK_COMMENT("verify_intrinsic_id {");
|
||||
__ ldrh(rdi_temp, Address(rbx_method, Method::intrinsic_id_offset_in_bytes()));
|
||||
__ sub_slow(rdi_temp, rdi_temp, (int) iid);
|
||||
__ cbz(rdi_temp, L);
|
||||
if (iid == vmIntrinsics::_linkToVirtual ||
|
||||
iid == vmIntrinsics::_linkToSpecial) {
|
||||
// could do this for all kinds, but would explode assembly code size
|
||||
trace_method_handle(_masm, "bad Method*::intrinsic_id");
|
||||
}
|
||||
__ stop("bad Method*::intrinsic_id");
|
||||
__ bind(L);
|
||||
BLOCK_COMMENT("} verify_intrinsic_id");
|
||||
}
|
||||
|
||||
// First task: Find out how big the argument list is.
|
||||
Address rdx_first_arg_addr;
|
||||
int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
|
||||
assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
|
||||
if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
|
||||
__ ldr(rdx_param_size, Address(rbx_method, Method::const_offset()));
|
||||
__ load_sized_value(rdx_param_size,
|
||||
Address(rdx_param_size, ConstMethod::size_of_parameters_offset()),
|
||||
sizeof(u2), /*is_signed*/ false);
|
||||
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
|
||||
rdx_first_arg_addr = __ receiver_argument_address(Rparams, rdx_param_size, rdi_temp);
|
||||
} else {
|
||||
DEBUG_ONLY(rdx_param_size = noreg);
|
||||
}
|
||||
|
||||
if (!is_signature_polymorphic_static(iid)) {
|
||||
__ ldr(rcx_mh, rdx_first_arg_addr);
|
||||
DEBUG_ONLY(rdx_param_size = noreg);
|
||||
}
|
||||
|
||||
// rdx_first_arg_addr is live!
|
||||
|
||||
trace_method_handle_interpreter_entry(_masm, iid);
|
||||
|
||||
if (iid == vmIntrinsics::_invokeBasic) {
|
||||
generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);
|
||||
|
||||
} else {
|
||||
// Adjust argument list by popping the trailing MemberName argument.
|
||||
Register rcx_recv = noreg;
|
||||
if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
|
||||
// Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
|
||||
__ ldr(rcx_recv = rcx_mh, rdx_first_arg_addr);
|
||||
DEBUG_ONLY(rdx_param_size = noreg);
|
||||
}
|
||||
Register rbx_member = rbx_method; // MemberName ptr; incoming method ptr is dead now
|
||||
#ifdef AARCH64
|
||||
__ ldr(rbx_member, Address(Rparams, Interpreter::stackElementSize, post_indexed));
|
||||
#else
|
||||
__ pop(rbx_member);
|
||||
#endif
|
||||
generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
|
||||
}
|
||||
return entry_point;
|
||||
}
|
||||
|
||||
void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
|
||||
vmIntrinsics::ID iid,
|
||||
Register receiver_reg,
|
||||
Register member_reg,
|
||||
bool for_compiler_entry) {
|
||||
assert(is_signature_polymorphic(iid), "expected invoke iid");
|
||||
// Use same name as x86 to ease future merges
|
||||
Register rbx_method = Rmethod; // eventual target of this invocation
|
||||
// temps used in this code are not used in *either* compiled or interpreted calling sequences
|
||||
Register temp1 = (for_compiler_entry ? saved_last_sp_register() : R1_tmp);
|
||||
Register temp2 = AARCH64_ONLY(R9) NOT_AARCH64(R8);
|
||||
Register temp3 = Rtemp; // R12/R16
|
||||
Register temp4 = AARCH64_ONLY(Rtemp2) NOT_AARCH64(R5);
|
||||
if (for_compiler_entry) {
|
||||
assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
|
||||
#ifdef AARCH64
|
||||
assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
|
||||
assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
|
||||
assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
|
||||
assert_different_registers(temp4, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
|
||||
#else
|
||||
assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3);
|
||||
assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3);
|
||||
assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3);
|
||||
assert_different_registers(temp4, j_rarg0, j_rarg1, j_rarg2, j_rarg3);
|
||||
#endif // AARCH64
|
||||
}
|
||||
assert_different_registers(temp1, temp2, temp3, receiver_reg);
|
||||
assert_different_registers(temp1, temp2, temp3, temp4, member_reg);
|
||||
if (!for_compiler_entry)
|
||||
assert_different_registers(temp1, temp2, temp3, temp4, saved_last_sp_register()); // don't trash lastSP
|
||||
|
||||
if (iid == vmIntrinsics::_invokeBasic) {
|
||||
// indirect through MH.form.exactInvoker.vmtarget
|
||||
jump_to_lambda_form(_masm, receiver_reg, temp3, for_compiler_entry);
|
||||
|
||||
} else {
|
||||
// The method is a member invoker used by direct method handles.
|
||||
if (VerifyMethodHandles) {
|
||||
// make sure the trailing argument really is a MemberName (caller responsibility)
|
||||
verify_klass(_masm, member_reg, temp2, temp3, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
|
||||
"MemberName required for invokeVirtual etc.");
|
||||
}
|
||||
|
||||
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
|
||||
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
|
||||
Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
|
||||
|
||||
Register temp1_recv_klass = temp1;
|
||||
if (iid != vmIntrinsics::_linkToStatic) {
|
||||
if (iid == vmIntrinsics::_linkToSpecial) {
|
||||
// Don't actually load the klass; just null-check the receiver.
|
||||
__ null_check(receiver_reg, temp3);
|
||||
} else {
|
||||
// load receiver klass itself
|
||||
__ null_check(receiver_reg, temp3, oopDesc::klass_offset_in_bytes());
|
||||
__ load_klass(temp1_recv_klass, receiver_reg);
|
||||
__ verify_klass_ptr(temp1_recv_klass);
|
||||
}
|
||||
BLOCK_COMMENT("check_receiver {");
|
||||
// The receiver for the MemberName must be in receiver_reg.
|
||||
// Check the receiver against the MemberName.clazz
|
||||
if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
|
||||
// Did not load it above...
|
||||
__ load_klass(temp1_recv_klass, receiver_reg);
|
||||
__ verify_klass_ptr(temp1_recv_klass);
|
||||
}
|
||||
// Check the receiver against the MemberName.clazz
|
||||
if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
|
||||
Label L_ok;
|
||||
Register temp2_defc = temp2;
|
||||
__ load_heap_oop(temp2_defc, member_clazz);
|
||||
load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
|
||||
__ verify_klass_ptr(temp2_defc);
|
||||
#ifdef AARCH64
|
||||
// TODO-AARCH64
|
||||
__ b(L_ok);
|
||||
#else
|
||||
__ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, noreg, L_ok);
|
||||
#endif
|
||||
// If we get here, the type check failed!
|
||||
__ stop("receiver class disagrees with MemberName.clazz");
|
||||
__ bind(L_ok);
|
||||
}
|
||||
BLOCK_COMMENT("} check_receiver");
|
||||
}
|
||||
if (iid == vmIntrinsics::_linkToSpecial ||
|
||||
iid == vmIntrinsics::_linkToStatic) {
|
||||
DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass
|
||||
}
|
||||
|
||||
// Live registers at this point:
|
||||
// member_reg - MemberName that was the extra argument
|
||||
// temp1_recv_klass - klass of stacked receiver, if needed
|
||||
|
||||
Label L_incompatible_class_change_error;
|
||||
switch (iid) {
|
||||
case vmIntrinsics::_linkToSpecial:
|
||||
if (VerifyMethodHandles) {
|
||||
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
|
||||
}
|
||||
__ ldr(Rmethod, member_vmtarget);
|
||||
break;
|
||||
|
||||
case vmIntrinsics::_linkToStatic:
|
||||
if (VerifyMethodHandles) {
|
||||
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
|
||||
}
|
||||
__ ldr(Rmethod, member_vmtarget);
|
||||
break;
|
||||
|
||||
case vmIntrinsics::_linkToVirtual:
|
||||
{
|
||||
// same as TemplateTable::invokevirtual,
|
||||
// minus the CP setup and profiling:
|
||||
|
||||
if (VerifyMethodHandles) {
|
||||
verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
|
||||
}
|
||||
|
||||
// pick out the vtable index from the MemberName, and then we can discard it:
|
||||
Register temp2_index = temp2;
|
||||
__ ldr(temp2_index, member_vmindex);
|
||||
|
||||
if (VerifyMethodHandles) {
|
||||
Label L_index_ok;
|
||||
__ cmp(temp2_index, 0);
|
||||
__ b(L_index_ok, ge);
|
||||
__ stop("no virtual index");
|
||||
__ bind(L_index_ok);
|
||||
}
|
||||
|
||||
// Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget
|
||||
// at this point. And VerifyMethodHandles has already checked clazz, if needed.
|
||||
|
||||
// get target Method* & entry point
|
||||
__ lookup_virtual_method(temp1_recv_klass, temp2_index, Rmethod);
|
||||
break;
|
||||
}
|
||||
|
||||
case vmIntrinsics::_linkToInterface:
|
||||
{
|
||||
// same as TemplateTable::invokeinterface
|
||||
// (minus the CP setup and profiling, with different argument motion)
|
||||
if (VerifyMethodHandles) {
|
||||
verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
|
||||
}
|
||||
|
||||
Register temp3_intf = temp3;
|
||||
__ load_heap_oop(temp3_intf, member_clazz);
|
||||
load_klass_from_Class(_masm, temp3_intf, temp2, temp4);
|
||||
__ verify_klass_ptr(temp3_intf);
|
||||
|
||||
Register rbx_index = rbx_method;
|
||||
__ ldr(rbx_index, member_vmindex);
|
||||
if (VerifyMethodHandles) {
|
||||
Label L;
|
||||
__ cmp(rbx_index, 0);
|
||||
__ b(L, ge);
|
||||
__ stop("invalid vtable index for MH.invokeInterface");
|
||||
__ bind(L);
|
||||
}
|
||||
|
||||
// given intf, index, and recv klass, dispatch to the implementation method
|
||||
Label L_no_such_interface;
|
||||
__ lookup_interface_method(temp1_recv_klass, temp3_intf,
|
||||
// note: next two args must be the same:
|
||||
rbx_index, rbx_method,
|
||||
temp2, temp4,
|
||||
L_incompatible_class_change_error);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
|
||||
break;
|
||||
}
|
||||
|
||||
// Live at this point:
|
||||
// Rmethod (target method)
|
||||
// Rsender_sp, Rparams (if interpreted)
|
||||
// register arguments (if compiled)
|
||||
|
||||
// After figuring out which concrete method to call, jump into it.
|
||||
__ verify_method_ptr(Rmethod);
|
||||
jump_from_method_handle(_masm, for_compiler_entry);
|
||||
|
||||
if (iid == vmIntrinsics::_linkToInterface) {
|
||||
__ bind(L_incompatible_class_change_error);
|
||||
__ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, Rtemp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#ifndef PRODUCT
|
||||
enum {
|
||||
ARG_LIMIT = 255, SLOP = 4,
|
||||
// use this parameter for checking for garbage stack movements:
|
||||
UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
|
||||
// the slop defends against false alarms due to fencepost errors
|
||||
};
|
||||
|
||||
#ifdef AARCH64
|
||||
const int trace_mh_nregs = 32; // R0-R30, PC
|
||||
#else
|
||||
const int trace_mh_nregs = 15;
|
||||
const Register trace_mh_regs[trace_mh_nregs] =
|
||||
{R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, PC};
|
||||
#endif // AARCH64
|
||||
|
||||
void trace_method_handle_stub(const char* adaptername,
|
||||
intptr_t* saved_regs,
|
||||
intptr_t* saved_bp,
|
||||
oop mh) {
|
||||
// called as a leaf from native code: do not block the JVM!
|
||||
bool has_mh = (strstr(adaptername, "/static") == NULL &&
|
||||
strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
|
||||
intptr_t* entry_sp = (intptr_t*) &saved_regs[trace_mh_nregs]; // just after the saved regs
|
||||
intptr_t* saved_sp = (intptr_t*) saved_regs[Rsender_sp->encoding()]; // save of Rsender_sp
|
||||
intptr_t* last_sp = (intptr_t*) saved_bp[AARCH64_ONLY(frame::interpreter_frame_stack_top_offset) NOT_AARCH64(frame::interpreter_frame_last_sp_offset)];
|
||||
intptr_t* base_sp = last_sp;
|
||||
|
||||
intptr_t mh_reg = (intptr_t)saved_regs[R5_mh->encoding()];
|
||||
const char* mh_reg_name = "R5_mh";
|
||||
if (!has_mh) mh_reg_name = "R5";
|
||||
tty->print_cr("MH %s %s=" PTR_FORMAT " sp=(" PTR_FORMAT "+" INTX_FORMAT ") stack_size=" INTX_FORMAT " bp=" PTR_FORMAT,
|
||||
adaptername, mh_reg_name, mh_reg,
|
||||
(intptr_t)entry_sp, (intptr_t)saved_sp - (intptr_t)entry_sp, (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
|
||||
|
||||
if (last_sp != saved_sp && last_sp != NULL)
|
||||
tty->print_cr("*** last_sp=" INTPTR_FORMAT, p2i(last_sp));
|
||||
if (Verbose) {
|
||||
tty->print(" reg dump: ");
|
||||
int i;
|
||||
for (i = 0; i < trace_mh_nregs; i++) {
|
||||
if (i > 0 && i % AARCH64_ONLY(2) NOT_AARCH64(4) == 0)
|
||||
tty->print("\n + dump: ");
|
||||
#ifdef AARCH64
|
||||
const char* reg_name = (i == trace_mh_nregs-1) ? "pc" : as_Register(i)->name();
|
||||
#else
|
||||
const char* reg_name = trace_mh_regs[i]->name();
|
||||
#endif
|
||||
tty->print(" %s: " INTPTR_FORMAT, reg_name, p2i((void *)saved_regs[i]));
|
||||
}
|
||||
tty->cr();
|
||||
}
|
||||
|
||||
if (Verbose) {
|
||||
// dump last frame (from JavaThread::print_frame_layout)
|
||||
|
||||
    // Note: code is robust but the dumped information may not be
    // 100% correct, particularly with respect to the dumped
    // "unextended_sp". Getting it right for all trace_method_handle
    // call paths is not worth the complexity/risk. The correct slot
    // will be identified by *Rsender_sp anyway in the dump.
    JavaThread* p = JavaThread::active();

    ResourceMark rm;
    PRESERVE_EXCEPTION_MARK;
    FrameValues values;

    intptr_t* dump_fp = (intptr_t *) saved_bp;
    address dump_pc = (address) saved_regs[trace_mh_nregs-2];  // LR (with LR,PC last in saved_regs)
    frame dump_frame((intptr_t *)entry_sp, dump_fp, dump_pc);

    dump_frame.describe(values, 1);
    // mark Rsender_sp if seems valid
    if (has_mh) {
      if ((saved_sp >= entry_sp - UNREASONABLE_STACK_MOVE) && (saved_sp < dump_fp)) {
        values.describe(-1, saved_sp, "*Rsender_sp");
      }
    }

    // Note: the unextended_sp may not be correct
    tty->print_cr("  stack layout:");
    values.print(p);
  }
  if (Verbose) {
    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles) return;
  BLOCK_COMMENT("trace_method_handle {");
  // register saving
  // must correspond to trace_mh_nregs and trace_mh_regs defined above
  int push_size = __ save_all_registers();
  assert(trace_mh_nregs*wordSize == push_size, "saved register count mismatch");

  __ mov_slow(R0, adaptername);
  __ mov(R1, SP);  // entry_sp (after pushes)
  __ mov(R2, FP);
  if (R5_mh != R3) {
    assert_different_registers(R0, R1, R2, R5_mh);
    __ mov(R3, R5_mh);
  }

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), R0, R1, R2, R3);

  __ restore_all_registers();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT
hotspot/src/cpu/arm/vm/methodHandles_arm.hpp (new file, 55 lines)
@ -0,0 +1,55 @@
/*
 * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Platform-specific definitions for method handles.
// These definitions are inlined into class MethodHandles.

// Adapters
enum /* platform_dependent_constants */ {
  adapter_code_size = 18000 NOT_PRODUCT(+ 30000)
};

// Additional helper methods for MethodHandles code generation:
public:
  static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp1, Register temp2);

  static void verify_klass(MacroAssembler* _masm,
                           Register obj, Register temp1, Register temp2, SystemDictionary::WKID klass_id,
                           const char* error_message = "wrong klass") NOT_DEBUG_RETURN;

  static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;

  // Similar to InterpreterMacroAssembler::jump_from_interpreted.
  // Takes care of special dispatch from single stepping too.
  // Rmethod should contain target methodOop.
  static void jump_from_method_handle(MacroAssembler* _masm, bool for_compiler_entry);

  static void jump_to_lambda_form(MacroAssembler* _masm,
                                  Register recv, Register tmp,
                                  bool for_compiler_entry);

  static Register saved_last_sp_register() {
    // Should be in sharedRuntime, not here.
    return Rsender_sp;
  }
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -22,31 +22,20 @@
 *
 */

#ifndef SHARE_VM_CODE_CODE_CACHE_EXTENSIONS_HPP
#define SHARE_VM_CODE_CODE_CACHE_EXTENSIONS_HPP
#ifndef CPU_ARM_VM_NATIVEINST_ARM_HPP
#define CPU_ARM_VM_NATIVEINST_ARM_HPP

#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

class CodeCacheExtensionsSteps: AllStatic {
public:
  enum Step {
    // Support for optional fine grain initialization hooks
    // Note: these hooks must support refining the granularity
    // (e.g. adding intermediate steps in the ordered enum
    // if needed for future features)
    Start,
    VMVersion,
    StubRoutines1,
    Universe,
    TemplateInterpreter,
    Interpreter,
    StubRoutines2,
    InitGlobals,
    CreateVM,
    LastStep
  };
};

#include "code/codeCacheExtensions_ext.hpp"
#ifdef AARCH64
#include "nativeInst_arm_64.hpp"
#else
#include "nativeInst_arm_32.hpp"
#endif

#endif // SHARE_VM_CODE_CODE_CACHE_EXTENSIONS_HPP

#endif // CPU_ARM_VM_NATIVEINST_ARM_HPP
hotspot/src/cpu/arm/vm/nativeInst_arm_32.cpp (new file, 339 lines)
@ -0,0 +1,339 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "assembler_arm.inline.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_arm.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#include "code/icBuffer.hpp"

int NativeMovRegMem::offset() const {
  switch (kind()) {
    case instr_ldr_str:
      return encoding() & 0xfff;
    case instr_ldrh_strh:
      return (encoding() & 0x0f) | ((encoding() >> 4) & 0xf0);
    case instr_fld_fst:
      return (encoding() & 0xff) << 2;
    default:
      ShouldNotReachHere();
      return 0;
  }
}

void NativeMovRegMem::set_offset(int x) {
  assert(x >= 0 && x < 65536, "encoding constraint");
  const int Rt = Rtemp->encoding();

  // If offset is too large to be placed into single ldr/str instruction, we replace
  //   ldr Rd, [Rn, #offset]
  //   nop
  // with
  //   add Rtemp, Rn, #offset_hi
  //   ldr Rd, [Rtemp, #offset_lo]
  switch (kind()) {
    case instr_ldr_str:
      if (x < 4096) {
        set_encoding((encoding() & 0xfffff000) | x);
      } else {
        NativeInstruction* next = nativeInstruction_at(next_raw_instruction_address());
        assert(next->is_nop(), "must be");
        next->set_encoding((encoding() & 0xfff0f000) | Rt << 16 | (x & 0xfff));
        this->set_encoding((encoding() & 0x000f0000) | Rt << 12 | x >> 12 | 0xe2800a00);
      }
      break;
    case instr_ldrh_strh:
      if (x < 256) {
        set_encoding((encoding() & 0xfffff0f0) | (x & 0x0f) | (x & 0xf0) << 4);
      } else {
        NativeInstruction* next = nativeInstruction_at(next_raw_instruction_address());
        assert(next->is_nop(), "must be");
        next->set_encoding((encoding() & 0xfff0f0f0) | Rt << 16 | (x & 0x0f) | (x & 0xf0) << 4);
        this->set_encoding((encoding() & 0x000f0000) | Rt << 12 | x >> 8 | 0xe2800c00);
      }
      break;
    case instr_fld_fst:
      if (x < 1024) {
        set_encoding((encoding() & 0xffffff00) | (x >> 2));
      } else {
        NativeInstruction* next = nativeInstruction_at(next_raw_instruction_address());
        assert(next->is_nop(), "must be");
        next->set_encoding((encoding() & 0xfff0ff00) | Rt << 16 | ((x >> 2) & 0xff));
        this->set_encoding((encoding() & 0x000f0000) | Rt << 12 | x >> 10 | 0xe2800b00);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
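[Editorial note] For the instr_ldr_str case above, the too-large offset is split into a high part carried by the add (a valid rotated immediate) and a 12-bit low part carried by the ldr. A tiny standalone sketch of that split (illustrative only, not HotSpot code):

#include <cassert>

// Split a 16-bit offset as NativeMovRegMem::set_offset() does for
// instr_ldr_str: the add carries bits [15:12], the ldr bits [11:0].
static void split_ldr_offset(int x, int* hi, int* lo) {
  assert((x >= 0 && x < 65536) && "encoding constraint");
  *hi = x & ~0xfff;  // becomes "add Rtemp, Rn, #offset_hi"
  *lo = x & 0xfff;   // becomes "ldr Rd, [Rtemp, #offset_lo]"
}

int main() {
  int hi, lo;
  split_ldr_offset(0x5678, &hi, &lo);
  assert(hi == 0x5000 && lo == 0x678 && hi + lo == 0x5678);
  return 0;
}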

intptr_t NativeMovConstReg::data() const {
  RawNativeInstruction* next = next_raw();
  if (is_movw()) {
    // Oop embedded in movw/movt instructions
    assert(VM_Version::supports_movw(), "must be");
    return (this->encoding() & 0x00000fff) | (this->encoding() & 0x000f0000) >> 4 |
           (next->encoding() & 0x00000fff) << 16 | (next->encoding() & 0x000f0000) << 12;
  } else {
    // Oop is loaded from oops section or inlined in the code
    int oop_offset;
    if (is_ldr_literal()) {
      //   ldr Rd, [PC, #offset]
      oop_offset = ldr_offset();
    } else {
      assert(next->is_ldr(), "must be");
      oop_offset = (this->encoding() & 0xff) << 12 | (next->encoding() & 0xfff);
      if (is_add_pc()) {
        //   add Rd, PC, #offset_hi
        //   ldr Rd, [Rd, #offset_lo]
        assert(next->encoding() & (1 << 23), "sign mismatch");
        // offset OK (both positive)
      } else {
        assert(is_sub_pc(), "must be");
        //   sub Rd, PC, #offset_hi
        //   ldr Rd, [Rd, -#offset_lo]
        assert(!(next->encoding() & (1 << 23)), "sign mismatch");
        // negative offsets
        oop_offset = -oop_offset;
      }
    }
    return *(int*)(instruction_address() + 8 + oop_offset);
  }
}
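[Editorial note] The movw/movt branch of data() gathers a 32-bit value from two 16-bit immediates, each stored as imm4:imm12 inside its instruction word. A standalone sketch of that reassembly (the instruction words below are hand-encoded examples, not taken from the commit):

#include <cassert>

// Pull the 16-bit immediate out of a movw/movt encoding: imm12 sits in
// bits [11:0], imm4 in bits [19:16] -- exactly the masks data() uses.
static unsigned imm16_of(unsigned insn) {
  return (insn & 0x00000fff) | ((insn & 0x000f0000) >> 4);
}

int main() {
  unsigned movw = 0xe3010234;  // movw r0, #0x1234
  unsigned movt = 0xe34d0ead;  // movt r0, #0xdead
  unsigned value = imm16_of(movw) | (imm16_of(movt) << 16);
  assert(value == 0xdead1234);
  return 0;
}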

void NativeMovConstReg::set_data(intptr_t x, address pc) {
  // Find and replace the oop corresponding to this instruction in oops section
  RawNativeInstruction* next = next_raw();
  oop* oop_addr = NULL;
  Metadata** metadata_addr = NULL;
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  if (cb != NULL) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      RelocIterator iter(nm, instruction_address(), next->instruction_address());
      while (iter.next()) {
        if (iter.type() == relocInfo::oop_type) {
          oop_addr = iter.oop_reloc()->oop_addr();
          *oop_addr = cast_to_oop(x);
          break;
        } else if (iter.type() == relocInfo::metadata_type) {
          metadata_addr = iter.metadata_reloc()->metadata_addr();
          *metadata_addr = (Metadata*)x;
          break;
        }
      }
    }
  }

  if (is_movw()) {
    // data embedded in movw/movt instructions
    assert(VM_Version::supports_movw(), "must be");
    unsigned int lo = (unsigned int)x;
    unsigned int hi = (unsigned int)(x >> 16);
    this->set_encoding((this->encoding() & 0xfff0f000) | (lo & 0xf000) << 4 | (lo & 0xfff));
    next->set_encoding((next->encoding() & 0xfff0f000) | (hi & 0xf000) << 4 | (hi & 0xfff));
  } else if (oop_addr == NULL && metadata_addr == NULL) {
    // A static ldr_literal (without oop or metadata relocation)
    assert(is_ldr_literal(), "must be");
    int offset = ldr_offset();
    oop_addr = (oop*)(instruction_address() + 8 + offset);
    *oop_addr = cast_to_oop(x);
  } else {
    // data is loaded from oop or metadata section
    int offset;

    address addr = oop_addr != NULL ? (address)oop_addr : (address)metadata_addr;

    if (pc == 0) {
      offset = addr - instruction_address() - 8;
    } else {
      offset = addr - pc - 8;
    }

    int sign = (offset >= 0) ? (1 << 23) : 0;
    int delta = (offset >= 0) ? offset : (-offset);
    assert(delta < 0x100000, "within accessible range");
    if (is_ldr_literal()) {
      // fix the ldr with the real offset to the oop/metadata table
      assert(next->is_nop(), "must be");
      if (delta < 4096) {
        //   ldr Rd, [PC, #offset]
        set_encoding((encoding() & 0xff7ff000) | delta | sign);
        assert(ldr_offset() == offset, "check encoding");
      } else {
        int cc = encoding() & 0xf0000000;
        int Rd = (encoding() >> 12) & 0xf;
        int Rt = Rd;
        assert(Rt != 0xf, "Illegal destination register");  // or fix by using Rtemp
        // move the ldr, fixing delta_lo and the source register
        next->set_encoding((encoding() & 0xff70f000) | (Rt << 16) | (delta & 0xfff) | sign);
        assert(next->is_ldr(), "must be");
        if (offset > 0) {
          //   add Rt, PC, #delta_hi
          //   ldr Rd, [Rt, #delta_lo]
          this->set_encoding((Rt << 12) | (delta >> 12) | 0x028f0a00 | cc);
          assert(is_add_pc(), "must be");
        } else {
          //   sub Rt, PC, #delta_hi
          //   ldr Rd, [Rt, -#delta_lo]
          this->set_encoding((Rt << 12) | (delta >> 12) | 0x024f0a00 | cc);
          assert(is_sub_pc(), "must be");
        }
      }
    } else {
      assert(is_pc_rel(), "must be");
      assert(next->is_ldr(), "must be");
      if (offset > 0) {
        //   add Rt, PC, #delta_hi
        this->set_encoding((this->encoding() & 0xf00ff000) | 0x02800a00 | (delta >> 12));
        assert(is_add_pc(), "must be");
      } else {
        //   sub Rt, PC, #delta_hi
        this->set_encoding((this->encoding() & 0xf00ff000) | 0x02400a00 | (delta >> 12));
        assert(is_sub_pc(), "must be");
      }
      //   ldr Rd, Rt, #delta_lo (or -#delta_lo)
      next->set_encoding((next->encoding() & 0xff7ff000) | (delta & 0xfff) | sign);
    }
  }
}

void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
  int offset;
  if (pc == 0) {
    offset = addr - instruction_address() - 8;
  } else {
    offset = addr - pc - 8;
  }

  RawNativeInstruction* next = next_raw();

  int sign = (offset >= 0) ? (1 << 23) : 0;
  int delta = (offset >= 0) ? offset : (-offset);
  assert(delta < 0x100000, "within accessible range");
  if (is_ldr_literal()) {
    if (delta < 4096) {
      //   ldr Rd, [PC, #offset]
      set_encoding((encoding() & 0xff7ff000) | delta | sign);
      assert(ldr_offset() == offset, "check encoding");
    } else {
      assert(next->is_nop(), "must be");
      int cc = encoding() & 0xf0000000;
      int Rd = (encoding() >> 12) & 0xf;
      int Rt = Rd;
      assert(Rt != 0xf, "Illegal destination register");  // or fix by using Rtemp
      // move the ldr, fixing delta_lo and the source register
      next->set_encoding((encoding() & 0xff70f000) | (Rt << 16) | (delta & 0xfff) | sign);
      assert(next->is_ldr(), "must be");
      if (offset > 0) {
        //   add Rt, PC, #delta_hi
        //   ldr Rd, [Rt, #delta_lo]
        this->set_encoding((Rt << 12) | (delta >> 12) | 0x028f0a00 | cc);
        assert(is_add_pc(), "must be");
      } else {
        //   sub Rt, PC, #delta_hi
        //   ldr Rd, [Rt, -#delta_lo]
        this->set_encoding((Rt << 12) | (delta >> 12) | 0x024f0a00 | cc);
        assert(is_sub_pc(), "must be");
      }
    }
  } else {
    assert(is_pc_rel(), "must be");
    assert(next->is_ldr(), "must be");
    if (offset > 0) {
      //   add Rt, PC, #delta_hi
      this->set_encoding((this->encoding() & 0xf00ff000) | 0x02800a00 | (delta >> 12));
      assert(is_add_pc(), "must be");
    } else {
      //   sub Rt, PC, #delta_hi
      this->set_encoding((this->encoding() & 0xf00ff000) | 0x02400a00 | (delta >> 12));
      assert(is_sub_pc(), "must be");
    }
    //   ldr Rd, Rt, #delta_lo (or -#delta_lo)
    next->set_encoding((next->encoding() & 0xff7ff000) | (delta & 0xfff) | sign);
  }
}

void RawNativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}

void RawNativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "should be");
  int *a = (int *)verified_entry;
  a[0] = zombie_illegal_instruction; // always illegal
  ICache::invalidate_range((address)&a[0], sizeof a[0]);
}

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  int offset = (int)(entry - code_pos - 8);
  assert(offset < 0x2000000 && offset > -0x2000000, "encoding constraint");
  nativeInstruction_at(code_pos)->set_encoding(0xea000000 | ((unsigned int)offset << 6 >> 8));
}

static address raw_call_for(address return_address) {
  CodeBlob* cb = CodeCache::find_blob(return_address);
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm == NULL) {
    ShouldNotReachHere();
    return NULL;
  }
  // Look back 4 instructions, to allow for ic_call
  address begin = MAX2(return_address - 4*NativeInstruction::instruction_size, nm->code_begin());
  RelocIterator iter(nm, begin, return_address);
  while (iter.next()) {
    Relocation* reloc = iter.reloc();
    if (reloc->is_call()) {
      address call = reloc->addr();
      if (nativeInstruction_at(call)->is_call()) {
        if (nativeCall_at(call)->return_address() == return_address) {
          return call;
        }
      } else {
        // Some "calls" are really jumps
        assert(nativeInstruction_at(call)->is_jump(), "must be call or jump");
      }
    }
  }
  return NULL;
}

bool RawNativeCall::is_call_before(address return_address) {
  return (raw_call_for(return_address) != NULL);
}

NativeCall* rawNativeCall_before(address return_address) {
  address call = raw_call_for(return_address);
  assert(call != NULL, "must be");
  return nativeCall_at(call);
}

hotspot/src/cpu/arm/vm/nativeInst_arm_32.hpp (new file, 432 lines)
@ -0,0 +1,432 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_NATIVEINST_ARM_32_HPP
#define CPU_ARM_VM_NATIVEINST_ARM_32_HPP

#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "register_arm.hpp"

// -------------------------------------------------------------------

// Some experimental projects extend the ARM back-end by implementing
// what the front-end usually assumes is a single native instruction
// with a sequence of instructions.
//
// The 'Raw' variants are the low level initial code (usually one
// instruction wide but some of them were already composed
// instructions). They should be used only by the back-end.
//
// The non-raw classes are the front-end entry point, hiding potential
// back-end extensions or the actual instructions size.
class NativeInstruction;

class RawNativeInstruction VALUE_OBJ_CLASS_SPEC {
 public:

  enum ARM_specific {
    instruction_size = Assembler::InstructionSize
  };

  enum InstructionKind {
    instr_ldr_str    = 0x50,
    instr_ldrh_strh  = 0x10,
    instr_fld_fst    = 0xd0
  };

  // illegal instruction used by NativeJump::patch_verified_entry
  // permanently undefined (UDF): 0xe << 28 | 0b1111111 << 20 | 0b1111 << 4
  static const int zombie_illegal_instruction = 0xe7f000f0;

  static int decode_rotated_imm12(int encoding) {
    int base = encoding & 0xff;
    int right_rotation = (encoding & 0xf00) >> 7;
    int left_rotation = 32 - right_rotation;
    int val = (base >> right_rotation) | (base << left_rotation);
    return val;
  }
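  // [Editorial note] Worked example (illustrative): encoding 0xf05 has
  // base 0x05 and rotation field 0xf, i.e. a right-rotation by 30 bits,
  // so decode_rotated_imm12(0xf05) == (0x05 >> 30) | (0x05 << 2) == 0x14.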

  address addr_at(int offset) const { return (address)this + offset; }
  address instruction_address() const { return addr_at(0); }
  address next_raw_instruction_address() const { return addr_at(instruction_size); }

  static RawNativeInstruction* at(address address) {
    return (RawNativeInstruction*)address;
  }
  RawNativeInstruction* next_raw() const {
    return at(next_raw_instruction_address());
  }

 public:
  int encoding() const { return *(int*)this; }

  void set_encoding(int value) {
    int old = *(int*)this;
    if (old != value) {
      *(int*)this = value;
      ICache::invalidate_word((address)this);
    }
  }

  InstructionKind kind() const {
    return (InstructionKind) ((encoding() >> 20) & 0xf2);
  }
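  // [Editorial note] Example (illustrative): ldr r0, [r1] encodes as
  // 0xe5910000, and (0xe5910000 >> 20) & 0xf2 == 0x50 == instr_ldr_str.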
|
||||
|
||||
bool is_nop() const { return encoding() == (int)0xe1a00000; }
|
||||
bool is_b() const { return (encoding() & 0x0f000000) == 0x0a000000; }
|
||||
bool is_bx() const { return (encoding() & 0x0ffffff0) == 0x012fff10; }
|
||||
bool is_bl() const { return (encoding() & 0x0f000000) == 0x0b000000; }
|
||||
bool is_blx() const { return (encoding() & 0x0ffffff0) == 0x012fff30; }
|
||||
bool is_fat_call() const {
|
||||
return (is_add_lr() && next_raw()->is_jump());
|
||||
}
|
||||
bool is_ldr_call() const {
|
||||
return (is_add_lr() && next_raw()->is_ldr_pc());
|
||||
}
|
||||
bool is_jump() const { return is_b() || is_ldr_pc(); }
|
||||
bool is_call() const { return is_bl() || is_fat_call(); }
|
||||
bool is_branch() const { return is_b() || is_bl(); }
|
||||
bool is_far_branch() const { return is_movw() || is_ldr_literal(); }
|
||||
bool is_ldr_literal() const {
|
||||
// ldr Rx, [PC, #offset] for positive or negative offsets
|
||||
return (encoding() & 0x0f7f0000) == 0x051f0000;
|
||||
}
|
||||
bool is_ldr() const {
|
||||
// ldr Rd, [Rn, #offset] for positive or negative offsets
|
||||
return (encoding() & 0x0f700000) == 0x05100000;
|
||||
}
|
||||
int ldr_offset() const {
|
||||
assert(is_ldr(), "must be");
|
||||
int offset = encoding() & 0xfff;
|
||||
if (encoding() & (1 << 23)) {
|
||||
// positive offset
|
||||
} else {
|
||||
// negative offset
|
||||
offset = -offset;
|
||||
}
|
||||
return offset;
|
||||
}
|
||||
// is_ldr_pc: ldr PC, PC, #offset
|
||||
bool is_ldr_pc() const { return (encoding() & 0x0f7ff000) == 0x051ff000; }
|
||||
// is_setting_pc(): ldr PC, Rxx, #offset
|
||||
bool is_setting_pc() const { return (encoding() & 0x0f70f000) == 0x0510f000; }
|
||||
bool is_add_lr() const { return (encoding() & 0x0ffff000) == 0x028fe000; }
|
||||
bool is_add_pc() const { return (encoding() & 0x0fff0000) == 0x028f0000; }
|
||||
bool is_sub_pc() const { return (encoding() & 0x0fff0000) == 0x024f0000; }
|
||||
bool is_pc_rel() const { return is_add_pc() || is_sub_pc(); }
|
||||
bool is_movw() const { return (encoding() & 0x0ff00000) == 0x03000000; }
|
||||
bool is_movt() const { return (encoding() & 0x0ff00000) == 0x03400000; }
|
||||
// c2 doesn't use fixed registers for safepoint poll address
|
||||
bool is_safepoint_poll() const { return (encoding() & 0xfff0ffff) == 0xe590c000; }
|
||||
// For unit tests
|
||||
static void test() {}
|
||||
|
||||
};
|
||||
|
||||
inline RawNativeInstruction* rawNativeInstruction_at(address address) {
|
||||
return (RawNativeInstruction*)address;
|
||||
}
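
// Example of the mask/value predicate scheme used by RawNativeInstruction
// (a worked illustration; the word 0xeb000010 is a hypothetical encoding,
// not taken from generated code). Each is_*() predicate clears don't-care
// bits with a mask and compares the remainder against a fixed pattern:
//
//   0xeb000010 & 0x0f000000 == 0x0b000000  =>  is_bl() is true
//
// The condition bits [31:28] are masked out, so a conditional BL matches as
// well; composite predicates then build on these, e.g.
// is_call() == is_bl() || is_fat_call().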

// Base class exported to the front-end
class NativeInstruction: public RawNativeInstruction {
 public:
  static NativeInstruction* at(address address) {
    return (NativeInstruction*)address;
  }

 public:
  // No need to consider indirections while parsing NativeInstruction
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  // next() is no longer defined to avoid confusion.
  //
  // The front end and most classes except for those defined in nativeInst_arm
  // or relocInfo_arm should only use next_instruction_address(), skipping
  // over composed instructions and ignoring back-end extensions.
  //
  // The back-end can use next_raw() when it knows the instruction sequence
  // and only wants to skip a single native instruction.
};

inline NativeInstruction* nativeInstruction_at(address address) {
  return (NativeInstruction*)address;
}

// -------------------------------------------------------------------
// Raw b() or bl() instructions, not used by the front-end.
class RawNativeBranch: public RawNativeInstruction {
 public:

  address destination(int adj = 0) const {
    return instruction_address() + (encoding() << 8 >> 6) + 8 + adj;
  }

  void set_destination(address dest) {
    int new_offset = (int)(dest - instruction_address() - 8);
    assert(new_offset < 0x2000000 && new_offset > -0x2000000, "encoding constraint");
    set_encoding((encoding() & 0xff000000) | ((unsigned int)new_offset << 6 >> 8));
  }
};
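
// Worked example of the RawNativeBranch offset arithmetic (an illustrative
// sketch; 0xea000001 is a hypothetical encoding). The signed 24-bit
// immediate in bits [23:0] counts words from PC, and PC reads as the
// instruction address + 8 on ARM:
//
//   encoding() << 8  drops the condition/opcode bits, leaving imm24
//                    left-justified with its sign bit at bit 31;
//   ... >> 6         arithmetic shift back: sign-extends and scales by 4.
//
//   0xea000001 (b .+12): imm24 = 1, (0x00000100 >> 6) = 4,
//   destination = instruction_address() + 4 + 8.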

inline RawNativeBranch* rawNativeBranch_at(address address) {
  assert(rawNativeInstruction_at(address)->is_branch(), "must be");
  return (RawNativeBranch*)address;
}

class NativeBranch: public RawNativeBranch {
};

inline NativeBranch* nativeBranch_at(address address) {
  return (NativeBranch *) rawNativeBranch_at(address);
}

// -------------------------------------------------------------------
// NativeGeneralJump is for patchable internal (near) jumps
// It is used directly by the front-end and must be a single instruction wide
// (to support patching to other kinds of instructions).
class NativeGeneralJump: public RawNativeInstruction {
 public:

  address jump_destination() const {
    return rawNativeBranch_at(instruction_address())->destination();
  }

  void set_jump_destination(address dest) {
    return rawNativeBranch_at(instruction_address())->set_destination(dest);
  }

  static void insert_unconditional(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer) {
    assert(((int)instr_addr & 3) == 0 && ((int)code_buffer & 3) == 0, "must be aligned");
    // Writing a word is atomic on ARM, so no MT-safe tricks are needed
    rawNativeInstruction_at(instr_addr)->set_encoding(*(int*)code_buffer);
  }
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (NativeGeneralJump*)address;
}

// -------------------------------------------------------------------
class RawNativeJump: public NativeInstruction {
 public:

  address jump_destination(int adj = 0) const {
    address a;
    if (is_b()) {
      a = rawNativeBranch_at(instruction_address())->destination(adj);
      // Jump destination -1 is encoded as a jump to self
      if (a == instruction_address()) {
        return (address)-1;
      }
    } else {
      assert(is_ldr_pc(), "must be");
      int offset = this->ldr_offset();
      a = *(address*)(instruction_address() + 8 + offset);
    }
    return a;
  }

  void set_jump_destination(address dest) {
    if (is_b()) {
      // Jump destination -1 is encoded as a jump to self
      if (dest == (address)-1) {
        dest = instruction_address();
      }
      rawNativeBranch_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_ldr_pc(), "must be");
      int offset = this->ldr_offset();
      *(address*)(instruction_address() + 8 + offset) = dest;
      OrderAccess::storeload(); // overkill if caller holds lock?
    }
  }

  static void check_verified_entry_alignment(address entry, address verified_entry);

  static void patch_verified_entry(address entry, address verified_entry, address dest);

};

inline RawNativeJump* rawNativeJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (RawNativeJump*)address;
}

// -------------------------------------------------------------------
class RawNativeCall: public NativeInstruction {
  // See IC calls in LIR_Assembler::ic_call(): ARM v5/v6 doesn't use a
  // single bl for IC calls.

 public:

  address return_address() const {
    if (is_bl()) {
      return addr_at(instruction_size);
    } else {
      assert(is_fat_call(), "must be");
      int offset = encoding() & 0xff;
      return addr_at(offset + 8);
    }
  }

  address destination(int adj = 0) const {
    if (is_bl()) {
      return rawNativeBranch_at(instruction_address())->destination(adj);
    } else {
      assert(is_add_lr(), "must be"); // fat_call
      RawNativeJump *next = rawNativeJump_at(next_raw_instruction_address());
      return next->jump_destination(adj);
    }
  }

  void set_destination(address dest) {
    if (is_bl()) {
      return rawNativeBranch_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_add_lr(), "must be"); // fat_call
      RawNativeJump *next = rawNativeJump_at(next_raw_instruction_address());
      return next->set_jump_destination(dest);
    }
  }

  void set_destination_mt_safe(address dest) {
    assert(CodeCache::contains(dest), "external destination might be too far");
    set_destination(dest);
  }

  void verify() {
    assert(RawNativeInstruction::is_call() || (!VM_Version::supports_movw() && RawNativeInstruction::is_jump()), "must be");
  }

  void verify_alignment() {
    // Nothing to do on ARM
  }

  static bool is_call_before(address return_address);
};

inline RawNativeCall* rawNativeCall_at(address address) {
  assert(rawNativeInstruction_at(address)->is_call(), "must be");
  return (RawNativeCall*)address;
}

NativeCall* rawNativeCall_before(address return_address);

// -------------------------------------------------------------------
// NativeMovRegMem need not be extended with indirection support.
// (field access patching is handled differently in that case)
class NativeMovRegMem: public NativeInstruction {
 public:

  int offset() const;
  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) {
    set_offset(offset() + add_offset);
  }

};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* instr = (NativeMovRegMem*)address;
  assert(instr->kind() == NativeInstruction::instr_ldr_str   ||
         instr->kind() == NativeInstruction::instr_ldrh_strh ||
         instr->kind() == NativeInstruction::instr_fld_fst, "must be");
  return instr;
}

// -------------------------------------------------------------------
// NativeMovConstReg is primarily for loading oops and metadata
class NativeMovConstReg: public NativeInstruction {
 public:

  intptr_t data() const;
  void set_data(intptr_t x, address pc = 0);
  bool is_pc_relative() {
    return !is_movw();
  }
  void set_pc_relative_offset(address addr, address pc);
  address next_instruction_address() const {
    // NOTE: CompiledStaticCall::set_to_interpreted() calls this but
    // is restricted to a single-instruction ldr. No need to jump over
    // several instructions.
    assert(is_ldr_literal(), "should only use single-instruction load");
    return next_raw_instruction_address();
  }
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeInstruction* ni = nativeInstruction_at(address);
  assert(ni->is_ldr_literal() || ni->is_pc_rel() ||
         (ni->is_movw() && VM_Version::supports_movw()), "must be");
  return (NativeMovConstReg*)address;
}

// -------------------------------------------------------------------
// Front end classes, hiding experimental back-end extensions.

// Extension to support indirections
class NativeJump: public RawNativeJump {
 public:
};

inline NativeJump* nativeJump_at(address address) {
  assert(nativeInstruction_at(address)->is_jump(), "must be");
  return (NativeJump*)address;
}

class NativeCall: public RawNativeCall {
 public:
  // NativeCall::next_instruction_address() is used only to define the
  // range where to look for the relocation information. We need not
  // walk over composed instructions (as long as the relocation information
  // is associated to the first instruction).
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

};

inline NativeCall* nativeCall_at(address address) {
  assert(nativeInstruction_at(address)->is_call() ||
         (!VM_Version::supports_movw() && nativeInstruction_at(address)->is_jump()), "must be");
  return (NativeCall*)address;
}

inline NativeCall* nativeCall_before(address return_address) {
  return (NativeCall *) rawNativeCall_before(return_address);
}

#endif // CPU_ARM_VM_NATIVEINST_ARM_32_HPP

hotspot/src/cpu/arm/vm/nativeInst_arm_64.cpp (new file, 243 lines)
@ -0,0 +1,243 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "assembler_arm.inline.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_arm.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void RawNativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = instruction_address();
  if (addr == NULL || ((intptr_t)addr & (instruction_size - 1)) != 0) {
    fatal("not an instruction address");
  }
}

void NativeMovRegMem::set_offset(int x) {
  int scale = get_offset_scale();
  assert((x & right_n_bits(scale)) == 0, "offset should be aligned");
  guarantee((x >> 24) == 0, "encoding constraint");

  if (Assembler::is_unsigned_imm_in_range(x, 12, scale)) {
    set_unsigned_imm(x, 12, get_offset_scale(), 10);
    return;
  }

  // If the offset is too large to be placed into a single ldr/str instruction, we replace
  //   ldr/str Rt, [Rn, #offset]
  //   nop
  // with
  //   add LR, Rn, #offset_hi
  //   ldr/str Rt, [LR, #offset_lo]

  // Note: Rtemp cannot be used as a temporary register as it could be used
  // for the value being stored (see LIR_Assembler::reg2mem).
  // Patchable NativeMovRegMem instructions are generated in LIR_Assembler::mem2reg and LIR_Assembler::reg2mem,
  // which do not use LR, so it is free. Also, it does not conflict with LR usages in c1_LIRGenerator_arm.cpp.
  const int tmp = LR->encoding();
  const int rn = (encoding() >> 5) & 0x1f;

  NativeInstruction* next = nativeInstruction_at(next_raw_instruction_address());
  assert(next->is_nop(), "must be");

  next->set_encoding((encoding() & 0xffc0001f) | Assembler::encode_unsigned_imm((x & 0xfff), 12, scale, 10) | tmp << 5);
  this->set_encoding(0x91400000 | Assembler::encode_unsigned_imm((x >> 12), 12, 0, 10) | rn << 5 | tmp);
}
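
// Worked example of the offset split above (an illustrative sketch; the
// offset 0x21008 is hypothetical). With scale = 3, i.e. 8-byte elements,
// 0x21008 exceeds the 12-bit scaled unsigned immediate (max 0xfff << 3),
// so the pair is patched to:
//
//   add LR, Rn, #0x21, LSL #12    ; offset_hi = 0x21008 >> 12  = 0x21
//   ldr Rt, [LR, #0x008]          ; offset_lo = 0x21008 & 0xfff = 0x008
//
// 0x91400000 above is the A64 add-immediate encoding with the "shift by 12"
// flag (bit 22) already set, which is what makes the high part fit.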

intptr_t NativeMovConstReg::_data() const {
#ifdef COMPILER2
  if (is_movz()) {
    // narrow constant or ic call cached value
    RawNativeInstruction* ni = next_raw();
    assert(ni->is_movk(), "movz;movk expected");
    uint lo16 = (encoding() >> 5) & 0xffff;
    intptr_t hi = 0;
    int i = 0;
    while (ni->is_movk() && i < 3) {
      uint hi16 = (ni->encoding() >> 5) & 0xffff;
      int shift = ((ni->encoding() >> 21) & 0x3) << 4;
      hi |= (intptr_t)hi16 << shift;
      ni = ni->next_raw();
      ++i;
    }
    return lo16 | hi;
  }
#endif
  return (intptr_t)(nativeLdrLiteral_at(instruction_address())->literal_value());
}
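
// Worked example of the movz/movk decoding above (an illustrative sketch;
// the constant 0x12345678 is hypothetical). A64 move-wide instructions
// carry a 16-bit immediate in bits [20:5] and a 2-bit half-word selector
// ("hw") in bits [22:21], which the code above rescales with "<< 4":
//
//   movz x0, #0x5678             ; lo16 = 0x5678, hw = 0
//   movk x0, #0x1234, lsl #16    ; hi16 = 0x1234, shift = 1 << 4 = 16
//
//   data = 0x5678 | (0x1234 << 16) = 0x12345678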

static void raw_set_data(RawNativeInstruction* si, intptr_t x, oop* oop_addr, Metadata** metadata_addr) {
#ifdef COMPILER2
  if (si->is_movz()) {
    // narrow constant or ic call cached value
    uintptr_t nx = 0;
    int val_size = 32;
    if (oop_addr != NULL) {
      narrowOop encoded_oop = oopDesc::encode_heap_oop(*oop_addr);
      nx = encoded_oop;
    } else if (metadata_addr != NULL) {
      assert((*metadata_addr)->is_klass(), "expected Klass");
      narrowKlass encoded_k = Klass::encode_klass((Klass *)*metadata_addr);
      nx = encoded_k;
    } else {
      nx = x;
      val_size = 64;
    }
    RawNativeInstruction* ni = si->next_raw();
    uint lo16 = nx & 0xffff;
    int shift = 16;
    int imm16 = 0xffff << 5;
    si->set_encoding((si->encoding() & ~imm16) | (lo16 << 5));
    while (shift < val_size) {
      assert(ni->is_movk(), "movk expected");
      assert((((ni->encoding() >> 21) & 0x3) << 4) == shift, "wrong shift");
      uint hi16 = (nx >> shift) & 0xffff;
      ni->set_encoding((ni->encoding() & ~imm16) | (hi16 << 5));
      shift += 16;
      ni = ni->next_raw();
    }
    return;
  }
#endif

  assert(si->is_ldr_literal(), "should be");

  if (oop_addr == NULL && metadata_addr == NULL) {
    // A static ldr_literal without oop relocation
    nativeLdrLiteral_at(si->instruction_address())->set_literal_value((address)x);
  } else {
    // Oop is loaded from oops section
    address addr = oop_addr != NULL ? (address)oop_addr : (address)metadata_addr;
    int offset = addr - si->instruction_address();

    assert((((intptr_t)addr) & 0x7) == 0, "target address should be aligned");
    assert((offset & 0x3) == 0, "offset should be aligned");

    guarantee(Assembler::is_offset_in_range(offset, 19), "offset is not in range");
    nativeLdrLiteral_at(si->instruction_address())->set_literal_address(si->instruction_address() + offset);
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  // Find and replace the oop corresponding to this instruction in oops section
  oop* oop_addr = NULL;
  Metadata** metadata_addr = NULL;
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      RelocIterator iter(nm, instruction_address(), next_raw()->instruction_address());
      while (iter.next()) {
        if (iter.type() == relocInfo::oop_type) {
          oop_addr = iter.oop_reloc()->oop_addr();
          *oop_addr = cast_to_oop(x);
          break;
        } else if (iter.type() == relocInfo::metadata_type) {
          metadata_addr = iter.metadata_reloc()->metadata_addr();
          *metadata_addr = (Metadata*)x;
          break;
        }
      }
    }
  }
  raw_set_data(adjust(this), x, oop_addr, metadata_addr);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "should be");

  NativeInstruction* instr = nativeInstruction_at(verified_entry);
  assert(instr->is_nop() || instr->encoding() == zombie_illegal_instruction, "required for MT-safe patching");
  instr->set_encoding(zombie_illegal_instruction);
}

void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(nativeInstruction_at(instr_addr)->is_b(), "MT-safe patching of arbitrary instructions is not allowed");
  assert(nativeInstruction_at(code_buffer)->is_nop(), "MT-safe patching of arbitrary instructions is not allowed");
  nativeInstruction_at(instr_addr)->set_encoding(*(int*)code_buffer);
}

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  // Insert at code_pos an unconditional B instruction jumping to entry
  intx offset = entry - code_pos;
  assert(Assembler::is_offset_in_range(offset, 26), "offset is out of range");

  NativeInstruction* instr = nativeInstruction_at(code_pos);
  assert(instr->is_b() || instr->is_nop(), "MT-safe patching of arbitrary instructions is not allowed");

  instr->set_encoding(0x5 << 26 | Assembler::encode_offset(offset, 26, 0));
}

static address call_for(address return_address) {
  CodeBlob* cb = CodeCache::find_blob(return_address);
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm == NULL) {
    ShouldNotReachHere();
    return NULL;
  }

  // Look back 8 instructions (for LIR_Assembler::ic_call and MacroAssembler::patchable_call)
  address begin = return_address - 8*NativeInstruction::instruction_size;
  if (begin < nm->code_begin()) {
    begin = nm->code_begin();
  }
  RelocIterator iter(nm, begin, return_address);
  while (iter.next()) {
    Relocation* reloc = iter.reloc();
    if (reloc->is_call()) {
      address call = reloc->addr();
      if (nativeInstruction_at(call)->is_call()) {
        if (nativeCall_at(call)->return_address() == return_address) {
          return call;
        }
      }
    }
  }

  return NULL;
}

bool NativeCall::is_call_before(address return_address) {
  return (call_for(return_address) != NULL);
}

NativeCall* nativeCall_before(address return_address) {
  assert(NativeCall::is_call_before(return_address), "must be");
  return nativeCall_at(call_for(return_address));
}

hotspot/src/cpu/arm/vm/nativeInst_arm_64.hpp (new file, 772 lines)
@ -0,0 +1,772 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_NATIVEINST_ARM_64_HPP
#define CPU_ARM_VM_NATIVEINST_ARM_64_HPP

#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// -------------------------------------------------------------------

// Some experimental projects extend the ARM back-end by implementing
// what the front-end usually assumes is a single native instruction
// with a sequence of instructions.
//
// The 'Raw' variants are the low level initial code (usually one
// instruction wide but some of them were already composed
// instructions). They should be used only by the back-end.
//
// The non-raw classes are the front-end entry point, hiding potential
// back-end extensions or the actual instruction size.
class NativeInstruction;

class RawNativeInstruction VALUE_OBJ_CLASS_SPEC {
 public:

  enum ARM_specific {
    instruction_size = Assembler::InstructionSize,
    instruction_size_in_bits = instruction_size * BitsPerByte
  };

  // illegal instruction used by NativeJump::patch_verified_entry
  static const int zombie_illegal_instruction = 0xd4000542; // hvc #42

  address addr_at(int offset) const { return (address)this + offset; }
  address instruction_address() const { return addr_at(0); }
  address next_raw_instruction_address() const { return addr_at(instruction_size); }

  static RawNativeInstruction* at(address address) {
    return (RawNativeInstruction*)address;
  }

  RawNativeInstruction* next_raw() const {
    return at(next_raw_instruction_address());
  }

  int encoding() const {
    return *(int*)this;
  }

  void set_encoding(int value) {
    int old = encoding();
    if (old != value) {
      *(int*)this = value;
      ICache::invalidate_word((address)this);
    }
  }

  bool is_nop() const { return encoding() == (int)0xd503201f; }
  bool is_b() const { return (encoding() & 0xfc000000) == 0x14000000; } // unconditional branch
  bool is_b_cond() const { return (encoding() & 0xff000010) == 0x54000000; } // conditional branch
  bool is_bl() const { return (encoding() & 0xfc000000) == 0x94000000; }
  bool is_br() const { return (encoding() & 0xfffffc1f) == 0xd61f0000; }
  bool is_blr() const { return (encoding() & 0xfffffc1f) == 0xd63f0000; }
  bool is_ldr_literal() const { return (encoding() & 0xff000000) == 0x58000000; }
  bool is_adr_aligned() const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).
  bool is_adr_aligned_lr() const { return (encoding() & 0xff00001f) == 0x1000001e; } // adr LR, <label>, where label is aligned to 4 bytes (address of instruction).

  bool is_ldr_str_gp_reg_unsigned_imm() const { return (encoding() & 0x3f000000) == 0x39000000; } // ldr/str{b, sb, h, sh, _w, sw} Rt, [Rn, #imm]
  bool is_ldr_str_fp_reg_unsigned_imm() const { return (encoding() & 0x3f000000) == 0x3d000000; } // ldr/str Rt(SIMD), [Rn, #imm]
  bool is_ldr_str_reg_unsigned_imm() const { return is_ldr_str_gp_reg_unsigned_imm() || is_ldr_str_fp_reg_unsigned_imm(); }

  bool is_stp_preindex() const { return (encoding() & 0xffc00000) == 0xa9800000; } // stp Xt1, Xt2, [Xn, #imm]!
  bool is_ldp_postindex() const { return (encoding() & 0xffc00000) == 0xa8c00000; } // ldp Xt1, Xt2, [Xn], #imm
  bool is_mov_sp() const { return (encoding() & 0xfffffc00) == 0x91000000; } // mov <Xn|SP>, <Xm|SP>
  bool is_movn() const { return (encoding() & 0x7f800000) == 0x12800000; }
  bool is_movz() const { return (encoding() & 0x7f800000) == 0x52800000; }
  bool is_movk() const { return (encoding() & 0x7f800000) == 0x72800000; }
  bool is_orr_imm() const { return (encoding() & 0x7f800000) == 0x32000000; }
  bool is_cmp_rr() const { return (encoding() & 0x7fe00000) == 0x6b000000; }
  bool is_csel() const { return (encoding() & 0x7fe00000) == 0x1a800000; }
  bool is_sub_shift() const { return (encoding() & 0x7f200000) == 0x4b000000; } // sub Rd, Rn, shift (Rm, imm)
  bool is_mov() const { return (encoding() & 0x7fe0ffe0) == 0x2a0003e0; } // mov Rd, Rm (orr Rd, ZR, shift (Rm, 0))
  bool is_tst() const { return (encoding() & 0x7f20001f) == 0x6a00001f; } // tst Rn, shift (Rm, imm) (ands ZR, Rn, shift(Rm, imm))
  bool is_lsr_imm() const { return (encoding() & 0x7f807c00) == 0x53007c00; } // lsr Rd, Rn, imm (ubfm Rd, Rn, imm, 31/63)

  bool is_far_jump() const { return is_ldr_literal() && next_raw()->is_br(); }
  bool is_fat_call() const {
    return
#ifdef COMPILER2
      (is_blr() && next_raw()->is_b()) ||
#endif
      (is_adr_aligned_lr() && next_raw()->is_br());
  }
  bool is_far_call() const {
    return is_ldr_literal() && next_raw()->is_fat_call();
  }

  bool is_ic_near_call() const { return is_adr_aligned_lr() && next_raw()->is_b(); }
  bool is_ic_far_call() const { return is_adr_aligned_lr() && next_raw()->is_ldr_literal() && next_raw()->next_raw()->is_br(); }
  bool is_ic_call() const { return is_ic_near_call() || is_ic_far_call(); }

  bool is_jump() const { return is_b() || is_far_jump(); }
  bool is_call() const { return is_bl() || is_far_call() || is_ic_call(); }
  bool is_branch() const { return is_b() || is_bl(); }

  // c2 doesn't use fixed registers for safepoint poll address
  bool is_safepoint_poll() const {
    return true;
  }

  bool is_save_all_registers(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (!current->is_stp_preindex()) return false; current = current->next_raw();
    for (int i = 28; i >= 0; i -= 2) {
      if (!current->is_stp_preindex()) return false; current = current->next_raw();
    }

    if (!current->is_adr_aligned()) return false; current = current->next_raw();
    if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();
    if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();

    *next = (RawNativeInstruction*) current;
    return true;
  }

  bool is_restore_all_registers(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    for (int i = 0; i <= 28; i += 2) {
      if (!current->is_ldp_postindex()) return false; current = current->next_raw();
    }
    if (!current->is_ldp_postindex()) return false; current = current->next_raw();

    *next = (RawNativeInstruction*) current;
    return true;
  }

  const RawNativeInstruction* skip_bind_literal() const {
    const RawNativeInstruction* current = this;
    if (((uintptr_t)current) % wordSize != 0) {
      assert(current->is_nop(), "should be");
      current = current->next_raw();
    }
    assert(((uintptr_t)current) % wordSize == 0, "should be"); // bound literal should be aligned
    current = current->next_raw()->next_raw();
    return current;
  }

  bool is_stop(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (!current->is_save_all_registers(&current)) return false;
    if (!current->is_ldr_literal()) return false; current = current->next_raw();
    if (!current->is_mov_sp()) return false; current = current->next_raw();
    if (!current->is_ldr_literal()) return false; current = current->next_raw();
    if (!current->is_br()) return false; current = current->next_raw();

    current = current->skip_bind_literal();
    current = current->skip_bind_literal();

    *next = (RawNativeInstruction*) current;
    return true;
  }

  bool is_mov_slow(const RawNativeInstruction** next = NULL) const {
    const RawNativeInstruction* current = this;

    if (current->is_orr_imm()) {
      current = current->next_raw();

    } else if (current->is_movn() || current->is_movz()) {
      current = current->next_raw();
      int movkCount = 0;
      while (current->is_movk()) {
        movkCount++;
        if (movkCount > 3) return false;
        current = current->next_raw();
      }

    } else {
      return false;
    }

    if (next != NULL) {
      *next = (RawNativeInstruction*)current;
    }
    return true;
  }

#ifdef ASSERT
  void skip_verify_heapbase(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (CheckCompressedOops) {
      if (!current->is_ldr_str_gp_reg_unsigned_imm()) return; current = current->next_raw();
      if (!current->is_stp_preindex()) return; current = current->next_raw();
      // NOTE: temporary workaround, remove with m6-01?
      // skip saving condition flags
      current = current->next_raw();
      current = current->next_raw();

      if (!current->is_mov_slow(&current)) return;
      if (!current->is_cmp_rr()) return; current = current->next_raw();
      if (!current->is_b_cond()) return; current = current->next_raw();
      if (!current->is_stop(&current)) return;

#ifdef COMPILER2
      if (current->is_nop()) current = current->next_raw();
#endif
      // NOTE: temporary workaround, remove with m6-01?
      // skip restoring condition flags
      current = current->next_raw();
      current = current->next_raw();

      if (!current->is_ldp_postindex()) return; current = current->next_raw();
      if (!current->is_ldr_str_gp_reg_unsigned_imm()) return; current = current->next_raw();
    }

    *next = (RawNativeInstruction*) current;
  }
#endif // ASSERT

  bool is_ldr_global_ptr(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (!current->is_mov_slow(&current)) return false;
    if (!current->is_ldr_str_gp_reg_unsigned_imm()) return false; current = current->next_raw();

    *next = (RawNativeInstruction*) current;
    return true;
  }

  void skip_verify_oop(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    if (VerifyOops) {
      if (!current->is_save_all_registers(&current)) return;

      if (current->is_mov()) {
        current = current->next_raw();
      }

      if (!current->is_mov_sp()) return; current = current->next_raw();
      if (!current->is_ldr_literal()) return; current = current->next_raw();
      if (!current->is_ldr_global_ptr(&current)) return;
      if (!current->is_blr()) return; current = current->next_raw();
      if (!current->is_restore_all_registers(&current)) return;
      if (!current->is_b()) return; current = current->next_raw();

      current = current->skip_bind_literal();
    }

    *next = (RawNativeInstruction*) current;
  }

  void skip_encode_heap_oop(const RawNativeInstruction** next) const {
    const RawNativeInstruction* current = this;

    assert(Universe::heap() != NULL, "java heap should be initialized");
#ifdef ASSERT
    current->skip_verify_heapbase(&current);
#endif // ASSERT
    current->skip_verify_oop(&current);

    if (Universe::narrow_oop_base() == NULL) {
      if (Universe::narrow_oop_shift() != 0) {
        if (!current->is_lsr_imm()) return; current = current->next_raw();
      } else {
        if (current->is_mov()) {
          current = current->next_raw();
        }
      }
    } else {
      if (!current->is_tst()) return; current = current->next_raw();
      if (!current->is_csel()) return; current = current->next_raw();
      if (!current->is_sub_shift()) return; current = current->next_raw();
      if (Universe::narrow_oop_shift() != 0) {
        if (!current->is_lsr_imm()) return; current = current->next_raw();
      }
    }

    *next = (RawNativeInstruction*) current;
  }

  void verify();

  // For unit tests
  static void test() {}

 private:

  void check_bits_range(int bits, int scale, int low_bit) const {
    assert((0 <= low_bit) && (0 < bits) && (low_bit + bits <= instruction_size_in_bits), "invalid bits range");
    assert((0 <= scale) && (scale <= 4), "scale is out of range");
  }

  void set_imm(int imm_encoding, int bits, int low_bit) {
    int imm_mask = right_n_bits(bits) << low_bit;
    assert((imm_encoding & ~imm_mask) == 0, "invalid imm encoding");
    set_encoding((encoding() & ~imm_mask) | imm_encoding);
  }

 protected:

  // Returns signed immediate from [low_bit .. low_bit + bits - 1] bits of this instruction, scaled by given scale.
  int get_signed_imm(int bits, int scale, int low_bit) const {
    check_bits_range(bits, scale, low_bit);
    int high_bits_to_clean = (instruction_size_in_bits - (low_bit + bits));
    return encoding() << high_bits_to_clean >> (high_bits_to_clean + low_bit) << scale;
  }
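
  // Worked example of the shift trick in get_signed_imm() (an illustrative
  // sketch; the field layout matches the 19-bit ldr-literal offset used
  // elsewhere in this file). For bits = 19, scale = 2, low_bit = 5,
  // instruction_size_in_bits = 32, so high_bits_to_clean = 32 - (5 + 19) = 8:
  //
  //   encoding() << 8   discards bits above the field and parks its sign
  //                     bit at bit 31;
  //   ... >> (8 + 5)    arithmetic shift back: sign-extends the field and
  //                     strips the low_bit offset;
  //   ... << 2          rescales the immediate from words to bytes.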

  // Puts given signed immediate into the [low_bit .. low_bit + bits - 1] bits of this instruction.
  void set_signed_imm(int value, int bits, int scale, int low_bit) {
    set_imm(Assembler::encode_imm(value, bits, scale, low_bit), bits, low_bit);
  }

  // Returns unsigned immediate from [low_bit .. low_bit + bits - 1] bits of this instruction, scaled by given scale.
  int get_unsigned_imm(int bits, int scale, int low_bit) const {
    check_bits_range(bits, scale, low_bit);
    return ((encoding() >> low_bit) & right_n_bits(bits)) << scale;
  }

  // Puts given unsigned immediate into the [low_bit .. low_bit + bits - 1] bits of this instruction.
  void set_unsigned_imm(int value, int bits, int scale, int low_bit) {
    set_imm(Assembler::encode_unsigned_imm(value, bits, scale, low_bit), bits, low_bit);
  }

  int get_signed_offset(int bits, int low_bit) const {
    return get_signed_imm(bits, 2, low_bit);
  }

  void set_signed_offset(int offset, int bits, int low_bit) {
    set_signed_imm(offset, bits, 2, low_bit);
  }
};

inline RawNativeInstruction* rawNativeInstruction_at(address address) {
  RawNativeInstruction* instr = RawNativeInstruction::at(address);
#ifdef ASSERT
  instr->verify();
#endif // ASSERT
  return instr;
}

// -------------------------------------------------------------------

// Load/store register (unsigned scaled immediate)
class NativeMovRegMem: public RawNativeInstruction {
 private:
  int get_offset_scale() const {
    return get_unsigned_imm(2, 0, 30);
  }

 public:
  int offset() const {
    return get_unsigned_imm(12, get_offset_scale(), 10);
  }

  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) {
    set_offset(offset() + add_offset);
  }
};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  const RawNativeInstruction* instr = rawNativeInstruction_at(address);

#ifdef COMPILER1
  // NOP required for C1 patching
  if (instr->is_nop()) {
    instr = instr->next_raw();
  }
#endif

  instr->skip_encode_heap_oop(&instr);

  assert(instr->is_ldr_str_reg_unsigned_imm(), "must be");
  return (NativeMovRegMem*)instr;
}

// -------------------------------------------------------------------

class NativeInstruction : public RawNativeInstruction {
 public:
  static NativeInstruction* at(address address) {
    return (NativeInstruction*)address;
  }

 public:
  // No need to consider indirections while parsing NativeInstruction
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  // next() is no longer defined to avoid confusion.
  //
  // The front end and most classes except for those defined in nativeInst_arm
  // or relocInfo_arm should only use next_instruction_address(), skipping
  // over composed instructions and ignoring back-end extensions.
  //
  // The back-end can use next_raw() when it knows the instruction sequence
  // and only wants to skip a single native instruction.
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* instr = NativeInstruction::at(address);
#ifdef ASSERT
  instr->verify();
#endif // ASSERT
  return instr;
}

// -------------------------------------------------------------------
class NativeInstructionLdrLiteral: public NativeInstruction {
 public:
  address literal_address() {
    address la = instruction_address() + get_signed_offset(19, 5);
    assert(la != instruction_address(), "literal points to instruction");
    return la;
  }

  address after_literal_address() {
    return literal_address() + wordSize;
  }

  void set_literal_address(address addr, address pc) {
    assert(is_ldr_literal(), "must be");
    int opc = (encoding() >> 30) & 0x3;
    assert(opc != 0b01 || addr == pc || ((uintx)addr & 7) == 0, "ldr target should be aligned");
    set_signed_offset(addr - pc, 19, 5);
  }

  void set_literal_address(address addr) {
    set_literal_address(addr, instruction_address());
  }

  address literal_value() {
    return *(address*)literal_address();
  }

  void set_literal_value(address dest) {
    *(address*)literal_address() = dest;
  }
};

inline NativeInstructionLdrLiteral* nativeLdrLiteral_at(address address) {
  assert(nativeInstruction_at(address)->is_ldr_literal(), "must be");
  return (NativeInstructionLdrLiteral*)address;
}

// -------------------------------------------------------------------
// Common class for branch instructions with 26-bit immediate offset: B (unconditional) and BL
class NativeInstructionBranchImm26: public NativeInstruction {
 public:
  address destination(int adj = 0) const {
    return instruction_address() + get_signed_offset(26, 0) + adj;
  }

  void set_destination(address dest) {
    intptr_t offset = (intptr_t)(dest - instruction_address());
    assert((offset & 0x3) == 0, "should be aligned");
    set_signed_offset(offset, 26, 0);
  }
};

inline NativeInstructionBranchImm26* nativeB_at(address address) {
  assert(nativeInstruction_at(address)->is_b(), "must be");
  return (NativeInstructionBranchImm26*)address;
}

inline NativeInstructionBranchImm26* nativeBL_at(address address) {
  assert(nativeInstruction_at(address)->is_bl(), "must be");
  return (NativeInstructionBranchImm26*)address;
}

// -------------------------------------------------------------------
class NativeInstructionAdrLR: public NativeInstruction {
 public:
  // Returns address which is loaded into LR by this instruction.
  address target_lr_value() {
    return instruction_address() + get_signed_offset(19, 5);
  }
};

inline NativeInstructionAdrLR* nativeAdrLR_at(address address) {
  assert(nativeInstruction_at(address)->is_adr_aligned_lr(), "must be");
  return (NativeInstructionAdrLR*)address;
}

// -------------------------------------------------------------------
class RawNativeCall: public NativeInstruction {
 public:

  address return_address() const {
    if (is_bl()) {
      return next_raw_instruction_address();

    } else if (is_far_call()) {
#ifdef COMPILER2
      if (next_raw()->is_blr()) {
        // ldr_literal; blr; ret_addr: b skip_literal;
        return addr_at(2 * instruction_size);
      }
#endif
      assert(next_raw()->is_adr_aligned_lr() && next_raw()->next_raw()->is_br(), "must be");
      return nativeLdrLiteral_at(instruction_address())->after_literal_address();

    } else if (is_ic_call()) {
      return nativeAdrLR_at(instruction_address())->target_lr_value();

    } else {
      ShouldNotReachHere();
      return NULL;
    }
  }

  address destination(int adj = 0) const {
    if (is_bl()) {
      return nativeBL_at(instruction_address())->destination(adj);

    } else if (is_far_call()) {
      return nativeLdrLiteral_at(instruction_address())->literal_value();

    } else if (is_adr_aligned_lr()) {
      RawNativeInstruction *next = next_raw();
      if (next->is_b()) {
        // ic_near_call
        return nativeB_at(next->instruction_address())->destination(adj);
      } else if (next->is_far_jump()) {
        // ic_far_call
        return nativeLdrLiteral_at(next->instruction_address())->literal_value();
      }
    }
    ShouldNotReachHere();
    return NULL;
  }

  void set_destination(address dest) {
    if (is_bl()) {
      nativeBL_at(instruction_address())->set_destination(dest);
      return;
    }
    if (is_far_call()) {
      nativeLdrLiteral_at(instruction_address())->set_literal_value(dest);
      OrderAccess::storeload(); // overkill if caller holds lock?
      return;
    }
    if (is_adr_aligned_lr()) {
      RawNativeInstruction *next = next_raw();
      if (next->is_b()) {
        // ic_near_call
        nativeB_at(next->instruction_address())->set_destination(dest);
        return;
      }
      if (next->is_far_jump()) {
        // ic_far_call
        nativeLdrLiteral_at(next->instruction_address())->set_literal_value(dest);
        OrderAccess::storeload(); // overkill if caller holds lock?
        return;
      }
    }
    ShouldNotReachHere();
  }

  void set_destination_mt_safe(address dest) {
    assert(CodeCache::contains(dest), "call target should be from code cache (required by ic_call and patchable_call)");
    set_destination(dest);
  }

  void verify() {
    assert(RawNativeInstruction::is_call(), "should be");
  }

  void verify_alignment() {
    // Nothing to do on ARM
  }
};

inline RawNativeCall* rawNativeCall_at(address address) {
  RawNativeCall * call = (RawNativeCall*)address;
  call->verify();
  return call;
}

class NativeCall: public RawNativeCall {
 public:

  // NativeCall::next_instruction_address() is used only to define the
  // range where to look for the relocation information. We need not
  // walk over composed instructions (as long as the relocation information
  // is associated to the first instruction).
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  static bool is_call_before(address return_address);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall * call = (NativeCall*)address;
  call->verify();
  return call;
}

NativeCall* nativeCall_before(address return_address);

// -------------------------------------------------------------------
class NativeGeneralJump: public NativeInstruction {
 public:

  address jump_destination() const {
    return nativeB_at(instruction_address())->destination();
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);

  static void insert_unconditional(address code_pos, address entry);
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  assert(nativeInstruction_at(address)->is_b(), "must be");
  return (NativeGeneralJump*)address;
}

// -------------------------------------------------------------------
class RawNativeJump: public NativeInstruction {
 public:

  address jump_destination(int adj = 0) const {
    if (is_b()) {
      address a = nativeB_at(instruction_address())->destination(adj);
      // Jump destination -1 is encoded as a jump to self
      if (a == instruction_address()) {
        return (address)-1;
      }
      return a;
    } else {
      assert(is_far_jump(), "should be");
      return nativeLdrLiteral_at(instruction_address())->literal_value();
    }
  }

  void set_jump_destination(address dest) {
    if (is_b()) {
      // Jump destination -1 is encoded as a jump to self
      if (dest == (address)-1) {
        dest = instruction_address();
      }
      nativeB_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_far_jump(), "should be");
      nativeLdrLiteral_at(instruction_address())->set_literal_value(dest);
    }
  }
};

inline RawNativeJump* rawNativeJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (RawNativeJump*)address;
}

// -------------------------------------------------------------------
class NativeMovConstReg: public NativeInstruction {

  NativeMovConstReg *adjust() const {
    return (NativeMovConstReg *)adjust(this);
  }

 public:

  static RawNativeInstruction *adjust(const RawNativeInstruction *ni) {
#ifdef COMPILER1
    // NOP required for C1 patching
    if (ni->is_nop()) {
      return ni->next_raw();
    }
#endif
    return (RawNativeInstruction *)ni;
  }

  intptr_t _data() const;
  void set_data(intptr_t x);

  intptr_t data() const {
    return adjust()->_data();
  }

  bool is_pc_relative() {
    return adjust()->is_ldr_literal();
  }

  void _set_pc_relative_offset(address addr, address pc) {
    assert(is_ldr_literal(), "must be");
    nativeLdrLiteral_at(instruction_address())->set_literal_address(addr, pc);
  }

  void set_pc_relative_offset(address addr, address pc) {
    NativeMovConstReg *ni = adjust();
    int dest_adj = ni->instruction_address() - instruction_address();
    ni->_set_pc_relative_offset(addr, pc + dest_adj);
  }

  address _next_instruction_address() const {
#ifdef COMPILER2
    if (is_movz()) {
      // narrow constant
      RawNativeInstruction* ni = next_raw();
      assert(ni->is_movk(), "movz;movk expected");
      return ni->next_raw_instruction_address();
    }
#endif
    assert(is_ldr_literal(), "must be");
    return NativeInstruction::next_raw_instruction_address();
  }

  address next_instruction_address() const {
    return adjust()->_next_instruction_address();
  }
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  RawNativeInstruction* ni = rawNativeInstruction_at(address);

  ni = NativeMovConstReg::adjust(ni);

  assert(ni->is_mov_slow() || ni->is_ldr_literal(), "must be");
  return (NativeMovConstReg*)address;
}

// -------------------------------------------------------------------
class NativeJump: public RawNativeJump {
 public:

  static void check_verified_entry_alignment(address entry, address verified_entry);

  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  assert(nativeInstruction_at(address)->is_jump(), "must be");
  return (NativeJump*)address;
}

#endif // CPU_ARM_VM_NATIVEINST_ARM_64_HPP

hotspot/src/cpu/arm/vm/registerMap_arm.hpp (new file, 44 lines)
@ -0,0 +1,44 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_REGISTERMAP_ARM_HPP
#define CPU_ARM_VM_REGISTERMAP_ARM_HPP

// machine-dependent implementation for register maps
  friend class frame;

 private:
  // This is the hook for finding a register in a "well-known" location,
  // such as a register block of a predetermined format.
  // Since there is none, we just return NULL.
  // See registerMap_sparc.hpp for an example of grabbing registers
  // from register save areas of a standard layout.
  address pd_location(VMReg reg) const { return NULL; }

  // no PD state to clear or copy:
  void pd_clear() {}
  void pd_initialize() {}
  void pd_initialize_from(const RegisterMap* map) {}

#endif // CPU_ARM_VM_REGISTERMAP_ARM_HPP

hotspot/src/cpu/arm/vm/register_arm.cpp (new file, 80 lines)
@ -0,0 +1,80 @@
/*
 * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "register_arm.hpp"
#include "utilities/debug.hpp"

const int ConcreteRegisterImpl::max_gpr = ConcreteRegisterImpl::num_gpr;
const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::num_fpr +
                                          ConcreteRegisterImpl::max_gpr;

const char* RegisterImpl::name() const {
  const char* names[number_of_registers] = {
#ifdef AARCH64
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "fp", "lr", "xzr", "sp"
#else
    "r0", "r1", "r2", "r3", "r4", "r5", "r6",
#if (FP_REG_NUM == 7)
    "fp",
#else
    "r7",
#endif
    "r8", "r9", "r10",
#if (FP_REG_NUM == 11)
    "fp",
#else
    "r11",
#endif
    "r12", "sp", "lr", "pc"
#endif // AARCH64
  };
  return is_valid() ? names[encoding()] : "noreg";
}

const char* FloatRegisterImpl::name() const {
  const char* names[number_of_registers] = {
#ifdef AARCH64
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
    "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
#else
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
    "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
    "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
    "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"
#ifdef COMPILER2
    ,"s32", "s33?","s34", "s35?","s36", "s37?","s38", "s39?",
    "s40", "s41?","s42", "s43?","s44", "s45?","s46", "s47?",
    "s48", "s49?","s50", "s51?","s52", "s53?","s54", "s55?",
    "s56", "s57?","s58", "s59?","s60", "s61?","s62", "s63?"
#endif
#endif // AARCH64
  };
  return is_valid() ? names[encoding()] : "fnoreg";
}

hotspot/src/cpu/arm/vm/register_arm.hpp (new file, 570 lines)
@ -0,0 +1,570 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_REGISTER_ARM_HPP
#define CPU_ARM_VM_REGISTER_ARM_HPP

#include "asm/register.hpp"
#include "vm_version_arm.hpp"

class VMRegImpl;
typedef VMRegImpl* VMReg;

// These are declared in ucontext.h
#undef R0
#undef R1
#undef R2
#undef R3
#undef R4
#undef R5
#undef R6
#undef R7
#undef R8
#undef R9
#undef R10
#undef R11
#undef R12
#undef R13
#undef R14
#undef R15

#define R(r) ((Register)(r))

/////////////////////////////////
// Support for different ARM ABIs
// Note: default ABI is for Linux


// R9_IS_SCRATCHED
//
// The ARM ABI does not guarantee that R9 is callee-saved.
// Set R9_IS_SCRATCHED to 1 to ensure it is properly saved/restored by
// the caller.
#ifndef R9_IS_SCRATCHED
// Default: R9 is callee-saved
#define R9_IS_SCRATCHED 0
#endif

#ifndef AARCH64
// FP_REG_NUM
//
// The ARM ABI does not state which register is used for the frame pointer.
// Note: for the ABIs we are currently aware of, FP is either R7 or R11.
// Code may have to be extended if a third register must be
// supported (see altFP_7_11).
#ifndef FP_REG_NUM
// Default: FP is R11
#define FP_REG_NUM 11
#endif
#endif // AARCH64

// ALIGN_WIDE_ARGUMENTS
//
// The ARM ABI requires 64-bit arguments to be aligned on 4 words
// or on even registers. Set ALIGN_WIDE_ARGUMENTS to 1 for that behavior.
//
// Unfortunately, some platforms do not endorse that part of the ABI.
//
// We are aware of one which expects 64-bit arguments to only be
// 4-byte aligned and can, for instance, use R3 + a stack slot for such
// an argument.
//
// This is the behavior implemented if (ALIGN_WIDE_ARGUMENTS == 0).
#ifndef ALIGN_WIDE_ARGUMENTS
// Default: align on 8 bytes and avoid using <r3+stack>
#define ALIGN_WIDE_ARGUMENTS 1
#endif
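
A hedged illustration of what this switch changes (my example; the signature below is hypothetical and not part of this change):

// Hypothetical example: passing f(jint a, jlong b) on arm32.
//
//   ALIGN_WIDE_ARGUMENTS == 1 (default): a -> R0, b -> R2:R3
//     (R1 is skipped so that b starts on an even register)
//   ALIGN_WIDE_ARGUMENTS == 0:           a -> R0, b -> R1:R2
//     (with more preceding arguments, b could even be split across
//      R3 and a stack slot, as the comment above notes)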

#define R0  ((Register)0)
#define R1  ((Register)1)
#define R2  ((Register)2)
#define R3  ((Register)3)
#define R4  ((Register)4)
#define R5  ((Register)5)
#define R6  ((Register)6)
#define R7  ((Register)7)
#define R8  ((Register)8)
#define R9  ((Register)9)
#define R10 ((Register)10)
#define R11 ((Register)11)
#define R12 ((Register)12)
#define R13 ((Register)13)
#define R14 ((Register)14)
#define R15 ((Register)15)

#ifdef AARCH64

#define R16 ((Register)16)
#define R17 ((Register)17)
#define R18 ((Register)18)
#define R19 ((Register)19)
#define R20 ((Register)20)
#define R21 ((Register)21)
#define R22 ((Register)22)
#define R23 ((Register)23)
#define R24 ((Register)24)
#define R25 ((Register)25)
#define R26 ((Register)26)
#define R27 ((Register)27)
#define R28 ((Register)28)
#define R29 ((Register)29)
#define R30 ((Register)30)
#define ZR  ((Register)31)
#define SP  ((Register)32)

#define FP R29
#define LR R30

#define altFP_7_11 R7

#else // !AARCH64

#define FP ((Register)FP_REG_NUM)

// Safe use of registers which may be FP on some platforms.
//
// altFP_7_11: R7 if not equal to FP, else R11 (the default FP)
//
// Note: add an additional altFP_#_11 for each register potentially used
// as FP on supported ABIs (and replace R# by altFP_#_11). altFP_#_11
// must be #defined to R11 if and only if # is FP_REG_NUM.
#if (FP_REG_NUM == 7)
#define altFP_7_11 ((Register)11)
#else
#define altFP_7_11 ((Register)7)
#endif
#define SP R13
#define LR R14
#define PC R15

#endif // !AARCH64
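
A short, hypothetical usage sketch (mine, not code from this change) of why the alias exists:

// Code that would otherwise hard-code R7 as a scratch register writes
//   __ mov(altFP_7_11, R0);
// instead; on an ABI where FP_REG_NUM == 7 this transparently resolves
// to R11, so the active frame pointer is never clobbered.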

class RegisterImpl;
typedef RegisterImpl* Register;

inline Register as_Register(int encoding) {
  return (Register)(intptr_t)encoding;
}

class RegisterImpl : public AbstractRegisterImpl {
 public:
  enum {
#ifdef AARCH64
    number_of_gprs = 31,
    zr_sp_encoding = 31,
#endif
    number_of_registers = AARCH64_ONLY(number_of_gprs + 2) NOT_AARCH64(16)
  };

  Register successor() const { return as_Register(encoding() + 1); }

  inline friend Register as_Register(int encoding);

  VMReg as_VMReg();

  // accessors
  int encoding() const { assert(is_valid(), "invalid register"); return value(); }
  const char* name() const;

#ifdef AARCH64
  int encoding_with_zr() const { assert(is_valid_gpr_or_zr(), "invalid register"); return (this == ZR) ? zr_sp_encoding : value(); }
  int encoding_with_sp() const { assert(is_valid_gpr_or_sp(), "invalid register"); return (this == SP) ? zr_sp_encoding : value(); }
#endif

  // testers
  bool is_valid() const { return 0 <= value() && value() < number_of_registers; }

#ifdef AARCH64
  bool is_valid_gpr() const { return (0 <= value() && value() < number_of_gprs); }
  bool is_valid_gpr_or_zr() const { return is_valid_gpr() || (this == ZR); }
  bool is_valid_gpr_or_sp() const { return is_valid_gpr() || (this == SP); }
#endif
};

CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));


// Use FloatRegister as shortcut
class FloatRegisterImpl;
typedef FloatRegisterImpl* FloatRegister;

inline FloatRegister as_FloatRegister(int encoding) {
  return (FloatRegister)(intptr_t)encoding;
}

class FloatRegisterImpl : public AbstractRegisterImpl {
 public:
  enum {
#ifdef AARCH64
    number_of_registers = 32
#else
    number_of_registers = NOT_COMPILER2(32) COMPILER2_PRESENT(64)
#endif
  };

  inline friend FloatRegister as_FloatRegister(int encoding);

  VMReg as_VMReg();

  int encoding() const { assert(is_valid(), "invalid register"); return value(); }
  bool is_valid() const { return 0 <= (intx)this && (intx)this < number_of_registers; }
  FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }

  const char* name() const;

#ifndef AARCH64
  int hi_bits() const {
    return (encoding() >> 1) & 0xf;
  }

  int lo_bit() const {
    return encoding() & 1;
  }

  int hi_bit() const {
    return encoding() >> 5;
  }
#endif // !AARCH64
};

CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg, (-1));
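
A minimal standalone sketch (my arithmetic, mirroring the arm32 accessors above) of how a single-precision register number splits into the fields a VFP instruction encoder needs:

#include <cstdio>

int main() {
  int encoding = 13;                     // s13, as produced by as_FloatRegister(13)
  int hi_bits  = (encoding >> 1) & 0xf;  // 4-bit register field -> 6
  int lo_bit   = encoding & 1;           // D bit -> 1
  int hi_bit   = encoding >> 5;          // upper bank s32..s63 (COMPILER2 only) -> 0
  printf("s%d -> hi_bits=%d, lo_bit=%d, hi_bit=%d\n", encoding, hi_bits, lo_bit, hi_bit);
  return 0;
}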

#ifdef AARCH64

CONSTANT_REGISTER_DECLARATION(FloatRegister, V0,  ( 0));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V1,  ( 1));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V2,  ( 2));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V3,  ( 3));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V4,  ( 4));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V5,  ( 5));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V6,  ( 6));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V7,  ( 7));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V8,  ( 8));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V9,  ( 9));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V10, (10));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V11, (11));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V12, (12));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V13, (13));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V14, (14));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V15, (15));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V16, (16));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V17, (17));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V18, (18));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V19, (19));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V20, (20));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V21, (21));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V22, (22));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V23, (23));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V24, (24));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V25, (25));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V26, (26));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V27, (27));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V28, (28));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V29, (29));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V30, (30));
CONSTANT_REGISTER_DECLARATION(FloatRegister, V31, (31));

#define S0     V0
#define S1_reg V1
#define Stemp  V31

#define D0 V0
#define D1 V1

#else // AARCH64

/*
 * S1-S6 are named with "_reg" suffix to avoid conflict with
 * constants defined in sharedRuntimeTrig.cpp
 */
CONSTANT_REGISTER_DECLARATION(FloatRegister, S0,     ( 0));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S1_reg, ( 1));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S2_reg, ( 2));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S3_reg, ( 3));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S4_reg, ( 4));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S5_reg, ( 5));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S6_reg, ( 6));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S7,     ( 7));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S8,     ( 8));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S9,     ( 9));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S10,    (10));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S11,    (11));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S12,    (12));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S13,    (13));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S14,    (14));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S15,    (15));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S16,    (16));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S17,    (17));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S18,    (18));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S19,    (19));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S20,    (20));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S21,    (21));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S22,    (22));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S23,    (23));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S24,    (24));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S25,    (25));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S26,    (26));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S27,    (27));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S28,    (28));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S29,    (29));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S30,    (30));
CONSTANT_REGISTER_DECLARATION(FloatRegister, S31,    (31));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Stemp,  (30));

CONSTANT_REGISTER_DECLARATION(FloatRegister, D0,  ( 0));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D1,  ( 2));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D2,  ( 4));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D3,  ( 6));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D4,  ( 8));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D5,  (10));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D6,  (12));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D7,  (14));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D8,  (16));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D9,  (18));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D10, (20));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D11, (22));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D12, (24));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D13, (26));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D14, (28));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D15, (30));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D16, (32));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D17, (34));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D18, (36));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D19, (38));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D20, (40));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D21, (42));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D22, (44));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D23, (46));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D24, (48));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D25, (50));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D26, (52));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D27, (54));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D28, (56));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D29, (58));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D30, (60));
CONSTANT_REGISTER_DECLARATION(FloatRegister, D31, (62));

#endif // AARCH64

class ConcreteRegisterImpl : public AbstractRegisterImpl {
 public:
  enum {
    log_vmregs_per_word = LogBytesPerWord - LogBytesPerInt, // VMRegs are of 4-byte size
#ifdef COMPILER2
    log_bytes_per_fpr = AARCH64_ONLY(4) NOT_AARCH64(2), // quad vectors
#else
    log_bytes_per_fpr = AARCH64_ONLY(3) NOT_AARCH64(2), // double vectors
#endif
    log_words_per_fpr = log_bytes_per_fpr - LogBytesPerWord,
    words_per_fpr = 1 << log_words_per_fpr,
    log_vmregs_per_fpr = log_bytes_per_fpr - LogBytesPerInt,
    log_vmregs_per_gpr = log_vmregs_per_word,
    vmregs_per_gpr = 1 << log_vmregs_per_gpr,
    vmregs_per_fpr = 1 << log_vmregs_per_fpr,

    num_gpr = RegisterImpl::number_of_registers << log_vmregs_per_gpr,
    max_gpr0 = num_gpr,
    num_fpr = FloatRegisterImpl::number_of_registers << log_vmregs_per_fpr,
    max_fpr0 = max_gpr0 + num_fpr,
    number_of_registers = num_gpr + num_fpr +
                          // TODO-AARCH64 revise
                          1+1 // APSR and FPSCR so that c2's REG_COUNT <= ConcreteRegisterImpl::number_of_registers
  };

  static const int max_gpr;
  static const int max_fpr;
};
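
To make the shifts above concrete, here is the arithmetic worked out for an arm32 COMPILER2 build (my own calculation, not text from the change):

// arm32, COMPILER2: LogBytesPerWord == LogBytesPerInt == 2, so
//   log_vmregs_per_gpr  = 2 - 2 = 0   ->  num_gpr = 16 << 0 = 16
//   log_bytes_per_fpr   = 2           ->  log_vmregs_per_fpr = 2 - 2 = 0
//   num_fpr             = 64 << 0 = 64
//   number_of_registers = 16 + 64 + 1 + 1 = 82   (APSR and FPSCR included)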

// TODO-AARCH64 revise the following definitions

class VFPSystemRegisterImpl;
typedef VFPSystemRegisterImpl* VFPSystemRegister;
class VFPSystemRegisterImpl : public AbstractRegisterImpl {
 public:
  int encoding() const { return value(); }
};

#define FPSID ((VFPSystemRegister)0)
#define FPSCR ((VFPSystemRegister)1)
#define MVFR0 ((VFPSystemRegister)0x6)
#define MVFR1 ((VFPSystemRegister)0x7)

/*
 * Register definitions shared across interpreter and compiler
 */
#define Rexception_obj AARCH64_ONLY(R19) NOT_AARCH64(R4)
#define Rexception_pc  AARCH64_ONLY(R20) NOT_AARCH64(R5)

#ifdef AARCH64
#define Rheap_base R27
#endif // AARCH64

/*
 * Interpreter register definitions common to C++ and template interpreters.
 */
#ifdef AARCH64
#define Rlocals R23
#define Rmethod R26
#define Rthread R28
#define Rtemp   R16
#define Rtemp2  R17
#else
#define Rlocals R8
#define Rmethod R9
#define Rthread R10
#define Rtemp   R12
#endif // AARCH64

// Interpreter calling conventions

#define Rparams    AARCH64_ONLY(R8)  NOT_AARCH64(SP)
#define Rsender_sp AARCH64_ONLY(R19) NOT_AARCH64(R4)

// JSR292
// Note: R5_mh is needed only during the call setup, including adapters.
// This does not seem to conflict with Rexception_pc.
// In case of issues, R3 might be OK, but adapters calling the runtime would have to save it.
#define R5_mh       R5 // MethodHandle register, used during the call setup
#define Rmh_SP_save FP // for C1

/*
 * C++ Interpreter Register Defines
 */
#define Rsave0   R4
#define Rsave1   R5
#define Rsave2   R6
#define Rstate   altFP_7_11 // R7 or R11
#define Ricklass R8

/*
 * TemplateTable Interpreter Register Usage
 */

// Temporary registers
#define R0_tmp  R0
#define R1_tmp  R1
#define R2_tmp  R2
#define R3_tmp  R3
#define R4_tmp  R4
#define R5_tmp  R5
#define R12_tmp R12
#define LR_tmp  LR

#define S0_tmp S0
#define S1_tmp S1_reg

#define D0_tmp D0
#define D1_tmp D1

// Temporary registers saved across VM calls (according to C calling conventions)
#define Rtmp_save0 AARCH64_ONLY(R19) NOT_AARCH64(R4)
#define Rtmp_save1 AARCH64_ONLY(R20) NOT_AARCH64(R5)

// Cached TOS value
#define R0_tos R0

#ifndef AARCH64
#define R0_tos_lo R0
#define R1_tos_hi R1
#endif

#define S0_tos S0
#define D0_tos D0

// Dispatch table
#define RdispatchTable AARCH64_ONLY(R22) NOT_AARCH64(R6)

// Bytecode pointer
#define Rbcp AARCH64_ONLY(R24) NOT_AARCH64(altFP_7_11)

// Pre-loaded next bytecode for the dispatch
#define R3_bytecode R3

// Conventions between bytecode templates and stubs
#define R2_ClassCastException_obj      R2
#define R4_ArrayIndexOutOfBounds_index R4

// Interpreter expression stack top
#define Rstack_top AARCH64_ONLY(R25) NOT_AARCH64(SP)

/*
 * Linux 32-bit ARM C ABI register calling conventions
 *
 *   REG        use                          callee/caller saved
 *
 *   R0         first argument reg,          caller
 *              result register
 *   R1         second argument reg,         caller
 *              result register
 *   R2         third argument reg           caller
 *   R3         fourth argument reg          caller
 *
 *   R4 - R8    local variable registers     callee
 *   R9
 *   R10, R11   local variable registers     callee
 *
 *   R12 (IP)   scratch register used in inter-procedural calling
 *   R13 (SP)   stack pointer                callee
 *   R14 (LR)   link register
 *   R15 (PC)   program counter
 *
 * TODO-AARCH64: document AArch64 ABI
 *
 */
#define c_rarg0 R0
#define c_rarg1 R1
#define c_rarg2 R2
#define c_rarg3 R3

#ifdef AARCH64
#define c_rarg4 R4
#define c_rarg5 R5
#define c_rarg6 R6
#define c_rarg7 R7
#endif

#ifdef AARCH64
#define GPR_PARAMS 8
#define FPR_PARAMS 8
#else
#define GPR_PARAMS 4
#endif


// Java ABI
// XXX Is this correct?
#define j_rarg0 c_rarg0
#define j_rarg1 c_rarg1
#define j_rarg2 c_rarg2
#define j_rarg3 c_rarg3

#ifdef AARCH64
#define j_rarg4 c_rarg4
#define j_rarg5 c_rarg5
#define j_rarg6 c_rarg6
#define j_rarg7 c_rarg7
#endif
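
A hedged sketch of how stub code consumes these aliases (modeled on the exception-blob generator later in this commit; sketch_call_into_runtime and some_entry are hypothetical names):

#define __ masm->
// Pass the current thread as the first C argument and call into the VM
// runtime; the aliases keep the code identical on arm32 and AArch64.
static void sketch_call_into_runtime(MacroAssembler* masm, address some_entry) {
  __ mov(c_rarg0, Rthread);  // Rthread is R10 on arm32, R28 on AArch64
  __ call(some_entry);
}
#undef __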

#endif // CPU_ARM_VM_REGISTER_ARM_HPP

hotspot/src/cpu/arm/vm/register_definitions_arm.cpp (new file, 137 lines)
@ -0,0 +1,137 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "interp_masm_arm.hpp"
#include "register_arm.hpp"

REGISTER_DEFINITION(Register, noreg);
REGISTER_DEFINITION(FloatRegister, fnoreg);

#ifdef AARCH64

REGISTER_DEFINITION(FloatRegister, V0);
REGISTER_DEFINITION(FloatRegister, V1);
REGISTER_DEFINITION(FloatRegister, V2);
REGISTER_DEFINITION(FloatRegister, V3);
REGISTER_DEFINITION(FloatRegister, V4);
REGISTER_DEFINITION(FloatRegister, V5);
REGISTER_DEFINITION(FloatRegister, V6);
REGISTER_DEFINITION(FloatRegister, V7);
REGISTER_DEFINITION(FloatRegister, V8);
REGISTER_DEFINITION(FloatRegister, V9);
REGISTER_DEFINITION(FloatRegister, V10);
REGISTER_DEFINITION(FloatRegister, V11);
REGISTER_DEFINITION(FloatRegister, V12);
REGISTER_DEFINITION(FloatRegister, V13);
REGISTER_DEFINITION(FloatRegister, V14);
REGISTER_DEFINITION(FloatRegister, V15);
REGISTER_DEFINITION(FloatRegister, V16);
REGISTER_DEFINITION(FloatRegister, V17);
REGISTER_DEFINITION(FloatRegister, V18);
REGISTER_DEFINITION(FloatRegister, V19);
REGISTER_DEFINITION(FloatRegister, V20);
REGISTER_DEFINITION(FloatRegister, V21);
REGISTER_DEFINITION(FloatRegister, V22);
REGISTER_DEFINITION(FloatRegister, V23);
REGISTER_DEFINITION(FloatRegister, V24);
REGISTER_DEFINITION(FloatRegister, V25);
REGISTER_DEFINITION(FloatRegister, V26);
REGISTER_DEFINITION(FloatRegister, V27);
REGISTER_DEFINITION(FloatRegister, V28);
REGISTER_DEFINITION(FloatRegister, V29);
REGISTER_DEFINITION(FloatRegister, V30);
REGISTER_DEFINITION(FloatRegister, V31);

#else // AARCH64

REGISTER_DEFINITION(FloatRegister, S0);
REGISTER_DEFINITION(FloatRegister, S1_reg);
REGISTER_DEFINITION(FloatRegister, S2_reg);
REGISTER_DEFINITION(FloatRegister, S3_reg);
REGISTER_DEFINITION(FloatRegister, S4_reg);
REGISTER_DEFINITION(FloatRegister, S5_reg);
REGISTER_DEFINITION(FloatRegister, S6_reg);
REGISTER_DEFINITION(FloatRegister, S7);
REGISTER_DEFINITION(FloatRegister, S8);
REGISTER_DEFINITION(FloatRegister, S9);
REGISTER_DEFINITION(FloatRegister, S10);
REGISTER_DEFINITION(FloatRegister, S11);
REGISTER_DEFINITION(FloatRegister, S12);
REGISTER_DEFINITION(FloatRegister, S13);
REGISTER_DEFINITION(FloatRegister, S14);
REGISTER_DEFINITION(FloatRegister, S15);
REGISTER_DEFINITION(FloatRegister, S16);
REGISTER_DEFINITION(FloatRegister, S17);
REGISTER_DEFINITION(FloatRegister, S18);
REGISTER_DEFINITION(FloatRegister, S19);
REGISTER_DEFINITION(FloatRegister, S20);
REGISTER_DEFINITION(FloatRegister, S21);
REGISTER_DEFINITION(FloatRegister, S22);
REGISTER_DEFINITION(FloatRegister, S23);
REGISTER_DEFINITION(FloatRegister, S24);
REGISTER_DEFINITION(FloatRegister, S25);
REGISTER_DEFINITION(FloatRegister, S26);
REGISTER_DEFINITION(FloatRegister, S27);
REGISTER_DEFINITION(FloatRegister, S28);
REGISTER_DEFINITION(FloatRegister, S29);
REGISTER_DEFINITION(FloatRegister, S30);
REGISTER_DEFINITION(FloatRegister, S31);
REGISTER_DEFINITION(FloatRegister, Stemp);
REGISTER_DEFINITION(FloatRegister, D0);
REGISTER_DEFINITION(FloatRegister, D1);
REGISTER_DEFINITION(FloatRegister, D2);
REGISTER_DEFINITION(FloatRegister, D3);
REGISTER_DEFINITION(FloatRegister, D4);
REGISTER_DEFINITION(FloatRegister, D5);
REGISTER_DEFINITION(FloatRegister, D6);
REGISTER_DEFINITION(FloatRegister, D7);
REGISTER_DEFINITION(FloatRegister, D8);
REGISTER_DEFINITION(FloatRegister, D9);
REGISTER_DEFINITION(FloatRegister, D10);
REGISTER_DEFINITION(FloatRegister, D11);
REGISTER_DEFINITION(FloatRegister, D12);
REGISTER_DEFINITION(FloatRegister, D13);
REGISTER_DEFINITION(FloatRegister, D14);
REGISTER_DEFINITION(FloatRegister, D15);
REGISTER_DEFINITION(FloatRegister, D16);
REGISTER_DEFINITION(FloatRegister, D17);
REGISTER_DEFINITION(FloatRegister, D18);
REGISTER_DEFINITION(FloatRegister, D19);
REGISTER_DEFINITION(FloatRegister, D20);
REGISTER_DEFINITION(FloatRegister, D21);
REGISTER_DEFINITION(FloatRegister, D22);
REGISTER_DEFINITION(FloatRegister, D23);
REGISTER_DEFINITION(FloatRegister, D24);
REGISTER_DEFINITION(FloatRegister, D25);
REGISTER_DEFINITION(FloatRegister, D26);
REGISTER_DEFINITION(FloatRegister, D27);
REGISTER_DEFINITION(FloatRegister, D28);
REGISTER_DEFINITION(FloatRegister, D29);
REGISTER_DEFINITION(FloatRegister, D30);
REGISTER_DEFINITION(FloatRegister, D31);

#endif // AARCH64

hotspot/src/cpu/arm/vm/relocInfo_arm.cpp (new file, 167 lines)
@ -0,0 +1,167 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "assembler_arm.inline.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_arm.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"

void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {

  NativeMovConstReg* ni = nativeMovConstReg_at(addr());
#if defined(AARCH64) && defined(COMPILER2)
  if (ni->is_movz()) {
    assert(type() == relocInfo::oop_type, "!");
    if (verify_only) {
      uintptr_t d = ni->data();
      guarantee((d >> 32) == 0, "not narrow oop");
      narrowOop no = d;
      oop o = oopDesc::decode_heap_oop(no);
      guarantee(cast_from_oop<intptr_t>(o) == (intptr_t)x, "instructions must match");
    } else {
      ni->set_data((intptr_t)x);
    }
    return;
  }
#endif
  if (verify_only) {
    guarantee(ni->data() == (intptr_t)(x + o), "instructions must match");
  } else {
    ni->set_data((intptr_t)(x + o));
  }
}

address Relocation::pd_call_destination(address orig_addr) {
  address pc = addr();

  int adj = 0;
  if (orig_addr != NULL) {
    // We just moved this call instruction from orig_addr to addr().
    // This means that, when relative, its target will appear to have grown by addr() - orig_addr.
    adj = orig_addr - pc;
  }

  RawNativeInstruction* ni = rawNativeInstruction_at(pc);

#if (!defined(AARCH64))
  if (NOT_AARCH64(ni->is_add_lr()) AARCH64_ONLY(ni->is_adr_aligned_lr())) {
    // On arm32, skip the optional 'add LR, PC, #offset'
    // (allowing the jump support code to handle fat_call)
    pc = ni->next_raw_instruction_address();
    ni = nativeInstruction_at(pc);
  }
#endif

  if (AARCH64_ONLY(ni->is_call()) NOT_AARCH64(ni->is_bl())) {
    // For arm32, fat_call is handled by is_jump for the new 'ni',
    // so only is_bl needs to be supported here.
    //
    // For AARCH64, skipping a leading adr is not sufficient
    // to reduce calls to a simple bl.
    return rawNativeCall_at(pc)->destination(adj);
  }

  if (ni->is_jump()) {
    return rawNativeJump_at(pc)->jump_destination(adj);
  }
  ShouldNotReachHere();
  return NULL;
}

void Relocation::pd_set_call_destination(address x) {
  address pc = addr();
  NativeInstruction* ni = nativeInstruction_at(pc);

#if (!defined(AARCH64))
  if (NOT_AARCH64(ni->is_add_lr()) AARCH64_ONLY(ni->is_adr_aligned_lr())) {
    // On arm32, skip the optional 'add LR, PC, #offset'
    // (allowing the jump support code to handle fat_call)
    pc = ni->next_raw_instruction_address();
    ni = nativeInstruction_at(pc);
  }
#endif

  if (AARCH64_ONLY(ni->is_call()) NOT_AARCH64(ni->is_bl())) {
    // For arm32, fat_call is handled by is_jump for the new 'ni',
    // so only is_bl needs to be supported here.
    //
    // For AARCH64, skipping a leading adr is not sufficient
    // to reduce calls to a simple bl.
    rawNativeCall_at(pc)->set_destination(x);
    return;
  }

  if (ni->is_jump()) { // raw jump
    rawNativeJump_at(pc)->set_jump_destination(x);
    return;
  }
  ShouldNotReachHere();
}


address* Relocation::pd_address_in_code() {
  return (address*)addr();
}

address Relocation::pd_get_address_from_code() {
  return *pd_address_in_code();
}

void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}

void metadata_Relocation::pd_fix_value(address x) {
  assert(!addr_in_const(), "Do not use");
#ifdef AARCH64
#ifdef COMPILER2
  NativeMovConstReg* ni = nativeMovConstReg_at(addr());
  if (ni->is_movz()) {
    return;
  }
#endif
  set_value(x);
#else
  if (!VM_Version::supports_movw()) {
    set_value(x);
#ifdef ASSERT
  } else {
    // the movw/movt data should be correct
    NativeMovConstReg* ni = nativeMovConstReg_at(addr());
    assert(ni->is_movw(), "not a movw");
    // The following assert should be correct but the shared code
    // currently 'fixes' the metadata instructions before the
    // metadata_table is copied in the new method (see
    // JDK-8042845). This means that 'x' (which comes from the table)
    // does not match the value inlined in the code (which is
    // correct). Failure can be temporarily ignored since the code is
    // correct and the table is copied shortly afterward.
    //
    // assert(ni->data() == (int)x, "metadata relocation mismatch");
#endif
  }
#endif // !AARCH64
}

hotspot/src/cpu/arm/vm/relocInfo_arm.hpp (new file, 35 lines)
@ -0,0 +1,35 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_RELOCINFO_ARM_HPP
#define CPU_ARM_VM_RELOCINFO_ARM_HPP

 private:

  enum {
    offset_unit = 4,
    format_width = 0
  };

#endif // CPU_ARM_VM_RELOCINFO_ARM_HPP

hotspot/src/cpu/arm/vm/runtime_arm.cpp (new file, 160 lines)
@ -0,0 +1,160 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/assembler.hpp"
#include "assembler_arm.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_arm.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vmreg_arm.inline.hpp"
#endif

#define __ masm->

//------------------------------generate_exception_blob---------------------------
// Creates the exception blob at the end.
// Compiled methods jump to this code via the exception blob
// (see emit_exception_handler in the .ad file).
//
// Given an exception pc at a call, we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee-save registers), unwind the frame, and jump to the
// exception handler for the nmethod if there is no Java-level handler
// for the nmethod.
//
// This code is entered with a jmp.
//
// Arguments:
//   Rexception_obj (R4/R19): exception oop
//   Rexception_pc  (R5/R20): exception pc
//
// Results:
//   Rexception_obj (R4/R19): exception oop
//   O1: exception pc in caller or ???
//   destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//
void OptoRuntime::generate_exception_blob() {
  // allocate space for the code
  ResourceMark rm;
  int pad = VerifyThread ? 256 : 0; // Extra slop space for more verify code

  // setup code generation tools
  // Measured 8/7/03 at 256 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 528 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("exception_blob", 600+pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int framesize_in_words = 2; // FP + LR
  int framesize_in_bytes = framesize_in_words * wordSize;
  int framesize_in_slots = framesize_in_bytes / sizeof(jint);

  int start = __ offset();

  __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));

  // This call does all the hard work. It checks if an exception catch
  // exists in the method.
  // If so, it returns the handler address.
  // If the nmethod has been deoptimized and it had a handler, the handler
  // address is the deopt blob's unpack_with_exception entry.
  //
  // If no handler exists, it prepares for stack-unwinding, restoring the
  // callee-save registers of the frame being removed.
  //
  __ mov(LR, Rexception_pc);
  __ raw_push(FP, LR);
  int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);

  __ mov(R0, Rthread);

  // This call can block at exit and the nmethod can be deoptimized at that
  // point. If the nmethod had a catch point, we would jump to the
  // now-deoptimized catch point, fall through the vanilla deopt
  // path, and lose the exception.
  // Sure would be simpler if this call didn't block!
  __ call(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C));
  if (pc_offset == -1) {
    pc_offset = __ offset();
  }

  // Set an oopmap for the call site. This oopmap will only be used if we
  // are unwinding the stack. Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(pc_offset - start, new OopMap(framesize_in_slots, 0));

  __ reset_last_Java_frame(Rtemp);

  __ raw_pop(FP, LR);

  // Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
  __ ldr(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
#ifdef AARCH64
  Label skip;
  __ cbz(Rtemp, skip);
  __ mov(SP, Rmh_SP_save);
  __ bind(skip);
#else
  __ cmp(Rtemp, 0);
  __ mov(SP, Rmh_SP_save, ne);
#endif

  // R0 contains the handler address.
  // Since this may be the deopt blob, we must set R5 to look like we
  // returned from the original pc that threw the exception.

  __ ldr(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));   // R5/R20

  __ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset())); // R4/R19
  __ mov(Rtemp, 0);
#ifdef ASSERT
  __ str(Rtemp, Address(Rthread, JavaThread::exception_handler_pc_offset()));
  __ str(Rtemp, Address(Rthread, JavaThread::exception_pc_offset()));
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ str(Rtemp, Address(Rthread, JavaThread::exception_oop_offset()));
  __ jump(R0);

  // -------------
  // make sure all code is generated
  masm->flush();

  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, framesize_in_words);
}

hotspot/src/cpu/arm/vm/sharedRuntime_arm.cpp (new file, 2501 lines; diff suppressed because it is too large)

hotspot/src/cpu/arm/vm/stubGenerator_arm.cpp (new file, 4510 lines; diff suppressed because it is too large)

hotspot/src/cpu/arm/vm/stubRoutinesCrypto_arm.cpp (new file, 1033 lines; diff suppressed because it is too large)

hotspot/src/cpu/arm/vm/stubRoutines_arm.cpp (new file, 39 lines)
@ -0,0 +1,39 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"

#ifndef AARCH64
address StubRoutines::Arm::_idiv_irem_entry = NULL;
#endif

address StubRoutines::Arm::_partial_subtype_check = NULL;

#ifndef AARCH64
address StubRoutines::_atomic_load_long_entry = NULL;
address StubRoutines::_atomic_store_long_entry = NULL;
#endif

hotspot/src/cpu/arm/vm/stubRoutines_arm.hpp (new file, 69 lines)
@ -0,0 +1,69 @@
/*
 * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_STUBROUTINES_ARM_HPP
#define CPU_ARM_VM_STUBROUTINES_ARM_HPP

// This file holds the platform specific parts of the StubRoutines
// definition. See stubRoutines.hpp for a description on how to
// extend it.

enum platform_dependent_constants {
  code_size1 =  9000, // simply increase if too small (assembler will crash if too small)
  code_size2 = 22000  // simply increase if too small (assembler will crash if too small)
};

class Arm {
 friend class StubGenerator;
 friend class VMStructs;

 private:

#ifndef AARCH64
  static address _idiv_irem_entry;
#endif
  static address _partial_subtype_check;

 public:

#ifndef AARCH64
  static address idiv_irem_entry() { return _idiv_irem_entry; }
#endif
  static address partial_subtype_check() { return _partial_subtype_check; }
};

static bool returns_to_call_stub(address return_pc) {
  return return_pc == _call_stub_return_address;
}

#ifndef AARCH64
static address _atomic_load_long_entry;
static address _atomic_store_long_entry;

static address atomic_load_long_entry() { return _atomic_load_long_entry; }
static address atomic_store_long_entry() { return _atomic_store_long_entry; }
#endif


#endif // CPU_ARM_VM_STUBROUTINES_ARM_HPP

hotspot/src/cpu/arm/vm/templateInterpreterGenerator_arm.cpp (new file, 1976 lines; diff suppressed because it is too large)

hotspot/src/cpu/arm/vm/templateTable_arm.cpp (new file, 5030 lines; diff suppressed because it is too large)

hotspot/src/cpu/arm/vm/templateTable_arm.hpp (new file, 61 lines)
@ -0,0 +1,61 @@
/*
 * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_TEMPLATETABLE_ARM_HPP
#define CPU_ARM_VM_TEMPLATETABLE_ARM_HPP

static void prepare_invoke(int byte_no,
                           Register method,         // linked method (or i-klass)
                           Register index = noreg,  // itable index, MethodType, etc.
                           Register recv = noreg,   // if caller wants to see it
                           Register flags = noreg   // if caller wants to test it
                           );

static void invokevirtual_helper(Register index, Register recv,
                                 Register flags);

static void volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
                             Register tmp,
                             bool preserve_flags = false,
                             Register load_tgt = noreg);

// Helpers
static void index_check(Register array, Register index);
static void index_check_without_pop(Register array, Register index);

static void get_local_base_addr(Register r, Register index);

static Address load_iaddress(Register index, Register scratch);
static Address load_aaddress(Register index, Register scratch);
static Address load_faddress(Register index, Register scratch);
static Address load_daddress(Register index, Register scratch);

static void load_category2_local(Register Rlocal_index, Register tmp);
static void store_category2_local(Register Rlocal_index, Register tmp);

static Address get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp);

static void jvmti_post_fast_field_mod(TosState state);

#endif // CPU_ARM_VM_TEMPLATETABLE_ARM_HPP

hotspot/src/cpu/arm/vm/vmStructs_arm.hpp (new file, 48 lines)
@ -0,0 +1,48 @@
/*
 * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_VMSTRUCTS_ARM_HPP
#define CPU_ARM_VM_VMSTRUCTS_ARM_HPP

// These are the CPU-specific fields, types and integer
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.

#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
                                                                    \
  /******************************/                                 \
  /* JavaCallWrapper            */                                  \
  /******************************/                                 \
  /******************************/                                 \
  /* JavaFrameAnchor            */                                  \
  /******************************/                                 \
  volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*)

#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)

#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)

#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)

#endif // CPU_ARM_VM_VMSTRUCTS_ARM_HPP

hotspot/src/cpu/arm/vm/vm_version_arm.hpp (new file, 128 lines)
@ -0,0 +1,128 @@
/*
 * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_VM_VERSION_ARM_HPP
#define CPU_ARM_VM_VM_VERSION_ARM_HPP

#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"

class VM_Version: public Abstract_VM_Version {
  friend class JVMCIVMStructs;

  static bool _has_simd;

 protected:
  // Are we done with vm version initialization?
  static bool _is_initialized;

 public:
  static void initialize();
  static bool is_initialized() { return _is_initialized; }

#ifdef AARCH64

 public:
  static bool supports_ldrex() { return true; }
  static bool supports_ldrexd() { return true; }
  static bool supports_movw() { return true; }

  // Override Abstract_VM_Version implementation
  static bool use_biased_locking();

  static bool has_simd() { return _has_simd; }
  static bool has_vfp() { return has_simd(); }
  static bool simd_math_is_compliant() { return true; }

  static bool prefer_moves_over_load_literal() { return true; }

#else

 protected:
  enum Feature_Flag {
    vfp = 0,
    vfp3_32 = 1,
    simd = 2,
  };

  enum Feature_Flag_Set {
    unknown_m      = 0,
    all_features_m = -1,

    vfp_m     = 1 << vfp,
    vfp3_32_m = 1 << vfp3_32,
    simd_m    = 1 << simd,
  };

  // The value stored by the "STR PC, [addr]" instruction can be either
  // (address of this instruction + 8) or (address of this instruction + 12)
  // depending on the hardware implementation.
  // This adjustment is calculated at runtime.
  static int _stored_pc_adjustment;

  // ARM architecture version: 5 = ARMv5, 6 = ARMv6, 7 = ARMv7, etc.
  static int _arm_arch;

  // Linux kernel atomic helper function version info
  // __kuser_cmpxchg() if version >= 2
  // __kuser_cmpxchg64() if version >= 5
  static int _kuser_helper_version;

#define KUSER_HELPER_VERSION_ADDR 0xffff0ffc
#define KUSER_VERSION_CMPXCHG32 2
#define KUSER_VERSION_CMPXCHG64 5

  // Read additional info using OS-specific interfaces
  static void get_os_cpu_info();

 public:
  static void early_initialize();

  static int arm_arch() { return _arm_arch; }
  static int stored_pc_adjustment() { return _stored_pc_adjustment; }
  static bool supports_rev() { return _arm_arch >= 6; }
  static bool supports_ldrex() { return _arm_arch >= 6; }
  static bool supports_movw() { return _arm_arch >= 7; }
  static bool supports_ldrexd() { return _arm_arch >= 7; }
  static bool supports_compare_and_exchange() { return true; }
  static bool supports_kuser_cmpxchg32() { return _kuser_helper_version >= KUSER_VERSION_CMPXCHG32; }
  static bool supports_kuser_cmpxchg64() { return _kuser_helper_version >= KUSER_VERSION_CMPXCHG64; }
  // Override Abstract_VM_Version implementation
  static bool use_biased_locking();
  static const char* vm_info_string();

  static bool has_vfp() { return (_features & vfp_m) != 0; }
  static bool has_vfp3_32() { return (_features & vfp3_32_m) != 0; }
  static bool has_simd() { return (_features & simd_m) != 0; }

  static bool simd_math_is_compliant() { return false; }

  static bool prefer_moves_over_load_literal() { return supports_movw(); }

  friend class VM_Version_StubGenerator;

#endif // AARCH64
};
|
||||
|
||||
#endif // CPU_ARM_VM_VM_VERSION_ARM_HPP
|
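The kuser helper gates above are what keep 64-bit CAS alive on pre-v7 hardware: the kernel helper page substitutes for the missing LDREXD/STREXD. A minimal sketch of a consumer of these queries (illustrative only; select_cas64_strategy and the emit_*/call_*/report_* helpers are hypothetical names, not part of this commit):

  // Pick a 64-bit CAS strategy from the VM_Version queries above.
  void select_cas64_strategy() {
    if (VM_Version::supports_ldrexd()) {
      emit_ldrexd_cas();            // ARMv7+: inline LDREXD/STREXD loop
    } else if (VM_Version::supports_kuser_cmpxchg64()) {
      call_kuser_cmpxchg64();       // kernel __kuser_cmpxchg64 helper
    } else {
      report_no_cx8();              // _supports_cx8 remains false
    }
  }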
hotspot/src/cpu/arm/vm/vm_version_arm_32.cpp (new file, 329 lines)
@@ -0,0 +1,329 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (standard GPLv2 license header, identical to the one above)
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_arm.hpp"

int  VM_Version::_stored_pc_adjustment = 4;
int  VM_Version::_arm_arch             = 5;
bool VM_Version::_is_initialized       = false;
int  VM_Version::_kuser_helper_version = 0;

extern "C" {
  typedef int (*get_cpu_info_t)();
  typedef bool (*check_vfp_t)(double *d);
  typedef bool (*check_simd_t)();
}

#define __ _masm->

class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    StubCodeMark mark(this, "VM_Version", "get_cpu_info");
    address start = __ pc();

    __ mov(R0, PC);
    __ push(PC);
    __ pop(R1);
    __ sub(R0, R1, R0);
    // return the result in R0
    __ bx(LR);

    return start;
  };

  address generate_check_vfp() {
    StubCodeMark mark(this, "VM_Version", "check_vfp");
    address start = __ pc();

    __ fstd(D0, Address(R0));
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  address generate_check_vfp3_32() {
    StubCodeMark mark(this, "VM_Version", "check_vfp3_32");
    address start = __ pc();

    __ fstd(D16, Address(R0));
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  address generate_check_simd() {
    StubCodeMark mark(this, "VM_Version", "check_simd");
    address start = __ pc();

    __ vcnt(Stemp, Stemp);
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };
};

#undef __


extern "C" address check_vfp3_32_fault_instr;
extern "C" address check_vfp_fault_instr;
extern "C" address check_simd_fault_instr;

void VM_Version::initialize() {
  ResourceMark rm;

  // Making this stub must be FIRST use of assembler
  const int stub_size = 128;
  BufferBlob* stub_blob = BufferBlob::create("get_cpu_info", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info stub");
  }

  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  address get_cpu_info_pc = g.generate_get_cpu_info();
  get_cpu_info_t get_cpu_info = CAST_TO_FN_PTR(get_cpu_info_t, get_cpu_info_pc);

  int pc_adjustment = get_cpu_info();

  VM_Version::_stored_pc_adjustment = pc_adjustment;

#ifndef __SOFTFP__
  address check_vfp_pc = g.generate_check_vfp();
  check_vfp_t check_vfp = CAST_TO_FN_PTR(check_vfp_t, check_vfp_pc);

  check_vfp_fault_instr = (address)check_vfp;
  double dummy;
  if (check_vfp(&dummy)) {
    _features |= vfp_m;
  }

#ifdef COMPILER2
  if (has_vfp()) {
    address check_vfp3_32_pc = g.generate_check_vfp3_32();
    check_vfp_t check_vfp3_32 = CAST_TO_FN_PTR(check_vfp_t, check_vfp3_32_pc);
    check_vfp3_32_fault_instr = (address)check_vfp3_32;
    double dummy;
    if (check_vfp3_32(&dummy)) {
      _features |= vfp3_32_m;
    }

    address check_simd_pc = g.generate_check_simd();
    check_simd_t check_simd = CAST_TO_FN_PTR(check_simd_t, check_simd_pc);
    check_simd_fault_instr = (address)check_simd;
    if (check_simd()) {
      _features |= simd_m;
    }
  }
#endif
#endif


  if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
    warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("vectorizedMismatch intrinsic is not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }

  get_os_cpu_info();

  _kuser_helper_version = *(int*)KUSER_HELPER_VERSION_ADDR;

#ifdef COMPILER2
  // C2 is only supported on v7+ VFP at this time
  if (_arm_arch < 7 || !has_vfp()) {
    vm_exit_during_initialization("Server VM is only supported on ARMv7+ VFP");
  }
#endif

  // armv7 has the ldrexd instruction that can be used to implement cx8
  // armv5 with linux >= 3.1 can use kernel helper routine
  _supports_cx8 = (supports_ldrexd() || supports_kuser_cmpxchg64());
  // ARM doesn't have special instructions for these but ldrex/ldrexd
  // enable shorter instruction sequences than the ones based on cas.
  _supports_atomic_getset4 = supports_ldrex();
  _supports_atomic_getadd4 = supports_ldrex();
  _supports_atomic_getset8 = supports_ldrexd();
  _supports_atomic_getadd8 = supports_ldrexd();

#ifdef COMPILER2
  assert(_supports_cx8 && _supports_atomic_getset4 && _supports_atomic_getadd4
         && _supports_atomic_getset8 && _supports_atomic_getadd8, "C2: atomic operations must be supported");
#endif
  char buf[512];
  jio_snprintf(buf, sizeof(buf), "(ARMv%d)%s%s%s",
               _arm_arch,
               (has_vfp() ? ", vfp" : ""),
               (has_vfp3_32() ? ", vfp3-32" : ""),
               (has_simd() ? ", simd" : ""));

  // buf always starts with "(ARMv%d)", so it is never empty
  _features_string = os::strdup(buf);

  if (has_simd()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      FLAG_SET_DEFAULT(UsePopCountInstruction, true);
    }
  }

  AllocatePrefetchDistance = 128;

#ifdef COMPILER2
  FLAG_SET_DEFAULT(UseFPUForSpilling, true);

  if (FLAG_IS_DEFAULT(MaxVectorSize)) {
    // FLAG_SET_DEFAULT(MaxVectorSize, has_simd() ? 16 : 8);
    // SIMD/NEON can use 16, but default is 8 because currently
    // larger than 8 will disable instruction scheduling
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  }

  if (MaxVectorSize > 16) {
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  }
#endif

  if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
    Tier4CompileThreshold = 10000;
  }
  if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
    Tier3InvocationThreshold = 1000;
  }
  if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
    Tier3CompileThreshold = 5000;
  }
  if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
    Tier3MinInvocationThreshold = 500;
  }

  FLAG_SET_DEFAULT(TypeProfileLevel, 0); // unsupported

  // This machine does not allow unaligned memory accesses
  if (UseUnalignedAccesses) {
    if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
      warning("Unaligned memory access is not available on this CPU");
    FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
  }

  _is_initialized = true;
}

bool VM_Version::use_biased_locking() {
  get_os_cpu_info();
  // The cost of CAS on uniprocessor ARM v6 and later is low compared to the
  // overhead related to slightly longer Biased Locking execution path.
  // Testing shows no improvement when running with Biased Locking enabled
  // on ARMv6 and higher uniprocessor systems. The situation is different on
  // ARMv5 and MP systems.
  //
  // Therefore Biased Locking is enabled on ARMv5 and ARM MP only.
  return os::is_MP() || (arm_arch() <= 5);
}

#define EXP

// Temporary override for experimental features
// Copied from Abstract_VM_Version
const char* VM_Version::vm_info_string() {
  switch (Arguments::mode()) {
    case Arguments::_int:
      return UseSharedSpaces ? "interpreted mode, sharing" EXP : "interpreted mode" EXP;
    case Arguments::_mixed:
      return UseSharedSpaces ? "mixed mode, sharing" EXP : "mixed mode" EXP;
    case Arguments::_comp:
      return UseSharedSpaces ? "compiled mode, sharing" EXP : "compiled mode" EXP;
  };
  ShouldNotReachHere();
  return "";
}
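The three check_* stubs detect features by attempted execution: each begins with an instruction from the probed extension, so on hardware lacking that extension the very first instruction raises SIGILL. The exported check_*_fault_instr addresses let the platform signal handler recognize such probe faults. A hedged sketch of that recognition step (the real handler lives in the OS-specific code, outside this hunk; handle_probe_fault is a hypothetical name):

  bool handle_probe_fault(int sig, address fault_pc) {
    // A fault on the first instruction of a probe stub means the
    // feature is absent: make the probe call return false instead
    // of crashing the VM.
    if (sig == SIGILL &&
        (fault_pc == check_vfp_fault_instr ||
         fault_pc == check_vfp3_32_fault_instr ||
         fault_pc == check_simd_fault_instr)) {
      return true;   // handled: report "not supported"
    }
    return false;    // not a probe fault: normal crash handling
  }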
hotspot/src/cpu/arm/vm/vm_version_arm_64.cpp (new file, 258 lines)
@@ -0,0 +1,258 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (standard GPLv2 license header, identical to the one above)
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_arm.hpp"
#include <sys/auxv.h>
#include <asm/hwcap.h>

#ifndef HWCAP_AES
#define HWCAP_AES (1 << 3)
#endif

bool VM_Version::_is_initialized = false;
bool VM_Version::_has_simd = false;

extern "C" {
  typedef bool (*check_simd_t)();
}


#ifdef COMPILER2

#define __ _masm->

class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_check_simd() {
    StubCodeMark mark(this, "VM_Version", "check_simd");
    address start = __ pc();

    __ vcnt(Stemp, Stemp);
    __ mov(R0, 1);
    __ ret(LR);

    return start;
  };
};

#undef __

#endif


extern "C" address check_simd_fault_instr;


void VM_Version::initialize() {
  ResourceMark rm;

  // Making this stub must be FIRST use of assembler
  const int stub_size = 128;
  BufferBlob* stub_blob = BufferBlob::create("get_cpu_info", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info stub");
  }

  if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("vectorizedMismatch intrinsic is not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }

  CodeBuffer c(stub_blob);

#ifdef COMPILER2
  VM_Version_StubGenerator g(&c);

  address check_simd_pc = g.generate_check_simd();
  if (check_simd_pc != NULL) {
    check_simd_t check_simd = CAST_TO_FN_PTR(check_simd_t, check_simd_pc);
    check_simd_fault_instr = (address)check_simd;
    _has_simd = check_simd();
  } else {
    assert(!_has_simd, "default _has_simd value must be 'false'");
  }
#endif

  unsigned long auxv = getauxval(AT_HWCAP);

  char buf[512];
  jio_snprintf(buf, sizeof(buf), "AArch64%s",
               ((auxv & HWCAP_AES) ? ", aes" : ""));

  _features_string = os::strdup(buf);

#ifdef COMPILER2
  if (auxv & HWCAP_AES) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        FLAG_SET_DEFAULT(UseAESIntrinsics, true);
      }
    }
  } else
#endif
  if (UseAES || UseAESIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
  }

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  _supports_cx8 = true;
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  // TODO-AARCH64 revise C2 flags

  if (has_simd()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      FLAG_SET_DEFAULT(UsePopCountInstruction, true);
    }
  }

  AllocatePrefetchDistance = 128;

#ifdef COMPILER2
  FLAG_SET_DEFAULT(UseFPUForSpilling, true);

  if (FLAG_IS_DEFAULT(MaxVectorSize)) {
    // FLAG_SET_DEFAULT(MaxVectorSize, has_simd() ? 16 : 8);
    // SIMD/NEON can use 16, but default is 8 because currently
    // larger than 8 will disable instruction scheduling
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  }

  if (MaxVectorSize > 16) {
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  }
#endif

  if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
    Tier4CompileThreshold = 10000;
  }
  if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
    Tier3InvocationThreshold = 1000;
  }
  if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
    Tier3CompileThreshold = 5000;
  }
  if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
    Tier3MinInvocationThreshold = 500;
  }

  FLAG_SET_DEFAULT(TypeProfileLevel, 0); // unsupported

  // This machine does not allow unaligned memory accesses
  if (UseUnalignedAccesses) {
    if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
      warning("Unaligned memory access is not available on this CPU");
    FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
  }

  _is_initialized = true;
}

bool VM_Version::use_biased_locking() {
  // TODO-AARCH64 measure performance and revise

  // The cost of CAS on uniprocessor ARM v6 and later is low compared to the
  // overhead related to slightly longer Biased Locking execution path.
  // Testing shows no improvement when running with Biased Locking enabled
  // on ARMv6 and higher uniprocessor systems. The situation is different on
  // ARMv5 and MP systems.
  //
  // Therefore Biased Locking is enabled on ARMv5 and ARM MP only.
  return os::is_MP();
}
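Unlike the 32-bit file, the AArch64 variant can lean on the kernel for most feature discovery: getauxval(AT_HWCAP) returns the hardware capability bits without executing any probe instruction. The same check works in a standalone program (requires glibc 2.16+; the HWCAP_AES fallback mirrors the guard at the top of this file):

  #include <stdio.h>
  #include <sys/auxv.h>
  #ifndef HWCAP_AES
  #define HWCAP_AES (1 << 3)
  #endif

  int main() {
    unsigned long hwcap = getauxval(AT_HWCAP);
    printf("aes: %s\n", (hwcap & HWCAP_AES) ? "yes" : "no");
    return 0;
  }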
hotspot/src/cpu/arm/vm/vmreg_arm.cpp (new file, 51 lines)
@@ -0,0 +1,51 @@
/*
 * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (standard GPLv2 license header, identical to the one above)
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "code/vmreg.hpp"

void VMRegImpl::set_regName() {
  Register reg = ::as_Register(0);
  int i;
  for (i = 0; i < ConcreteRegisterImpl::max_gpr; reg = reg->successor()) {
    for (int j = 0; j < (1 << ConcreteRegisterImpl::log_vmregs_per_gpr); j++) {
      regName[i++] = reg->name();
    }
  }
#ifndef __SOFTFP__
  FloatRegister freg = ::as_FloatRegister(0);
  for ( ; i < ConcreteRegisterImpl::max_fpr ; ) {
    for (int j = 0; j < (1 << ConcreteRegisterImpl::log_vmregs_per_fpr); j++) {
      regName[i++] = freg->name();
    }
    freg = freg->successor();
  }
#endif

  for ( ; i < ConcreteRegisterImpl::number_of_registers ; i++) {
    regName[i] = "NON-GPR-FPR";
  }
}
hotspot/src/cpu/arm/vm/vmreg_arm.hpp (new file, 58 lines)
@@ -0,0 +1,58 @@
/*
 * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (standard GPLv2 license header, identical to the one above)
 */

#ifndef CPU_ARM_VM_VMREG_ARM_HPP
#define CPU_ARM_VM_VMREG_ARM_HPP

inline bool is_Register() {
  return (unsigned int) value() < (unsigned int) ConcreteRegisterImpl::max_gpr;
}

inline bool is_FloatRegister() {
  return value() >= ConcreteRegisterImpl::max_gpr && value() < ConcreteRegisterImpl::max_fpr;
}

inline Register as_Register() {
  assert(is_Register(), "must be");
  assert(is_concrete(), "concrete register expected");
  return ::as_Register(value() >> ConcreteRegisterImpl::log_vmregs_per_gpr);
}

inline FloatRegister as_FloatRegister() {
  assert(is_FloatRegister(), "must be");
  assert(is_concrete(), "concrete register expected");
  return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) >> ConcreteRegisterImpl::log_vmregs_per_fpr);
}

inline bool is_concrete() {
  if (is_Register()) {
    return ((value() & right_n_bits(ConcreteRegisterImpl::log_vmregs_per_gpr)) == 0);
  } else if (is_FloatRegister()) {
    return (((value() - ConcreteRegisterImpl::max_gpr) & right_n_bits(ConcreteRegisterImpl::log_vmregs_per_fpr)) == 0);
  } else {
    return false;
  }
}

#endif // CPU_ARM_VM_VMREG_ARM_HPP
hotspot/src/cpu/arm/vm/vmreg_arm.inline.hpp (new file, 35 lines)
@@ -0,0 +1,35 @@
/*
 * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (standard GPLv2 license header, identical to the one above)
 */

#ifndef CPU_ARM_VM_VMREG_ARM_INLINE_HPP
#define CPU_ARM_VM_VMREG_ARM_INLINE_HPP

inline VMReg RegisterImpl::as_VMReg() {
  return VMRegImpl::as_VMReg(encoding() << ConcreteRegisterImpl::log_vmregs_per_gpr);
}

inline VMReg FloatRegisterImpl::as_VMReg() {
  return VMRegImpl::as_VMReg((encoding() << ConcreteRegisterImpl::log_vmregs_per_fpr) + ConcreteRegisterImpl::max_gpr);
}

#endif // CPU_ARM_VM_VMREG_ARM_INLINE_HPP
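Together these two headers form a shift-based bijection between concrete registers and their first VMReg slot. A worked example, assuming for illustration that log_vmregs_per_gpr == 1 (the actual constant is defined in the port's register file, outside this hunk):

  // R5 (encoding 5)  ->  as_VMReg(): value 5 << 1 = 10
  // VMReg 10 is concrete: (10 & right_n_bits(1)) == 0
  // as_Register(): 10 >> 1 = 5  ->  back to R5
  // VMReg 11 names R5's second slot and is deliberately not concrete.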
hotspot/src/cpu/arm/vm/vtableStubs_arm.cpp (new file, 222 lines)
@@ -0,0 +1,222 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (standard GPLv2 license header, identical to the one above)
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_arm.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_arm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_arm.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  const Register tmp = Rtemp; // Rtemp OK, should be free at call sites

  address npe_addr = __ pc();
  __ load_klass(tmp, R0);

  {
    int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
    int method_offset = vtableEntry::method_offset_in_bytes() + entry_offset;

    assert((method_offset & (wordSize - 1)) == 0, "offset should be aligned");
    int offset_mask = AARCH64_ONLY(0xfff << LogBytesPerWord) NOT_AARCH64(0xfff);
    if (method_offset & ~offset_mask) {
      __ add(tmp, tmp, method_offset & ~offset_mask);
    }
    __ ldr(Rmethod, Address(tmp, method_offset & offset_mask));
  }

  address ame_addr = __ pc();
#ifdef AARCH64
  __ ldr(tmp, Address(Rmethod, Method::from_compiled_offset()));
  __ br(tmp);
#else
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
#endif // AARCH64

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // FIXME ARM: need correct 'slop' - below is x86 code
  // shut the door on sizing bugs
  //int slop = 8;  // 32-bit offset is this much larger than a 13-bit one
  //assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}

VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  const int code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  // R0-R3 / R0-R7 registers hold the arguments and cannot be spoiled
  const Register Rclass  = AARCH64_ONLY(R9)  NOT_AARCH64(R4);
  const Register Rlength = AARCH64_ONLY(R10) NOT_AARCH64(R5);
  const Register Rscan   = AARCH64_ONLY(R11) NOT_AARCH64(R6);
  const Register tmp     = Rtemp;

  assert_different_registers(Ricklass, Rclass, Rlength, Rscan, tmp);

  // Calculate the start of itable (itable goes after vtable)
  const int scale = exact_log2(vtableEntry::size_in_bytes());
  address npe_addr = __ pc();
  __ load_klass(Rclass, R0);
  __ ldr_s32(Rlength, Address(Rclass, Klass::vtable_length_offset()));

  __ add(Rscan, Rclass, in_bytes(Klass::vtable_start_offset()));
  __ add(Rscan, Rscan, AsmOperand(Rlength, lsl, scale));

  // Search through the itable for an interface equal to incoming Ricklass
  // itable looks like [intface][offset][intface][offset][intface][offset]
  const int entry_size = itableOffsetEntry::size() * HeapWordSize;
  assert(itableOffsetEntry::interface_offset_in_bytes() == 0, "not added for convenience");

  Label loop;
  __ bind(loop);
  __ ldr(tmp, Address(Rscan, entry_size, post_indexed));
#ifdef AARCH64
  Label found;
  __ cmp(tmp, Ricklass);
  __ b(found, eq);
  __ cbnz(tmp, loop);
#else
  __ cmp(tmp, Ricklass);  // set ZF and CF if interface is found
  __ cmn(tmp, 0, ne);     // check if tmp == 0 and clear CF if it is
  __ b(loop, ne);
#endif // AARCH64

  assert(StubRoutines::throw_IncompatibleClassChangeError_entry() != NULL, "Check initialization order");
#ifdef AARCH64
  __ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, tmp);
  __ bind(found);
#else
  // CF == 0 means we reached the end of itable without finding icklass
  __ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, noreg, cc);
#endif // !AARCH64

  // Interface found at previous position of Rscan, now load the method oop
  __ ldr_s32(tmp, Address(Rscan, itableOffsetEntry::offset_offset_in_bytes() - entry_size));
  {
    const int method_offset = itableMethodEntry::size() * HeapWordSize * itable_index +
                              itableMethodEntry::method_offset_in_bytes();
    __ add_slow(Rmethod, Rclass, method_offset);
  }
  __ ldr(Rmethod, Address(Rmethod, tmp));

  address ame_addr = __ pc();

#ifdef AARCH64
  __ ldr(tmp, Address(Rmethod, Method::from_compiled_offset()));
  __ br(tmp);
#else
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
#endif // AARCH64

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
                  itable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // FIXME ARM: need correct 'slop' - below is x86 code
  // shut the door on sizing bugs
  //int slop = 8;  // 32-bit offset is this much larger than a 13-bit one
  //assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}

int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
  int instr_count;

  if (is_vtable_stub) {
    // vtable stub size
    instr_count = NOT_AARCH64(4) AARCH64_ONLY(5);
  } else {
    // itable stub size
    instr_count = NOT_AARCH64(20) AARCH64_ONLY(20);
  }

#ifdef AARCH64
  if (UseCompressedClassPointers) {
    instr_count += MacroAssembler::instr_count_for_decode_klass_not_null();
  }
#endif // AARCH64

  return instr_count * Assembler::InstructionSize;
}

int VtableStub::pd_code_alignment() {
  return 8;
}
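In other words, the itable stub walks a table of (interface, offset) pairs appended after the vtable, terminated by a NULL interface entry. A plain C++ model of the scan loop (illustrative only; the struct is a simplified stand-in for the real itableOffsetEntry layout):

  struct ItableOffsetEntry { void* interface; int offset; };

  // Returns the byte offset of icklass's method block within the klass,
  // or -1, which the stub turns into IncompatibleClassChangeError.
  int find_interface_offset(ItableOffsetEntry* scan, void* icklass) {
    for (; scan->interface != NULL; scan++) {
      if (scan->interface == icklass) {
        return scan->offset;
      }
    }
    return -1;
  }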
@@ -72,9 +72,12 @@ combination of ptrace and /proc calls.
 #include <asm/ptrace.h>
 #define user_regs_struct pt_regs
 #endif
-#if defined(aarch64)
+#if defined(aarch64) || defined(arm64)
 #include <asm/ptrace.h>
 #define user_regs_struct user_pt_regs
+#elif defined(arm)
+#include <asm/ptrace.h>
+#define user_regs_struct pt_regs
 #endif
 #if defined(s390x)
 #include <asm/ptrace.h>
hotspot/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp (new file, 265 lines)
@@ -0,0 +1,265 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (standard GPLv2 license header, identical to the one above)
 */

#ifndef OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP
#define OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP

#include "runtime/os.hpp"
#include "vm_version_arm.hpp"

// Implementation of class atomic

/*
 * Atomic long operations on 32-bit ARM
 * ARM v7 supports LDREXD/STREXD synchronization instructions so no problem.
 * ARM < v7 does not have explicit 64-bit atomic load/store capability.
 * However, gcc emits LDRD/STRD instructions on v5te and LDM/STM on v5t
 * when loading/storing 64 bits.
 * For non-MP machines (which is all we support for ARM < v7)
 * under current Linux distros these instructions appear atomic.
 * See section A3.5.3 of ARM Architecture Reference Manual for ARM v7.
 * Also, for cmpxchg64, if ARM < v7 we check for cmpxchg64 support in the
 * Linux kernel using _kuser_helper_version. See entry-armv.S in the Linux
 * kernel source or kernel_user_helpers.txt in Linux Doc.
 */

inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }

inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }

inline jlong Atomic::load (volatile jlong* src) {
  assert(((intx)src & (sizeof(jlong)-1)) == 0, "Atomic load jlong mis-aligned");
#ifdef AARCH64
  return *src;
#else
  return (*os::atomic_load_long_func)(src);
#endif
}

inline void Atomic::store (jlong value, volatile jlong* dest) {
  assert(((intx)dest & (sizeof(jlong)-1)) == 0, "Atomic store jlong mis-aligned");
#ifdef AARCH64
  *dest = value;
#else
  (*os::atomic_store_long_func)(value, dest);
#endif
}

inline void Atomic::store (jlong value, jlong* dest) {
  store(value, (volatile jlong*)dest);
}

// As per atomic.hpp all read-modify-write operations have to provide two-way
// barriers semantics. For AARCH64 we are using load-acquire-with-reservation and
// store-release-with-reservation. While load-acquire combined with store-release
// do not generally form two-way barriers, their use with reservations does - the
// ARMv8 architecture manual Section F "Barrier Litmus Tests" indicates they
// provide sequentially consistent semantics. All we need to add is an explicit
// barrier in the failure path of the cmpxchg operations (as these don't execute
// the store) - arguably this may be overly cautious as there is a very low
// likelihood that the hardware would pull loads/stores into the region guarded
// by the reservation.
//
// For ARMv7 we add explicit barriers in the stubs.

inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef AARCH64
  jint val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[val], [%[dest]]\n\t"
    " add %w[val], %w[val], %w[add_val]\n\t"
    " stlxr %w[tmp], %w[val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [val] "=&r" (val), [tmp] "=&r" (tmp)
    : [add_val] "r" (add_value), [dest] "r" (dest)
    : "memory");
  return val;
#else
  return (*os::atomic_add_func)(add_value, dest);
#endif
}

inline void Atomic::inc(volatile jint* dest) {
  Atomic::add(1, (volatile jint *)dest);
}

inline void Atomic::dec(volatile jint* dest) {
  Atomic::add(-1, (volatile jint *)dest);
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef AARCH64
  intptr_t val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[val], [%[dest]]\n\t"
    " add %[val], %[val], %[add_val]\n\t"
    " stlxr %w[tmp], %[val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [val] "=&r" (val), [tmp] "=&r" (tmp)
    : [add_val] "r" (add_value), [dest] "r" (dest)
    : "memory");
  return val;
#else
  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
#endif
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  Atomic::add_ptr(1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  Atomic::add_ptr(-1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  inc_ptr((volatile intptr_t*)dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  dec_ptr((volatile intptr_t*)dest);
}


inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef AARCH64
  jint old_val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[old_val], [%[dest]]\n\t"
    " stlxr %w[tmp], %w[new_val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
#else
  return (*os::atomic_xchg_func)(exchange_value, dest);
#endif
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
#ifdef AARCH64
  intptr_t old_val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[old_val], [%[dest]]\n\t"
    " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
#else
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
#endif
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering

inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
#ifdef AARCH64
  jint rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[rv], [%[dest]]\n\t"
    " cmp %w[rv], %w[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  // Warning: Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
#endif
}

inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
#ifdef AARCH64
  jlong rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[rv], [%[dest]]\n\t"
    " cmp %[rv], %[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
#endif
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
#ifdef AARCH64
  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
#else
  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
#endif
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
}

#endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP
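Higher-level users all follow the same retry pattern on top of this primitive. A minimal usage sketch (illustrative; 'counter' is a made-up variable, and the cmpxchg_memory_order argument takes its conservative default from the shared atomic.hpp):

  volatile jint counter = 0;

  void increment_via_cas() {
    jint old_val;
    do {
      old_val = counter;
      // cmpxchg returns the value it found at &counter;
      // the store happened iff that value equals old_val.
    } while (Atomic::cmpxchg(old_val + 1, &counter, old_val) != old_val);
  }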
hotspot/src/os_cpu/linux_arm/vm/bytes_linux_arm.inline.hpp (new file, 47 lines)
@@ -0,0 +1,47 @@
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (standard GPLv2 license header, identical to the one above)
 */

#ifndef OS_CPU_LINUX_ARM_VM_BYTES_LINUX_ARM_INLINE_HPP
#define OS_CPU_LINUX_ARM_VM_BYTES_LINUX_ARM_INLINE_HPP

#include <byteswap.h>

// Efficient swapping of data bytes from Java byte
// ordering to native byte ordering and vice versa.
inline u2 Bytes::swap_u2(u2 x) {
  // TODO: ARM - optimize
  return bswap_16(x);
}

inline u4 Bytes::swap_u4(u4 x) {
  // TODO: ARM - optimize
  return bswap_32(x);
}

inline u8 Bytes::swap_u8(u8 x) {
  // TODO: ARM - optimize
  return bswap_64(x);
}

#endif // OS_CPU_LINUX_ARM_VM_BYTES_LINUX_ARM_INLINE_HPP
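Java class files store multi-byte data big-endian while the ARM Linux targets here run little-endian, so each helper is a straight glibc bswap. For example:

  // Bytes::swap_u2(0xCAFE)     == 0xFECA
  // Bytes::swap_u4(0x11223344) == 0x44332211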
127
hotspot/src/os_cpu/linux_arm/vm/copy_linux_arm.inline.hpp
Normal file
127
hotspot/src/os_cpu/linux_arm/vm/copy_linux_arm.inline.hpp
Normal file
@ -0,0 +1,127 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP
|
#define OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP

static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#ifdef AARCH64
  _Copy_conjoint_words(from, to, count * HeapWordSize);
#else
  // NOTE: _Copy_* functions on 32-bit ARM expect "to" and "from" arguments in reversed order
  _Copy_conjoint_words(to, from, count * HeapWordSize);
#endif
}

static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#ifdef AARCH64
  _Copy_disjoint_words(from, to, count * HeapWordSize);
#else
  _Copy_disjoint_words(to, from, count * HeapWordSize);
#endif // AARCH64
}

static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
  pd_disjoint_words(from, to, count);
}

static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_words(from, to, count);
}

static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
  pd_disjoint_words(from, to, count);
}

static void pd_conjoint_bytes(void* from, void* to, size_t count) {
  memmove(to, from, count);
}

static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
  pd_conjoint_bytes(from, to, count);
}

static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
#ifdef AARCH64
  _Copy_conjoint_jshorts_atomic(from, to, count * BytesPerShort);
#else
  _Copy_conjoint_jshorts_atomic(to, from, count * BytesPerShort);
#endif
}

static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
#ifdef AARCH64
  _Copy_conjoint_jints_atomic(from, to, count * BytesPerInt);
#else
  assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size");
  // pd_conjoint_words is word-atomic in this implementation.
  pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count);
#endif
}

static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
#ifdef AARCH64
  assert(HeapWordSize == BytesPerLong, "64-bit architecture");
  pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count);
#else
  _Copy_conjoint_jlongs_atomic(to, from, count * BytesPerLong);
#endif
}

static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
#ifdef AARCH64
  if (UseCompressedOops) {
    assert(BytesPerHeapOop == BytesPerInt, "compressed oops");
    pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
  } else {
    assert(BytesPerHeapOop == BytesPerLong, "64-bit architecture");
    pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
  }
#else
  assert(BytesPerHeapOop == BytesPerInt, "32-bit architecture");
  pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
#endif
}

static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_bytes_atomic((void*)from, (void*)to, count);
}

static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count);
}

static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
}

static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
}

static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
}

#endif // OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP
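A note for readers, not part of the commit: the pd_* wrappers above encode two distinctions — conjoint copies must tolerate overlapping ranges while disjoint copies may assume none, and the 32-bit _Copy_* entry points take their arguments in (to, from) order. A minimal C++ sketch of the conjoint/disjoint contract, assuming only standard library behavior:

    #include <cstddef>
    #include <cstring>

    // Conjoint: ranges may overlap, so use memmove (this is literally what
    // pd_conjoint_bytes does above). Disjoint: the caller guarantees no
    // overlap, so the potentially faster memcpy is permitted.
    void conjoint_bytes(const void* from, void* to, std::size_t count) {
      std::memmove(to, from, count);
    }
    void disjoint_bytes(const void* from, void* to, std::size_t count) {
      std::memcpy(to, from, count);
    }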
hotspot/src/os_cpu/linux_arm/vm/globals_linux_arm.hpp (new file, 49 lines)
@ -0,0 +1,49 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ARM_VM_GLOBALS_LINUX_ARM_HPP
#define OS_CPU_LINUX_ARM_VM_GLOBALS_LINUX_ARM_HPP

//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
//
define_pd_global(bool, DontYieldALot, false);
#ifdef AARCH64
define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
#else
define_pd_global(intx, CompilerThreadStackSize, 512);
// System default ThreadStackSize appears to be 512 which is too big.
define_pd_global(intx, ThreadStackSize, 320);
define_pd_global(intx, VMThreadStackSize, 512);
#endif // AARCH64

define_pd_global(size_t, JVMInvokeMethodSlack, 8192);

// Used on 64 bit platforms for UseCompressedOops base address or CDS
define_pd_global(size_t, HeapBaseMinAddress, 2*G);

#endif // OS_CPU_LINUX_ARM_VM_GLOBALS_LINUX_ARM_HPP
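For context, an illustration rather than HotSpot's actual macro: define_pd_global is the hook by which each platform file supplies the default value for a flag that shared code declares in globals.hpp. A stripped-down sketch of the pattern, with a hypothetical pd_ prefix standing in for the real machinery:

    #include <cstdint>

    // Hypothetical, simplified stand-in for HotSpot's define_pd_global:
    // the platform header expands each use into a definition carrying the
    // platform's chosen default.
    #define define_pd_global(type, name, value) type pd_##name = (value)

    define_pd_global(intptr_t, ThreadStackSize, 320);    // 32-bit ARM default above
    define_pd_global(intptr_t, VMThreadStackSize, 512);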
hotspot/src/os_cpu/linux_arm/vm/linux_arm_32.s (new file, 513 lines)
@ -0,0 +1,513 @@
#
# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#


# NOTE WELL!  The _Copy functions are called directly
# from server-compiler-generated code via CallLeafNoFP,
# which means that they *must* either not use floating
# point or use it in the same manner as does the server
# compiler.

        .globl _Copy_conjoint_bytes
        .type _Copy_conjoint_bytes, %function
        .globl _Copy_arrayof_conjoint_bytes
        .type _Copy_arrayof_conjoint_bytes, %function
        .globl _Copy_disjoint_words
        .type _Copy_disjoint_words, %function
        .globl _Copy_conjoint_words
        .type _Copy_conjoint_words, %function
        .globl _Copy_conjoint_jshorts_atomic
        .type _Copy_conjoint_jshorts_atomic, %function
        .globl _Copy_arrayof_conjoint_jshorts
        .type _Copy_arrayof_conjoint_jshorts, %function
        .globl _Copy_conjoint_jints_atomic
        .type _Copy_conjoint_jints_atomic, %function
        .globl _Copy_arrayof_conjoint_jints
        .type _Copy_arrayof_conjoint_jints, %function
        .globl _Copy_conjoint_jlongs_atomic
        .type _Copy_conjoint_jlongs_atomic, %function
        .globl _Copy_arrayof_conjoint_jlongs
        .type _Copy_arrayof_conjoint_jlongs, %function

        .text
        .globl SpinPause
        .type SpinPause, %function
SpinPause:
        bx      LR

# Support for void Copy::conjoint_bytes(void* from,
#                                       void* to,
#                                       size_t count)
_Copy_conjoint_bytes:
        swi     0x9f0001

# Support for void Copy::arrayof_conjoint_bytes(void* from,
#                                               void* to,
#                                               size_t count)
_Copy_arrayof_conjoint_bytes:
        swi     0x9f0001


# Support for void Copy::disjoint_words(void* from,
#                                       void* to,
#                                       size_t count)
_Copy_disjoint_words:
        stmdb   sp!, {r3 - r9, ip}

        cmp     r2, #0
        beq     disjoint_words_finish

        pld     [r1, #0]
        cmp     r2, #12
        ble     disjoint_words_small

        .align 3
dw_f2b_loop_32:
        subs    r2, #32
        blt     dw_f2b_loop_32_finish
        ldmia   r1!, {r3 - r9, ip}
        nop
        pld     [r1]
        stmia   r0!, {r3 - r9, ip}
        bgt     dw_f2b_loop_32
dw_f2b_loop_32_finish:
        addlts  r2, #32
        beq     disjoint_words_finish
        cmp     r2, #16
        blt     disjoint_words_small
        ldmia   r1!, {r3 - r6}
        subge   r2, r2, #16
        stmia   r0!, {r3 - r6}
        beq     disjoint_words_finish
disjoint_words_small:
        cmp     r2, #8
        ldr     r7, [r1], #4
        ldrge   r8, [r1], #4
        ldrgt   r9, [r1], #4
        str     r7, [r0], #4
        strge   r8, [r0], #4
        strgt   r9, [r0], #4

disjoint_words_finish:
        ldmia   sp!, {r3 - r9, ip}
        bx      lr


# Support for void Copy::conjoint_words(void* from,
#                                       void* to,
#                                       size_t count)
_Copy_conjoint_words:
        stmdb   sp!, {r3 - r9, ip}

        cmp     r2, #0
        beq     conjoint_words_finish

        pld     [r1, #0]
        cmp     r2, #12
        ble     conjoint_words_small

        subs    r3, r0, r1
        cmphi   r2, r3
        bhi     cw_b2f_copy
        .align 3
cw_f2b_loop_32:
        subs    r2, #32
        blt     cw_f2b_loop_32_finish
        ldmia   r1!, {r3 - r9, ip}
        nop
        pld     [r1]
        stmia   r0!, {r3 - r9, ip}
        bgt     cw_f2b_loop_32
cw_f2b_loop_32_finish:
        addlts  r2, #32
        beq     conjoint_words_finish
        cmp     r2, #16
        blt     conjoint_words_small
        ldmia   r1!, {r3 - r6}
        subge   r2, r2, #16
        stmia   r0!, {r3 - r6}
        beq     conjoint_words_finish
conjoint_words_small:
        cmp     r2, #8
        ldr     r7, [r1], #4
        ldrge   r8, [r1], #4
        ldrgt   r9, [r1], #4
        str     r7, [r0], #4
        strge   r8, [r0], #4
        strgt   r9, [r0], #4
        b       conjoint_words_finish

# Src and dest overlap, copy in descending order
cw_b2f_copy:
        add     r1, r2
        pld     [r1, #-32]
        add     r0, r2
        .align 3
cw_b2f_loop_32:
        subs    r2, #32
        blt     cw_b2f_loop_32_finish
        ldmdb   r1!, {r3-r9,ip}
        nop
        pld     [r1, #-32]
        stmdb   r0!, {r3-r9,ip}
        bgt     cw_b2f_loop_32
cw_b2f_loop_32_finish:
        addlts  r2, #32
        beq     conjoint_words_finish
        cmp     r2, #16
        blt     cw_b2f_copy_small
        ldmdb   r1!, {r3 - r6}
        subge   r2, r2, #16
        stmdb   r0!, {r3 - r6}
        beq     conjoint_words_finish
cw_b2f_copy_small:
        cmp     r2, #8
        ldr     r7, [r1, #-4]!
        ldrge   r8, [r1, #-4]!
        ldrgt   r9, [r1, #-4]!
        str     r7, [r0, #-4]!
        strge   r8, [r0, #-4]!
        strgt   r9, [r0, #-4]!

conjoint_words_finish:
        ldmia   sp!, {r3 - r9, ip}
        bx      lr

# Support for void Copy::conjoint_jshorts_atomic(void* from,
#                                                void* to,
#                                                size_t count)
_Copy_conjoint_jshorts_atomic:
        stmdb   sp!, {r3 - r9, ip}

        cmp     r2, #0
        beq     conjoint_shorts_finish

        subs    r3, r0, r1
        cmphi   r2, r3
        bhi     cs_b2f_copy

        pld     [r1]

        ands    r3, r0, #3
        bne     cs_f2b_dest_u
        ands    r3, r1, #3
        bne     cs_f2b_src_u

# Aligned source address
        .align 3
cs_f2b_loop_32:
        subs    r2, #32
        blt     cs_f2b_loop_32_finish
        ldmia   r1!, {r3 - r9, ip}
        nop
        pld     [r1]
        stmia   r0!, {r3 - r9, ip}
        bgt     cs_f2b_loop_32
cs_f2b_loop_32_finish:
        addlts  r2, #32
        beq     conjoint_shorts_finish
        movs    r6, r2, lsr #3
        .align 3
cs_f2b_8_loop:
        beq     cs_f2b_4
        ldmia   r1!, {r4-r5}
        subs    r6, #1
        stmia   r0!, {r4-r5}
        bgt     cs_f2b_8_loop

cs_f2b_4:
        ands    r2, #7
        beq     conjoint_shorts_finish
        cmp     r2, #4
        ldrh    r3, [r1], #2
        ldrgeh  r4, [r1], #2
        ldrgth  r5, [r1], #2
        strh    r3, [r0], #2
        strgeh  r4, [r0], #2
        strgth  r5, [r0], #2
        b       conjoint_shorts_finish

# Destination not aligned
cs_f2b_dest_u:
        ldrh    r3, [r1], #2
        subs    r2, #2
        strh    r3, [r0], #2
        beq     conjoint_shorts_finish

# Check to see if source is not aligned either
        ands    r3, r1, #3
        beq     cs_f2b_loop_32

cs_f2b_src_u:
        cmp     r2, #16
        blt     cs_f2b_8_u

# Load 2 first bytes to r7 and make src ptr word aligned
        bic     r1, #3
        ldr     r7, [r1], #4

# Destination aligned, source not
        mov     r8, r2, lsr #4
        .align 3
cs_f2b_16_u_loop:
        mov     r3, r7, lsr #16
        ldmia   r1!, {r4 - r7}
        orr     r3, r3, r4, lsl #16
        mov     r4, r4, lsr #16
        pld     [r1]
        orr     r4, r4, r5, lsl #16
        mov     r5, r5, lsr #16
        orr     r5, r5, r6, lsl #16
        mov     r6, r6, lsr #16
        orr     r6, r6, r7, lsl #16
        stmia   r0!, {r3 - r6}
        subs    r8, #1
        bgt     cs_f2b_16_u_loop
        ands    r2, #0xf
        beq     conjoint_shorts_finish
        sub     r1, #2

cs_f2b_8_u:
        cmp     r2, #8
        blt     cs_f2b_4_u
        ldrh    r4, [r1], #2
        ldr     r5, [r1], #4
        ldrh    r6, [r1], #2
        orr     r4, r4, r5, lsl #16
        mov     r5, r5, lsr #16
        orr     r5, r5, r6, lsl #16
        subs    r2, #8
        stmia   r0!, {r4 - r5}
cs_f2b_4_u:
        beq     conjoint_shorts_finish
        cmp     r2, #4
        ldrh    r3, [r1], #2
        ldrgeh  r4, [r1], #2
        ldrgth  r5, [r1], #2
        strh    r3, [r0], #2
        strgeh  r4, [r0], #2
        strgth  r5, [r0], #2
        b       conjoint_shorts_finish

# Src and dest overlap, copy in descending order
cs_b2f_copy:
        add     r1, r2
        pld     [r1, #-32]
        add     r0, r2

        ands    r3, r0, #3
        bne     cs_b2f_dest_u
        ands    r3, r1, #3
        bne     cs_b2f_src_u
        .align 3
cs_b2f_loop_32:
        subs    r2, #32
        blt     cs_b2f_loop_32_finish
        ldmdb   r1!, {r3-r9,ip}
        nop
        pld     [r1, #-32]
        stmdb   r0!, {r3-r9,ip}
        bgt     cs_b2f_loop_32
cs_b2f_loop_32_finish:
        addlts  r2, #32
        beq     conjoint_shorts_finish
        cmp     r2, #24
        blt     cs_b2f_16
        ldmdb   r1!, {r3-r8}
        sub     r2, #24
        stmdb   r0!, {r3-r8}
        beq     conjoint_shorts_finish
cs_b2f_16:
        cmp     r2, #16
        blt     cs_b2f_8
        ldmdb   r1!, {r3-r6}
        sub     r2, #16
        stmdb   r0!, {r3-r6}
        beq     conjoint_shorts_finish
cs_b2f_8:
        cmp     r2, #8
        blt     cs_b2f_all_copy
        ldmdb   r1!, {r3-r4}
        sub     r2, #8
        stmdb   r0!, {r3-r4}
        beq     conjoint_shorts_finish

cs_b2f_all_copy:
        cmp     r2, #4
        ldrh    r3, [r1, #-2]!
        ldrgeh  r4, [r1, #-2]!
        ldrgth  r5, [r1, #-2]!
        strh    r3, [r0, #-2]!
        strgeh  r4, [r0, #-2]!
        strgth  r5, [r0, #-2]!
        b       conjoint_shorts_finish

# Destination not aligned
cs_b2f_dest_u:
        ldrh    r3, [r1, #-2]!
        strh    r3, [r0, #-2]!
        sub     r2, #2
# Check source alignment as well
        ands    r3, r1, #3
        beq     cs_b2f_loop_32

# Source not aligned
cs_b2f_src_u:
        bic     r1, #3
        .align 3
cs_b2f_16_loop_u:
        subs    r2, #16
        blt     cs_b2f_16_loop_u_finished
        ldr     r7, [r1]
        mov     r3, r7
        ldmdb   r1!, {r4 - r7}
        mov     r4, r4, lsr #16
        orr     r4, r4, r5, lsl #16
        pld     [r1, #-32]
        mov     r5, r5, lsr #16
        orr     r5, r5, r6, lsl #16
        mov     r6, r6, lsr #16
        orr     r6, r6, r7, lsl #16
        mov     r7, r7, lsr #16
        orr     r7, r7, r3, lsl #16
        stmdb   r0!, {r4 - r7}
        bgt     cs_b2f_16_loop_u
        beq     conjoint_shorts_finish
cs_b2f_16_loop_u_finished:
        addlts  r2, #16
        ldr     r3, [r1]
        cmp     r2, #10
        blt     cs_b2f_2_u_loop
        ldmdb   r1!, {r4 - r5}
        mov     r6, r4, lsr #16
        orr     r6, r6, r5, lsl #16
        mov     r7, r5, lsr #16
        orr     r7, r7, r3, lsl #16
        stmdb   r0!, {r6-r7}
        sub     r2, #8
        .align 3
cs_b2f_2_u_loop:
        subs    r2, #2
        ldrh    r3, [r1], #-2
        strh    r3, [r0, #-2]!
        bgt     cs_b2f_2_u_loop

conjoint_shorts_finish:
        ldmia   sp!, {r3 - r9, ip}
        bx      lr


# Support for void Copy::arrayof_conjoint_jshorts(void* from,
#                                                 void* to,
#                                                 size_t count)
_Copy_arrayof_conjoint_jshorts:
        swi     0x9f0001

# Support for void Copy::conjoint_jints_atomic(void* from,
#                                              void* to,
#                                              size_t count)
_Copy_conjoint_jints_atomic:
_Copy_arrayof_conjoint_jints:
        swi     0x9f0001

# Support for void Copy::conjoint_jlongs_atomic(jlong* from,
#                                               jlong* to,
#                                               size_t count)
_Copy_conjoint_jlongs_atomic:
_Copy_arrayof_conjoint_jlongs:
        stmdb   sp!, {r3 - r9, ip}

        cmp     r2, #0
        beq     conjoint_longs_finish

        pld     [r1, #0]
        cmp     r2, #24
        ble     conjoint_longs_small

        subs    r3, r0, r1
        cmphi   r2, r3
        bhi     cl_b2f_copy
        .align 3
cl_f2b_loop_32:
        subs    r2, #32
        blt     cl_f2b_loop_32_finish
        ldmia   r1!, {r3 - r9, ip}
        nop
        pld     [r1]
        stmia   r0!, {r3 - r9, ip}
        bgt     cl_f2b_loop_32
cl_f2b_loop_32_finish:
        addlts  r2, #32
        beq     conjoint_longs_finish
conjoint_longs_small:
        cmp     r2, #16
        blt     cl_f2b_copy_8
        bgt     cl_f2b_copy_24
        ldmia   r1!, {r3 - r6}
        stmia   r0!, {r3 - r6}
        b       conjoint_longs_finish
cl_f2b_copy_8:
        ldmia   r1!, {r3 - r4}
        stmia   r0!, {r3 - r4}
        b       conjoint_longs_finish
cl_f2b_copy_24:
        ldmia   r1!, {r3 - r8}
        stmia   r0!, {r3 - r8}
        b       conjoint_longs_finish

# Src and dest overlap, copy in descending order
cl_b2f_copy:
        add     r1, r2
        pld     [r1, #-32]
        add     r0, r2
        .align 3
cl_b2f_loop_32:
        subs    r2, #32
        blt     cl_b2f_loop_32_finish
        ldmdb   r1!, {r3 - r9, ip}
        nop
        pld     [r1]
        stmdb   r0!, {r3 - r9, ip}
        bgt     cl_b2f_loop_32
cl_b2f_loop_32_finish:
        addlts  r2, #32
        beq     conjoint_longs_finish
        cmp     r2, #16
        blt     cl_b2f_copy_8
        bgt     cl_b2f_copy_24
        ldmdb   r1!, {r3 - r6}
        stmdb   r0!, {r3 - r6}
        b       conjoint_longs_finish
cl_b2f_copy_8:
        ldmdb   r1!, {r3 - r4}
        stmdb   r0!, {r3 - r4}
        b       conjoint_longs_finish
cl_b2f_copy_24:
        ldmdb   r1!, {r3 - r8}
        stmdb   r0!, {r3 - r8}

conjoint_longs_finish:
        ldmia   sp!, {r3 - r9, ip}
        bx      lr
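Every conjoint entry point above decides between forward and backward copying with the same three-instruction idiom (subs/cmphi/bhi): one unsigned subtraction of from from to, compared against the byte count, detects a destructive forward overlap. A C++ sketch of essentially the same test, offered as an illustration rather than as the commit's code:

    #include <cstddef>
    #include <cstdint>

    // If dst is below src, dst - src wraps to a huge unsigned value and the
    // test fails, which is essentially what "subs r3, r0, r1; cmphi r2, r3;
    // bhi cw_b2f_copy" computes (r0 = to, r1 = from, r2 = byte count).
    static bool needs_backward_copy(const void* dst, const void* src, std::size_t n) {
      return (uintptr_t)dst - (uintptr_t)src < n;
    }

    void copy_words(uint32_t* dst, const uint32_t* src, std::size_t n) {
      if (needs_backward_copy(dst, src, n * sizeof(uint32_t))) {
        for (std::size_t i = n; i-- > 0; ) dst[i] = src[i];  // descending, like cw_b2f_copy
      } else {
        for (std::size_t i = 0; i < n; i++) dst[i] = src[i]; // ascending, like cw_f2b
      }
    }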
hotspot/src/os_cpu/linux_arm/vm/linux_arm_64.s (new file, 542 lines)
@ -0,0 +1,542 @@
#
# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

# TODO-AARCH64

# NOTE WELL!  The _Copy functions are called directly
# from server-compiler-generated code via CallLeafNoFP,
# which means that they *must* either not use floating
# point or use it in the same manner as does the server
# compiler.

        .globl _Copy_conjoint_bytes
        .type _Copy_conjoint_bytes, %function
        .globl _Copy_arrayof_conjoint_bytes
        .type _Copy_arrayof_conjoint_bytes, %function
        .globl _Copy_disjoint_words
        .type _Copy_disjoint_words, %function
        .globl _Copy_conjoint_words
        .type _Copy_conjoint_words, %function
        .globl _Copy_conjoint_jshorts_atomic
        .type _Copy_conjoint_jshorts_atomic, %function
        .globl _Copy_arrayof_conjoint_jshorts
        .type _Copy_arrayof_conjoint_jshorts, %function
        .globl _Copy_conjoint_jints_atomic
        .type _Copy_conjoint_jints_atomic, %function
        .globl _Copy_arrayof_conjoint_jints
        .type _Copy_arrayof_conjoint_jints, %function
        .globl _Copy_conjoint_jlongs_atomic
        .type _Copy_conjoint_jlongs_atomic, %function
        .globl _Copy_arrayof_conjoint_jlongs
        .type _Copy_arrayof_conjoint_jlongs, %function

        .text
        .globl SpinPause
        .type SpinPause, %function
SpinPause:
        yield
        ret

# Support for void Copy::conjoint_bytes(void* from,
#                                       void* to,
#                                       size_t count)
_Copy_conjoint_bytes:
        hlt     1002

# Support for void Copy::arrayof_conjoint_bytes(void* from,
#                                               void* to,
#                                               size_t count)
_Copy_arrayof_conjoint_bytes:
        hlt     1003


# Support for void Copy::disjoint_words(void* from,
#                                       void* to,
#                                       size_t count)
_Copy_disjoint_words:
        # These and further memory prefetches may hit out of array ranges.
        # Experiments showed that prefetching of inaccessible memory doesn't result in exceptions.
        prfm    pldl1keep, [x0, #0]
        prfm    pstl1keep, [x1, #0]
        prfm    pldl1keep, [x0, #64]
        prfm    pstl1keep, [x1, #64]

        subs    x18, x2, #128
        b.ge    dw_large

dw_lt_128:
        # Copy [x0, x0 + x2) to [x1, x1 + x2)

        adr     x15, dw_tail_table_base
        and     x16, x2, #~8

        # Calculate address to jump and store it to x15:
        # Each pair of instructions before dw_tail_table_base copies 16 bytes.
        # x16 is count of bytes to copy aligned down by 16.
        # So x16/16 pairs of instructions should be executed.
        # Each pair takes 8 bytes, so x15 = dw_tail_table_base - (x16/16)*8 = x15 - x16/2
        sub     x15, x15, x16, lsr #1
        prfm    plil1keep, [x15]

        add     x17, x0, x2
        add     x18, x1, x2

        # If x2 = x16 + 8, then copy 8 bytes and x16 bytes after that.
        # Otherwise x2 = x16, so proceed to copy x16 bytes.
        tbz     x2, #3, dw_lt_128_even
        ldr     x3, [x0]
        str     x3, [x1]
dw_lt_128_even:
        # Copy [x17 - x16, x17) to [x18 - x16, x18)
        # x16 is aligned by 16 and less than 128

        # Execute (x16/16) ldp-stp pairs; each pair copies 16 bytes
        br      x15

        ldp     x3,  x4,  [x17, #-112]
        stp     x3,  x4,  [x18, #-112]
        ldp     x5,  x6,  [x17, #-96]
        stp     x5,  x6,  [x18, #-96]
        ldp     x7,  x8,  [x17, #-80]
        stp     x7,  x8,  [x18, #-80]
        ldp     x9,  x10, [x17, #-64]
        stp     x9,  x10, [x18, #-64]
        ldp     x11, x12, [x17, #-48]
        stp     x11, x12, [x18, #-48]
        ldp     x13, x14, [x17, #-32]
        stp     x13, x14, [x18, #-32]
        ldp     x15, x16, [x17, #-16]
        stp     x15, x16, [x18, #-16]
dw_tail_table_base:
        ret

        .p2align 6
        .rept   12
        nop
        .endr
dw_large:
        # x18 >= 0;
        # Copy [x0, x0 + x18 + 128) to [x1, x1 + x18 + 128)

        ldp     x3,  x4,  [x0], #64
        ldp     x5,  x6,  [x0, #-48]
        ldp     x7,  x8,  [x0, #-32]
        ldp     x9,  x10, [x0, #-16]

        # Before and after each iteration of loop registers x3-x10 contain [x0 - 64, x0),
        # and x1 is a place to copy this data;
        # x18 contains number of bytes to be stored minus 128

        # Exactly 16 instructions from p2align, so dw_loop starts from cache line boundary
        # Checking it explicitly by aligning with "hlt 1000" instructions
        .p2alignl 6, 0xd4407d00
dw_loop:
        prfm    pldl1keep, [x0, #64]
        # Next line actually hurt memory copy performance (for interpreter) - JDK-8078120
        # prfm   pstl1keep, [x1, #64]

        subs    x18, x18, #64

        stp     x3,  x4,  [x1, #0]
        ldp     x3,  x4,  [x0, #0]
        stp     x5,  x6,  [x1, #16]
        ldp     x5,  x6,  [x0, #16]
        stp     x7,  x8,  [x1, #32]
        ldp     x7,  x8,  [x0, #32]
        stp     x9,  x10, [x1, #48]
        ldp     x9,  x10, [x0, #48]

        add     x1, x1, #64
        add     x0, x0, #64

        b.ge    dw_loop

        # 13 instructions from dw_loop, so the loop body fits into one cache line

dw_loop_end:
        adds    x2, x18, #64

        stp     x3,  x4,  [x1], #64
        stp     x5,  x6,  [x1, #-48]
        stp     x7,  x8,  [x1, #-32]
        stp     x9,  x10, [x1, #-16]

        # Increased x18 by 64, but stored 64 bytes, so x2 contains exact number of bytes to be stored

        # If this number is not zero, also copy remaining bytes
        b.ne    dw_lt_128
        ret


# Support for void Copy::conjoint_words(void* from,
#                                       void* to,
#                                       size_t count)
_Copy_conjoint_words:
        subs    x3, x1, x0
        # hi condition is met <=> from < to
        ccmp    x2, x3, #0, hi
        # hi condition is met <=> (from < to) and (to - from < count)
        # otherwise _Copy_disjoint_words may be used, because it performs forward copying,
        # so it also works when ranges overlap but to <= from
        b.ls    _Copy_disjoint_words

        # Overlapping case should be the rare one, it is not worth optimizing

        ands    x3, x2, #~8
        # x3 is count aligned down by 2*wordSize
        add     x0, x0, x2
        add     x1, x1, x2
        sub     x3, x3, #16
        # Skip loop if 0 or 1 words
        b.eq    cw_backward_loop_end

        # x3 >= 0
        # Copy [x0 - x3 - 16, x0) to [x1 - x3 - 16, x1) backward
cw_backward_loop:
        subs    x3, x3, #16
        ldp     x4, x5, [x0, #-16]!
        stp     x4, x5, [x1, #-16]!
        b.ge    cw_backward_loop

cw_backward_loop_end:
        # Copy remaining 0 or 1 words
        tbz     x2, #3, cw_finish
        ldr     x3, [x0, #-8]
        str     x3, [x1, #-8]

cw_finish:
        ret


# Support for void Copy::conjoint_jshorts_atomic(void* from,
#                                                void* to,
#                                                size_t count)
_Copy_conjoint_jshorts_atomic:
        add     x17, x0, x2
        add     x18, x1, x2

        subs    x3, x1, x0
        # hi is met <=> (from < to) and (to - from < count)
        ccmp    x2, x3, #0, hi
        b.hi    cs_backward

        subs    x3, x2, #14
        b.ge    cs_forward_loop

        # Copy x2 < 14 bytes from x0 to x1
cs_forward_lt14:
        ands    x7, x2, #7
        tbz     x2, #3, cs_forward_lt8
        ldrh    w3, [x0, #0]
        ldrh    w4, [x0, #2]
        ldrh    w5, [x0, #4]
        ldrh    w6, [x0, #6]

        strh    w3, [x1, #0]
        strh    w4, [x1, #2]
        strh    w5, [x1, #4]
        strh    w6, [x1, #6]

        # Copy x7 < 8 bytes from x17 - x7 to x18 - x7
cs_forward_lt8:
        b.eq    cs_forward_0
        cmp     x7, #4
        b.lt    cs_forward_2
        b.eq    cs_forward_4

cs_forward_6:
        ldrh    w3, [x17, #-6]
        strh    w3, [x18, #-6]
cs_forward_4:
        ldrh    w4, [x17, #-4]
        strh    w4, [x18, #-4]
cs_forward_2:
        ldrh    w5, [x17, #-2]
        strh    w5, [x18, #-2]
cs_forward_0:
        ret


        # Copy [x0, x0 + x3 + 14) to [x1, x1 + x3 + 14)
        # x3 >= 0
        .p2align 6
cs_forward_loop:
        subs    x3, x3, #14

        ldrh    w4,  [x0], #14
        ldrh    w5,  [x0, #-12]
        ldrh    w6,  [x0, #-10]
        ldrh    w7,  [x0, #-8]
        ldrh    w8,  [x0, #-6]
        ldrh    w9,  [x0, #-4]
        ldrh    w10, [x0, #-2]

        strh    w4,  [x1], #14
        strh    w5,  [x1, #-12]
        strh    w6,  [x1, #-10]
        strh    w7,  [x1, #-8]
        strh    w8,  [x1, #-6]
        strh    w9,  [x1, #-4]
        strh    w10, [x1, #-2]

        b.ge    cs_forward_loop
        # Exactly 16 instructions from cs_forward_loop, so the loop fits into one cache line

        adds    x2, x3, #14
        # x2 bytes should be copied from x0 to x1
        b.ne    cs_forward_lt14
        ret

        # Very similar to forward copying
cs_backward:
        subs    x3, x2, #14
        b.ge    cs_backward_loop

cs_backward_lt14:
        ands    x7, x2, #7
        tbz     x2, #3, cs_backward_lt8

        ldrh    w3, [x17, #-8]
        ldrh    w4, [x17, #-6]
        ldrh    w5, [x17, #-4]
        ldrh    w6, [x17, #-2]

        strh    w3, [x18, #-8]
        strh    w4, [x18, #-6]
        strh    w5, [x18, #-4]
        strh    w6, [x18, #-2]

cs_backward_lt8:
        b.eq    cs_backward_0
        cmp     x7, #4
        b.lt    cs_backward_2
        b.eq    cs_backward_4

cs_backward_6:
        ldrh    w3, [x0, #4]
        strh    w3, [x1, #4]

cs_backward_4:
        ldrh    w4, [x0, #2]
        strh    w4, [x1, #2]

cs_backward_2:
        ldrh    w5, [x0, #0]
        strh    w5, [x1, #0]

cs_backward_0:
        ret


        .p2align 6
cs_backward_loop:
        subs    x3, x3, #14

        ldrh    w4,  [x17, #-14]!
        ldrh    w5,  [x17, #2]
        ldrh    w6,  [x17, #4]
        ldrh    w7,  [x17, #6]
        ldrh    w8,  [x17, #8]
        ldrh    w9,  [x17, #10]
        ldrh    w10, [x17, #12]

        strh    w4,  [x18, #-14]!
        strh    w5,  [x18, #2]
        strh    w6,  [x18, #4]
        strh    w7,  [x18, #6]
        strh    w8,  [x18, #8]
        strh    w9,  [x18, #10]
        strh    w10, [x18, #12]

        b.ge    cs_backward_loop
        adds    x2, x3, #14
        b.ne    cs_backward_lt14
        ret


# Support for void Copy::arrayof_conjoint_jshorts(void* from,
#                                                 void* to,
#                                                 size_t count)
_Copy_arrayof_conjoint_jshorts:
        hlt     1007


# Support for void Copy::conjoint_jlongs_atomic(jlong* from,
#                                               jlong* to,
#                                               size_t count)
_Copy_conjoint_jlongs_atomic:
_Copy_arrayof_conjoint_jlongs:
        hlt     1009


# Support for void Copy::conjoint_jints_atomic(void* from,
#                                              void* to,
#                                              size_t count)
_Copy_conjoint_jints_atomic:
_Copy_arrayof_conjoint_jints:
        # These and further memory prefetches may hit out of array ranges.
        # Experiments showed that prefetching of inaccessible memory doesn't result in exceptions.
        prfm    pldl1keep, [x0, #0]
        prfm    pstl1keep, [x1, #0]
        prfm    pldl1keep, [x0, #32]
        prfm    pstl1keep, [x1, #32]

        subs    x3, x1, x0
        # hi condition is met <=> from < to
        ccmp    x2, x3, #0, hi
        # hi condition is met <=> (from < to) and (to - from < count)
        b.hi    ci_backward

        subs    x18, x2, #64
        b.ge    ci_forward_large

ci_forward_lt_64:
        # Copy [x0, x0 + x2) to [x1, x1 + x2)

        adr     x15, ci_forward_tail_table_base
        and     x16, x2, #~4

        # Calculate address to jump and store it to x15:
        # Each pair of instructions before ci_forward_tail_table_base copies 8 bytes.
        # x16 is count of bytes to copy aligned down by 8.
        # So x16/8 pairs of instructions should be executed.
        # Each pair takes 8 bytes, so x15 = ci_forward_tail_table_base - (x16/8)*8 = x15 - x16
        sub     x15, x15, x16
        prfm    plil1keep, [x15]

        add     x17, x0, x2
        add     x18, x1, x2

        # If x2 = x16 + 4, then copy 4 bytes and x16 bytes after that.
        # Otherwise x2 = x16, so proceed to copy x16 bytes.
        tbz     x2, #2, ci_forward_lt_64_even
        ldr     w3, [x0]
        str     w3, [x1]
ci_forward_lt_64_even:
        # Copy [x17 - x16, x17) to [x18 - x16, x18)
        # x16 is aligned by 8 and less than 64

        # Execute (x16/8) ldp-stp pairs; each pair copies 8 bytes
        br      x15

        ldp     w3,  w4,  [x17, #-56]
        stp     w3,  w4,  [x18, #-56]
        ldp     w5,  w6,  [x17, #-48]
        stp     w5,  w6,  [x18, #-48]
        ldp     w7,  w8,  [x17, #-40]
        stp     w7,  w8,  [x18, #-40]
        ldp     w9,  w10, [x17, #-32]
        stp     w9,  w10, [x18, #-32]
        ldp     w11, w12, [x17, #-24]
        stp     w11, w12, [x18, #-24]
        ldp     w13, w14, [x17, #-16]
        stp     w13, w14, [x18, #-16]
        ldp     w15, w16, [x17, #-8]
        stp     w15, w16, [x18, #-8]
ci_forward_tail_table_base:
        ret

        .p2align 6
        .rept   12
        nop
        .endr
ci_forward_large:
        # x18 >= 0;
        # Copy [x0, x0 + x18 + 64) to [x1, x1 + x18 + 64)

        ldp     w3,  w4,  [x0], #32
        ldp     w5,  w6,  [x0, #-24]
        ldp     w7,  w8,  [x0, #-16]
        ldp     w9,  w10, [x0, #-8]

        # Before and after each iteration of loop registers w3-w10 contain [x0 - 32, x0),
        # and x1 is a place to copy this data;
        # x18 contains number of bytes to be stored minus 64

        # Exactly 16 instructions from p2align, so ci_forward_loop starts from cache line boundary
        # Checking it explicitly by aligning with "hlt 1000" instructions
        .p2alignl 6, 0xd4407d00
ci_forward_loop:
        prfm    pldl1keep, [x0, #32]
        prfm    pstl1keep, [x1, #32]

        subs    x18, x18, #32

        stp     w3,  w4,  [x1, #0]
        ldp     w3,  w4,  [x0, #0]
        stp     w5,  w6,  [x1, #8]
        ldp     w5,  w6,  [x0, #8]
        stp     w7,  w8,  [x1, #16]
        ldp     w7,  w8,  [x0, #16]
        stp     w9,  w10, [x1, #24]
        ldp     w9,  w10, [x0, #24]

        add     x1, x1, #32
        add     x0, x0, #32

        b.ge    ci_forward_loop

        # 14 instructions from ci_forward_loop, so the loop body fits into one cache line

ci_forward_loop_end:
        adds    x2, x18, #32

        stp     w3,  w4,  [x1], #32
        stp     w5,  w6,  [x1, #-24]
        stp     w7,  w8,  [x1, #-16]
        stp     w9,  w10, [x1, #-8]

        # Increased x18 by 32, but stored 32 bytes, so x2 contains exact number of bytes to be stored

        # If this number is not zero, also copy remaining bytes
        b.ne    ci_forward_lt_64
        ret

ci_backward:

        # Overlapping case should be the rare one, it is not worth optimizing

        ands    x3, x2, #~4
        # x3 is count aligned down by 2*jintSize
        add     x0, x0, x2
        add     x1, x1, x2
        sub     x3, x3, #8
        # Skip loop if 0 or 1 jints
        b.eq    ci_backward_loop_end

        # x3 >= 0
        # Copy [x0 - x3 - 8, x0) to [x1 - x3 - 8, x1) backward
ci_backward_loop:
        subs    x3, x3, #8
        ldp     w4, w5, [x0, #-8]!
        stp     w4, w5, [x1, #-8]!
        b.ge    ci_backward_loop

ci_backward_loop_end:
        # Copy remaining 0 or 1 jints
        tbz     x2, #2, ci_backward_finish
        ldr     w3, [x0, #-4]
        str     w3, [x1, #-4]

ci_backward_finish:
        ret
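The tail handling in _Copy_disjoint_words and the jints copy jumps into the middle of an ldp/stp ladder so that exactly x16/16 (or x16/8) pairs execute. The portable analogue of branching into an unrolled tail is a fall-through switch, Duff's-device style; a C++ sketch of the idea under that analogy (illustrative, 8-byte words, not the commit's code):

    #include <cstddef>
    #include <cstdint>

    // Copy up to 4 doublewords by entering an unrolled tail at the right
    // depth, analogous to "br x15" into the ladder before dw_tail_table_base.
    void tail_copy(uint64_t* to, const uint64_t* from, std::size_t words) {
      const uint64_t* f = from + words;  // like x17 = x0 + x2
      uint64_t* t = to + words;          // like x18 = x1 + x2
      switch (words) {                   // fall through on purpose
        case 4: t[-4] = f[-4];  // fallthrough
        case 3: t[-3] = f[-3];  // fallthrough
        case 2: t[-2] = f[-2];  // fallthrough
        case 1: t[-1] = f[-1];  // fallthrough
        case 0: break;
      }
    }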
hotspot/src/os_cpu/linux_arm/vm/macroAssembler_linux_arm_32.cpp (new file, 329 lines)
@ -0,0 +1,329 @@
/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"

void MacroAssembler::breakpoint(AsmCondition cond) {
  if (cond == al) {
    emit_int32(0xe7f001f0);
  } else {
    call(CAST_FROM_FN_PTR(address, os::breakpoint), relocInfo::runtime_call_type, cond);
  }
}

// atomic_cas_bool
//
// Perform an atomic compare and exchange and return bool result
//
// inputs:
//         oldval value to compare to
//         newval value to store if *(base+offset) == oldval
//         base   base address of storage location
//         offset offset added to base to form dest address
// output:
//         Z flag is set on success

void MacroAssembler::atomic_cas_bool(Register oldval, Register newval, Register base, int offset, Register tmpreg) {
  if (VM_Version::supports_ldrex()) {
    Register tmp_reg;
    if (tmpreg == noreg) {
      push(LR);
      tmp_reg = LR;
    } else {
      tmp_reg = tmpreg;
    }
    assert_different_registers(tmp_reg, oldval, newval, base);
    Label loop;
    bind(loop);
    ldrex(tmp_reg, Address(base, offset));
    subs(tmp_reg, tmp_reg, oldval);
    strex(tmp_reg, newval, Address(base, offset), eq);
    cmp(tmp_reg, 1, eq);
    b(loop, eq);
    cmp(tmp_reg, 0);
    if (tmpreg == noreg) {
      pop(tmp_reg);
    }
  } else if (VM_Version::supports_kuser_cmpxchg32()) {
    // On armv5 platforms we must use the Linux kernel helper
    // function for atomic cas operations since ldrex/strex is
    // not supported.
    //
    // This is a special routine at a fixed address 0xffff0fc0
    // with these arguments and results
    //
    // input:
    //  r0 = oldval, r1 = newval, r2 = ptr, lr = return address
    // output:
    //  r0 = 0 carry set on success
    //  r0 != 0 carry clear on failure
    //
    // r3, ip and flags are clobbered
    //

    Label loop;

    push(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(LR));

    Register tmp_reg = LR; // ignore the argument

    assert_different_registers(tmp_reg, oldval, newval, base);

    // Shuffle registers for kernel call
    if (oldval != R0) {
      if (newval == R0) {
        mov(tmp_reg, newval);
        newval = tmp_reg;
      }
      if (base == R0) {
        mov(tmp_reg, base);
        base = tmp_reg;
      }
      mov(R0, oldval);
    }
    if (newval != R1) {
      if (base == R1) {
        if (newval == R2) {
          mov(tmp_reg, base);
          base = tmp_reg;
        } else {
          mov(R2, base);
          base = R2;
        }
      }
      mov(R1, newval);
    }
    if (base != R2) {
      mov(R2, base);
    }

    if (offset != 0) {
      add(R2, R2, offset);
    }

    mvn(R3, 0xf000);
    mov(LR, PC);
    sub(PC, R3, 0x3f);
    cmp(R0, 0);

    pop(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(LR));
  } else {
    // Should never run on a platform so old that it does not have kernel helper
    stop("Atomic cmpxchg32 unsupported on this platform");
  }
}
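For context, the fixed-address routine called above (via mvn/mov/sub on PC) is the Linux ARM "kuser" cmpxchg helper. A minimal C++ sketch of calling the same helper directly from user space, assuming the ABI documented in the kernel's kernel_user_helpers notes (helper page mapped at a fixed address, zero return on success); an illustration, not part of the commit:

    // Documented Linux/ARM kuser helper: int fn(int oldval, int newval,
    // volatile int* ptr), located at the fixed address 0xffff0fc0.
    typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int* ptr);
    #define kuser_cmpxchg (*(kuser_cmpxchg_t*)0xffff0fc0)

    // Returns 1 when *ptr was oldval and has been replaced by newval.
    int cas_int(volatile int* ptr, int oldval, int newval) {
      return kuser_cmpxchg(oldval, newval, ptr) == 0;
    }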

// atomic_cas
//
// Perform an atomic compare and exchange and return previous value
//
// inputs:
//         prev temporary register (destroyed)
//         oldval value to compare to
//         newval value to store if *(base+offset) == oldval
//         base   base address of storage location
//         offset offset added to base to form dest address
// output:
//         returns previous value from *(base+offset) in R0

void MacroAssembler::atomic_cas(Register temp1, Register temp2, Register oldval, Register newval, Register base, int offset) {
  if (temp1 != R0) {
    // try to read the previous value directly in R0
    if (temp2 == R0) {
      // R0 declared free
      temp2 = temp1;
      temp1 = R0;
    } else if ((oldval != R0) && (newval != R0) && (base != R0)) {
      // free, and scratched on return
      temp1 = R0;
    }
  }
  if (VM_Version::supports_ldrex()) {
    Label loop;
    assert_different_registers(temp1, temp2, oldval, newval, base);

    bind(loop);
    ldrex(temp1, Address(base, offset));
    cmp(temp1, oldval);
    strex(temp2, newval, Address(base, offset), eq);
    cmp(temp2, 1, eq);
    b(loop, eq);
    if (temp1 != R0) {
      mov(R0, temp1);
    }
  } else if (VM_Version::supports_kuser_cmpxchg32()) {
    // On armv5 platforms we must use the Linux kernel helper
    // function for atomic cas operations since ldrex/strex is
    // not supported.
    //
    // This is a special routine at a fixed address 0xffff0fc0
    //
    // input:
    //  r0 = oldval, r1 = newval, r2 = ptr, lr = return address
    // output:
    //  r0 = 0 carry set on success
    //  r0 != 0 carry clear on failure
    //
    // r3, ip and flags are clobbered
    //
    Label done;
    Label loop;

    push(RegisterSet(R1, R4) | RegisterSet(R12) | RegisterSet(LR));

    if (oldval != R0 || newval != R1 || base != R2) {
      push(oldval);
      push(newval);
      push(base);
      pop(R2);
      pop(R1);
      pop(R0);
    }

    if (offset != 0) {
      add(R2, R2, offset);
    }

    mov(R4, R0);
    bind(loop);
    ldr(R0, Address(R2));
    cmp(R0, R4);
    b(done, ne);
    mvn(R12, 0xf000);
    mov(LR, PC);
    sub(PC, R12, 0x3f);
    b(loop, cc);
    mov(R0, R4);
    bind(done);

    pop(RegisterSet(R1, R4) | RegisterSet(R12) | RegisterSet(LR));
  } else {
    // Should never run on a platform so old that it does not have kernel helper
    stop("Atomic cmpxchg32 unsupported on this platform");
  }
}

// atomic_cas64
//
// Perform a 64 bit atomic compare and exchange and return previous value
// as well as returning status in 'result' register
//
// inputs:
//         oldval_lo, oldval_hi value to compare to
//         newval_lo, newval_hi value to store if *(base+offset) == oldval
//         base     base address of storage location
//         offset   offset added to base to form dest address
// output:
//         memval_lo, memval_hi, result
//         returns previous value from *(base+offset) in memval_lo/hi
//         returns status in result, 1==success, 0==failure
//         C1 just uses status result
//         VM code uses previous value returned in memval_lo/hi

void MacroAssembler::atomic_cas64(Register memval_lo, Register memval_hi, Register result, Register oldval_lo, Register oldval_hi, Register newval_lo, Register newval_hi, Register base, int offset) {
  if (VM_Version::supports_ldrexd()) {
    Label loop;
    assert_different_registers(memval_lo, memval_hi, result, oldval_lo,
                               oldval_hi, newval_lo, newval_hi, base);
    assert(memval_hi == memval_lo + 1 && memval_lo < R9, "cmpxchg_long: illegal registers");
    assert(oldval_hi == oldval_lo + 1 && oldval_lo < R9, "cmpxchg_long: illegal registers");
    assert(newval_hi == newval_lo + 1 && newval_lo < R9, "cmpxchg_long: illegal registers");
    assert(result != R10, "cmpxchg_long: illegal registers");
    assert(base != R10, "cmpxchg_long: illegal registers");

    mov(result, 0);
    bind(loop);
    ldrexd(memval_lo, Address(base, offset));
    cmp(memval_lo, oldval_lo);
    cmp(memval_hi, oldval_hi, eq);
    strexd(result, newval_lo, Address(base, offset), eq);
    rsbs(result, result, 1, eq);
    b(loop, eq);
  } else if (VM_Version::supports_kuser_cmpxchg64()) {
    // On armv5 platforms we must use the Linux kernel helper
    // function for atomic cas64 operations since ldrexd/strexd is
    // not supported.
    //
    // This is a special routine at a fixed address 0xffff0f60
    //
    // input:
    //  r0 = (long long *)oldval, r1 = (long long *)newval,
    //  r2 = ptr, lr = return address
    // output:
    //  r0 = 0 carry set on success
    //  r0 != 0 carry clear on failure
    //
    // r3, and flags are clobbered
    //
    Label done;
    Label loop;

    if (result != R12) {
      push(R12);
    }
    push(RegisterSet(R10) | RegisterSet(LR));
    mov(R10, SP); // Save SP

    bic(SP, SP, StackAlignmentInBytes - 1); // align stack
    push(RegisterSet(oldval_lo, oldval_hi));
    push(RegisterSet(newval_lo, newval_hi));

    if ((offset != 0) || (base != R12)) {
      add(R12, base, offset);
    }
    push(RegisterSet(R0, R3));
    bind(loop);
    ldrd(memval_lo, Address(R12)); // current
    ldrd(oldval_lo, Address(SP, 24));
    cmp(memval_lo, oldval_lo);
    cmp(memval_hi, oldval_hi, eq);
    pop(RegisterSet(R0, R3), ne);
    mov(result, 0, ne);
    b(done, ne);
    // Setup for kernel call
    mov(R2, R12);
    add(R0, SP, 24);   // R0 == &oldval_lo
    add(R1, SP, 16);   // R1 == &newval_lo
    mvn(R3, 0xf000);   // call kernel helper at 0xffff0f60
    mov(LR, PC);
    sub(PC, R3, 0x9f);
    b(loop, cc);       // if Carry clear then oldval != current
                       // try again. Otherwise, return oldval
    // Here on success
    pop(RegisterSet(R0, R3));
    mov(result, 1);
    ldrd(memval_lo, Address(SP, 8));
    bind(done);
    pop(RegisterSet(newval_lo, newval_hi));
    pop(RegisterSet(oldval_lo, oldval_hi));
    mov(SP, R10); // restore SP
    pop(RegisterSet(R10) | RegisterSet(LR));
    if (result != R12) {
      pop(R12);
    }
  } else {
    stop("Atomic cmpxchg64 unsupported on this platform");
  }
}
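The contract atomic_cas64 implements is an ordinary double-word compare-and-swap: previous value out, success flag back. A minimal portable sketch of that same contract using the standard library rather than HotSpot's assembler (illustration only):

    #include <atomic>
    #include <cstdint>

    // Returns true on success; 'expected' is updated to the value actually
    // observed in memory, mirroring the memval_lo/hi outputs above.
    bool cas64(std::atomic<int64_t>& cell, int64_t& expected, int64_t desired) {
      return cell.compare_exchange_strong(expected, desired);
    }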
hotspot/src/os_cpu/linux_arm/vm/orderAccess_linux_arm.inline.hpp (new file, 221 lines)
@ -0,0 +1,221 @@
/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
#define OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP

#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "vm_version_arm.hpp"

// Implementation of class OrderAccess.
// - we define the high level barriers below and use the general
//   implementation in orderAccess.inline.hpp, with customizations
//   on AARCH64 via the specialized_* template functions
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1

// Memory Ordering on ARM is weak.
//
// Implement all 4 memory ordering barriers by DMB, since it is a
// lighter version of DSB.
// dmb_sy implies full system shareability domain. RD/WR access type.
// dmb_st implies full system shareability domain. WR only access type.
//
// NOP on < ARMv6 (MP not supported)
//
// Non-mcr instructions can be used if we build for armv7 or higher arch
//    __asm__ __volatile__ ("dmb" : : : "memory");
//    __asm__ __volatile__ ("dsb" : : : "memory");
//
// inline void _OrderAccess_dsb() {
//    volatile intptr_t dummy = 0;
//    if (os::is_MP()) {
//      __asm__ volatile (
//        "mcr p15, 0, %0, c7, c10, 4"
//        : : "r" (dummy) : "memory");
//    }
// }

inline static void dmb_sy() {
  if (!os::is_MP()) {
    return;
  }
#ifdef AARCH64
  __asm__ __volatile__ ("dmb sy" : : : "memory");
#else
  if (VM_Version::arm_arch() >= 7) {
#ifdef __thumb__
    __asm__ volatile (
      "dmb sy": : : "memory");
#else
    __asm__ volatile (
      ".word 0xF57FF050 | 0xf" : : : "memory");
#endif
  } else {
    intptr_t zero = 0;
    __asm__ volatile (
      "mcr p15, 0, %0, c7, c10, 5"
      : : "r" (zero) : "memory");
  }
#endif
}

inline static void dmb_st() {
  if (!os::is_MP()) {
    return;
  }
#ifdef AARCH64
  __asm__ __volatile__ ("dmb st" : : : "memory");
#else
  if (VM_Version::arm_arch() >= 7) {
#ifdef __thumb__
    __asm__ volatile (
      "dmb st": : : "memory");
#else
    __asm__ volatile (
      ".word 0xF57FF050 | 0xe" : : : "memory");
#endif
  } else {
    intptr_t zero = 0;
    __asm__ volatile (
      "mcr p15, 0, %0, c7, c10, 5"
      : : "r" (zero) : "memory");
  }
#endif
}

// Load-Load/Store barrier
inline static void dmb_ld() {
#ifdef AARCH64
  if (!os::is_MP()) {
    return;
  }
  __asm__ __volatile__ ("dmb ld" : : : "memory");
#else
  dmb_sy();
#endif
}


inline void OrderAccess::loadload()   { dmb_ld(); }
inline void OrderAccess::loadstore()  { dmb_ld(); }
inline void OrderAccess::acquire()    { dmb_ld(); }
inline void OrderAccess::storestore() { dmb_st(); }
inline void OrderAccess::storeload()  { dmb_sy(); }
inline void OrderAccess::release()    { dmb_sy(); }
inline void OrderAccess::fence()      { dmb_sy(); }

// specializations for AArch64
// TODO-AARCH64: evaluate effectiveness of ldar*/stlr* implementations compared to 32-bit ARM approach

#ifdef AARCH64

template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte>(volatile jbyte* p) {
  volatile jbyte result;
  __asm__ volatile(
    "ldarb %w[res], [%[ptr]]"
    : [res] "=&r" (result)
    : [ptr] "r" (p)
    : "memory");
  return result;
}

template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(volatile jshort* p) {
  volatile jshort result;
  __asm__ volatile(
    "ldarh %w[res], [%[ptr]]"
    : [res] "=&r" (result)
    : [ptr] "r" (p)
    : "memory");
  return result;
}

template<> inline jint OrderAccess::specialized_load_acquire<jint>(volatile jint* p) {
  volatile jint result;
  __asm__ volatile(
    "ldar %w[res], [%[ptr]]"
    : [res] "=&r" (result)
    : [ptr] "r" (p)
    : "memory");
  return result;
}

template<> inline jfloat OrderAccess::specialized_load_acquire<jfloat>(volatile jfloat* p) {
  return jfloat_cast(specialized_load_acquire((volatile jint*)p));
}

// This is implicit as jlong and intptr_t are both "long int"
//template<> inline jlong OrderAccess::specialized_load_acquire(volatile jlong* p) {
//  return (volatile jlong)specialized_load_acquire((volatile intptr_t*)p);
//}

template<> inline intptr_t OrderAccess::specialized_load_acquire<intptr_t>(volatile intptr_t* p) {
  volatile intptr_t result;
  __asm__ volatile(
    "ldar %[res], [%[ptr]]"
    : [res] "=&r" (result)
    : [ptr] "r" (p)
    : "memory");
  return result;
}

template<> inline jdouble OrderAccess::specialized_load_acquire<jdouble>(volatile jdouble* p) {
  return jdouble_cast(specialized_load_acquire((volatile intptr_t*)p));
}


template<> inline void OrderAccess::specialized_release_store<jbyte>(volatile jbyte* p, jbyte v) {
  __asm__ volatile(
    "stlrb %w[val], [%[ptr]]"
    :
    : [ptr] "r" (p), [val] "r" (v)
    : "memory");
}

template<> inline void OrderAccess::specialized_release_store<jshort>(volatile jshort* p, jshort v) {
  __asm__ volatile(
    "stlrh %w[val], [%[ptr]]"
    :
    : [ptr] "r" (p), [val] "r" (v)
    : "memory");
}

template<> inline void OrderAccess::specialized_release_store<jint>(volatile jint* p, jint v) {
  __asm__ volatile(
    "stlr %w[val], [%[ptr]]"
    :
    : [ptr] "r" (p), [val] "r" (v)
    : "memory");
}

template<> inline void OrderAccess::specialized_release_store<jlong>(volatile jlong* p, jlong v) {
  __asm__ volatile(
    "stlr %[val], [%[ptr]]"
    :
    : [ptr] "r" (p), [val] "r" (v)
    : "memory");
}
#endif // AARCH64

#endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
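A closing note for readers (not part of the commit): the ldar/stlr specializations above correspond to the acquire-load and release-store of the C++ memory model, and the DMB-based barriers to its fences. A small self-contained sketch of the same semantics; compilers typically lower the acquire load to "ldar" and the release store to "stlr" on AArch64, and the release fence to a "dmb" variant on 32-bit ARM:

    #include <atomic>
    #include <cstdint>

    // Acquire load: later accesses cannot be reordered before it (like
    // specialized_load_acquire, typically "ldar" on AArch64).
    int32_t take(const std::atomic<int32_t>& flag) {
      return flag.load(std::memory_order_acquire);
    }

    // Release store: earlier accesses cannot be reordered after it (like
    // specialized_release_store, typically "stlr" on AArch64).
    void give(std::atomic<int32_t>& flag, int32_t v) {
      flag.store(v, std::memory_order_release);
    }

    // Standalone fence, the portable analogue of the dmb_* helpers.
    void full_fence() {
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }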