J. Duke 2017-07-05 22:28:45 +02:00
commit c35d31d372
365 changed files with 64833 additions and 3241 deletions
.hgtags-top-repo
corba
hotspot
.hgtags
.mx.jvmci
make
src/cpu
aarch64/vm
ppc/vm
s390/vm

@@ -387,3 +387,4 @@ f64afae7f1a5608e438585bbf0bc23785e69cba0 jdk-9+141
2b3e5caafe3594ea507c37675c4d3086f415dc64 jdk-9+142
1fc62b1c629fb80fdaa639d3b59452a184f0d705 jdk-9+143
8d337fd6333e28c48aa87880144b840aad82baaf jdk-9+144
ff98aa9ec9fae991e426ce5926fc9036d25f5562 jdk-9+145

@@ -387,3 +387,4 @@ b32f998da32b488ec7c4e9dbb3c750841b48e74d jdk-9+141
408c9c621938ca028e20bced0459f815de47eba8 jdk-9+142
6211236ef15ec796806357608b1dd1b70c258ece jdk-9+143
d4f1dae174098e799c48948e866054c52e11a186 jdk-9+144
a44b156ae7f06bf41b9bece30df7775e482395dd jdk-9+145

@@ -547,3 +547,4 @@ fec31089c2ef5a12dd64f401b0bf2e00f56ee0d0 jdk-9+140
7b48d63dfd6b8e2657288de3d7b1f153dee02d7e jdk-9+142
d87d5d430c42342f0320ca7f5cbe0cbd1f9d62ba jdk-9+143
6187b582d02aee38341dc8ce4011906e9b364e9f jdk-9+144
61e7ea56312351657e69198c503a6f7bf865af83 jdk-9+145

@@ -1,11 +1,9 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<pydev_project>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/mx.jvmci</path>
<path>/.mx.jvmci</path>
</pydev_pathproperty>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/mx</path>

@@ -61,9 +61,8 @@ ifeq ($(OPENJDK_TARGET_OS), linux)
else ifeq ($(OPENJDK_TARGET_OS), solaris)
SA_TOOLCHAIN := TOOLCHAIN_LINK_CXX
COMMON_CFLAGS := -DSOLARIS_11_B159_OR_LATER
SA_CFLAGS := $(CFLAGS_JDKLIB) $(COMMON_CFLAGS)
SA_CXXFLAGS := $(CXXFLAGS_JDKLIB) $(COMMON_CFLAGS)
SA_CFLAGS := $(CFLAGS_JDKLIB)
SA_CXXFLAGS := $(CXXFLAGS_JDKLIB)
SA_LDFLAGS := $(subst -Wl$(COMMA)-z$(COMMA)defs,, $(LDFLAGS_JDKLIB)) \
-mt $(LDFLAGS_CXX_JDK)
SA_LIBS := -ldl -ldemangle -lthread -lc
@@ -75,7 +74,7 @@ else ifeq ($(OPENJDK_TARGET_OS), macosx)
-mstack-alignment=16 -fPIC
SA_LDFLAGS := $(LDFLAGS_JDKLIB)
SA_LIBS := -framework Foundation -framework JavaNativeFoundation \
-framework Security -framework CoreFoundation
-framework JavaRuntimeSupport -framework Security -framework CoreFoundation
else ifeq ($(OPENJDK_TARGET_OS), windows)
SA_NAME := sawindbg

@@ -47,11 +47,13 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
$(HOTSPOT_TOPDIR)/test/runtime/jni/checked \
$(HOTSPOT_TOPDIR)/test/runtime/jni/PrivateInterfaceMethods \
$(HOTSPOT_TOPDIR)/test/runtime/jni/ToStringInInterfaceTest \
$(HOTSPOT_TOPDIR)/test/runtime/jni/CalleeSavedRegisters \
$(HOTSPOT_TOPDIR)/test/runtime/modules/getModuleJNI \
$(HOTSPOT_TOPDIR)/test/runtime/SameObject \
$(HOTSPOT_TOPDIR)/test/runtime/BoolReturn \
$(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
$(HOTSPOT_TOPDIR)/test/compiler/calls \
$(HOTSPOT_TOPDIR)/test/compiler/native \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
$(HOTSPOT_TOPDIR)/test/testlibrary/jvmti \
$(HOTSPOT_TOPDIR)/test/compiler/jvmci/jdk.vm.ci.code.test \
@@ -89,6 +91,11 @@ ifeq ($(OPENJDK_TARGET_OS), linux)
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libtest-rwx := -z execstack
BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeinvoke := -ljvm -lpthread
BUILD_TEST_invoke_exeinvoke.c_OPTIMIZATION := NONE
BUILD_HOTSPOT_JTREG_EXECUTABLES_LDFLAGS_exeFPRegs := -ldl
endif
ifeq ($(OPENJDK_TARGET_OS), windows)
BUILD_HOTSPOT_JTREG_EXECUTABLES_CFLAGS_exeFPRegs := -MT
endif
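# A hedged reading of the two flags above: the FPRegs test launcher loads
# the JVM dynamically, so it needs -ldl (dlopen) on Linux, while -MT
# statically links the multithreaded C runtime on Windows.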
BUILD_HOTSPOT_JTREG_OUTPUT_DIR := $(BUILD_OUTPUT)/support/test/hotspot/jtreg/native

@@ -2277,14 +2277,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ br(Assembler::HI, *stub->entry());
}
// FIXME: The logic in LIRGenerator::arraycopy_helper clears
// length_positive_check if the source of our length operand is an
// arraylength. However, that arraylength might be zero, and the
// stub that we're about to call contains an assertion that count !=
0. So we make this check purely in order not to trigger an
// assertion failure.
__ cbzw(length, *stub->continuation());
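// In effect: if (length == 0) branch straight to the stub's continuation,
// so the copy stub (and its assert(count != 0)) is never entered.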
if (flags & LIR_OpArrayCopy::type_check) {
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -72,6 +72,7 @@ define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
define_pd_global(bool, IdealizeClearArrayNode, true);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);

@@ -142,6 +142,10 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
sender_sp = _unextended_sp + _cb->frame_size();
// Is sender_sp safe?
if ((address)sender_sp >= thread->stack_base()) {
return false;
}
sender_unextended_sp = sender_sp;
sender_pc = (address) *(sender_sp-1);
// Note: frame::sender_sp_offset is only valid for compiled frame
@@ -200,8 +204,15 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
// construct the potential sender
frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
return sender.is_entry_frame_valid(thread);
// Validate the JavaCallWrapper an entry frame must have
address jcw = (address)sender.entry_frame_call_wrapper();
bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)sender.fp());
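// I.e. the wrapper must lie inside the caller's stack interval
// (fp, stack_base); the stack grows downward, so anything outside that
// range cannot be a live JavaCallWrapper.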
return jcw_safe;
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();

@@ -39,6 +39,7 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for im
define_pd_global(bool, TrapBasedNullChecks, false);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
define_pd_global(uintx, CodeCacheSegmentSize, 64 TIERED_ONLY(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);

@@ -1962,6 +1962,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// due to cache line collision.
__ serialize_memory(rthread, r2);
}
} else {
__ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
}
// check for safepoint operation in progress and/or pending suspend requests

@@ -476,6 +476,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
}
#endif
}
#endif
// handle exceptions
{
Label L;

@@ -2102,7 +2102,9 @@ class Assembler : public AbstractAssembler {
inline void mfvscr( VectorRegister d);
// Vector-Scalar (VSX) instructions.
inline void lxvd2x( VectorSRegister d, Register a);
inline void lxvd2x( VectorSRegister d, Register a, Register b);
inline void stxvd2x( VectorSRegister d, Register a);
inline void stxvd2x( VectorSRegister d, Register a, Register b);
inline void mtvrd( VectorRegister d, Register a);
inline void mfvrd( Register a, VectorRegister d);

@@ -734,8 +734,10 @@ inline void Assembler::lvsl( VectorRegister d, Register s1, Register s2) { emit
inline void Assembler::lvsr( VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
// Vector-Scalar (VSX) instructions.
inline void Assembler::lxvd2x (VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra(s1) | rb(s2)); }
inline void Assembler::stxvd2x(VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(s1) | rb(s2)); }
inline void Assembler::lxvd2x (VectorSRegister d, Register s1) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
inline void Assembler::lxvd2x (VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stxvd2x(VectorSRegister d, Register s1) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
inline void Assembler::stxvd2x(VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
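// Power ISA X-form effective address: EA = ((RA == 0) ? 0 : GPR[RA]) + GPR[RB].
// The new one-register forms hard-code RA = 0, so EA is simply GPR[RB]; the
// two-register forms use ra0mem() because, in memory instructions, an RA
// field of 0 reads as the constant 0 rather than as register R0.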
inline void Assembler::mtvrd( VectorRegister d, Register a) { emit_int32( MTVSRD_OPCODE | vrt(d) | ra(a) | 1u); } // 1u: d is treated as Vector (VMX/Altivec).
inline void Assembler::mfvrd( Register a, VectorRegister d) { emit_int32( MFVSRD_OPCODE | vrt(d) | ra(a) | 1u); } // 1u: d is treated as Vector (VMX/Altivec).

@@ -1894,15 +1894,18 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ beq(combined_check, slow);
}
// If the compiler was not able to prove that exact type of the source or the destination
// of the arraycopy is an array type, check at runtime if the source or the destination is
// an instance type.
if (flags & LIR_OpArrayCopy::type_check) {
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
__ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
__ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
__ bge(CCR0, slow);
}
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
if (!(flags & LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
__ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
__ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
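// Klass::layout_helper() is negative for array klasses and positive for
// instance klasses, with _lh_neutral_value (0) in between, so bge sends
// non-array (instance) types to the slow path.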

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -80,6 +80,7 @@ define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
// loc = x.f
// NullCheck loc
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, IdealizeClearArrayNode, true);
define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 256*M);

@@ -221,6 +221,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
values.describe(frame_no, (intptr_t*)&(get_ijava_state()->name), #name);
DESCRIBE_ADDRESS(method);
DESCRIBE_ADDRESS(mirror);
DESCRIBE_ADDRESS(locals);
DESCRIBE_ADDRESS(monitors);
DESCRIBE_ADDRESS(cpoolCache);

@@ -257,8 +257,7 @@
struct ijava_state {
#ifdef ASSERT
uint64_t ijava_reserved; // Used for assertion.
uint64_t ijava_reserved2; // Inserted for alignment.
uint64_t ijava_reserved; // Used for assertion.
#endif
uint64_t method;
uint64_t mirror;
@@ -274,7 +273,6 @@
uint64_t oop_tmp;
uint64_t lresult;
uint64_t fresult;
// Aligned to frame::alignment_in_bytes (16).
};
enum {

@@ -56,10 +56,11 @@ define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
// Use large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment, 128);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1500);
define_pd_global(uintx, CodeCacheSegmentSize, 128);
define_pd_global(intx, CodeEntryAlignment, 128);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1500);
// Flags for template interpreter.
define_pd_global(bool, RewriteBytecodes, true);

@@ -1922,7 +1922,7 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
// Check the supertype display:
if (must_load_sco) {
// The super check offset is always positive...
lwz(check_cache_offset, sco_offset, super_klass);
lwz(check_cache_offset, sco_offset, super_klass);
super_check_offset = RegisterOrConstant(check_cache_offset);
// super_check_offset is register.
assert_different_registers(sub_klass, super_klass, cached_super, super_check_offset.as_register());
@@ -3325,12 +3325,10 @@ void MacroAssembler::load_klass(Register dst, Register src) {
}
}
void MacroAssembler::load_mirror(Register mirror, Register method) {
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
ld(mirror, in_bytes(Method::const_offset()), method);
ld(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
void MacroAssembler::load_mirror_from_const_method(Register mirror, Register const_method) {
ld(mirror, in_bytes(ConstMethod::constants_offset()), const_method);
ld(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
ld(mirror, mirror_offset, mirror);
ld(mirror, in_bytes(Klass::java_mirror_offset()), mirror);
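// Pointer chain walked above:
// ConstMethod* -> constants() (ConstantPool*) -> pool_holder() (Klass*)
// -> java_mirror() (the java.lang.Class oop of the method's holder).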
}
// Clear Array
@@ -4345,8 +4343,8 @@ void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len
* @param t3 volatile register
*/
void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4) {
Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4) {
assert_different_registers(crc, buf, len, table);
Label L_alignedHead, L_tail, L_alignTail, L_start, L_end;

@@ -723,7 +723,7 @@ class MacroAssembler: public Assembler {
void store_klass(Register dst_oop, Register klass, Register tmp = R0);
void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
void load_mirror(Register mirror, Register method);
void load_mirror_from_const_method(Register mirror, Register const_method);
static int instr_size_for_decode_klass_not_null();
void decode_klass_not_null(Register dst, Register src = noreg);

@@ -11237,6 +11237,17 @@ instruct cmpP_reg_reg(flagsReg crx, iRegP_N2P src1, iRegP_N2P src2) %{
ins_pipe(pipe_class_compare);
%}
instruct cmpP_reg_null(flagsReg crx, iRegP_N2P src1, immP_0or1 src2) %{
match(Set crx (CmpP src1 src2));
format %{ "CMPLDI $crx, $src1, $src2 \t// ptr" %}
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_cmpl);
__ cmpldi($crx$$CondRegister, $src1$$Register, (int)((short)($src2$$constant & 0xFFFF)));
%}
ins_pipe(pipe_class_compare);
%}
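// immP_0or1 restricts src2 to the pointer constants 0 and 1, both of which
// trivially fit the unsigned 16-bit immediate field of CMPLDI.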
// Used in postalloc expand.
instruct cmpP_reg_imm16(flagsReg crx, iRegPsrc src1, immL16 src2) %{
// This match rule prevents reordering of node before a safepoint.

@@ -1220,8 +1220,8 @@ class StubGenerator: public StubCodeGenerator {
__ bind(l_10);
// Use loop with VSX load/store instructions to
// copy 32 elements a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32
@@ -1486,8 +1486,8 @@ class StubGenerator: public StubCodeGenerator {
__ bind(l_9);
// Use loop with VSX load/store instructions to
// copy 16 elements a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load from src.
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst.
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load from src.
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst.
__ lxvd2x(tmp_vsr2, R3_ARG1, tmp1); // Load from src + 16.
__ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16.
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32.
@@ -1677,8 +1677,8 @@ class StubGenerator: public StubCodeGenerator {
__ bind(l_7);
// Use loop with VSX load/store instructions to
// copy 8 elements a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32
@@ -1745,13 +1745,16 @@ class StubGenerator: public StubCodeGenerator {
// Do reverse copy. We assume the case of actual overlap is rare enough
// that we don't have to optimize it.
Label l_1, l_2, l_3, l_4, l_5, l_6;
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
Register tmp3 = R8_ARG6;
Register tmp4 = R0;
VectorSRegister tmp_vsr1 = VSR1;
VectorSRegister tmp_vsr2 = VSR2;
{ // FasterArrayCopy
__ cmpwi(CCR0, R5_ARG3, 0);
__ beq(CCR0, l_6);
@@ -1761,6 +1764,25 @@ class StubGenerator: public StubCodeGenerator {
__ add(R4_ARG2, R4_ARG2, R5_ARG3);
__ srdi(R5_ARG3, R5_ARG3, 2);
if (!aligned) {
// Check if the arrays have the same alignment mod 8.
__ xorr(tmp1, R3_ARG1, R4_ARG2);
__ andi_(R0, tmp1, 7);
// Not the same alignment, but ld and std only need 4-byte alignment.
__ bne(CCR0, l_7); // Alignments differ -> just copy 2 at a time.
// Copy 1 element to align to and from on an 8-byte boundary.
__ andi_(R0, R3_ARG1, 7);
__ beq(CCR0, l_7);
__ addi(R3_ARG1, R3_ARG1, -4);
__ addi(R4_ARG2, R4_ARG2, -4);
__ addi(R5_ARG3, R5_ARG3, -1);
__ lwzx(tmp2, R3_ARG1);
__ stwx(tmp2, R4_ARG2);
__ bind(l_7);
}
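// C-style paraphrase of this prologue (pointers already advanced past the
// arrays' ends for the reverse copy):
//   if (((src ^ dst) & 7) == 0 && (src & 7) != 0) {
//     src -= 4; dst -= 4; len -= 1;
//     *(jint*)dst = *(jint*)src; // one element; both now 8-byte aligned
//   }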
__ cmpwi(CCR0, R5_ARG3, 7);
__ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
@@ -1768,6 +1790,7 @@ class StubGenerator: public StubCodeGenerator {
__ andi(R5_ARG3, R5_ARG3, 7);
__ mtctr(tmp1);
if (!VM_Version::has_vsx()) {
__ bind(l_4);
// Use unrolled version for mass copying (copy 4 elements a time).
// Load feeding store gets zero latency on Power6, however not on Power5.
@@ -1783,6 +1806,40 @@ class StubGenerator: public StubCodeGenerator {
__ std(tmp2, 8, R4_ARG2);
__ std(tmp1, 0, R4_ARG2);
__ bdnz(l_4);
} else { // Processor supports VSX, so use it to mass copy.
// Prefetch the data into the L2 cache.
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
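// DSCR is the Data Stream Control Register; OR-ing 7 into the saved value
// sets the default prefetch depth field to its deepest setting for the
// copy loop. The original value is restored below once the loop is done.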
__ li(tmp1, 16);
// Backbranch target aligned to 32 bytes. Not 16-byte aligned, as the
// loop contains < 8 instructions, which then fit inside a single
// i-cache sector.
__ align(32);
__ bind(l_4);
// Use loop with VSX load/store instructions to
// copy 8 elements a time.
__ addi(R3_ARG1, R3_ARG1, -32); // Update src-=32
__ addi(R4_ARG2, R4_ARG2, -32); // Update dst-=32
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src+16
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst
__ bdnz(l_4);
// Restore DSCR pre-fetch value.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
}
__ cmpwi(CCR0, R5_ARG3, 0);
__ beq(CCR0, l_6);
@@ -1892,8 +1949,8 @@ class StubGenerator: public StubCodeGenerator {
__ bind(l_5);
// Use loop with VSX load/store instructions to
// copy 4 elements a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32
@@ -1962,6 +2019,9 @@ class StubGenerator: public StubCodeGenerator {
Register tmp3 = R8_ARG6;
Register tmp4 = R0;
VectorSRegister tmp_vsr1 = VSR1;
VectorSRegister tmp_vsr2 = VSR2;
Label l_1, l_2, l_3, l_4, l_5;
__ cmpwi(CCR0, R5_ARG3, 0);
@@ -1980,6 +2040,7 @@ class StubGenerator: public StubCodeGenerator {
__ andi(R5_ARG3, R5_ARG3, 3);
__ mtctr(tmp1);
if (!VM_Version::has_vsx()) {
__ bind(l_4);
// Use unrolled version for mass copying (copy 4 elements a time).
// Load feeding store gets zero latency on Power6, however not on Power5.
@@ -1995,6 +2056,40 @@ class StubGenerator: public StubCodeGenerator {
__ std(tmp2, 8, R4_ARG2);
__ std(tmp1, 0, R4_ARG2);
__ bdnz(l_4);
} else { // Processor supports VSX, so use it to mass copy.
// Prefetch the data into the L2 cache.
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32 bytes. Not 16-byte aligned, as the
// loop contains < 8 instructions, which then fit inside a single
// i-cache sector.
__ align(32);
__ bind(l_4);
// Use loop with VSX load/store instructions to
// copy 4 elements a time.
__ addi(R3_ARG1, R3_ARG1, -32); // Update src-=32
__ addi(R4_ARG2, R4_ARG2, -32); // Update dst-=32
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src+16
__ lxvd2x(tmp_vsr1, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
__ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst
__ bdnz(l_4);
// Restore DSCR pre-fetch value.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
}
__ cmpwi(CCR0, R5_ARG3, 0);
__ beq(CCR0, l_1);

@@ -915,7 +915,9 @@ void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratc
__ b(Ldone);
__ bind(Lstatic); // Static case: Lock the java mirror
__ load_mirror(Robj_to_lock, R19_method);
// Load mirror from interpreter frame.
__ ld(Robj_to_lock, _abi(callers_sp), R1_SP);
__ ld(Robj_to_lock, _ijava_state_neg(mirror), Robj_to_lock);
__ bind(Ldone);
__ verify_oop(Robj_to_lock);
@@ -1077,12 +1079,12 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
__ resize_frame(parent_frame_resize, R11_scratch1);
__ std(R12_scratch2, _abi(lr), R1_SP);
// Get mirror and store it in the frame as GC root for this Method*.
__ load_mirror_from_const_method(R12_scratch2, Rconst_method);
__ addi(R26_monitor, R1_SP, - frame::ijava_state_size);
__ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
// Get mirror and store it in the frame as GC root for this Method*.
__ load_mirror(R12_scratch2, R19_method);
// Store values.
// R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
// in InterpreterMacroAssembler::call_from_interpreter.
@@ -1380,13 +1382,12 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
__ bfalse(CCR0, method_is_not_static);
__ load_mirror(R12_scratch2, R19_method);
// state->_native_mirror = mirror;
__ ld(R11_scratch1, 0, R1_SP);
__ std(R12_scratch2/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
__ ld(R11_scratch1, _abi(callers_sp), R1_SP);
// Load mirror from interpreter frame.
__ ld(R12_scratch2, _ijava_state_neg(mirror), R11_scratch1);
// R4_ARG2 = &state->_oop_temp;
__ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
__ std(R12_scratch2/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
BIND(method_is_not_static);
}
@@ -2157,12 +2158,12 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
// Restoration of lr done by remove_activation.
switch (state) {
// Narrow result if state is itos but result type is smaller.
case itos: __ narrow(R17_tos); /* fall through */
case ltos:
case btos:
case ztos:
case ctos:
case stos:
case itos: __ narrow(R17_tos); /* fall through */
case ltos:
case atos: __ mr(R3_RET, R17_tos); break;
case ftos:
case dtos: __ fmr(F1_RET, F15_ftos); break;

@@ -2133,10 +2133,6 @@ void TemplateTable::_return(TosState state) {
// since compiled code callers expect the result to already be narrowed.
case itos: __ narrow(R17_tos); /* fall through */
case ltos:
case btos:
case ztos:
case ctos:
case stos:
case atos: __ mr(R3_RET, R17_tos); break;
case ftos:
case dtos: __ fmr(F1_RET, F15_ftos); break;
@@ -2548,7 +2544,6 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
assert(branch_table[ztos] == 0, "can't compute twice");
branch_table[ztos] = __ pc(); // non-volatile_entry point
__ lbzx(R17_tos, Rclass_or_obj, Roffset);
__ extsb(R17_tos, R17_tos);
__ push(ztos);
if (!is_static && rc == may_rewrite) {
// use btos rewriting, no truncating to t/f bit is needed for getfield.

@@ -656,7 +656,7 @@ void VM_Version::determine_features() {
a->vpmsumb(VR0, VR1, VR2); // code[11] -> vpmsumb
a->tcheck(0); // code[12] -> tcheck
a->mfdscr(R0); // code[13] -> mfdscr
a->lxvd2x(VSR0, 0, R3_ARG1); // code[14] -> vsx
a->lxvd2x(VSR0, R3_ARG1); // code[14] -> vsx
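// The generated probe code is executed afterwards; an instruction that
// raises SIGILL marks its feature (here VSX, code[14]) as unavailable.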
a->blr();
// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.

@@ -0,0 +1,210 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/constMethod.hpp"
#include "oops/method.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
int AbstractInterpreter::BasicType_as_index(BasicType type) {
int i = 0;
switch (type) {
case T_BOOLEAN: i = 0; break;
case T_CHAR : i = 1; break;
case T_BYTE : i = 2; break;
case T_SHORT : i = 3; break;
case T_INT : i = 4; break;
case T_LONG : i = 5; break;
case T_VOID : i = 6; break;
case T_FLOAT : i = 7; break;
case T_DOUBLE : i = 8; break;
case T_OBJECT : i = 9; break;
case T_ARRAY : i = 9; break;
default : ShouldNotReachHere();
}
assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
return i;
}
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
// No special entry points that preclude compilation.
return true;
}
// How much stack a method top interpreter activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
// We have to size the following 2 frames:
//
// [TOP_IJAVA_FRAME_ABI]
// [ENTRY_FRAME]
//
// This expands to (see frame_s390.hpp):
//
// [TOP_IJAVA_FRAME_ABI]
// [operand stack] > stack
// [monitors] (optional) > monitors
// [IJAVA_STATE] > interpreter_state
// [PARENT_IJAVA_FRAME_ABI]
// [callee's locals w/o arguments] \ locals
// [outgoing arguments] /
// [ENTRY_FRAME_LOCALS]
int locals = method->max_locals() * BytesPerWord;
int interpreter_state = frame::z_ijava_state_size;
int stack = method->max_stack() * BytesPerWord;
int monitors = method->is_synchronized() ? frame::interpreter_frame_monitor_size_in_bytes() : 0;
int total_bytes =
frame::z_top_ijava_frame_abi_size +
stack +
monitors +
interpreter_state +
frame::z_parent_ijava_frame_abi_size +
locals +
frame::z_entry_frame_locals_size;
return (total_bytes/BytesPerWord);
}
// Returns number of stackElementWords needed for the interpreter frame with the
// given sections.
// This overestimates the stack by one slot in case of alignments.
int AbstractInterpreter::size_activation(int max_stack,
int temps,
int extra_args,
int monitors,
int callee_params,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
assert((Interpreter::stackElementSize == frame::alignment_in_bytes), "must align frame size");
const int abi_scratch = is_top_frame ? (frame::z_top_ijava_frame_abi_size / Interpreter::stackElementSize) :
(frame::z_parent_ijava_frame_abi_size / Interpreter::stackElementSize);
const int size =
max_stack +
(callee_locals - callee_params) + // Already counted in max_stack().
monitors * frame::interpreter_frame_monitor_size() +
abi_scratch +
frame::z_ijava_state_size / Interpreter::stackElementSize;
// Fixed size of an interpreter frame.
return size;
}
// Fills a skeletal interpreter frame generated during deoptimizations.
//
// Parameters:
//
// interpreter_frame != NULL:
// set up the method, locals, and monitors.
// The frame interpreter_frame, if not NULL, is guaranteed to be the
// right size, as determined by a previous call to this method.
// It is also guaranteed to be walkable even though it is in a skeletal state
//
// is_top_frame == true:
// We're processing the *oldest* interpreter frame!
//
// popframe_extra_args:
// If this is != 0 we are returning to a deoptimized frame by popping
// off the callee frame. We want to re-execute the call that called the
// callee interpreted, but since the return to the interpreter would pop
// the arguments off, we advance the esp by dummy popframe_extra_args slots.
// Popping those off re-establishes the stack layout as it was before the call.
//
void AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
int caller_actual_parameters,
int callee_param_count,
int callee_locals_count,
frame* caller,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
// TOP_IJAVA_FRAME:
//
// 0 [TOP_IJAVA_FRAME_ABI] -+
// 16 [operand stack] | size
// [monitors] (optional) |
// [IJAVA_STATE] -+
// Note: own locals are located in the caller frame.
//
// PARENT_IJAVA_FRAME:
//
// 0 [PARENT_IJAVA_FRAME_ABI] -+
// [callee's locals w/o arguments] |
// [outgoing arguments] | size
// [used part of operand stack w/o arguments] |
// [monitors] (optional) |
// [IJAVA_STATE] -+
//
// Now we know our caller, calc the exact frame layout and size
// z_ijava_state->locals - i*BytesPerWord points to i-th Java local (i starts at 0).
intptr_t* locals_base = (caller->is_interpreted_frame())
? (caller->interpreter_frame_tos_address() + caller_actual_parameters - 1)
: (caller->sp() + method->max_locals() - 1 +
frame::z_parent_ijava_frame_abi_size / Interpreter::stackElementSize);
intptr_t* monitor_base = (intptr_t*)((address)interpreter_frame->fp() - frame::z_ijava_state_size);
intptr_t* monitor = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
intptr_t* operand_stack_base = monitor;
intptr_t* tos = operand_stack_base - tempcount - popframe_extra_args;
intptr_t* top_frame_sp =
operand_stack_base - method->max_stack() - frame::z_top_ijava_frame_abi_size / Interpreter::stackElementSize;
intptr_t* sender_sp;
if (caller->is_interpreted_frame()) {
sender_sp = caller->interpreter_frame_top_frame_sp();
} else if (caller->is_compiled_frame()) {
sender_sp = caller->fp() - caller->cb()->frame_size();
// The bottom frame's sender_sp is its caller's unextended_sp.
// It was already set when its skeleton was pushed (see push_skeleton_frames()).
// Note: the unextended_sp is required by nmethod::orig_pc_addr().
assert(is_bottom_frame && (sender_sp == caller->unextended_sp()),
"must initialize sender_sp of bottom skeleton frame when pushing it");
} else {
assert(caller->is_entry_frame(), "is there a new frame type??");
sender_sp = caller->sp(); // Call_stub only uses its fp.
}
interpreter_frame->interpreter_frame_set_method(method);
interpreter_frame->interpreter_frame_set_mirror(method->method_holder()->java_mirror());
interpreter_frame->interpreter_frame_set_locals(locals_base);
interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
*interpreter_frame->interpreter_frame_cache_addr() = method->constants()->cache();
interpreter_frame->interpreter_frame_set_tos_address(tos);
interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
}

@@ -0,0 +1,171 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif
// Convention: Use Z_R0 and Z_R1 instead of Z_scratch_* in all
// assembler_s390.* files.
// Convert the raw encoding form into the form expected by the
// constructor for Address. This is called by adlc generated code.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
assert(scale == 0, "Scale should not be used on z/Architecture. The call to make_raw is "
"generated by adlc and this must mirror all features of Operands from machnode.hpp.");
assert(disp_reloc == relocInfo::none, "not implemented on z/Architecture.");
Address madr(as_Register(base), as_Register(index), in_ByteSize(disp));
return madr;
}
int AbstractAssembler::code_fill_byte() {
return 0x00; // Illegal instruction 0x00000000.
}
// Condition code masks. Details see enum branch_condition.
// Although this method is meant for INT CCs, the Overflow/Ordered
// bit in the masks has to be considered. The CC might have been set
// by a float operation, but is evaluated while calculating an integer
// result. See elementary test TestFloat.isNotEqual(FF)Z for example.
Assembler::branch_condition Assembler::inverse_condition(Assembler::branch_condition cc) {
Assembler::branch_condition unordered_bit = (Assembler::branch_condition)(cc & bcondNotOrdered);
Assembler::branch_condition inverse_cc;
// Some are commented out to avoid duplicate labels.
switch (cc) {
case bcondNever : inverse_cc = bcondAlways; break; // 0 -> 15
case bcondAlways : inverse_cc = bcondNever; break; // 15 -> 0
case bcondOverflow : inverse_cc = bcondNotOverflow; break; // 1 -> 14
case bcondNotOverflow : inverse_cc = bcondOverflow; break; // 14 -> 1
default :
switch ((Assembler::branch_condition)(cc & bcondOrdered)) {
case bcondEqual : inverse_cc = bcondNotEqual; break; // 8 -> 6
// case bcondZero :
// case bcondAllZero :
case bcondNotEqual : inverse_cc = bcondEqual; break; // 6 -> 8
// case bcondNotZero :
// case bcondMixed :
case bcondLow : inverse_cc = bcondNotLow; break; // 4 -> 10
// case bcondNegative :
case bcondNotLow : inverse_cc = bcondLow; break; // 10 -> 4
// case bcondNotNegative :
case bcondHigh : inverse_cc = bcondNotHigh; break; // 2 -> 12
// case bcondPositive :
case bcondNotHigh : inverse_cc = bcondHigh; break; // 12 -> 2
// case bcondNotPositive :
default :
fprintf(stderr, "inverse_condition(%d)\n", (int)cc);
fflush(stderr);
ShouldNotReachHere();
return bcondNever;
}
// If cc is even, inverse_cc must be odd.
if (!unordered_bit) {
inverse_cc = (Assembler::branch_condition)(inverse_cc | bcondNotOrdered);
}
break;
}
return inverse_cc;
}
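// Background: a z/Architecture branch mask selects which condition-code
// values take the branch: bit 8 -> CC0 (equal/zero), 4 -> CC1 (low),
// 2 -> CC2 (high), 1 -> CC3 (overflow/unordered). Inversion complements
// the mask; the unordered bit is then re-added for integer conditions so
// a float-produced CC3 still takes the inverted branch.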
Assembler::branch_condition Assembler::inverse_float_condition(Assembler::branch_condition cc) {
Assembler::branch_condition inverse_cc;
switch (cc) {
case bcondNever : inverse_cc = bcondAlways; break; // 0
case bcondAlways : inverse_cc = bcondNever; break; // 15
case bcondNotOrdered : inverse_cc = bcondOrdered; break; // 14
case bcondOrdered : inverse_cc = bcondNotOrdered; break; // 1
case bcondEqual : inverse_cc = (branch_condition)(bcondNotEqual + bcondNotOrdered); break; // 8
case bcondNotEqual + bcondNotOrdered : inverse_cc = bcondEqual; break; // 7
case bcondLow + bcondNotOrdered : inverse_cc = (branch_condition)(bcondHigh + bcondEqual); break; // 5
case bcondNotLow : inverse_cc = (branch_condition)(bcondLow + bcondNotOrdered); break; // 10
case bcondHigh : inverse_cc = (branch_condition)(bcondLow + bcondNotOrdered + bcondEqual); break; // 2
case bcondNotHigh + bcondNotOrdered : inverse_cc = bcondHigh; break; // 13
default :
fprintf(stderr, "inverse_float_condition(%d)\n", (int)cc);
fflush(stderr);
ShouldNotReachHere();
return bcondNever;
}
return inverse_cc;
}
#ifdef ASSERT
void Assembler::print_dbg_msg(outputStream* out, unsigned long inst, const char* msg, int ilen) {
out->flush();
switch (ilen) {
case 2: out->print_cr("inst = %4.4x, %s", (unsigned short)inst, msg); break;
case 4: out->print_cr("inst = %8.8x, %s\n", (unsigned int)inst, msg); break;
case 6: out->print_cr("inst = %12.12lx, %s\n", inst, msg); break;
default: out->print_cr("inst = %16.16lx, %s\n", inst, msg); break;
}
out->flush();
}
void Assembler::dump_code_range(outputStream* out, address pc, const unsigned int range, const char* msg) {
out->cr();
out->print_cr("-------------------------------");
out->print_cr("-- %s", msg);
out->print_cr("-------------------------------");
out->print_cr("Hex dump of +/-%d bytes around %p, interval [%p,%p)", range, pc, pc-range, pc+range);
os::print_hex_dump(out, pc-range, pc+range, 2);
out->cr();
out->print_cr("Disassembly of +/-%d bytes around %p, interval [%p,%p)", range, pc, pc-range, pc+range);
Disassembler::decode(pc, pc + range, out);
}
#endif

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -0,0 +1,76 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_BYTES_S390_HPP
#define CPU_S390_VM_BYTES_S390_HPP
#include "memory/allocation.hpp"
class Bytes: AllStatic {
public:
// Efficient reading and writing of unaligned unsigned data in
// platform-specific byte ordering.
// Use regular load and store for unaligned access.
//
// On z/Architecture, unaligned loads and stores are supported when using the
// "traditional" load (LH, L/LY, LG) and store (STH, ST/STY, STG) instructions.
// The penalty for unaligned access is just a few (two or three) ticks,
// plus another few (two or three) ticks if the access crosses a cache line boundary.
//
// In short, on z/Architecture there is no point in getting or putting unaligned data piecemeal.
// Returns true if the byte ordering used by Java is different from
// the native byte ordering of the underlying machine.
// z/Arch is big endian, thus, a swap between native and Java ordering
// is always a no-op.
static inline bool is_Java_byte_ordering_different() { return false; }
// Only swap on little endian machines => suffix `_le'.
static inline u2 swap_u2_le(u2 x) { return x; }
static inline u4 swap_u4_le(u4 x) { return x; }
static inline u8 swap_u8_le(u8 x) { return x; }
static inline u2 get_native_u2(address p) { return *(u2*)p; }
static inline u4 get_native_u4(address p) { return *(u4*)p; }
static inline u8 get_native_u8(address p) { return *(u8*)p; }
static inline void put_native_u2(address p, u2 x) { *(u2*)p = x; }
static inline void put_native_u4(address p, u4 x) { *(u4*)p = x; }
static inline void put_native_u8(address p, u8 x) { *(u8*)p = x; }
#include "bytes_linux_s390.inline.hpp"
// Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
static inline u2 get_Java_u2(address p) { return get_native_u2(p); }
static inline u4 get_Java_u4(address p) { return get_native_u4(p); }
static inline u8 get_Java_u8(address p) { return get_native_u8(p); }
static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, x); }
static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, x); }
static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, x); }
};
#endif // CPU_S390_VM_BYTES_S390_HPP

@@ -0,0 +1,497 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
#undef CHECK_BAILOUT
#define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception) :
_throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception),
_index(index) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_info->deoptimize_on_exception()) {
address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
ce->emit_call_c(a);
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
return;
}
// Pass the array index in Z_R1_scratch which is not managed by linear scan.
if (_index->is_cpu_register()) {
__ lgr_if_needed(Z_R1_scratch, _index->as_register());
} else {
__ load_const_optimized(Z_R1_scratch, _index->as_jint());
}
Runtime1::StubID stub_id;
if (_throw_index_out_of_bounds_exception) {
stub_id = Runtime1::throw_index_exception_id;
} else {
stub_id = Runtime1::throw_range_check_failed_id;
}
ce->emit_call_c(Runtime1::entry_for (stub_id));
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
_info = new CodeEmitInfo(info);
}
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
ce->emit_call_c(a);
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
Metadata *m = _method->as_constant_ptr()->as_metadata();
bool success = __ set_metadata_constant(m, Z_R1_scratch);
if (!success) {
ce->compilation()->bailout("const section overflow");
return;
}
ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
ce->store_parameter(_bci, 0);
ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ branch_optimized(Assembler::bcondAlways, _continuation);
}
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
if (_offset != -1) {
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
}
__ bind(_entry);
ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
CHECK_BAILOUT();
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
address a;
if (_info->deoptimize_on_exception()) {
// Deoptimize, do not throw the exception, because it is probably wrong to do it here.
a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
} else {
a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
}
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry);
ce->emit_call_c(a);
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
// Note: pass object in Z_R1_scratch
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_obj->is_valid()) {
__ z_lgr(Z_R1_scratch, _obj->as_register()); // _obj contains the optional argument to the stub
}
address a = Runtime1::entry_for (_stub);
ce->emit_call_c(a);
CHECK_BAILOUT();
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
_result = result;
_klass = klass;
_klass_reg = klass_reg;
_info = new CodeEmitInfo(info);
assert(stub_id == Runtime1::new_instance_id ||
stub_id == Runtime1::fast_new_instance_id ||
stub_id == Runtime1::fast_new_instance_init_check_id,
"need new_instance id");
_stub_id = stub_id;
}
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
address a = Runtime1::entry_for (_stub_id);
ce->emit_call_c(a);
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
assert(_result->as_register() == Z_R2, "callee returns result in Z_R2,");
__ z_brul(_continuation);
}
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
_klass_reg = klass_reg;
_length = length;
_result = result;
_info = new CodeEmitInfo(info);
}
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
__ lgr_if_needed(Z_R13, _length->as_register());
address a = Runtime1::entry_for (Runtime1::new_type_array_id);
ce->emit_call_c(a);
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
assert(_result->as_register() == Z_R2, "callee returns result in Z_R2,");
__ z_brul(_continuation);
}
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
_klass_reg = klass_reg;
_length = length;
_result = result;
_info = new CodeEmitInfo(info);
}
void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
__ lgr_if_needed(Z_R13, _length->as_register());
address a = Runtime1::entry_for (Runtime1::new_object_array_id);
ce->emit_call_c(a);
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
assert(_result->as_register() == Z_R2, "callee returns result in Z_R2,");
__ z_brul(_continuation);
}
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg) {
_info = new CodeEmitInfo(info);
}
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
Runtime1::StubID enter_id;
if (ce->compilation()->has_fpu_code()) {
enter_id = Runtime1::monitorenter_id;
} else {
enter_id = Runtime1::monitorenter_nofpu_id;
}
__ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
__ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
ce->emit_call_c(Runtime1::entry_for (enter_id));
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ branch_optimized(Assembler::bcondAlways, _continuation);
}
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
// Move address of the BasicObjectLock into Z_R1_scratch.
if (_compute_lock) {
// Lock_reg was destroyed by fast unlocking attempt => recompute it.
ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
} else {
__ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
}
// Note: non-blocking leaf routine => no call info needed.
Runtime1::StubID exit_id;
if (ce->compilation()->has_fpu_code()) {
exit_id = Runtime1::monitorexit_id;
} else {
exit_id = Runtime1::monitorexit_nofpu_id;
}
ce->emit_call_c(Runtime1::entry_for (exit_id));
CHECK_BAILOUT();
__ branch_optimized(Assembler::bcondAlways, _continuation);
}
// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace original code with a call to the stub.
// At Runtime:
// - call to stub, jump to runtime.
// - in runtime: Preserve all registers (especially objects, i.e., source and destination object).
// - in runtime: After initializing class, restore original code, reexecute instruction.
int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);
void PatchingStub::align_patch_site(MacroAssembler* masm) {
#ifndef PRODUCT
const char* bc;
switch (_id) {
case access_field_id: bc = "patch site (access_field)"; break;
case load_klass_id: bc = "patch site (load_klass)"; break;
case load_mirror_id: bc = "patch site (load_mirror)"; break;
case load_appendix_id: bc = "patch site (load_appendix)"; break;
default: bc = "patch site (unknown patch id)"; break;
}
masm->block_comment(bc);
#endif
masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}
void PatchingStub::emit_code(LIR_Assembler* ce) {
// Copy original code here.
assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
"not enough room for call");
NearLabel call_patch;
int being_initialized_entry = __ offset();
if (_id == load_klass_id) {
// Produce a copy of the load klass instruction for use by the case being initialized.
#ifdef ASSERT
address start = __ pc();
#endif
AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
__ load_const(_obj, addrlit);
#ifdef ASSERT
for (int i = 0; i < _bytes_to_copy; i++) {
address ptr = (address)(_pc_start + i);
int a_byte = (*ptr) & 0xFF;
assert(a_byte == *start++, "should be the same code");
}
#endif
} else if (_id == load_mirror_id || _id == load_appendix_id) {
// Produce a copy of the load mirror instruction for use by the case being initialized.
#ifdef ASSERT
address start = __ pc();
#endif
AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
__ load_const(_obj, addrlit);
#ifdef ASSERT
for (int i = 0; i < _bytes_to_copy; i++) {
address ptr = (address)(_pc_start + i);
int a_byte = (*ptr) & 0xFF;
assert(a_byte == *start++, "should be the same code");
}
#endif
} else {
// Make a copy of the code which is going to be patched.
for (int i = 0; i < _bytes_to_copy; i++) {
address ptr = (address)(_pc_start + i);
int a_byte = (*ptr) & 0xFF;
__ emit_int8 (a_byte);
}
}
address end_of_patch = __ pc();
int bytes_to_skip = 0;
if (_id == load_mirror_id) {
int offset = __ offset();
if (CommentedAssembly) {
__ block_comment(" being_initialized check");
}
// Static field accesses have special semantics while the class
// initializer is being run, so we emit a test which can be used to
// check that this code is being executed by the initializing
// thread.
assert(_obj != noreg, "must be a valid register");
assert(_index >= 0, "must have oop index");
__ z_lg(Z_R1_scratch, java_lang_Class::klass_offset_in_bytes(), _obj);
__ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
__ branch_optimized(Assembler::bcondNotEqual, call_patch);
// Load_klass patches may execute the patched code before it's
// copied back into place so we need to jump back into the main
// code of the nmethod to continue execution.
__ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);
// Make sure this extra code gets skipped.
bytes_to_skip += __ offset() - offset;
}
// Now emit the patch record telling the runtime how to find the
// pieces of the patch. We only need 3 bytes, but to help the disassembler
// we make the data look like the following add instruction:
// A R1, D2(X2, B2)
// which requires 4 bytes.
int sizeof_patch_record = 4;
bytes_to_skip += sizeof_patch_record;
// Emit the offsets needed to find the code to patch.
int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;
// Emit the patch record: opcode of the add followed by 3 bytes patch record data.
__ emit_int8((int8_t)(A_ZOPC>>24));
__ emit_int8(being_initialized_entry_offset);
__ emit_int8(bytes_to_skip);
__ emit_int8(_bytes_to_copy);
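// Resulting record layout (4 bytes, disassembling as A R1,D2(X2,B2)):
//   [0] A opcode
//   [1] being_initialized_entry_offset
//   [2] bytes_to_skip
//   [3] bytes_to_copy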
address patch_info_pc = __ pc();
assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
address entry = __ pc();
NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
address target = NULL;
relocInfo::relocType reloc_type = relocInfo::none;
switch (_id) {
case access_field_id: target = Runtime1::entry_for (Runtime1::access_field_patching_id); break;
case load_klass_id: target = Runtime1::entry_for (Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
case load_mirror_id: target = Runtime1::entry_for (Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
default: ShouldNotReachHere();
}
__ bind(call_patch);
if (CommentedAssembly) {
__ block_comment("patch entry point");
}
// Cannot use call_c_opt() because its size is not constant.
__ load_const(Z_R1_scratch, target); // Must not optimize in order to keep constant _patch_info_offset constant.
__ z_basr(Z_R14, Z_R1_scratch);
assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
ce->add_call_info_here(_info);
__ z_brcl(Assembler::bcondAlways, _patch_site_entry);
if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
CodeSection* cs = __ code_section();
address pc = (address)_pc_start;
RelocIterator iter(cs, pc, pc + 1);
relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
}
}
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
CHECK_BAILOUT();
ce->add_call_info_here(_info);
DEBUG_ONLY(__ should_not_reach_here());
}
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
// Slow case: call to native.
__ bind(_entry);
__ lgr_if_needed(Z_ARG1, src()->as_register());
__ lgr_if_needed(Z_ARG2, src_pos()->as_register());
__ lgr_if_needed(Z_ARG3, dst()->as_register());
__ lgr_if_needed(Z_ARG4, dst_pos()->as_register());
__ lgr_if_needed(Z_ARG5, length()->as_register());
// Must align call sites, otherwise they can't be updated atomically on MP hardware.
ce->align_call(lir_static_call);
assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
"must be aligned");
ce->emit_static_call_stub();
// Prepend each BRASL with a nop.
__ relocate(relocInfo::static_call_type);
__ z_nop();
__ z_brasl(Z_R14, SharedRuntime::get_resolve_static_call_stub());
ce->add_call_info_here(info());
ce->verify_oop_map(info());
#ifndef PRODUCT
__ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_slowcase_cnt);
__ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
#endif
__ branch_optimized(Assembler::bcondAlways, _continuation);
}
///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
__ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
__ branch_optimized(Assembler::bcondZero, _continuation);
ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_pre_barrier_slow_id));
CHECK_BAILOUT();
__ branch_optimized(Assembler::bcondAlways, _continuation);
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register new_val_reg = new_val()->as_register();
__ z_ltgr(new_val_reg, new_val_reg);
__ branch_optimized(Assembler::bcondZero, _continuation);
__ z_lgr(Z_R1_scratch, addr()->as_pointer_register());
ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_post_barrier_slow_id));
CHECK_BAILOUT();
__ branch_optimized(Assembler::bcondAlways, _continuation);
}
#endif // INCLUDE_ALL_GCS
#undef __

@ -0,0 +1,71 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_C1_DEFS_S390_HPP
#define CPU_S390_VM_C1_DEFS_S390_HPP
// Native word offsets from memory address (big endian).
enum {
pd_lo_word_offset_in_bytes = BytesPerInt,
pd_hi_word_offset_in_bytes = 0
};
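// Worked sketch (illustration only): what the big-endian offsets above mean.
// An 8-byte long at address A keeps its high word at A+0 and its low word at
// A+4; the assertions below hold on a big-endian host such as s390x.
#include <cassert>
#include <cstdint>
#include <cstring>
int main() {
  uint64_t v = 0x1122334455667788ULL;
  uint8_t buf[8];
  std::memcpy(buf, &v, sizeof(v));
  uint32_t hi, lo;
  std::memcpy(&hi, buf + 0, 4); // pd_hi_word_offset_in_bytes == 0
  std::memcpy(&lo, buf + 4, 4); // pd_lo_word_offset_in_bytes == BytesPerInt
  assert(hi == 0x11223344 && lo == 0x55667788); // big-endian word order
  return 0;
}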
// Explicit rounding operations are not required to implement the strictFP mode.
enum {
pd_strict_fp_requires_explicit_rounding = false
};
// registers
enum {
pd_nof_cpu_regs_frame_map = 16, // Number of registers used during code emission.
// Treat all registers as caller save (values of callee-saved registers are hard to find if the caller is in the runtime).
// unallocated: Z_thread, Z_fp, Z_SP, Z_R0_scratch, Z_R1_scratch, Z_R14
pd_nof_cpu_regs_unallocated = 6,
pd_nof_caller_save_cpu_regs_frame_map = pd_nof_cpu_regs_frame_map - pd_nof_cpu_regs_unallocated, // Number of cpu registers killed by calls.
pd_nof_cpu_regs_reg_alloc = pd_nof_caller_save_cpu_regs_frame_map, // Number of registers that are visible to register allocator.
pd_nof_cpu_regs_linearscan = pd_nof_cpu_regs_frame_map, // Number of registers visible to linear scan.
pd_first_cpu_reg = 0,
pd_last_cpu_reg = 9, // Others are unallocated (see FrameMap::initialize()).
pd_nof_fpu_regs_frame_map = 16, // Number of registers used during code emission.
pd_nof_fcpu_regs_unallocated = 1, // Leave Z_F15 unallocated and use it as scratch register.
pd_nof_caller_save_fpu_regs_frame_map = pd_nof_fpu_regs_frame_map - pd_nof_fcpu_regs_unallocated, // Number of fpu registers killed by calls.
pd_nof_fpu_regs_reg_alloc = pd_nof_caller_save_fpu_regs_frame_map, // Number of registers that are visible to register allocator.
pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map, // Number of registers visible to linear scan.
pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
pd_last_fpu_reg = pd_first_fpu_reg + pd_nof_fpu_regs_frame_map - pd_nof_fcpu_regs_unallocated - 1,
pd_nof_xmm_regs_linearscan = 0,
pd_nof_caller_save_xmm_regs = 0,
pd_first_xmm_reg = -1,
pd_last_xmm_reg = -1
};
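// Consistency sketch (illustration only; spelled with C++11 static_assert,
// where HotSpot sources of this era would use STATIC_ASSERT):
//   16 GP regs  - 6 unallocated = 10 caller-save regs, numbered 0..9.
//   16 FPU regs - 1 scratch     = 15 allocatable, numbered 16..30.
static_assert(pd_nof_caller_save_cpu_regs_frame_map == 10, "16 - 6 GP regs");
static_assert(pd_last_cpu_reg == 9, "GP regnrs 0..9 visible to the allocator");
static_assert(pd_last_fpu_reg == 30, "FPU regnrs 16..30 are allocatable");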
// For debug info: a float value in a register is saved in single precision by runtime stubs.
enum {
pd_float_saved_as_double = false
};
#endif // CPU_S390_VM_C1_DEFS_S390_HPP

@ -0,0 +1,32 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_C1_FPUSTACKSIM_S390_HPP
#define CPU_S390_VM_C1_FPUSTACKSIM_S390_HPP
// No FPU stack on ZARCH_64
class FpuStackSim;
#endif // CPU_S390_VM_C1_FPUSTACKSIM_S390_HPP

@ -0,0 +1,293 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_s390.inline.hpp"
const int FrameMap::pd_c_runtime_reserved_arg_size = 7;
LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool outgoing) {
LIR_Opr opr = LIR_OprFact::illegalOpr;
VMReg r_1 = reg->first();
VMReg r_2 = reg->second();
if (r_1->is_stack()) {
// Convert stack slot to an SP offset.
// The calling convention does not count the SharedRuntime::out_preserve_stack_slots() value
// so we must add it in here.
int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
opr = LIR_OprFact::address(new LIR_Address(Z_SP_opr, st_off, type));
} else if (r_1->is_Register()) {
Register reg = r_1->as_Register();
if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
opr = as_long_opr(reg);
} else if (type == T_OBJECT || type == T_ARRAY) {
opr = as_oop_opr(reg);
} else if (type == T_METADATA) {
opr = as_metadata_opr(reg);
} else {
opr = as_opr(reg);
}
} else if (r_1->is_FloatRegister()) {
assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
FloatRegister f = r_1->as_FloatRegister();
if (type == T_FLOAT) {
opr = as_float_opr(f);
} else {
opr = as_double_opr(f);
}
} else {
ShouldNotReachHere();
}
return opr;
}
// FrameMap
//--------------------------------------------------------
FloatRegister FrameMap::_fpu_rnr2reg [FrameMap::nof_fpu_regs]; // mapping c1 regnr. -> FloatRegister
int FrameMap::_fpu_reg2rnr [FrameMap::nof_fpu_regs]; // mapping assembler encoding -> c1 regnr.
// Some useful constant RInfo's:
LIR_Opr FrameMap::Z_R0_opr;
LIR_Opr FrameMap::Z_R1_opr;
LIR_Opr FrameMap::Z_R2_opr;
LIR_Opr FrameMap::Z_R3_opr;
LIR_Opr FrameMap::Z_R4_opr;
LIR_Opr FrameMap::Z_R5_opr;
LIR_Opr FrameMap::Z_R6_opr;
LIR_Opr FrameMap::Z_R7_opr;
LIR_Opr FrameMap::Z_R8_opr;
LIR_Opr FrameMap::Z_R9_opr;
LIR_Opr FrameMap::Z_R10_opr;
LIR_Opr FrameMap::Z_R11_opr;
LIR_Opr FrameMap::Z_R12_opr;
LIR_Opr FrameMap::Z_R13_opr;
LIR_Opr FrameMap::Z_R14_opr;
LIR_Opr FrameMap::Z_R15_opr;
LIR_Opr FrameMap::Z_R0_oop_opr;
LIR_Opr FrameMap::Z_R1_oop_opr;
LIR_Opr FrameMap::Z_R2_oop_opr;
LIR_Opr FrameMap::Z_R3_oop_opr;
LIR_Opr FrameMap::Z_R4_oop_opr;
LIR_Opr FrameMap::Z_R5_oop_opr;
LIR_Opr FrameMap::Z_R6_oop_opr;
LIR_Opr FrameMap::Z_R7_oop_opr;
LIR_Opr FrameMap::Z_R8_oop_opr;
LIR_Opr FrameMap::Z_R9_oop_opr;
LIR_Opr FrameMap::Z_R10_oop_opr;
LIR_Opr FrameMap::Z_R11_oop_opr;
LIR_Opr FrameMap::Z_R12_oop_opr;
LIR_Opr FrameMap::Z_R13_oop_opr;
LIR_Opr FrameMap::Z_R14_oop_opr;
LIR_Opr FrameMap::Z_R15_oop_opr;
LIR_Opr FrameMap::Z_R0_metadata_opr;
LIR_Opr FrameMap::Z_R1_metadata_opr;
LIR_Opr FrameMap::Z_R2_metadata_opr;
LIR_Opr FrameMap::Z_R3_metadata_opr;
LIR_Opr FrameMap::Z_R4_metadata_opr;
LIR_Opr FrameMap::Z_R5_metadata_opr;
LIR_Opr FrameMap::Z_R6_metadata_opr;
LIR_Opr FrameMap::Z_R7_metadata_opr;
LIR_Opr FrameMap::Z_R8_metadata_opr;
LIR_Opr FrameMap::Z_R9_metadata_opr;
LIR_Opr FrameMap::Z_R10_metadata_opr;
LIR_Opr FrameMap::Z_R11_metadata_opr;
LIR_Opr FrameMap::Z_R12_metadata_opr;
LIR_Opr FrameMap::Z_R13_metadata_opr;
LIR_Opr FrameMap::Z_R14_metadata_opr;
LIR_Opr FrameMap::Z_R15_metadata_opr;
LIR_Opr FrameMap::Z_SP_opr;
LIR_Opr FrameMap::Z_FP_opr;
LIR_Opr FrameMap::Z_R2_long_opr;
LIR_Opr FrameMap::Z_R10_long_opr;
LIR_Opr FrameMap::Z_R11_long_opr;
LIR_Opr FrameMap::Z_F0_opr;
LIR_Opr FrameMap::Z_F0_double_opr;
LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };
// c1 rnr -> FloatRegister
FloatRegister FrameMap::nr2floatreg (int rnr) {
assert(_init_done, "tables not initialized");
debug_only(fpu_range_check(rnr);)
return _fpu_rnr2reg[rnr];
}
void FrameMap::map_float_register(int rnr, FloatRegister reg) {
debug_only(fpu_range_check(rnr);)
debug_only(fpu_range_check(reg->encoding());)
_fpu_rnr2reg[rnr] = reg; // mapping c1 regnr. -> FloatRegister
_fpu_reg2rnr[reg->encoding()] = rnr; // mapping assembler encoding -> c1 regnr.
}
void FrameMap::initialize() {
assert(!_init_done, "once");
DEBUG_ONLY(int allocated = 0;)
DEBUG_ONLY(int unallocated = 0;)
// Register usage:
// Z_thread (Z_R8)
// Z_fp (Z_R9)
// Z_SP (Z_R15)
DEBUG_ONLY(allocated++); map_register(0, Z_R2);
DEBUG_ONLY(allocated++); map_register(1, Z_R3);
DEBUG_ONLY(allocated++); map_register(2, Z_R4);
DEBUG_ONLY(allocated++); map_register(3, Z_R5);
DEBUG_ONLY(allocated++); map_register(4, Z_R6);
DEBUG_ONLY(allocated++); map_register(5, Z_R7);
DEBUG_ONLY(allocated++); map_register(6, Z_R10);
DEBUG_ONLY(allocated++); map_register(7, Z_R11);
DEBUG_ONLY(allocated++); map_register(8, Z_R12);
DEBUG_ONLY(allocated++); map_register(9, Z_R13); // <- last register visible in RegAlloc
DEBUG_ONLY(unallocated++); map_register(11, Z_R0); // Z_R0_scratch
DEBUG_ONLY(unallocated++); map_register(12, Z_R1); // Z_R1_scratch
DEBUG_ONLY(unallocated++); map_register(10, Z_R14); // return pc; TODO: Try to let c1/c2 allocate R14.
// The following registers are usually unavailable.
DEBUG_ONLY(unallocated++); map_register(13, Z_R8);
DEBUG_ONLY(unallocated++); map_register(14, Z_R9);
DEBUG_ONLY(unallocated++); map_register(15, Z_R15);
assert(allocated-1 == pd_last_cpu_reg, "wrong number/mapping of allocated CPU registers");
assert(unallocated == pd_nof_cpu_regs_unallocated, "wrong number of unallocated CPU registers");
assert(nof_cpu_regs == allocated+unallocated, "wrong number of CPU registers");
int j = 0;
for (int i = 0; i < nof_fpu_regs; i++) {
if (as_FloatRegister(i) == Z_fscratch_1) continue; // unallocated
map_float_register(j++, as_FloatRegister(i));
}
assert(j == nof_fpu_regs-1, "missed one fpu reg?");
map_float_register(j++, Z_fscratch_1);
_init_done = true;
Z_R0_opr = as_opr(Z_R0);
Z_R1_opr = as_opr(Z_R1);
Z_R2_opr = as_opr(Z_R2);
Z_R3_opr = as_opr(Z_R3);
Z_R4_opr = as_opr(Z_R4);
Z_R5_opr = as_opr(Z_R5);
Z_R6_opr = as_opr(Z_R6);
Z_R7_opr = as_opr(Z_R7);
Z_R8_opr = as_opr(Z_R8);
Z_R9_opr = as_opr(Z_R9);
Z_R10_opr = as_opr(Z_R10);
Z_R11_opr = as_opr(Z_R11);
Z_R12_opr = as_opr(Z_R12);
Z_R13_opr = as_opr(Z_R13);
Z_R14_opr = as_opr(Z_R14);
Z_R15_opr = as_opr(Z_R15);
Z_R0_oop_opr = as_oop_opr(Z_R0);
Z_R1_oop_opr = as_oop_opr(Z_R1);
Z_R2_oop_opr = as_oop_opr(Z_R2);
Z_R3_oop_opr = as_oop_opr(Z_R3);
Z_R4_oop_opr = as_oop_opr(Z_R4);
Z_R5_oop_opr = as_oop_opr(Z_R5);
Z_R6_oop_opr = as_oop_opr(Z_R6);
Z_R7_oop_opr = as_oop_opr(Z_R7);
Z_R8_oop_opr = as_oop_opr(Z_R8);
Z_R9_oop_opr = as_oop_opr(Z_R9);
Z_R10_oop_opr = as_oop_opr(Z_R10);
Z_R11_oop_opr = as_oop_opr(Z_R11);
Z_R12_oop_opr = as_oop_opr(Z_R12);
Z_R13_oop_opr = as_oop_opr(Z_R13);
Z_R14_oop_opr = as_oop_opr(Z_R14);
Z_R15_oop_opr = as_oop_opr(Z_R15);
Z_R0_metadata_opr = as_metadata_opr(Z_R0);
Z_R1_metadata_opr = as_metadata_opr(Z_R1);
Z_R2_metadata_opr = as_metadata_opr(Z_R2);
Z_R3_metadata_opr = as_metadata_opr(Z_R3);
Z_R4_metadata_opr = as_metadata_opr(Z_R4);
Z_R5_metadata_opr = as_metadata_opr(Z_R5);
Z_R6_metadata_opr = as_metadata_opr(Z_R6);
Z_R7_metadata_opr = as_metadata_opr(Z_R7);
Z_R8_metadata_opr = as_metadata_opr(Z_R8);
Z_R9_metadata_opr = as_metadata_opr(Z_R9);
Z_R10_metadata_opr = as_metadata_opr(Z_R10);
Z_R11_metadata_opr = as_metadata_opr(Z_R11);
Z_R12_metadata_opr = as_metadata_opr(Z_R12);
Z_R13_metadata_opr = as_metadata_opr(Z_R13);
Z_R14_metadata_opr = as_metadata_opr(Z_R14);
Z_R15_metadata_opr = as_metadata_opr(Z_R15);
// TODO: needed? Or can we make Z_R9 available for linear scan allocation?
Z_FP_opr = as_pointer_opr(Z_fp);
Z_SP_opr = as_pointer_opr(Z_SP);
Z_R2_long_opr = LIR_OprFact::double_cpu(cpu_reg2rnr(Z_R2), cpu_reg2rnr(Z_R2));
Z_R10_long_opr = LIR_OprFact::double_cpu(cpu_reg2rnr(Z_R10), cpu_reg2rnr(Z_R10));
Z_R11_long_opr = LIR_OprFact::double_cpu(cpu_reg2rnr(Z_R11), cpu_reg2rnr(Z_R11));
Z_F0_opr = as_float_opr(Z_F0);
Z_F0_double_opr = as_double_opr(Z_F0);
// All allocated cpu regs are caller saved.
for (int c1rnr = 0; c1rnr < max_nof_caller_save_cpu_regs; c1rnr++) {
_caller_save_cpu_regs[c1rnr] = as_opr(cpu_rnr2reg(c1rnr));
}
// All allocated fpu regs are caller saved.
for (int c1rnr = 0; c1rnr < nof_caller_save_fpu_regs; c1rnr++) {
_caller_save_fpu_regs[c1rnr] = as_float_opr(nr2floatreg(c1rnr));
}
}
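// Round-trip sketch (illustration only): after initialize(), the c1 register
// numbers and machine registers correspond as set up by the map_register()
// calls above, e.g.:
//   cpu_rnr2reg(0)  == Z_R2   and  cpu_reg2rnr(Z_R2) == 0
//   cpu_rnr2reg(9)  == Z_R13  (last register visible to the allocator)
//   cpu_rnr2reg(10) == Z_R14  (unallocated: holds the return pc)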
Address FrameMap::make_new_address(ByteSize sp_offset) const {
return Address(Z_SP, sp_offset);
}
VMReg FrameMap::fpu_regname (int n) {
return nr2floatreg(n)->as_VMReg();
}
LIR_Opr FrameMap::stack_pointer() {
return Z_SP_opr;
}
// JSR 292
// On ZARCH_64, there is no need to save the SP, because neither
// method handle intrinsics nor compiled lambda forms modify it.
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
return LIR_OprFact::illegalOpr;
}
bool FrameMap::validate_frame() {
return true;
}

@ -0,0 +1,141 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_C1_FRAMEMAP_S390_HPP
#define CPU_S390_VM_C1_FRAMEMAP_S390_HPP
public:
enum {
nof_reg_args = 5, // Registers Z_ARG1 - Z_ARG5 are available for parameter passing.
first_available_sp_in_frame = frame::z_abi_16_size,
frame_pad_in_bytes = 0
};
static const int pd_c_runtime_reserved_arg_size;
static LIR_Opr Z_R0_opr;
static LIR_Opr Z_R1_opr;
static LIR_Opr Z_R2_opr;
static LIR_Opr Z_R3_opr;
static LIR_Opr Z_R4_opr;
static LIR_Opr Z_R5_opr;
static LIR_Opr Z_R6_opr;
static LIR_Opr Z_R7_opr;
static LIR_Opr Z_R8_opr;
static LIR_Opr Z_R9_opr;
static LIR_Opr Z_R10_opr;
static LIR_Opr Z_R11_opr;
static LIR_Opr Z_R12_opr;
static LIR_Opr Z_R13_opr;
static LIR_Opr Z_R14_opr;
static LIR_Opr Z_R15_opr;
static LIR_Opr Z_R0_oop_opr;
static LIR_Opr Z_R1_oop_opr;
static LIR_Opr Z_R2_oop_opr;
static LIR_Opr Z_R3_oop_opr;
static LIR_Opr Z_R4_oop_opr;
static LIR_Opr Z_R5_oop_opr;
static LIR_Opr Z_R6_oop_opr;
static LIR_Opr Z_R7_oop_opr;
static LIR_Opr Z_R8_oop_opr;
static LIR_Opr Z_R9_oop_opr;
static LIR_Opr Z_R10_oop_opr;
static LIR_Opr Z_R11_oop_opr;
static LIR_Opr Z_R12_oop_opr;
static LIR_Opr Z_R13_oop_opr;
static LIR_Opr Z_R14_oop_opr;
static LIR_Opr Z_R15_oop_opr;
static LIR_Opr Z_R0_metadata_opr;
static LIR_Opr Z_R1_metadata_opr;
static LIR_Opr Z_R2_metadata_opr;
static LIR_Opr Z_R3_metadata_opr;
static LIR_Opr Z_R4_metadata_opr;
static LIR_Opr Z_R5_metadata_opr;
static LIR_Opr Z_R6_metadata_opr;
static LIR_Opr Z_R7_metadata_opr;
static LIR_Opr Z_R8_metadata_opr;
static LIR_Opr Z_R9_metadata_opr;
static LIR_Opr Z_R10_metadata_opr;
static LIR_Opr Z_R11_metadata_opr;
static LIR_Opr Z_R12_metadata_opr;
static LIR_Opr Z_R13_metadata_opr;
static LIR_Opr Z_R14_metadata_opr;
static LIR_Opr Z_R15_metadata_opr;
static LIR_Opr Z_SP_opr;
static LIR_Opr Z_FP_opr;
static LIR_Opr Z_R2_long_opr;
static LIR_Opr Z_R10_long_opr;
static LIR_Opr Z_R11_long_opr;
static LIR_Opr Z_F0_opr;
static LIR_Opr Z_F0_double_opr;
private:
static FloatRegister _fpu_rnr2reg [FrameMap::nof_fpu_regs]; // mapping c1 regnr. -> FloatRegister
static int _fpu_reg2rnr [FrameMap::nof_fpu_regs]; // mapping assembler encoding -> c1 regnr.
static void map_float_register(int rnr, FloatRegister reg);
// FloatRegister -> c1 rnr
static int fpu_reg2rnr (FloatRegister reg) {
assert(_init_done, "tables not initialized");
int c1rnr = _fpu_reg2rnr[reg->encoding()];
debug_only(fpu_range_check(c1rnr);)
return c1rnr;
}
public:
static LIR_Opr as_long_opr(Register r) {
return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
}
static LIR_Opr as_pointer_opr(Register r) {
return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
}
static LIR_Opr as_float_opr(FloatRegister r) {
return LIR_OprFact::single_fpu(fpu_reg2rnr(r));
}
static LIR_Opr as_double_opr(FloatRegister r) {
return LIR_OprFact::double_fpu(fpu_reg2rnr(r));
}
static FloatRegister nr2floatreg (int rnr);
static VMReg fpu_regname (int n);
// No callee saved registers (saved values are not accessible if callee is in runtime).
static bool is_caller_save_register (LIR_Opr opr) { return true; }
static bool is_caller_save_register (Register r) { return true; }
static int nof_caller_save_cpu_regs() { return pd_nof_caller_save_cpu_regs_frame_map; }
static int last_cpu_reg() { return pd_last_cpu_reg; }
#endif // CPU_S390_VM_C1_FRAMEMAP_S390_HPP

File diff suppressed because it is too large

@ -0,0 +1,54 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_C1_LIRASSEMBLER_S390_HPP
#define CPU_S390_VM_C1_LIRASSEMBLER_S390_HPP
private:
// Record the type of the receiver in ReceiverTypeData.
void type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
Register recv, Register tmp1, Label* update_done);
// Setup pointers to MDO, MDO slot, also compute offset bias to access the slot.
void setup_md_access(ciMethod* method, int bci,
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);
public:
address emit_call_c(address a);
void store_parameter(Register r, int param_num);
void store_parameter(jint c, int param_num);
void check_reserved_argument_area(int bytes) {
assert(bytes + FrameMap::first_available_sp_in_frame <= frame_map()->reserved_argument_area_size(),
"reserved_argument_area too small");
}
enum {
call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledStaticCall::emit_to_interp_stub.
exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
};
#endif // CPU_S390_VM_C1_LIRASSEMBLER_S390_HPP

File diff suppressed because it is too large

@ -0,0 +1,58 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/register.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
FloatRegister LIR_OprDesc::as_float_reg() const {
return FrameMap::nr2floatreg(fpu_regnr());
}
FloatRegister LIR_OprDesc::as_double_reg() const {
return FrameMap::nr2floatreg(fpu_regnrHi());
}
// Reg2 unused.
LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
assert(!as_FloatRegister(reg2)->is_valid(), "Not used on this platform");
return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
(reg1 << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size);
}
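// Encoding sketch (illustration only): double_fpu() stores the same FPU
// register number in both operand fields, because one 64-bit FPU register
// holds an entire double on this platform; reg2 is unused. The shift values
// below are hypothetical stand-ins for LIR_OprDesc's field offsets.
#include <cassert>
#include <cstdint>
int main() {
  const int reg1_shift = 3, reg2_shift = 10; // hypothetical field offsets
  const int reg1 = 5;
  uintptr_t enc = ((uintptr_t)reg1 << reg1_shift) | ((uintptr_t)reg1 << reg2_shift);
  assert(((enc >> reg1_shift) & 0x1f) == 5); // low field holds reg1
  assert(((enc >> reg2_shift) & 0x1f) == 5); // high field holds reg1 as well
  return 0;
}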
#ifndef PRODUCT
void LIR_Address::verify() const {
assert(base()->is_cpu_register(), "wrong base operand");
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
"wrong type for addresses");
}
#endif // PRODUCT

@ -0,0 +1,33 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_LinearScan.hpp"
#include "utilities/debug.hpp"
void LinearScan::allocate_fpu_stack() {
// No FPU stack on ZARCH_64.
ShouldNotCallThis();
}

@ -0,0 +1,64 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_C1_LINEARSCAN_S390_HPP
#define CPU_S390_VM_C1_LINEARSCAN_S390_HPP
inline bool LinearScan::is_processed_reg_num(int reg_num) {
// unallocated: Z_thread, Z_fp, Z_SP, Z_R0_scratch, Z_R1_scratch, Z_R14
assert(FrameMap::Z_R14_opr->cpu_regnr() == 10, "wrong assumption below");
assert(FrameMap::Z_R0_opr->cpu_regnr() == 11, "wrong assumption below");
assert(FrameMap::Z_R1_opr->cpu_regnr() == 12, "wrong assumption below");
assert(FrameMap::Z_R8_opr->cpu_regnr() == 13, "wrong assumption below");
assert(FrameMap::Z_R9_opr->cpu_regnr() == 14, "wrong assumption below");
assert(FrameMap::Z_R15_opr->cpu_regnr() == 15, "wrong assumption below");
assert(reg_num >= 0, "invalid reg_num");
return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
}
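// Stand-alone replica (illustration only) of the predicate above with this
// port's constants plugged in: GP regnrs 0..9 and all FPU regnrs (16 and up)
// are processed; the unallocated GP regnrs 10..15 are not.
#include <cassert>
static bool is_processed(int reg_num) {
  const int last_cpu_reg = 9, nof_cpu_regs_frame_map = 16; // values from c1_Defs_s390
  return reg_num <= last_cpu_reg || reg_num >= nof_cpu_regs_frame_map;
}
int main() {
  assert(is_processed(0) && is_processed(9));     // allocatable GP regs
  assert(!is_processed(10) && !is_processed(15)); // Z_R14 .. Z_R15: unallocated
  assert(is_processed(16));                       // first FPU regnr
  return 0;
}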
inline int LinearScan::num_physical_regs(BasicType type) {
// IBM Z requires one cpu register for a long,
// and one fpu register for a double.
return 1;
}
inline bool LinearScan::requires_adjacent_regs(BasicType type) {
return false;
}
inline bool LinearScan::is_caller_save(int assigned_reg) {
assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers");
return true; // No callee-saved registers on IBM Z.
}
inline void LinearScan::pd_add_temps(LIR_Op* op) {
// No special case behaviours.
}
inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
return false; // No special case behaviours.
}
#endif // CPU_S390_VM_C1_LINEARSCAN_S390_HPP

@ -0,0 +1,380 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
Label ic_miss, ic_hit;
verify_oop(receiver);
int klass_offset = oopDesc::klass_offset_in_bytes();
if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
if (VM_Version::has_CompareBranch()) {
z_cgij(receiver, 0, Assembler::bcondEqual, ic_miss);
} else {
z_ltgr(receiver, receiver);
z_bre(ic_miss);
}
}
compare_klass_ptr(iCache, klass_offset, receiver, false);
z_bre(ic_hit);
// If icache check fails, then jump to runtime routine.
// Note: RECEIVER must still contain the receiver!
load_const_optimized(Z_R1_scratch, AddressLiteral(SharedRuntime::get_ic_miss_stub()));
z_br(Z_R1_scratch);
align(CodeEntryAlignment);
bind(ic_hit);
}
void C1_MacroAssembler::explicit_null_check(Register base) {
ShouldNotCallThis(); // unused
}
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
generate_stack_overflow_check(bang_size_in_bytes);
save_return_pc();
push_frame(frame_size_in_bytes); // TODO: Must we add z_abi_160?
}
void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
ShouldNotCallThis(); // unused
}
void C1_MacroAssembler::verified_entry() {
if (C1Breakpoint) z_illtrap(0xC1);
}
void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
const int hdr_offset = oopDesc::mark_offset_in_bytes();
assert_different_registers(hdr, obj, disp_hdr);
NearLabel done;
verify_oop(obj);
// Load object header.
z_lg(hdr, Address(obj, hdr_offset));
// Save object being locked into the BasicObjectLock...
z_stg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
if (UseBiasedLocking) {
biased_locking_enter(obj, hdr, Z_R1_scratch, Z_R0_scratch, done, &slow_case);
}
// and mark it as unlocked.
z_oill(hdr, markOopDesc::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
z_stg(hdr, Address(disp_hdr, (intptr_t)0));
// Test if object header is still the same (i.e. unlocked), and if so, store the
// displaced header address in the object header. If it is not the same, get the
// object header instead.
z_csg(hdr, disp_hdr, hdr_offset, obj);
// If the object header was the same, we're done.
if (PrintBiasedLockingStatistics) {
Unimplemented();
#if 0
cond_inc32(Assembler::equal,
ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
#endif
}
branch_optimized(Assembler::bcondEqual, done);
// If the object header was not the same, it is now in the hdr register.
// => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
//
// 1) (hdr & markOopDesc::lock_mask_in_place) == 0
// 2) Z_SP <= hdr
// 3) hdr <= Z_SP + page_size
//
// These 3 tests can be done by evaluating the following expression:
//
// (hdr - Z_SP) & (~(page_size-1) | markOopDesc::lock_mask_in_place)
//
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
z_sgr(hdr, Z_SP);
load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
// For recursive locking, the result is zero. => Save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking).
z_stg(hdr, Address(disp_hdr, (intptr_t)0));
// Otherwise we don't care about the result and handle locking via runtime call.
branch_optimized(Assembler::bcondNotZero, slow_case);
// done
bind(done);
}
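// Worked sketch (illustration only) of the combined recursive-lock test
// above: for a recursive lock the displaced header is an address within the
// current stack page, so (hdr - sp) masks to zero and the lock bits are
// clear. The function name and constants below are hypothetical.
#include <cassert>
#include <cstdint>
static bool is_recursive_lock(uintptr_t hdr, uintptr_t sp,
                              uintptr_t page_size, uintptr_t lock_mask) {
  return ((hdr - sp) & (~(page_size - 1) | lock_mask)) == 0;
}
int main() {
  const uintptr_t page = 4096, mask = 3; // markOopDesc::lock_mask_in_place == 3 assumed
  const uintptr_t sp = 0x10000;
  assert( is_recursive_lock(sp + 0x80,     sp, page, mask)); // in-page, lock bits clear
  assert(!is_recursive_lock(sp + 0x81,     sp, page, mask)); // lock bits set
  assert(!is_recursive_lock(sp + 2 * page, sp, page, mask)); // outside the page window
  return 0;
}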
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
const int aligned_mask = BytesPerWord -1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
assert_different_registers(hdr, obj, disp_hdr);
NearLabel done;
if (UseBiasedLocking) {
// Load object.
z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
biased_locking_exit(obj, hdr, done);
}
// Load displaced header.
z_ltg(hdr, Address(disp_hdr, (intptr_t)0));
// If the loaded hdr is NULL we had recursive locking, and we are done.
z_bre(done);
if (!UseBiasedLocking) {
// Load object.
z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
}
verify_oop(obj);
// Test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object. If the object header is not pointing to
// the displaced header, get the object header instead.
z_csg(disp_hdr, hdr, hdr_offset, obj);
// If the object header was not pointing to the displaced header,
// we do unlocking via runtime call.
branch_optimized(Assembler::bcondNotEqual, slow_case);
// done
bind(done);
}
void C1_MacroAssembler::try_allocate(
Register obj, // result: Pointer to object after successful allocation.
Register var_size_in_bytes, // Object size in bytes if unknown at compile time; invalid otherwise.
int con_size_in_bytes, // Object size in bytes if known at compile time.
Register t1, // Temp register: Must be global register for incr_allocated_bytes.
Label& slow_case // Continuation point if fast allocation fails.
) {
if (UseTLAB) {
tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
} else {
// Allocation in shared Eden is not implemented, because the sapjvm allocation trace does not allow it.
z_brul(slow_case);
}
}
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register Rzero, Register t1) {
assert_different_registers(obj, klass, len, t1, Rzero);
if (UseBiasedLocking && !len->is_valid()) {
assert_different_registers(obj, klass, len, t1);
z_lg(t1, Address(klass, Klass::prototype_header_offset()));
} else {
// This assumes that all prototype bits fit in an int32_t.
load_const_optimized(t1, (intx)markOopDesc::prototype());
}
z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
if (len->is_valid()) {
// Length will be in the klass gap, if one exists.
z_st(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
} else if (UseCompressedClassPointers) {
store_klass_gap(Rzero, obj); // Zero klass gap for compressed oops.
}
store_klass(klass, obj, t1);
}
void C1_MacroAssembler::initialize_body(Register objectFields, Register len_in_bytes, Register Rzero) {
Label done;
assert_different_registers(objectFields, len_in_bytes, Rzero);
// Initialize object fields.
// See documentation for MVCLE instruction!!!
assert(objectFields->encoding()%2==0, "objectFields must be an even register");
assert(len_in_bytes->encoding() == (objectFields->encoding()+1), "objectFields and len_in_bytes must be a register pair");
assert(Rzero->encoding()%2==1, "Rzero must be an odd register");
// Use Rzero as src length, then mvcle will copy nothing
// and fill the object with the padding value 0.
move_long_ext(objectFields, as_Register(Rzero->encoding()-1), 0);
bind(done);
}
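// Model sketch (illustration only): with a zero source length, MVCLE copies
// nothing and pads the whole destination with the pad byte (0 here), so the
// clearing above behaves like a memset; the even/odd register-pair rules are
// enforced by the asserts at the top of initialize_body().
#include <cstddef>
#include <cstring>
static void initialize_body_model(void* object_fields, std::size_t len_in_bytes) {
  std::memset(object_fields, 0, len_in_bytes); // destination receives only pad bytes
}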
void C1_MacroAssembler::allocate_object(
Register obj, // Result: pointer to object after successful allocation.
Register t1, // temp register
Register t2, // temp register: Must be a global register for try_allocate.
int hdr_size, // object header size in words
int obj_size, // object size in words
Register klass, // object klass
Label& slow_case // Continuation point if fast allocation fails.
) {
assert_different_registers(obj, t1, t2, klass);
// Allocate space and initialize header.
try_allocate(obj, noreg, obj_size * wordSize, t1, slow_case);
initialize_object(obj, klass, noreg, obj_size * HeapWordSize, t1, t2);
}
void C1_MacroAssembler::initialize_object(
Register obj, // result: Pointer to object after successful allocation.
Register klass, // object klass
Register var_size_in_bytes, // Object size in bytes if unknown at compile time; invalid otherwise.
int con_size_in_bytes, // Object size in bytes if known at compile time.
Register t1, // temp register
Register t2 // temp register
) {
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
"con_size_in_bytes is not multiple of alignment");
assert(var_size_in_bytes == noreg, "not implemented");
const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
const Register Rzero = t2;
z_xgr(Rzero, Rzero);
initialize_header(obj, klass, noreg, Rzero, t1);
// Clear rest of allocated space.
const int threshold = 4 * BytesPerWord;
if (con_size_in_bytes <= threshold) {
// Use explicit null stores.
// code size = 6*n bytes (n = number of fields to clear)
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
z_stg(Rzero, Address(obj, i));
} else {
// Code size generated by initialize_body() is 16.
Register object_fields = Z_R0_scratch;
Register len_in_bytes = Z_R1_scratch;
z_la(object_fields, hdr_size_in_bytes, obj);
load_const_optimized(len_in_bytes, con_size_in_bytes - hdr_size_in_bytes);
initialize_body(object_fields, len_in_bytes, Rzero);
}
// Dtrace support is unimplemented.
// if (CURRENT_ENV->dtrace_alloc_probes()) {
// assert(obj == rax, "must be");
// call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
// }
verify_oop(obj);
}
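// Cost sketch (illustration only): with hdr_size_in_bytes = 16 and
// con_size_in_bytes = 32 (the 4-word threshold), two z_stg null stores cost
// 2 * 6 = 12 bytes of code, beating the 16-byte initialize_body() sequence;
// for larger objects the MVCLE-based path wins.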
void C1_MacroAssembler::allocate_array(
Register obj, // result: Pointer to array after successful allocation.
Register len, // array length
Register t1, // temp register
Register t2, // temp register
int hdr_size, // object header size in words
int elt_size, // element size in bytes
Register klass, // object klass
Label& slow_case // Continuation point if fast allocation fails.
) {
assert_different_registers(obj, len, t1, t2, klass);
// Determine alignment mask.
assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
// Check for negative or excessive length.
compareU64_and_branch(len, (int32_t)max_array_allocation_length, bcondHigh, slow_case);
// Compute array size.
// Note: If 0 <= len <= max_length, len*elt_size + header + alignment is
// smaller than or equal to the largest integer. Also, since top is always
// aligned, we can do the alignment here instead of at the end address
// computation.
const Register arr_size = t2;
switch (elt_size) {
case 1: lgr_if_needed(arr_size, len); break;
case 2: z_sllg(arr_size, len, 1); break;
case 4: z_sllg(arr_size, len, 2); break;
case 8: z_sllg(arr_size, len, 3); break;
default: ShouldNotReachHere();
}
add2reg(arr_size, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment.
z_nill(arr_size, (~MinObjAlignmentInBytesMask) & 0xffff); // Align array size.
try_allocate(obj, arr_size, 0, t1, slow_case);
initialize_header(obj, klass, len, noreg, t1);
// Clear rest of allocated space.
Label done;
Register object_fields = t1;
Register Rzero = Z_R1_scratch;
z_aghi(arr_size, -(hdr_size * BytesPerWord));
z_bre(done); // Jump if size of fields is zero.
z_la(object_fields, hdr_size * BytesPerWord, obj);
z_xgr(Rzero, Rzero);
initialize_body(object_fields, arr_size, Rzero);
bind(done);
// Dtrace support is unimplemented.
// if (CURRENT_ENV->dtrace_alloc_probes()) {
// assert(obj == rax, "must be");
// call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
// }
verify_oop(obj);
}
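// Worked example (illustration only) of the size computation above, for a
// 10-element int array with hdr_size = 2 words and 8-byte object alignment:
#include <cassert>
int main() {
  const int len = 10, elt_shift = 2;           // elt_size == 4 -> shift by 2
  const int hdr_bytes = 2 * 8, align_mask = 7; // hdr_size * wordSize, MinObjAlignmentInBytesMask
  int arr_size = len << elt_shift;             // 40 bytes of elements
  arr_size += hdr_bytes + align_mask;          // 63: add header plus alignment slack
  arr_size &= ~align_mask;                     // 56 = 16 (header) + 40 (data)
  assert(arr_size == 56);
  return 0;
}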
#ifndef PRODUCT
void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
Unimplemented();
// if (!VerifyOops) return;
// verify_oop_addr(Address(SP, stack_offset + STACK_BIAS));
}
void C1_MacroAssembler::verify_not_null_oop(Register r) {
if (!VerifyOops) return;
NearLabel not_null;
compareU64_and_branch(r, (intptr_t)0, bcondNotEqual, not_null);
stop("non-null oop required");
bind(not_null);
verify_oop(r);
}
void C1_MacroAssembler::invalidate_registers(Register preserve1,
Register preserve2,
Register preserve3) {
Register dead_value = noreg;
for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
Register r = as_Register(i);
if (r != preserve1 && r != preserve2 && r != preserve3 && r != Z_SP && r != Z_thread) {
if (dead_value == noreg) {
load_const_optimized(r, 0xc1dead);
dead_value = r;
} else {
z_lgr(r, dead_value);
}
}
}
}
#endif // !PRODUCT

@ -0,0 +1,103 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_C1_MACROASSEMBLER_S390_HPP
#define CPU_S390_VM_C1_MACROASSEMBLER_S390_HPP
void pd_init() { /* nothing to do */ }
public:
void try_allocate(
Register obj, // result: Pointer to object after successful allocation.
Register var_size_in_bytes, // Object size in bytes if unknown at compile time; invalid otherwise.
int con_size_in_bytes, // Object size in bytes if known at compile time.
Register t1, // temp register
Label& slow_case // Continuation point if fast allocation fails.
);
void initialize_header(Register obj, Register klass, Register len, Register Rzero, Register t1);
void initialize_body(Register objectFields, Register len_in_bytes, Register Rzero);
// locking
// hdr : Used to hold locked markOop to be CASed into obj, contents destroyed.
// obj : Must point to the object to lock, contents preserved.
// disp_hdr: Must point to the displaced header location, contents preserved.
// Returns code offset at which to add null check debug information.
void lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case);
// unlocking
// hdr : Used to hold original markOop to be CASed back into obj, contents destroyed.
// obj : Must point to the object to lock, contents preserved.
// disp_hdr: Must point to the displaced header location, contents destroyed.
void unlock_object(Register hdr, Register obj, Register lock, Label& slow_case);
void initialize_object(
Register obj, // result: Pointer to object after successful allocation.
Register klass, // object klass
Register var_size_in_bytes, // Object size in bytes if unknown at compile time; invalid otherwise.
int con_size_in_bytes, // Object size in bytes if known at compile time.
Register t1, // temp register
Register t2 // temp register
);
// Allocation of fixed-size objects.
// This can also be used to allocate fixed-size arrays, by setting
// hdr_size correctly and storing the array length afterwards.
void allocate_object(
Register obj, // result: Pointer to object after successful allocation.
Register t1, // temp register
Register t2, // temp register
int hdr_size, // object header size in words
int obj_size, // object size in words
Register klass, // object klass
Label& slow_case // Continuation point if fast allocation fails.
);
enum {
max_array_allocation_length = 0x01000000 // Sparc friendly value, requires sethi only.
};
// Allocation of arrays.
void allocate_array(
Register obj, // result: Pointer to array after successful allocation.
Register len, // array length
Register t1, // temp register
Register t2, // temp register
int hdr_size, // object header size in words
int elt_size, // element size in bytes
Register klass, // object klass
Label& slow_case // Continuation point if fast allocation fails.
);
// Invalidates registers in this window.
void invalidate_registers(Register preserve1 = noreg, Register preserve2 = noreg,
Register preserve3 = noreg) PRODUCT_RETURN;
void nop() { z_nop(); }
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
#endif // CPU_S390_VM_C1_MACROASSEMBLER_S390_HPP

File diff suppressed because it is too large

@ -0,0 +1,73 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_C1_GLOBALS_S390_HPP
#define CPU_S390_VM_C1_GLOBALS_S390_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)
// Flags sorted according to sparc.
#ifndef TIERED
define_pd_global(bool, BackgroundCompilation, true);
define_pd_global(bool, CICompileOSR, true);
define_pd_global(bool, InlineIntrinsics, true);
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, false);
define_pd_global(bool, UseOnStackReplacement, true);
define_pd_global(bool, TieredCompilation, false);
define_pd_global(intx, CompileThreshold, 1000);
define_pd_global(intx, OnStackReplacePercentage, 1400);
define_pd_global(bool, UseTLAB, true);
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, FreqInlineSize, 325);
define_pd_global(bool, ResizeTLAB, true);
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M);
define_pd_global(uintx, ProfiledCodeHeapSize, 14*M);
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M);
define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(size_t, MetaspaceSize, 12*M);
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
define_pd_global(uintx, InitialCodeCacheSize, 160*K);
#endif // !TIERED
define_pd_global(bool, UseTypeProfile, false);
define_pd_global(bool, RoundFPResults, false);
define_pd_global(bool, LIRFillDelaySlots, false);
define_pd_global(bool, OptimizeSinglePrecision, false);
define_pd_global(bool, CSEArrayLength, true);
define_pd_global(bool, TwoOperandLIRForm, true);
#endif // CPU_S390_VM_C1_GLOBALS_S390_HPP

@ -0,0 +1,95 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_C2_GLOBALS_S390_HPP
#define CPU_S390_VM_C2_GLOBALS_S390_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Sets the default values for platform dependent flags used by the server compiler.
// (see c2_globals.hpp).
// Sorted according to sparc.
define_pd_global(bool, BackgroundCompilation, true);
define_pd_global(bool, CICompileOSR, true);
define_pd_global(bool, InlineIntrinsics, true);
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, true);
define_pd_global(bool, UseOnStackReplacement, true);
define_pd_global(bool, ProfileInterpreter, true);
define_pd_global(bool, TieredCompilation, trueInTiered);
define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, OnStackReplacePercentage, 140);
define_pd_global(intx, ConditionalMoveLimit, 4);
define_pd_global(intx, FLOATPRESSURE, 15);
define_pd_global(intx, FreqInlineSize, 175);
// 10 prevents spill-split-recycle sanity check in JVM2008.xml.transform.
define_pd_global(intx, INTPRESSURE, 10); // Medium size register set, 6 special purpose regs, 3 SOE regs.
define_pd_global(intx, InteriorEntryAlignment, 2);
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, RegisterCostAreaRatio, 12000);
define_pd_global(bool, UseTLAB, true);
define_pd_global(bool, ResizeTLAB, true);
define_pd_global(intx, LoopUnrollLimit, 60);
define_pd_global(intx, LoopPercentProfileLimit, 10);
define_pd_global(intx, PostLoopMultiversioning, false);
define_pd_global(intx, MinJumpTableSize, 18);
// Peephole and CISC spilling both break the graph, and so make the
// scheduler sick.
define_pd_global(bool, OptoPeephole, false);
define_pd_global(bool, UseCISCSpill, true);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
// On s390x, we can clear the array with a single instruction,
// so don't idealize it.
define_pd_global(bool, IdealizeClearArrayNode, false);
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(uintx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M);
define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM, 128ULL*G);
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on z/Architecture.
// Heap related flags
define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
#endif // CPU_S390_VM_C2_GLOBALS_S390_HPP

@ -0,0 +1,34 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
// Processor dependent initialization for z/Architecture.
void Compile::pd_compiler2_init() {
guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "");
}

@ -0,0 +1,39 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_CODEBUFFER_S390_HPP
#define CPU_S390_VM_CODEBUFFER_S390_HPP
private:
void pd_initialize() {}
public:
void flush_bundle(bool start_new_bundle) {}
void getCpuData(const CodeBuffer * const cb) {}
#endif // CPU_S390_VM_CODEBUFFER_S390_HPP

@ -0,0 +1,160 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#ifdef COMPILER2
#include "opto/matcher.hpp"
#endif
// ----------------------------------------------------------------------------
#undef __
#define __ _masm.
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
#ifdef COMPILER2
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
if (mark == NULL) {
// Get the mark within main instrs section which is set to the address of the call.
mark = cbuf.insts_mark();
}
assert(mark != NULL, "mark must not be NULL");
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
address stub = __ start_a_stub(Compile::MAX_stubs_size);
if (stub == NULL) {
return NULL; // CodeBuffer::expand failed.
}
__ relocate(static_stub_Relocation::spec(mark));
AddressLiteral meta = __ allocate_metadata_address(NULL);
bool success = __ load_const_from_toc(as_Register(Matcher::inline_cache_reg_encode()), meta);
__ set_inst_mark();
AddressLiteral a((address)-1);
success = success && __ load_const_from_toc(Z_R1, a);
if (!success) {
return NULL; // CodeCache is full.
}
__ z_br(Z_R1);
__ end_a_stub(); // Update current stubs pointer and restore insts_end.
return stub;
#else
ShouldNotReachHere();
#endif
}
#undef __
int CompiledStaticCall::to_interp_stub_size() {
return 2 * MacroAssembler::load_const_from_toc_size() +
2; // branch
}
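// Stub layout sketch (as emitted by emit_to_interp_stub above):
//   load_const_from_toc  <inline cache reg>, <metadata>  // patched to the callee Method*
//   load_const_from_toc  Z_R1, <entry>                   // patched to the interpreter entry
//   z_br                 Z_R1                            // the 2-byte branch counted above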
// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
return 5; // 4 in emit_java_to_interp + 1 in Java_Static_Call
}
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + get_IC_pos_in_java_to_interp_stub());
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
// A generated lambda form might be deleted from the LambdaForm
// cache in MethodTypeForm. If a JIT-compiled LambdaForm method
// becomes non-entrant and the cache access returns null, the new
// resolve will lead to a newly generated LambdaForm.
assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee() || callee->is_compiled_lambda_form(),
"a) MT-unsafe modification of inline cache");
assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
"b) MT-unsafe modification of inline cache");
// Update stub.
method_holder->set_data((intptr_t)callee());
jump->set_jump_destination(entry);
// Update jump to call.
set_destination_mt_safe(stub);
}
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
// Reset stub.
address stub = static_stub->addr();
assert(stub != NULL, "stub not found");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + get_IC_pos_in_java_to_interp_stub());
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
method_holder->set_data(0);
jump->set_jump_destination((address)-1);
}
//-----------------------------------------------------------------------------
#ifndef PRODUCT
void CompiledStaticCall::verify() {
// Verify call.
NativeCall::verify();
if (os::is_MP()) {
verify_alignment();
}
// Verify stub.
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + get_IC_pos_in_java_to_interp_stub());
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
// Verify state.
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
#endif // !PRODUCT

File diff suppressed because it is too large

@ -0,0 +1,35 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/frame.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
void pd_ps(frame f) {}

@ -0,0 +1,31 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_DEPCHECKER_S390_HPP
#define CPU_S390_VM_DEPCHECKER_S390_HPP
// Nothing to do on z/Architecture
#endif // CPU_S390_VM_DEPCHECKER_S390_HPP

@ -0,0 +1,37 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_DISASSEMBLER_S390_HPP
#define CPU_S390_VM_DISASSEMBLER_S390_HPP
static int pd_instruction_alignment() {
return 1;
}
static const char* pd_cpu_opts() {
return "zarch";
}
#endif // CPU_S390_VM_DISASSEMBLER_S390_HPP

@ -0,0 +1,504 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_s390.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#include "runtime/vframeArray.hpp"
#endif
// Major contributions by Aha, AS.
#ifdef ASSERT
void RegisterMap::check_location_valid() {
}
#endif // ASSERT
// Profiling/safepoint support
bool frame::safe_for_sender(JavaThread *thread) {
bool safe = false;
address cursp = (address)sp();
address curfp = (address)fp();
if ((cursp != NULL && curfp != NULL &&
(cursp <= thread->stack_base() && cursp >= thread->stack_base() - thread->stack_size())) &&
(curfp <= thread->stack_base() && curfp >= thread->stack_base() - thread->stack_size())) {
safe = true;
}
return safe;
}
bool frame::is_interpreted_frame() const {
return Interpreter::contains(pc());
}
// sender_sp
intptr_t* frame::interpreter_frame_sender_sp() const {
return sender_sp();
}
frame frame::sender_for_entry_frame(RegisterMap *map) const {
assert(map != NULL, "map must be set");
// Java frame called from C. Skip all C frames and return top C
// frame of that chunk as the sender.
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
assert(!entry_frame_is_first(), "next Java sp must be non zero");
assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
map->clear();
assert(map->include_argument_oops(), "should be set by clear");
if (jfa->last_Java_pc() != NULL) {
frame fr(jfa->last_Java_sp(), jfa->last_Java_pc());
return fr;
}
// Last_java_pc is not set if we come here from compiled code.
frame fr(jfa->last_Java_sp());
return fr;
}
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
// Pass the caller's sender_sp as unextended_sp.
return frame(sender_sp(), sender_pc(), (intptr_t*)(ijava_state()->sender_sp));
}
frame frame::sender_for_compiled_frame(RegisterMap *map) const {
assert(map != NULL, "map must be set");
// Frame owned by compiler.
address pc = *compiled_sender_pc_addr(_cb);
frame caller(compiled_sender_sp(_cb), pc);
// Now adjust the map.
// Get the rest.
if (map->update_map()) {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (_cb->oop_maps() != NULL) {
OopMapSet::update_register_map(this, map);
}
}
return caller;
}
intptr_t* frame::compiled_sender_sp(CodeBlob* cb) const {
return sender_sp();
}
address* frame::compiled_sender_pc_addr(CodeBlob* cb) const {
return sender_pc_addr();
}
frame frame::sender(RegisterMap* map) const {
// By default, argument oops do not need to be followed; the
// sender_for_xxx methods update the map accordingly.
map->set_include_argument_oops(false);
if (is_entry_frame()) {
return sender_for_entry_frame(map);
}
if (is_interpreted_frame()) {
return sender_for_interpreter_frame(map);
}
assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
if (_cb != NULL) {
return sender_for_compiled_frame(map);
}
// Must be native-compiled frame, i.e. the marshaling code for native
// methods that exists in the core system.
return frame(sender_sp(), sender_pc());
}
void frame::patch_pc(Thread* thread, address pc) {
if (TracePcPatching) {
tty->print_cr("patch_pc at address " PTR_FORMAT " [" PTR_FORMAT " -> " PTR_FORMAT "] ",
p2i(&((address*) _sp)[-1]), p2i(((address*) _sp)[-1]), p2i(pc));
}
own_abi()->return_pc = (uint64_t)pc;
_cb = CodeCache::find_blob(pc);
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
assert(original_pc == _pc, "expected original to be stored before patching");
_deopt_state = is_deoptimized;
// Leave _pc as is.
} else {
_deopt_state = not_deoptimized;
_pc = pc;
}
}
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// Is there anything to do?
assert(is_interpreted_frame(), "Not an interpreted frame");
return true;
}
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
assert(is_interpreted_frame(), "interpreted frame expected");
Method* method = interpreter_frame_method();
BasicType type = method->result_type();
if (method->is_native()) {
address lresult = (address)&(ijava_state()->lresult);
address fresult = (address)&(ijava_state()->fresult);
switch (type) {
case T_OBJECT:
case T_ARRAY: {
*oop_result = (oop) (void*) ijava_state()->oop_tmp;
break;
}
// The interpreter stored the values as 64-bit integer/float slots (lresult/fresult).
case T_BOOLEAN : value_result->z = (jboolean) *(unsigned long*)lresult; break;
case T_INT : value_result->i = (jint) *(long*)lresult; break;
case T_CHAR : value_result->c = (jchar) *(unsigned long*)lresult; break;
case T_SHORT : value_result->s = (jshort) *(long*)lresult; break;
case T_BYTE : value_result->b = (jbyte) *(long*)lresult; break;
case T_LONG : value_result->j = (jlong) *(long*)lresult; break;
case T_FLOAT : value_result->f = (jfloat) *(float*)fresult; break;
case T_DOUBLE : value_result->d = (jdouble) *(double*)fresult; break;
case T_VOID : break; // Nothing to do.
default : ShouldNotReachHere();
}
} else {
intptr_t* tos_addr = interpreter_frame_tos_address();
switch (type) {
case T_OBJECT:
case T_ARRAY: {
oop obj = *(oop*)tos_addr;
assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
*oop_result = obj;
break;
}
case T_BOOLEAN : value_result->z = (jboolean) *(jint*)tos_addr; break;
case T_BYTE : value_result->b = (jbyte) *(jint*)tos_addr; break;
case T_CHAR : value_result->c = (jchar) *(jint*)tos_addr; break;
case T_SHORT : value_result->s = (jshort) *(jint*)tos_addr; break;
case T_INT : value_result->i = *(jint*)tos_addr; break;
case T_LONG : value_result->j = *(jlong*)tos_addr; break;
case T_FLOAT : value_result->f = *(jfloat*)tos_addr; break;
case T_DOUBLE : value_result->d = *(jdouble*)tos_addr; break;
case T_VOID : break; // Nothing to do.
default : ShouldNotReachHere();
}
}
return type;
}
// Dump all frames starting at a given C stack pointer.
// Use max_frames to limit the number of traced frames.
void frame::back_trace(outputStream* st, intptr_t* start_sp, intptr_t* top_pc, unsigned long flags, int max_frames) {
static char buf[ 150 ];
bool print_outgoing_arguments = flags & 0x1;
bool print_istate_pointers = flags & 0x2;
int num = 0;
intptr_t* current_sp = (intptr_t*) start_sp;
int last_num_jargs = 0;
int frame_type = 0;
int last_frame_type = 0;
while (current_sp) {
intptr_t* current_fp = (intptr_t*) *current_sp;
address current_pc = (num == 0)
? (address) top_pc
: (address) *((intptr_t*)(((address) current_sp) + _z_abi(return_pc)));
if ((intptr_t*) current_fp != 0 && (intptr_t*) current_fp <= current_sp) {
st->print_cr("ERROR: corrupt stack");
return;
}
st->print("#%-3d ", num);
const char* type_name = " ";
const char* function_name = NULL;
// Detect current frame's frame_type, default to 'C frame'.
frame_type = 0;
CodeBlob* blob = NULL;
if (Interpreter::contains(current_pc)) {
frame_type = 1;
} else if (StubRoutines::contains(current_pc)) {
if (StubRoutines::returns_to_call_stub(current_pc)) {
frame_type = 2;
} else {
frame_type = 4;
type_name = "stu";
StubCodeDesc* desc = StubCodeDesc::desc_for (current_pc);
if (desc) {
function_name = desc->name();
} else {
function_name = "unknown stub";
}
}
} else if (CodeCache::contains(current_pc)) {
blob = CodeCache::find_blob_unsafe(current_pc);
if (blob) {
if (blob->is_nmethod()) {
frame_type = 3;
} else if (blob->is_deoptimization_stub()) {
frame_type = 4;
type_name = "deo";
function_name = "deoptimization blob";
} else if (blob->is_uncommon_trap_stub()) {
frame_type = 4;
type_name = "uct";
function_name = "uncommon trap blob";
} else if (blob->is_exception_stub()) {
frame_type = 4;
type_name = "exc";
function_name = "exception blob";
} else if (blob->is_safepoint_stub()) {
frame_type = 4;
type_name = "saf";
function_name = "safepoint blob";
} else if (blob->is_runtime_stub()) {
frame_type = 4;
type_name = "run";
function_name = ((RuntimeStub *)blob)->name();
} else if (blob->is_method_handles_adapter_blob()) {
frame_type = 4;
type_name = "mha";
function_name = "method handles adapter blob";
} else {
frame_type = 4;
type_name = "blo";
function_name = "unknown code blob";
}
} else {
frame_type = 4;
type_name = "blo";
function_name = "unknown code blob";
}
}
st->print("sp=" PTR_FORMAT " ", p2i(current_sp));
if (frame_type == 0) {
current_pc = (address) *((intptr_t*)(((address) current_sp) + _z_abi(gpr14)));
}
st->print("pc=" PTR_FORMAT " ", p2i(current_pc));
st->print(" ");
switch (frame_type) {
case 0: // C frame:
{
st->print(" ");
if (current_pc == 0) {
st->print("? ");
} else {
// name
int func_offset;
char demangled_name[256];
int demangled_name_len = 256;
if (os::dll_address_to_function_name(current_pc, demangled_name, demangled_name_len, &func_offset)) {
demangled_name[demangled_name_len-1] = '\0';
st->print(func_offset == -1 ? "%s " : "%s+0x%x", demangled_name, func_offset);
} else {
st->print("? ");
}
}
}
break;
case 1: // interpreter frame:
{
st->print(" i ");
if (last_frame_type != 1) last_num_jargs = 8;
// name
Method* method = *(Method**)((address)current_fp + _z_ijava_state_neg(method));
if (method) {
if (method->is_synchronized()) st->print("synchronized ");
if (method->is_static()) st->print("static ");
if (method->is_native()) st->print("native ");
method->name_and_sig_as_C_string(buf, sizeof(buf));
st->print("%s ", buf);
}
else
st->print("? ");
intptr_t* tos = (intptr_t*) *(intptr_t*)((address)current_fp + _z_ijava_state_neg(esp));
if (print_istate_pointers) {
st->cr();
st->print(" ");
st->print("ts=" PTR_FORMAT " ", p2i(tos));
}
// Dump some Java stack slots.
if (print_outgoing_arguments) {
if (method->is_native()) {
#ifdef ASSERT
intptr_t* cargs = (intptr_t*) (((address)current_sp) + _z_abi(carg_1));
for (int i = 0; i < last_num_jargs; i++) {
// Cargs is not prepushed.
st->cr();
st->print(" ");
st->print(PTR_FORMAT, *(cargs));
cargs++;
}
#endif /* ASSERT */
}
else {
if (tos) {
for (int i = 0; i < last_num_jargs; i++) {
// tos+0 is prepushed, ignore.
tos++;
if (tos >= (intptr_t *)((address)current_fp + _z_ijava_state_neg(monitors)))
break;
st->cr();
st->print(" ");
st->print(PTR_FORMAT " %+.3e %+.3le", *(tos), *(float*)(tos), *(double*)(tos));
}
}
}
last_num_jargs = method->size_of_parameters();
}
}
break;
case 2: // entry frame:
{
st->print("v2i ");
// name
st->print("call stub");
}
break;
case 3: // compiled frame:
{
st->print(" c ");
// name
Method* method = ((nmethod *)blob)->method();
if (method) {
method->name_and_sig_as_C_string(buf, sizeof(buf));
st->print("%s ", buf);
}
else
st->print("? ");
}
break;
case 4: // named frames
{
st->print("%s ", type_name);
// name
if (function_name)
st->print("%s", function_name);
}
break;
default:
break;
}
st->cr();
st->flush();
current_sp = current_fp;
last_frame_type = frame_type;
num++;
// Check for maximum # of frames, and stop when reached.
if (max_frames > 0 && --max_frames == 0)
break;
}
}
// Convenience function for calls from the debugger.
extern "C" void bt(intptr_t* start_sp,intptr_t* top_pc) {
frame::back_trace(tty,start_sp, top_pc, 0);
}
extern "C" void bt_full(intptr_t* start_sp,intptr_t* top_pc) {
frame::back_trace(tty,start_sp, top_pc, (unsigned long)(long)-1);
}
// Function for tracing a limited number of frames.
// Use this one if you only need to see the "top of stack" frames.
extern "C" void bt_max(intptr_t *start_sp, intptr_t *top_pc, int max_frames) {
frame::back_trace(tty, start_sp, top_pc, 0, max_frames);
}
#if !defined(PRODUCT)
#define DESCRIBE_ADDRESS(name) \
values.describe(frame_no, (intptr_t*)&ijava_state()->name, #name);
void frame::describe_pd(FrameValues& values, int frame_no) {
if (is_interpreted_frame()) {
// Describe z_ijava_state elements.
DESCRIBE_ADDRESS(method);
DESCRIBE_ADDRESS(locals);
DESCRIBE_ADDRESS(monitors);
DESCRIBE_ADDRESS(cpoolCache);
DESCRIBE_ADDRESS(bcp);
DESCRIBE_ADDRESS(mdx);
DESCRIBE_ADDRESS(esp);
DESCRIBE_ADDRESS(sender_sp);
DESCRIBE_ADDRESS(top_frame_sp);
DESCRIBE_ADDRESS(oop_tmp);
DESCRIBE_ADDRESS(lresult);
DESCRIBE_ADDRESS(fresult);
}
}
#endif // !PRODUCT
intptr_t *frame::initial_deoptimization_info() {
// Used to reset the saved FP.
return fp();
}

@ -0,0 +1,552 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// Major contributions by ML, AHa.
#ifndef CPU_S390_VM_FRAME_S390_HPP
#define CPU_S390_VM_FRAME_S390_HPP
#include "runtime/synchronizer.hpp"
// C frame layout on ZARCH_64.
//
// In this figure the stack grows upwards, while memory grows
// downwards. See "Linux for zSeries: ELF Application Binary Interface Supplement",
// IBM Corp. (LINUX-1107-01)
//
// Square brackets denote stack regions possibly larger
// than a single 64 bit slot.
//
// STACK:
// 0 [C_FRAME] <-- SP after prolog (mod 8 = 0)
// [C_FRAME] <-- SP before prolog
// ...
// [C_FRAME]
//
// C_FRAME:
// 0 [ABI_160]
//
// ABI_160:
// 0 [ABI_16]
// 16 CARG_1: spill slot for outgoing arg 1. used by next callee.
// 24 CARG_2: spill slot for outgoing arg 2. used by next callee.
// 32 CARG_3: spill slot for outgoing arg 3. used by next callee.
// 40 CARG_4: spill slot for outgoing arg 4. used by next callee.
// 48 GPR_6: spill slot for GPR_6. used by next callee.
// ... ...
// 120 GPR_15: spill slot for GPR_15. used by next callee.
// 128 CFARG_1: spill slot for outgoing fp arg 1. used by next callee.
// 136 CFARG_2: spill slot for outgoing fp arg 2. used by next callee.
// 144 CFARG_3: spill slot for outgoing fp arg 3. used by next callee.
// 152 CFARG_4: spill slot for outgoing fp arg 4. used by next callee.
// 160 [REMAINING CARGS]
//
// ABI_16:
// 0 callers_sp
// 8 return_pc
public:
// C frame layout
typedef enum {
// stack alignment
alignment_in_bytes = 8,
// log_2(8*8 bits) = 6.
log_2_of_alignment_in_bits = 6
} frame_constants;
struct z_abi_16 {
uint64_t callers_sp;
uint64_t return_pc;
};
enum {
z_abi_16_size = sizeof(z_abi_16)
};
#define _z_abi16(_component) \
(offset_of(frame::z_abi_16, _component))
// ABI_160:
// REMARK: This structure should reflect the "minimal" ABI frame
// layout, but it doesn't. There is an extra field at the end of the
// structure that marks the area where arguments are passed, when
// the argument registers "overflow". Thus, sizeof(z_abi_160)
// doesn't yield the expected (and desired) result. Therefore, as
// long as we do not provide extra infrastructure, one should use
// either z_abi_160_size, or _z_abi(remaining_cargs) instead of
// sizeof(...).
struct z_abi_160 {
uint64_t callers_sp;
uint64_t return_pc;
uint64_t carg_1;
uint64_t carg_2;
uint64_t carg_3;
uint64_t carg_4;
uint64_t gpr6;
uint64_t gpr7;
uint64_t gpr8;
uint64_t gpr9;
uint64_t gpr10;
uint64_t gpr11;
uint64_t gpr12;
uint64_t gpr13;
uint64_t gpr14;
uint64_t gpr15;
uint64_t cfarg_1;
uint64_t cfarg_2;
uint64_t cfarg_3;
uint64_t cfarg_4;
uint64_t remaining_cargs;
};
enum {
z_abi_160_size = 160
};
#define _z_abi(_component) \
(offset_of(frame::z_abi_160, _component))
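// Usage sketch: _z_abi(return_pc) yields offset_of(frame::z_abi_160, return_pc),
// i.e. 8 (see ABI_16 above), so the caller's return pc can be read as
//   *(uint64_t*)(((address)sp) + _z_abi(return_pc))
// as done in frame::back_trace().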
struct z_abi_160_spill : z_abi_160 {
// Additional spill slots. Use as 'offset_of(z_abi_160_spill, spill[n])'.
uint64_t spill[0];
// Aligned to frame::alignment_in_bytes.
};
// non-volatile GPRs:
struct z_spill_nonvolatiles {
uint64_t r6;
uint64_t r7;
uint64_t r8;
uint64_t r9;
uint64_t r10;
uint64_t r11;
uint64_t r12;
uint64_t r13;
};
enum {
z_spill_nonvolatiles_size = sizeof(z_spill_nonvolatiles)
};
#define _z_spill_nonvolatiles_neg(_component) \
(-frame::z_spill_nonvolatiles_size + offset_of(frame::z_spill_nonvolatiles, _component))
// Frame layout for the Java template interpreter on z/Architecture.
//
// In these figures the stack grows upwards, while memory grows
// downwards. Square brackets denote regions possibly larger than
// single 64 bit slots.
//
// STACK (no JNI, no compiled code, no library calls, template interpreter is active):
//
// 0 [TOP_IJAVA_FRAME]
// [PARENT_IJAVA_FRAME]
// [PARENT_IJAVA_FRAME]
// ...
// [PARENT_IJAVA_FRAME]
// [ENTRY_FRAME]
// [C_FRAME]
// ...
// [C_FRAME]
//
// TOP_IJAVA_FRAME:
//
// 0 [TOP_IJAVA_FRAME_ABI]
// 16 [operand stack]
// [monitors] (optional)
// [IJAVA_STATE]
// note: Own locals are located in the caller frame.
//
// PARENT_IJAVA_FRAME:
//
// 0 [PARENT_IJAVA_FRAME_ABI]
// [callee's locals w/o arguments]
// [outgoing arguments]
// [used part of operand stack w/o arguments]
// [monitors] (optional)
// [IJAVA_STATE]
//
// ENTRY_FRAME:
//
// 0 [PARENT_IJAVA_FRAME_ABI]
// [callee's locals w/o arguments]
// [outgoing arguments]
// [ENTRY_FRAME_LOCALS]
//
// TOP_IJAVA_FRAME_ABI:
//
// 0 [ABI_160]
//
//
// PARENT_IJAVA_FRAME_ABI:
//
// 0 [ABI_16]
//
// IJAVA_STATE:
//
// 0 method
// 8 locals
// monitors : monitor block top (i.e. lowest address)
// cpoolCache
// bcp
// mdx
// esp : Points to first slot above operands.
// sender_sp : See comment in z_ijava_state.
// top_frame_sp : Own SP before modification by i2c adapter.
// oop_tmp
// lresult
// fresult
//
// EXAMPLE:
// ---------
//
// 3 monitors, 5 operand stack slots max. / 3 allocated
//
// F0 callers_sp <- Z_SP (callers_sp == Z_fp (own fp))
// return_pc
// [rest of ABI_160]
// /slot 4: free
// oper. | slot 3: free <- Z_esp points to first free slot
// stack | slot 2: ref val v2 caches IJAVA_STATE.esp
// | slot 1: unused
// \slot 0: long val v1
// /slot 5 <- IJAVA_STATE.monitors = monitor block top
// | slot 4
// monitors| slot 3
// | slot 2
// | slot 1
// \slot 0
// [IJAVA_STATE] <- monitor block bot (points to first byte in IJAVA_STATE)
// F1 [PARENT_IJAVA_FRAME_ABI] <- Z_fp (== *Z_SP, points to slot just below IJAVA_STATE)
// [F0's locals] <- Z_locals, locals[i] := *(Z_locals - i*BytesPerWord)
// [F1's operand stack]
// [F1's monitors] (optional)
// [IJAVA_STATE]
public:
// PARENT_IJAVA_FRAME_ABI
struct z_parent_ijava_frame_abi : z_abi_16 {
};
enum {
z_parent_ijava_frame_abi_size = sizeof(z_parent_ijava_frame_abi)
};
#define _z_parent_ijava_frame_abi(_component) \
(offset_of(frame::z_parent_ijava_frame_abi, _component))
// TOP_IJAVA_FRAME_ABI
struct z_top_ijava_frame_abi : z_abi_160 {
};
enum {
z_top_ijava_frame_abi_size = sizeof(z_top_ijava_frame_abi)
};
#define _z_top_ijava_frame_abi(_component) \
(offset_of(frame::z_top_ijava_frame_abi, _component))
// IJAVA_STATE
struct z_ijava_state {
DEBUG_ONLY(uint64_t magic;) // wrong magic -> wrong state!
uint64_t method;
uint64_t mirror;
uint64_t locals; // Z_locals
uint64_t monitors;
uint64_t cpoolCache;
uint64_t bcp; // Z_bcp
uint64_t mdx;
uint64_t esp; // Z_esp
// Caller's original SP before modification by c2i adapter (if caller is compiled)
// and before top -> parent frame conversion by the interpreter entry.
// Note: for i2i calls a correct sender_sp is required, too, because there
// we cannot use the caller's top_frame_sp as sp when removing the callee
// frame (caller could be compiled or entry frame). Therefore the sender_sp
// has to be the interpreted caller's sp as TOP_IJAVA_FRAME. See also
// AbstractInterpreter::layout_activation() used by deoptimization.
uint64_t sender_sp;
// Own SP before modification by i2c adapter and top-2-parent-resize
// by interpreted callee.
uint64_t top_frame_sp;
// Slots only needed for native calls. Maybe better to move elsewhere.
uint64_t oop_tmp;
uint64_t lresult;
uint64_t fresult;
};
enum {
z_ijava_state_size = sizeof(z_ijava_state)
};
#ifdef ASSERT
enum {
z_istate_magic_number = 0x900d // ~= good magic
};
#endif
#define _z_ijava_state_neg(_component) \
(int) (-frame::z_ijava_state_size + offset_of(frame::z_ijava_state, _component))
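// Usage sketch: the interpreter state sits just below the frame pointer, so
// fields are read at negative offsets, e.g. (as in frame::back_trace()):
//   Method* m = *(Method**)((address)fp + _z_ijava_state_neg(method));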
// ENTRY_FRAME
struct z_entry_frame_locals {
uint64_t call_wrapper_address;
uint64_t result_address;
uint64_t result_type;
uint64_t arguments_tos_address;
// Callee saved registers are spilled to caller frame.
// Caller must have z_abi_160.
};
enum {
z_entry_frame_locals_size = sizeof(z_entry_frame_locals)
};
#define _z_entry_frame_locals_neg(_component) \
(int) (-frame::z_entry_frame_locals_size + offset_of(frame::z_entry_frame_locals, _component))
// Frame layout for JIT generated methods
//
// In these figures the stack grows upwards, while memory grows
// downwards. Square brackets denote regions possibly larger than single
// 64 bit slots.
//
// STACK (interpreted Java calls JIT generated Java):
//
// [JIT_FRAME] <-- SP (mod 16 = 0)
// [TOP_IJAVA_FRAME]
// ...
//
//
// JIT_FRAME (is a C frame according to z/Architecture ABI):
//
// [out_preserve]
// [out_args]
// [spills]
// [monitor] (optional)
// ...
// [monitor] (optional)
// [in_preserve] added / removed by prolog / epilog
public:
struct z_top_jit_abi_32 {
uint64_t callers_sp;
uint64_t return_pc;
uint64_t toc;
uint64_t tmp;
};
#define _z_top_jit_abi(_component) \
(offset_of(frame::z_top_jit_abi_32, _component))
struct jit_monitor {
uint64_t monitor[1];
};
struct jit_in_preserve {
// Used to provide a z/Architecture ABI on top of a jit frame.
// nothing to add here!
};
struct jit_out_preserve : z_top_jit_abi_32 {
// Nothing to add here!
};
enum {
z_jit_out_preserve_size = sizeof(jit_out_preserve)
};
typedef enum {
jit_monitor_size_in_4_byte_units = sizeof(jit_monitor) / 4,
// Stack alignment requirement. Log_2 of alignment size in bits.
// log_2(16*8 bits) = 7.
jit_log_2_of_stack_alignment_in_bits = 7,
jit_out_preserve_size_in_4_byte_units = sizeof(jit_out_preserve) / 4,
jit_in_preserve_size_in_4_byte_units = sizeof(jit_in_preserve) / 4
} jit_frame_constants;
// C2I adapter frames:
//
// STACK (interpreted called from compiled, on entry to frame manager):
//
// [TOP_C2I_FRAME]
// [JIT_FRAME]
// ...
//
//
// STACK (interpreted called from compiled, after interpreter has been pushed):
//
// [TOP_IJAVA_FRAME]
// [PARENT_C2I_FRAME]
// [JIT_FRAME]
// ...
//
//
// TOP_C2I_FRAME:
//
// [TOP_IJAVA_FRAME_ABI]
// [outgoing Java arguments]
// alignment (optional)
//
//
// PARENT_C2I_FRAME:
//
// [PARENT_IJAVA_FRAME_ABI]
// alignment (optional)
// [callee's locals w/o arguments]
// [outgoing Java arguments]
// alignment (optional)
private:
// STACK:
// ...
// [THIS_FRAME] <-- this._sp (stack pointer for this frame)
// [CALLER_FRAME] <-- this.fp() (_sp of caller's frame)
// ...
//
// NOTE: Stack pointer is now held in the base class, so remove it from here.
// Frame pointer for this frame.
intptr_t* _fp;
// Needed by deoptimization.
intptr_t* _unextended_sp;
public:
// Interface for all frames:
// Accessors
inline intptr_t* fp() const { return _fp; }
private:
inline void find_codeblob_and_set_pc_and_deopt_state(address pc);
// Constructors
public:
frame(intptr_t* sp);
// To be used, if sp was not extended to match callee's calling convention.
frame(intptr_t* sp, address pc);
frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
// Access frame via stack pointer.
inline intptr_t* sp_addr_at(int index) const { return &sp()[index]; }
inline intptr_t sp_at( int index) const { return *sp_addr_at(index); }
// Access ABIs.
inline z_abi_16* own_abi() const { return (z_abi_16*) sp(); }
inline z_abi_160* callers_abi() const { return (z_abi_160*) fp(); }
private:
intptr_t* compiled_sender_sp(CodeBlob* cb) const;
address* compiled_sender_pc_addr(CodeBlob* cb) const;
address* sender_pc_addr(void) const;
public:
// Additional interface for interpreter frames:
static int interpreter_frame_interpreterstate_size_in_bytes();
static int interpreter_frame_monitor_size_in_bytes();
private:
// template interpreter state
inline z_ijava_state* ijava_state() const;
// Where z_ijava_state.monitors is saved.
inline BasicObjectLock** interpreter_frame_monitors_addr() const;
// Where z_ijava_state.esp is saved.
inline intptr_t** interpreter_frame_esp_addr() const;
public:
inline intptr_t* interpreter_frame_top_frame_sp();
inline void interpreter_frame_set_tos_address(intptr_t* x);
inline void interpreter_frame_set_top_frame_sp(intptr_t* top_frame_sp);
inline void interpreter_frame_set_sender_sp(intptr_t* sender_sp);
#ifdef ASSERT
inline void interpreter_frame_set_magic();
#endif
// monitors:
// Next two functions read and write z_ijava_state.monitors.
private:
inline BasicObjectLock* interpreter_frame_monitors() const;
inline void interpreter_frame_set_monitors(BasicObjectLock* monitors);
public:
// Additional interface for entry frames:
inline z_entry_frame_locals* entry_frame_locals() const {
return (z_entry_frame_locals*) (((address) fp()) - z_entry_frame_locals_size);
}
public:
// Get caller pc from stack slot of gpr14.
address native_sender_pc() const;
// Get caller pc from stack slot of gpr10.
address callstub_sender_pc() const;
// Dump all frames starting at a given C stack pointer.
// max_frames: Limit number of traced frames.
// <= 0 --> full trace
// > 0 --> trace the #max_frames topmost frames
static void back_trace(outputStream* st, intptr_t* start_sp, intptr_t* top_pc,
unsigned long flags, int max_frames = 0);
enum {
// This enum value specifies the offset from the pc remembered by
// call instructions to the location where control returns to
// after a normal return. Most architectures remember the return
// location directly, i.e. the offset is zero. This is the case
// for z/Architecture, too.
//
// Normal return address is the instruction following the branch.
pc_return_offset = 0,
};
#endif // CPU_S390_VM_FRAME_S390_HPP

@ -0,0 +1,297 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_FRAME_S390_INLINE_HPP
#define CPU_S390_VM_FRAME_S390_INLINE_HPP
#include "code/codeCache.hpp"
#include "code/vmreg.inline.hpp"
// Inline functions for z/Architecture frames:
inline void frame::find_codeblob_and_set_pc_and_deopt_state(address pc) {
assert(pc != NULL, "precondition: must have PC");
_cb = CodeCache::find_blob(pc);
_pc = pc; // Must be set for get_deopt_original_pc().
_fp = (intptr_t *) own_abi()->callers_sp;
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
}
assert(((uint64_t)_sp & 0x7) == 0, "SP must be 8-byte aligned");
}
// Constructors
// Initialize all fields. _fp, _cb, _pc and _deopt_state are set by find_codeblob_and_set_pc_and_deopt_state.
inline frame::frame() : _sp(NULL), _unextended_sp(NULL), _fp(NULL), _cb(NULL), _pc(NULL), _deopt_state(unknown) {}
inline frame::frame(intptr_t* sp) : _sp(sp), _unextended_sp(sp) {
find_codeblob_and_set_pc_and_deopt_state((address)own_abi()->return_pc);
}
inline frame::frame(intptr_t* sp, address pc) : _sp(sp), _unextended_sp(sp) {
find_codeblob_and_set_pc_and_deopt_state(pc); // Also sets _fp and _deopt_state.
}
inline frame::frame(intptr_t* sp, address pc, intptr_t* unextended_sp) : _sp(sp), _unextended_sp(unextended_sp) {
find_codeblob_and_set_pc_and_deopt_state(pc); // Also sets _fp and _deopt_state.
}
// Generic constructor. Used by pns() in debug.cpp only
#ifndef PRODUCT
inline frame::frame(void* sp, void* pc, void* unextended_sp) :
_sp((intptr_t*)sp), _unextended_sp((intptr_t*)unextended_sp), _cb(NULL), _pc(NULL) {
find_codeblob_and_set_pc_and_deopt_state((address)pc); // Also sets _fp and _deopt_state.
}
#endif
// template interpreter state
inline frame::z_ijava_state* frame::ijava_state() const {
z_ijava_state* state = (z_ijava_state*) ((uintptr_t)fp() - z_ijava_state_size);
assert(state->magic == (intptr_t) frame::z_istate_magic_number,
"wrong z_ijava_state in interpreter frame (no magic found)");
return state;
}
inline BasicObjectLock** frame::interpreter_frame_monitors_addr() const {
return (BasicObjectLock**) &(ijava_state()->monitors);
}
// The next two functions read and write z_ijava_state.monitors.
inline BasicObjectLock* frame::interpreter_frame_monitors() const {
return *interpreter_frame_monitors_addr();
}
inline void frame::interpreter_frame_set_monitors(BasicObjectLock* monitors) {
*interpreter_frame_monitors_addr() = monitors;
}
// Accessors
// Return unique id for this frame. The id must have a value where we
// can distinguish identity and younger/older relationship. NULL
// represents an invalid (incomparable) frame.
inline intptr_t* frame::id(void) const {
// Use _fp. _sp or _unextended_sp wouldn't be correct due to resizing.
return _fp;
}
// Return true if this frame is younger (more recent activation) than
// the frame represented by id.
inline bool frame::is_younger(intptr_t* id) const {
assert(this->id() != NULL && id != NULL, "NULL frame id");
// Stack grows towards smaller addresses on z/Architecture.
return this->id() < id;
}
// Return true if this frame is older (less recent activation) than
// the frame represented by id.
inline bool frame::is_older(intptr_t* id) const {
assert(this->id() != NULL && id != NULL, "NULL frame id");
// Stack grows towards smaller addresses on z/Architecture.
return this->id() > id;
}
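// Sketch: with the stack growing towards smaller addresses, a callee's fp is
// numerically smaller than its caller's, so callee.is_younger(caller.id())
// holds while caller.is_older(callee.id()) holds.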
inline int frame::frame_size(RegisterMap* map) const {
// Stack grows towards smaller addresses on z/Linux: sender is at a higher address.
return sender_sp() - sp();
}
// Ignore c2i adapter frames.
inline intptr_t* frame::unextended_sp() const {
return _unextended_sp;
}
inline address frame::sender_pc() const {
return (address) callers_abi()->return_pc;
}
// Get caller pc from the gpr14 stack slot, if the caller is native.
inline address frame::native_sender_pc() const {
return (address) callers_abi()->gpr14;
}
// Get caller pc from stack slot of gpr10.
inline address frame::callstub_sender_pc() const {
return (address) callers_abi()->gpr10;
}
inline address* frame::sender_pc_addr() const {
return (address*) &(callers_abi()->return_pc);
}
inline intptr_t* frame::sender_sp() const {
return (intptr_t*) callers_abi();
}
inline intptr_t* frame::link() const {
return (intptr_t*) callers_abi()->callers_sp;
}
inline intptr_t** frame::interpreter_frame_locals_addr() const {
return (intptr_t**) &(ijava_state()->locals);
}
inline intptr_t* frame::interpreter_frame_bcp_addr() const {
return (intptr_t*) &(ijava_state()->bcp);
}
inline intptr_t* frame::interpreter_frame_mdp_addr() const {
return (intptr_t*) &(ijava_state()->mdx);
}
// Bottom(base) of the expression stack (highest address).
inline intptr_t* frame::interpreter_frame_expression_stack() const {
return (intptr_t*)interpreter_frame_monitor_end() - 1;
}
inline jint frame::interpreter_frame_expression_stack_direction() {
return -1;
}
inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
return &interpreter_frame_tos_address()[offset];
}
// monitor elements
// End is lower in memory than begin, and beginning element is oldest element.
// Also begin is one past last monitor.
inline intptr_t* frame::interpreter_frame_top_frame_sp() {
return (intptr_t*)ijava_state()->top_frame_sp;
}
inline void frame::interpreter_frame_set_top_frame_sp(intptr_t* top_frame_sp) {
ijava_state()->top_frame_sp = (intptr_t) top_frame_sp;
}
inline void frame::interpreter_frame_set_sender_sp(intptr_t* sender_sp) {
ijava_state()->sender_sp = (intptr_t) sender_sp;
}
#ifdef ASSERT
inline void frame::interpreter_frame_set_magic() {
ijava_state()->magic = (intptr_t) frame::z_istate_magic_number;
}
#endif
// Where z_ijava_state.esp is saved.
inline intptr_t** frame::interpreter_frame_esp_addr() const {
return (intptr_t**) &(ijava_state()->esp);
}
// top of expression stack (lowest address)
inline intptr_t* frame::interpreter_frame_tos_address() const {
return *interpreter_frame_esp_addr() + 1;
}
inline void frame::interpreter_frame_set_tos_address(intptr_t* x) {
*interpreter_frame_esp_addr() = x - 1;
}
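// Sketch: Z_esp points to the first free slot, so the current top-of-stack
// element lives one slot above it; after interpreter_frame_set_tos_address(x),
// interpreter_frame_tos_address() returns x again.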
// Stack slot needed for native calls and GC.
inline oop * frame::interpreter_frame_temp_oop_addr() const {
return (oop *) ((address) _fp + _z_ijava_state_neg(oop_tmp));
}
// In keeping with Intel side: end is lower in memory than begin.
// Beginning element is oldest element. Also begin is one past last monitor.
inline BasicObjectLock * frame::interpreter_frame_monitor_begin() const {
return (BasicObjectLock*)ijava_state();
}
inline BasicObjectLock * frame::interpreter_frame_monitor_end() const {
return interpreter_frame_monitors();
}
inline void frame::interpreter_frame_set_monitor_end(BasicObjectLock* monitors) {
interpreter_frame_set_monitors((BasicObjectLock *)monitors);
}
inline int frame::interpreter_frame_monitor_size() {
// Number of stack slots for a monitor
return round_to(BasicObjectLock::size() /* number of stack slots */,
WordsPerLong /* Number of stack slots for a Java long. */);
}
inline int frame::interpreter_frame_monitor_size_in_bytes() {
// Number of bytes for a monitor.
return frame::interpreter_frame_monitor_size() * wordSize;
}
inline int frame::interpreter_frame_interpreterstate_size_in_bytes() {
return z_ijava_state_size;
}
inline Method** frame::interpreter_frame_method_addr() const {
return (Method**)&(ijava_state()->method);
}
inline oop* frame::interpreter_frame_mirror_addr() const {
return (oop*)&(ijava_state()->mirror);
}
// Constant pool cache
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
return (ConstantPoolCache**)&(ijava_state()->cpoolCache);
}
// entry frames
inline intptr_t* frame::entry_frame_argument_at(int offset) const {
// Since an entry frame always calls the interpreter first,
// the parameters are on the stack and relative to known register in the
// entry frame.
intptr_t* tos = (intptr_t*) entry_frame_locals()->arguments_tos_address;
return &tos[offset + 1]; // prepushed tos
}
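// Sketch: due to the prepushed tos, entry_frame_argument_at(0) reads tos[1],
// entry_frame_argument_at(1) reads tos[2], and so on.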
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
return (JavaCallWrapper**) &entry_frame_locals()->call_wrapper_address;
}
inline oop frame::saved_oop_result(RegisterMap* map) const {
return *((oop*) map->location(Z_R2->as_VMReg())); // R2 is return register.
}
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
*((oop*) map->location(Z_R2->as_VMReg())) = obj; // R2 is return register.
}
inline intptr_t* frame::real_fp() const {
return fp();
}
#endif // CPU_S390_VM_FRAME_S390_INLINE_HPP

@ -0,0 +1,55 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_GLOBALDEFINITIONS_S390_HPP
#define CPU_S390_VM_GLOBALDEFINITIONS_S390_HPP
#ifdef CC_INTERP
#error "CC_INTERP is not supported on z/Architecture."
#endif
// Convenience macro that produces a string literal with the filename
// and linenumber of the location where the macro was used.
#ifndef FILE_AND_LINE
#define FILE_AND_LINE __FILE__ ":" XSTR(__LINE__)
#endif
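// Usage sketch (illustrative only):
//   guarantee(val != NULL, "unexpected NULL at " FILE_AND_LINE);
// expands to a string literal such as "globalDefinitions_s390.hpp:42".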
#define ShortenBranches true
const int StackAlignmentInBytes = 16;
#define SUPPORTS_NATIVE_CX8
// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
// This is the case on z/Architecture.
const bool CCallingConventionRequiresIntsAsLongs = true;
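// Sketch: a jint argument is passed widened to 64 bits, e.g. the value -1
// (0xFFFFFFFF) must arrive in a register as 0xFFFFFFFFFFFFFFFF, not with
// undefined upper 32 bits.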
// Contended Locking reorder and cache line bucket.
// This setting should be kept compatible with vm_version_s390.cpp.
// The expected size in bytes of a cache line, used to pad data structures.
#define DEFAULT_CACHE_LINE_SIZE 256
#endif // CPU_S390_VM_GLOBALDEFINITIONS_S390_HPP

@ -0,0 +1,127 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_GLOBALS_S390_HPP
#define CPU_S390_VM_GLOBALS_S390_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
// Sorted according to sparc.
// z/Architecture remembers branch targets, so don't share vtables.
define_pd_global(bool, ShareVtableStubs, false);
define_pd_global(bool, NeedsDeoptSuspend, false); // Only register window machines need this.
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
define_pd_global(bool, TrapBasedNullChecks, true);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast.
define_pd_global(uintx, CodeCacheSegmentSize, 256);
// This shall be at least 32 for proper branch target alignment.
// Ideally, this is 256 (cache line size). This keeps code and data
// on separate lines. But we reduced it to 64 since 256 increased
// code size significantly by padding nops between IVC and second UEP.
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 2);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 2000);
#define DEFAULT_STACK_YELLOW_PAGES (2)
#define DEFAULT_STACK_RED_PAGES (1)
// Java_java_net_SocketOutputStream_socketWrite0() uses a 64k buffer on the
// stack. To pass stack overflow tests we need 20 shadow pages.
#define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
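// Sketch (assuming 4K pages): 20 shadow pages cover 80K, leaving headroom
// above the 64K buffer mentioned above; debug builds add 2 more pages.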
#define DEFAULT_STACK_RESERVED_PAGES (0)
#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
#define MIN_STACK_RESERVED_PAGES (0)
define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
define_pd_global(bool, PreserveFramePointer, false);
// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread.
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);
// 8146801 (Short Array Allocation): No performance work done here yet.
define_pd_global(intx, InitArrayShortSize, 1*BytesPerLong);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint, writeable) \
\
/* Reoptimize code-sequences of calls at runtime, e.g. replace an */ \
/* indirect call by a direct call. */ \
product(bool, ReoptimizeCallSequences, true, \
"Reoptimize code-sequences of calls at runtime.") \
\
product(bool, UseCountLeadingZerosInstruction, true, \
"Use count leading zeros instruction.") \
\
product(bool, UseByteReverseInstruction, true, \
"Use byte reverse instruction.") \
\
product(bool, ExpandLoadingBaseDecode, true, "Expand the assembler " \
"instruction required to load the base from DecodeN nodes during " \
"matching.") \
product(bool, ExpandLoadingBaseDecode_NN, true, "Expand the assembler " \
"instruction required to load the base from DecodeN_NN nodes " \
"during matching.") \
product(bool, ExpandLoadingBaseEncode, true, "Expand the assembler " \
"instruction required to load the base from EncodeP nodes during " \
"matching.") \
product(bool, ExpandLoadingBaseEncode_NN, true, "Expand the assembler " \
"instruction required to load the base from EncodeP_NN nodes " \
"during matching.") \
\
/* Seems to pay off with 2 pages already. */ \
product(size_t, MVCLEThreshold, +2*(4*K), \
"Threshold above which page-aligned MVCLE copy/init is used.") \
\
product(bool, PreferLAoverADD, false, \
"Use LA/LAY instructions over ADD instructions (z/Architecture).") \
\
develop(bool, ZapEmptyStackFields, false, "Write 0x0101... to empty stack" \
" fields. Use this to ease stack debugging.") \
\
product(bool, TraceTraps, false, "Trace all traps the signal handler "       \
"handles.")
#endif // CPU_S390_VM_GLOBALS_S390_HPP

@ -0,0 +1,65 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_s390.hpp"
#include "oops/oop.inline.hpp"
#define __ masm.
int InlineCacheBuffer::ic_stub_code_size() {
return MacroAssembler::load_const_size() + Assembler::z_brul_size();
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_oop, address entry_point) {
ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler masm(&code);
// Note: even though the code contains an embedded oop, we do not need reloc info
// because
// (1) the oop is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear.
// Load the oop,
__ load_const(Z_method, (address) cached_oop); // inline cache reg = Z_method
// and do a tail-call (pc-relative).
__ z_brul((address) entry_point);
__ flush();
}
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // Creation also verifies the object.
return MacroAssembler::get_target_addr_pcrel(move->next_instruction_address());
}
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // Creation also verifies the object.
return (void*)move->data();
}

@ -0,0 +1,46 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "runtime/icache.hpp"
// interface (see ICache::flush_icache_stub_t):
// address addr (Z_R2, ignored)
// int lines (Z_R3, ignored)
// int magic (Z_R4)
//
// returns: int (Z_R2)
//
// Note: z/Architecture doesn't need explicit flushing, so this is implemented as a nop.
// We just call a C function which does nothing but return the magic value.
int z_flush_icache(address start, int lines, int magic) { return magic; }
void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
*flush_icache_stub = (ICache::flush_icache_stub_t)z_flush_icache;
// First call: flush the flush stub itself.
ICache::invalidate_range((address)(*flush_icache_stub), 0);
};
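The stub's only observable behavior is echoing the magic argument; shared code (runtime/icache.cpp) uses that echo to verify the call really went through the generated stub. A minimal standalone mock of the protocol (names and the exact stub typedef are assumptions, not the VM's):
  #include <cassert>

  typedef int (*flush_stub_t)(char* addr, int lines, int magic);

  static int nop_flush(char* /*addr*/, int /*lines*/, int magic) {
    return magic; // nothing to flush on z/Architecture; just echo the magic
  }

  int main() {
    flush_stub_t stub = nop_flush;
    assert(stub(nullptr, 0, 12345) == 12345); // the caller checks the echo
    return 0;
  }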

@ -0,0 +1,44 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_ICACHE_S390_HPP
#define CPU_S390_VM_ICACHE_S390_HPP
// Interface for updating the instruction cache. Whenever the VM modifies
// code, part of the processor instruction cache potentially has to be flushed.
class ICache : public AbstractICache {
public:
enum {
stub_size = 0, // Size of the icache flush stub in bytes.
line_size = 2, // There is no explicit flushing on z/Architecture.
// This value is ignored by the flush stub (a nop!).
log2_line_size = 1
};
// Use default implementation.
};
#endif // CPU_S390_VM_ICACHE_S390_HPP

File diff suppressed because it is too large.

@ -0,0 +1,329 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_INTERP_MASM_ZARCH_64_64_HPP
#define CPU_S390_VM_INTERP_MASM_ZARCH_64_64_HPP
#include "asm/macroAssembler.hpp"
#include "interpreter/invocationCounter.hpp"
// This file specializes the assembler with interpreter-specific macros.
class InterpreterMacroAssembler: public MacroAssembler {
protected:
// Interpreter specific version of call_VM_base().
virtual void call_VM_leaf_base(address entry_point);
virtual void call_VM_leaf_base(address entry_point, bool allow_relocation);
virtual void call_VM_base(Register oop_result,
Register last_java_sp,
address entry_point,
bool check_exceptions);
virtual void call_VM_base(Register oop_result,
Register last_java_sp,
address entry_point,
bool allow_relocation,
bool check_exceptions);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);
public:
InterpreterMacroAssembler(CodeBuffer* c)
: MacroAssembler(c) {}
void jump_to_entry(address entry, Register Rscratch);
virtual void load_earlyret_value(TosState state);
static const Address l_tmp;
static const Address d_tmp;
// Handy address generation macros.
#define thread_(field_name) Address(Z_thread, JavaThread::field_name ## _offset())
#define method_(field_name) Address(Z_method, Method::field_name ## _offset())
#define method2_(Rmethod, field_name) Address(Rmethod, Method::field_name ## _offset())
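// Expansion examples for the macros above (field names purely illustrative;
// any field with a matching _offset() accessor works the same way):
//   thread_(pending_exception) => Address(Z_thread, JavaThread::pending_exception_offset())
//   method_(const)             => Address(Z_method, Method::const_offset())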
// Helper routine for frame allocation/deallocation.
// Compute the delta by which the caller's SP has to
// be adjusted to accommodate the non-argument locals.
void compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta);
// dispatch routines
void dispatch_prolog(TosState state, int step = 0);
void dispatch_epilog(TosState state, int step = 0);
void dispatch_only(TosState state);
// Dispatch normal table via Z_bytecode (assume Z_bytecode is loaded already).
void dispatch_only_normal(TosState state);
void dispatch_normal(TosState state);
void dispatch_next(TosState state, int step = 0);
void dispatch_next_noverify_oop(TosState state, int step = 0);
void dispatch_via(TosState state, address* table);
// Jump to an invoked target.
void prepare_to_jump_from_interpreted(Register method);
void jump_from_interpreted(Register method, Register temp);
// Removes the current activation (incl. unlocking of monitors).
// Additionally, this code is used for earlyReturn, in which case we
// want to skip both throwing and installing a (monitor) exception.
void remove_activation(TosState state,
Register return_pc,
bool throw_monitor_exception = true,
bool install_monitor_exception = true,
bool notify_jvmti = true);
public:
// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls.
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
void super_call_VM(Register thread_cache, Register oop_result, Register last_java_sp,
address entry_point, Register arg_1, Register arg_2, bool check_exception = true);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers tmp1, tmp2 and tmp3.
void gen_subtype_check(Register sub_klass, Register super_klass, Register tmp1, Register tmp2, Label &ok_is_subtype);
void get_cache_and_index_at_bcp(Register cache, Register cpe_offset, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register cpe_offset, Register bytecode,
int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void load_resolved_reference_at_index(Register result, Register index);
// Pop topmost element from stack. It just disappears. Useful if
// consumed previously by access via stackTop().
void popx(int len);
void pop_i() { popx(1); }
void pop_ptr() { popx(1); }
void pop_l() { popx(2); }
void pop_f() { popx(1); }
void pop_d() { popx(2); }
// Get Address object of stack top. No checks. No pop.
// Purpose: provide address of stack operand to exploit reg-mem operations.
// Avoid RISC-like mem2reg - reg-reg-op sequence.
Address stackTop();
// Helpers for expression stack.
void pop_i( Register r);
void pop_ptr( Register r);
void pop_l( Register r);
void pop_f(FloatRegister f);
void pop_d(FloatRegister f);
void push_i( Register r = Z_tos);
void push_ptr( Register r = Z_tos);
void push_l( Register r = Z_tos);
void push_f(FloatRegister f = Z_ftos);
void push_d(FloatRegister f = Z_ftos);
// Helpers for swap and dup.
void load_ptr(int n, Register val);
void store_ptr(int n, Register val);
void pop (TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos
void empty_expression_stack(void);
#ifdef ASSERT
void verify_sp(Register Rsp, Register Rtemp);
void verify_esp(Register Resp, Register Rtemp); // Verify that Resp points to a word in the operand stack.
#endif // ASSERT
public:
void if_cmp(Condition cc, bool ptr_compare);
// Accessors to the template interpreter state.
void asm_assert_ijava_state_magic(Register tmp) PRODUCT_RETURN;
void save_bcp();
void restore_bcp();
void save_esp();
void restore_esp();
void get_monitors(Register reg);
void save_monitors(Register reg);
void get_mdp(Register mdp);
void save_mdp(Register mdp);
// Values that are only read (besides initialization).
void restore_locals();
void get_method(Register reg);
// Load values from bytecode stream:
enum signedOrNot { Signed, Unsigned };
enum setCCOrNot { set_CC, dont_set_CC };
void get_2_byte_integer_at_bcp(Register Rdst,
int bcp_offset,
signedOrNot is_signed );
void get_4_byte_integer_at_bcp(Register Rdst,
int bcp_offset,
setCCOrNot should_set_CC = dont_set_CC);
// common code
void field_offset_at(int n, Register tmp, Register dest, Register base);
int field_offset_at(Register object, address bcp, int offset);
void fast_iaaccess(int n, address bcp);
void fast_iaputfield(address bcp, bool do_store_check);
void index_check(Register array, Register index, int index_shift, Register tmp, Register res);
void index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res);
void get_constant_pool(Register Rdst);
void get_constant_pool_cache(Register Rdst);
void get_cpool_and_tags(Register Rcpool, Register Rtags);
void is_a(Label& L);
// --------------------------------------------------
void unlock_if_synchronized_method(TosState state, bool throw_monitor_exception = true, bool install_monitor_exception = true);
void add_monitor_to_stack(bool stack_is_empty,
Register Rtemp,
Register Rtemp2,
Register Rtemp3);
void access_local_int(Register index, Register dst);
void access_local_ptr(Register index, Register dst);
void access_local_long(Register index, Register dst);
void access_local_float(Register index, FloatRegister dst);
void access_local_double(Register index, FloatRegister dst);
#ifdef ASSERT
void check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1);
#endif // ASSERT
void store_local_int(Register index, Register src);
void store_local_ptr(Register index, Register src);
void store_local_long(Register index, Register src);
void store_local_float(Register index, FloatRegister src);
void store_local_double(Register index, FloatRegister src);
Address first_local_in_stack();
static int top_most_monitor_byte_offset(); // Offset in bytes to top of monitor block.
Address top_most_monitor();
void compute_stack_base(Register Rdest);
enum LoadOrStore { load, store };
void static_iload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
void static_aload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
void static_dload_or_store(int which_local, LoadOrStore direction);
void static_iinc( int which_local, jint increment, Register Rtmp, Register Rtmp2);
void get_method_counters(Register Rmethod, Register Rcounters, Label& skip);
void increment_invocation_counter(Register Rcounters, Register RctrSum);
void increment_backedge_counter(Register Rcounters, Register RctrSum);
void test_backedge_count_for_osr(Register backedge_count, Register branch_bcp, Register Rtmp);
void record_static_call_in_profile(Register Rentry, Register Rtmp);
void record_receiver_call_in_profile(Register Rklass, Register Rentry, Register Rtmp);
// Object locking
void lock_object (Register lock_reg, Register obj_reg);
void unlock_object(Register lock_reg, Register obj_reg=noreg);
// Interpreter profiling operations
void set_method_data_pointer_for_bcp();
void test_method_data_pointer(Register mdp, Label& zero_continue);
void verify_method_data_pointer();
void set_mdp_data_at(Register mdp_in, int constant, Register value);
void increment_mdp_data_at(Register mdp_in, int constant,
Register tmp = Z_R1_scratch, bool decrement = false);
void increment_mask_and_jump(Address counter_addr,
int increment, Address mask,
Register scratch, bool preloaded,
branch_condition cond, Label* where);
void set_mdp_flag_at(Register mdp_in, int flag_constant);
void test_mdp_data_at(Register mdp_in, int offset, Register value,
Register test_value_out,
Label& not_equal_continue);
void record_klass_in_profile(Register receiver, Register mdp,
Register reg2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2, int start_row,
Label& done, bool is_virtual_call);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register dataidx, int offset_of_disp);
void update_mdp_by_constant(Register mdp_in, int constant);
void update_mdp_for_ret(Register return_bci);
void profile_taken_branch(Register mdp, Register bumped_count);
void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);
void profile_typecheck_failed(Register mdp, Register tmp);
void profile_switch_default(Register mdp);
void profile_switch_case(Register index_in_scratch, Register mdp,
Register scratch1, Register scratch2);
void profile_obj_type(Register obj, Address mdo_addr, Register klass, bool cmp_done = false);
void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
void profile_return_type(Register mdp, Register ret, Register tmp);
void profile_parameters_type(Register mdp, Register tmp1, Register tmp2);
// Debugging
void verify_oop(Register reg, TosState state = atos); // Only if +VerifyOops && state == atos.
void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
void verify_FPU(int stack_depth, TosState state = ftos);
// JVMTI helpers
void skip_if_jvmti_mode(Label &Lskip, Register Rscratch = Z_R0);
// support for JVMTI/Dtrace
typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
void notify_method_entry();
void notify_method_exit(bool native_method, TosState state, NotifyMethodExitMode mode);
// Pop the topmost TOP_IJAVA_FRAME and set its sender_sp as the new Z_SP.
// The return pc is loaded into the Register return_pc.
void pop_interpreter_frame(Register return_pc, Register tmp1, Register tmp2);
};
#endif // CPU_S390_VM_INTERP_MASM_ZARCH_64_64_HPP

@ -0,0 +1,159 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/signature.hpp"
// Access macros for Java and C arguments.
// First Java argument is at index-1.
#define locals_j_arg_at(index) Address(Z_R1/*locals*/, in_ByteSize((-(index)*wordSize)))
#define __ _masm->
static int sp_c_int_arg_offset(int arg_nr, int fp_arg_nr) {
int int_arg_nr = arg_nr-fp_arg_nr;
// arg_nr, fp_arg_nr start with 1 => int_arg_nr starts with 0
if (int_arg_nr < 5) {
return int_arg_nr * wordSize + _z_abi(carg_1);
}
int offset = int_arg_nr - 5 + (fp_arg_nr > 4 ? fp_arg_nr - 4 : 0);
return offset * wordSize + _z_abi(remaining_cargs);
}
static int sp_c_fp_arg_offset(int arg_nr, int fp_arg_nr) {
int int_arg_nr = arg_nr-fp_arg_nr;
// Arg_nr, fp_arg_nr start with 1 => int_arg_nr starts with 0.
if (fp_arg_nr < 5) {
return (fp_arg_nr - 1) * wordSize + _z_abi(cfarg_1);
}
int offset = fp_arg_nr - 5 + (int_arg_nr > 4 ? int_arg_nr - 4 : 0);
return offset * wordSize + _z_abi(remaining_cargs);
}
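// Worked example of the slot arithmetic above (numbers illustrative):
//   arg_nr = 8, fp_arg_nr = 2  =>  int_arg_nr = 6
//   6 >= 5, so the argument spills: offset = (6 - 5) + 0 = 1
//   result = 1 * wordSize + _z_abi(remaining_cargs)
// I.e. the first five integer args have dedicated slots at _z_abi(carg_1);
// later ones share the remaining_cargs area with spilled FP args.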
// Implementation of SignatureHandlerGenerator
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
int int_arg_nr = jni_offset() - _fp_arg_nr;
Register r = (int_arg_nr < 5 /*max_int_register_arguments*/) ?
as_Register(int_arg_nr) + Z_ARG1->encoding() : Z_R0;
__ z_lgf(r, locals_j_arg_at(offset()));
if (DEBUG_ONLY(true ||) int_arg_nr >= 5) {
__ z_stg(r, sp_c_int_arg_offset(jni_offset(), _fp_arg_nr), Z_SP);
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
int int_arg_nr = jni_offset() - _fp_arg_nr;
Register r = (int_arg_nr < 5 /*max_int_register_arguments*/) ?
as_Register(int_arg_nr) + Z_ARG1->encoding() : Z_R0;
__ z_lg(r, locals_j_arg_at(offset() + 1)); // Long resides in upper slot.
if (DEBUG_ONLY(true ||) int_arg_nr >= 5) {
__ z_stg(r, sp_c_int_arg_offset(jni_offset(), _fp_arg_nr), Z_SP);
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
FloatRegister fp_reg = (_fp_arg_nr < 4/*max_fp_register_arguments*/) ?
as_FloatRegister((_fp_arg_nr * 2) + Z_FARG1->encoding()) : Z_F1;
_fp_arg_nr++;
__ z_ley(fp_reg, locals_j_arg_at(offset()));
if (DEBUG_ONLY(true ||) _fp_arg_nr > 4) {
__ z_ste(fp_reg, sp_c_fp_arg_offset(jni_offset(), _fp_arg_nr) + 4, Z_SP);
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
FloatRegister fp_reg = (_fp_arg_nr < 4/*max_fp_register_arguments*/) ?
as_FloatRegister((_fp_arg_nr*2) + Z_FARG1->encoding()) : Z_F1;
_fp_arg_nr++;
__ z_ldy(fp_reg, locals_j_arg_at(offset()+1));
if (DEBUG_ONLY(true ||) _fp_arg_nr > 4) {
__ z_std(fp_reg, sp_c_fp_arg_offset(jni_offset(), _fp_arg_nr), Z_SP);
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
int int_arg_nr = jni_offset() - _fp_arg_nr;
Register r = (int_arg_nr < 5 /*max_int_register_arguments*/) ?
as_Register(int_arg_nr) + Z_ARG1->encoding() : Z_R0;
// The handle for a receiver will never be null.
bool do_NULL_check = offset() != 0 || is_static();
Label do_null;
if (do_NULL_check) {
__ clear_reg(r, true, false);
__ load_and_test_long(Z_R0, locals_j_arg_at(offset()));
__ z_bre(do_null);
}
__ add2reg(r, -offset() * wordSize, Z_R1 /* locals */);
__ bind(do_null);
if (DEBUG_ONLY(true ||) int_arg_nr >= 5) {
__ z_stg(r, sp_c_int_arg_offset(jni_offset(), _fp_arg_nr), Z_SP);
}
}
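// Net effect of pass_object, restated: with slot = &locals_j_arg_at(offset()),
//   r = (*slot != 0) ? address-of-slot   // JNI handle = address of the local slot
//                    : NULL;             // JNI convention: a null oop passes NULL
// The receiver (offset 0 of a non-static method) skips the check: never null.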
void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
__ z_lgr(Z_R1, Z_ARG1); // Z_R1 is used in locals_j_arg_at(index) macro.
// Generate code to handle arguments.
iterate(fingerprint);
__ load_const_optimized(Z_RET, AbstractInterpreter::result_handler(method()->result_type()));
__ z_br(Z_R14);
__ flush();
}
#undef __
// Implementation of SignatureHandlerLibrary
void SignatureHandlerLibrary::pd_set_handler(address handler) {}
IRT_ENTRY(address, InterpreterRuntime::get_signature(JavaThread* thread, Method* method))
methodHandle m(thread, method);
assert(m->is_native(), "sanity check");
Symbol *s = m->signature();
return (address) s->base();
IRT_END
IRT_ENTRY(address, InterpreterRuntime::get_result_handler(JavaThread* thread, Method* method))
methodHandle m(thread, method);
assert(m->is_native(), "sanity check");
return AbstractInterpreter::result_handler(m->result_type());
IRT_END

@ -0,0 +1,67 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_INTERPRETERRT_S390_HPP
#define CPU_S390_VM_INTERPRETERRT_S390_HPP
#include "memory/allocation.hpp"
static int binary_search(int key, LookupswitchPair* array, int n);
static address iload (JavaThread* thread);
static address aload (JavaThread* thread);
static address istore(JavaThread* thread);
static address astore(JavaThread* thread);
static address iinc (JavaThread* thread);
// native method calls
class SignatureHandlerGenerator: public NativeSignatureIterator {
private:
MacroAssembler* _masm;
int _fp_arg_nr;
void pass_int();
void pass_long();
void pass_double();
void pass_float();
void pass_object();
public:
// creation
SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
_fp_arg_nr = 0;
}
// code generation
void generate(uint64_t fingerprint);
};
static address get_result_handler(JavaThread* thread, Method* method);
static address get_signature(JavaThread* thread, Method* method);
#endif // CPU_S390_VM_INTERPRETERRT_S390_HPP

@ -0,0 +1,87 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_JAVAFRAMEANCHOR_S390_HPP
#define CPU_S390_VM_JAVAFRAMEANCHOR_S390_HPP
public:
// Each arch must define reset, save, restore.
// These are used by objects that only care about:
// 1 - initializing a new state (thread creation, javaCalls)
// 2 - saving a current state (javaCalls)
// 3 - restoring an old state (javaCalls).
inline void clear(void) {
// Clearing _last_Java_sp must be first.
OrderAccess::release();
_last_Java_sp = NULL;
// Fence?
OrderAccess::fence();
_last_Java_pc = NULL;
}
inline void set(intptr_t* sp, address pc) {
_last_Java_pc = pc;
OrderAccess::release();
_last_Java_sp = sp;
}
void copy(JavaFrameAnchor* src) {
// In order to make sure the transition state is valid for "this"
// we must clear _last_Java_sp before copying the rest of the new data.
// Hack Alert: Temporary bugfix for 4717480/4721647
// To act like previous version (pd_cache_state) don't NULL _last_Java_sp
// unless the value is changing.
//
if (_last_Java_sp != src->_last_Java_sp) {
OrderAccess::release();
_last_Java_sp = NULL;
OrderAccess::fence();
}
_last_Java_pc = src->_last_Java_pc;
// Must be last so profiler will always see valid frame if has_last_frame() is true.
OrderAccess::release();
_last_Java_sp = src->_last_Java_sp;
}
// We don't have to flush registers, so the stack is always walkable.
inline bool walkable(void) { return true; }
inline void make_walkable(JavaThread* thread) { }
public:
// We don't have a frame pointer.
intptr_t* last_Java_fp(void) { return NULL; }
intptr_t* last_Java_sp() const { return _last_Java_sp; }
void set_last_Java_sp(intptr_t* sp) { OrderAccess::release(); _last_Java_sp = sp; }
address last_Java_pc(void) { return _last_Java_pc; }
#endif // CPU_S390_VM_JAVAFRAMEANCHOR_S390_HPP
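The ordering discipline above is a publish/unpublish pattern: _last_Java_sp is the guard word, and a profiler that observes a non-NULL sp must also observe the pc written before it. A standalone sketch of the same idea using C++11 atomics (the VM uses its own OrderAccess layer instead of <atomic>; all names here are illustrative):
  #include <atomic>
  #include <cstdint>

  static std::atomic<intptr_t*> last_sp{nullptr}; // guard: non-NULL means "anchor valid"
  static intptr_t* last_pc = nullptr;             // payload, only read while guard is set

  void set_anchor(intptr_t* sp, intptr_t* pc) {
    last_pc = pc;                                 // write the payload first,
    last_sp.store(sp, std::memory_order_release); // then publish via the guard
  }

  void clear_anchor() {
    last_sp.store(nullptr, std::memory_order_release); // unpublish first,
    last_pc = nullptr;                                 // then the payload may change
  }

  int main() {
    intptr_t sp_word = 0, pc_word = 0;
    set_anchor(&sp_word, &pc_word);
    clear_anchor();
    return 0;
  }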

@ -0,0 +1,78 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
// TSO ensures that loads are blocking and ordered with respect to
// earlier loads, so we don't need LoadLoad membars.
#define __ masm->
#define BUFFER_SIZE 30*sizeof(jint)
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
// Don't use fast jni accessors.
return (address) -1;
}
address JNI_FastGetField::generate_fast_get_boolean_field() {
return generate_fast_get_int_field0(T_BOOLEAN);
}
address JNI_FastGetField::generate_fast_get_byte_field() {
return generate_fast_get_int_field0(T_BYTE);
}
address JNI_FastGetField::generate_fast_get_char_field() {
return generate_fast_get_int_field0(T_CHAR);
}
address JNI_FastGetField::generate_fast_get_short_field() {
return generate_fast_get_int_field0(T_SHORT);
}
address JNI_FastGetField::generate_fast_get_int_field() {
return generate_fast_get_int_field0(T_INT);
}
address JNI_FastGetField::generate_fast_get_long_field() {
// Don't use fast jni accessors.
return (address) -1;
}
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
// Don't use fast jni accessors.
return (address) -1;
}
address JNI_FastGetField::generate_fast_get_float_field() {
return generate_fast_get_float_field0(T_FLOAT);
}
address JNI_FastGetField::generate_fast_get_double_field() {
return generate_fast_get_float_field0(T_DOUBLE);
}
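The (address)-1 sentinel means "no fast accessor was generated"; shared JNI code then falls back to the regular field-access path. Restated as a predicate (a sketch, not quoted from the shared code):
  typedef char* address; // as in the VM's global definitions
  static bool has_fast_accessor(address stub) { return stub != (address)-1; }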

@ -0,0 +1,141 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_JNITYPES_S390_HPP
#define CPU_S390_VM_JNITYPES_S390_HPP
// This file holds platform-dependent routines used to write primitive
// jni types to the array of arguments passed into JavaCalls::call.
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "prims/jni.h"
class JNITypes : AllStatic {
// These functions write a java primitive type (in native format) to
// a java stack slot array to be passed as an argument to
// JavaCalls::call. I.e., they are functionally 'push' operations
// if they have a 'pos' formal parameter. Note that jlongs and
// jdoubles are written _in reverse_ of the order in which they
// appear in the interpreter stack. This is because call stubs (see
// stubGenerator_s390.cpp) reverse the argument list constructed by
// JavaCallArguments (see javaCalls.hpp).
public:
// Ints are stored in native format in one JavaCallArgument slot at *to.
static inline void put_int(jint from, intptr_t *to) {
*(jint*) to = from;
}
static inline void put_int(jint from, intptr_t *to, int& pos) {
*(jint*) (to + pos++) = from;
}
static inline void put_int(jint *from, intptr_t *to, int& pos) {
*(jint*) (to + pos++) = *from;
}
// Longs are stored in native format in one JavaCallArgument slot at *(to+1).
static inline void put_long(jlong from, intptr_t *to) {
*(jlong*) (to + 1) = from;
}
static inline void put_long(jlong from, intptr_t *to, int& pos) {
*(jlong*) (to + 1 + pos) = from;
pos += 2;
}
static inline void put_long(jlong *from, intptr_t *to, int& pos) {
*(jlong*) (to + 1 + pos) = *from;
pos += 2;
}
// Oops are stored in native format in one JavaCallArgument slot at *to.
static inline void put_obj(oop from, intptr_t *to) {
*(oop*) to = from;
}
static inline void put_obj(oop from, intptr_t *to, int& pos) {
*(oop*) (to + pos++) = from;
}
static inline void put_obj(oop *from, intptr_t *to, int& pos) {
*(oop*) (to + pos++) = *from;
}
// Floats are stored in native format in one JavaCallArgument slot at *to.
static inline void put_float(jfloat from, intptr_t *to) {
*(jfloat*) to = from;
}
static inline void put_float(jfloat from, intptr_t *to, int& pos) {
*(jfloat*) (to + pos++) = from;
}
static inline void put_float(jfloat *from, intptr_t *to, int& pos) {
*(jfloat*) (to + pos++) = *from;
}
// Doubles are stored in native word format in one JavaCallArgument
// slot at *(to+1).
static inline void put_double(jdouble from, intptr_t *to) {
*(jdouble*) (to + 1) = from;
}
static inline void put_double(jdouble from, intptr_t *to, int& pos) {
*(jdouble*) (to + 1 + pos) = from;
pos += 2;
}
static inline void put_double(jdouble *from, intptr_t *to, int& pos) {
*(jdouble*) (to + 1 + pos) = *from;
pos += 2;
}
// The get_xxx routines, on the other hand, actually _do_ fetch
// java primitive types from the interpreter stack.
// No need to worry about alignment on z/Architecture.
static inline jint get_int(intptr_t *from) {
return *(jint*) from;
}
static inline jlong get_long(intptr_t *from) {
return *(jlong*) (from + 1);
}
static inline oop get_obj(intptr_t *from) {
return *(oop*) from;
}
static inline jfloat get_float(intptr_t *from) {
return *(jfloat*) from;
}
static inline jdouble get_double(intptr_t *from) {
return *(jdouble*) (from + 1);
}
};
#endif // CPU_S390_VM_JNITYPES_S390_HPP
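To make the slot layout concrete: a jlong or jdouble occupies two JavaCallArgument slots with the payload in the second one, which is exactly why get_long/get_double read from (from + 1). A standalone sketch with plain fixed-width types (not VM code; LP64 assumed):
  #include <cassert>
  #include <cstdint>

  int main() {
    intptr_t slots[4] = {0, 0, 0, 0};
    int pos = 0;
    *(int32_t*)(slots + pos++) = 42;                      // put_int: one slot
    *(int64_t*)(slots + 1 + pos) = 0x1122334455667788LL;  // put_long: 2nd of two slots
    pos += 2;
    assert(pos == 3);
    assert(*(int64_t*)(slots + 2) == 0x1122334455667788LL); // == get_long(slots + 1)
    return 0;
  }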

@ -0,0 +1,45 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef _JAVASOFT_JNI_MD_H_
#define _JAVASOFT_JNI_MD_H_
#if defined(__GNUC__) && (__GNUC__ >= 4)
#define JNIEXPORT __attribute__((visibility("default")))
#define JNIIMPORT __attribute__((visibility("default")))
#else
#define JNIEXPORT
#define JNIIMPORT
#endif
#define JNICALL
typedef int jint;
typedef long int jlong;
typedef signed char jbyte;
#endif // _JAVASOFT_JNI_MD_H_
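The typedef of jlong as `long int` silently assumes an LP64 data model (as on linux-s390x). A one-line guard would make the assumption explicit (a sketch; the original header carries no such check):
  static_assert(sizeof(long int) == 8, "jlong requires an LP64 data model");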

@ -0,0 +1,86 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// JVMCI (JEP 243):
// So far, JVMCI is not supported/implemented on SAP platforms.
// This file is just a placeholder, to be filled in should JVMCI
// ever be implemented for this platform.
#if INCLUDE_JVMCI
#include "jvmci/jvmciCodeInstaller.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_s390.inline.hpp"
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
Unimplemented();
return 0;
}
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
Unimplemented();
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
Unimplemented();
}
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
Unimplemented();
}
void CodeInstaller::pd_relocate_CodeBlob(CodeBlob* cb, NativeInstruction* inst) {
Unimplemented();
}
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
Unimplemented();
}
void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
Unimplemented();
}
void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
Unimplemented();
}
// Convert JVMCI register indices (as used in oop maps) to HotSpot registers.
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
return NULL;
}
bool CodeInstaller::is_general_purpose_reg(VMReg hotspotRegister) {
return false;
}
#endif // INCLUDE_JVMCI

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@ -0,0 +1,314 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_MACROASSEMBLER_S390_INLINE_HPP
#define CPU_S390_VM_MACROASSEMBLER_S390_INLINE_HPP
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "runtime/thread.hpp"
// Simplified shift operations for single register operands, constant shift amount.
inline void MacroAssembler::lshift(Register r, int places, bool is_DW) {
if (is_DW) {
z_sllg(r, r, places);
} else {
z_sll(r, places);
}
}
inline void MacroAssembler::rshift(Register r, int places, bool is_DW) {
if (is_DW) {
z_srlg(r, r, places);
} else {
z_srl(r, places);
}
}
// *((int8_t*)(dst)) |= imm8
inline void MacroAssembler::or2mem_8(Address& dst, int64_t imm8) {
if (Displacement::is_shortDisp(dst.disp())) {
z_oi(dst, imm8);
} else {
z_oiy(dst, imm8);
}
}
inline int MacroAssembler::store_const(const Address &dest, long imm, Register scratch, bool is_long) {
unsigned int lm = is_long ? 8 : 4;
unsigned int lc = is_long ? 8 : 4;
return store_const(dest, imm, lm, lc, scratch);
}
// Do not rely on add2reg* emitter.
// Depending on CmdLine switches and actual parameter values,
// the generated code may alter the condition code, which is counter-intuitive
// to the semantics of the "load address" (LA/LAY) instruction.
// Generic address loading d <- base(a) + index(a) + disp(a)
inline void MacroAssembler::load_address(Register d, const Address &a) {
if (Displacement::is_shortDisp(a.disp())) {
z_la(d, a.disp(), a.indexOrR0(), a.baseOrR0());
} else if (Displacement::is_validDisp(a.disp())) {
z_lay(d, a.disp(), a.indexOrR0(), a.baseOrR0());
} else {
guarantee(false, "displacement = " SIZE_FORMAT_HEX ", out of range for LA/LAY", a.disp());
}
}
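// Displacement ranges behind the LA/LAY choice above (z/Architecture):
//   LA  encodes a 12-bit unsigned displacement:       0 <= disp < 4096
//   LAY encodes a 20-bit signed displacement:   -524288 <= disp < 524288
// A displacement outside the LAY range indicates a broken address, hence
// guarantee() instead of a silent fallback.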
inline void MacroAssembler::load_const(Register t, void* x) {
load_const(t, (long)x);
}
// Load a 64 bit constant encoded by a `Label'.
// Works for bound as well as unbound labels. For unbound labels, the
// code will become patched as soon as the label gets bound.
inline void MacroAssembler::load_const(Register t, Label& L) {
load_const(t, target(L));
}
inline void MacroAssembler::load_const(Register t, const AddressLiteral& a) {
assert(t != Z_R0, "R0 not allowed");
// First relocate (we don't change the offset in the RelocationHolder,
// just pass a.rspec()), then delegate to load_const(Register, long).
relocate(a.rspec());
load_const(t, (long)a.value());
}
inline void MacroAssembler::load_const_optimized(Register t, long x) {
(void) load_const_optimized_rtn_len(t, x, true);
}
inline void MacroAssembler::load_const_optimized(Register t, void* a) {
load_const_optimized(t, (long)a);
}
inline void MacroAssembler::load_const_optimized(Register t, Label& L) {
load_const_optimized(t, target(L));
}
inline void MacroAssembler::load_const_optimized(Register t, const AddressLiteral& a) {
assert(t != Z_R0, "R0 not allowed");
assert((relocInfo::relocType)a.rspec().reloc()->type() == relocInfo::none,
"cannot relocate optimized load_consts");
load_const_optimized(t, a.value());
}
inline void MacroAssembler::set_oop(jobject obj, Register d) {
load_const(d, allocate_oop_address(obj));
}
inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
load_const(d, constant_oop_address(obj));
}
// Adds MetaData constant md to TOC and loads it from there.
// md is added to the oop_recorder, but no relocation is added.
inline bool MacroAssembler::set_metadata_constant(Metadata* md, Register d) {
AddressLiteral a = constant_metadata_address(md);
return load_const_from_toc(d, a, d); // Discards the relocation.
}
inline bool MacroAssembler::is_call_pcrelative_short(unsigned long inst) {
return is_equal(inst, BRAS_ZOPC); // off 16, len 16
}
inline bool MacroAssembler::is_call_pcrelative_long(unsigned long inst) {
return is_equal(inst, BRASL_ZOPC); // off 16, len 32
}
inline bool MacroAssembler::is_branch_pcrelative_short(unsigned long inst) {
// Branch relative, 16-bit offset.
return is_equal(inst, BRC_ZOPC); // off 16, len 16
}
inline bool MacroAssembler::is_branch_pcrelative_long(unsigned long inst) {
// Branch relative, 32-bit offset.
return is_equal(inst, BRCL_ZOPC); // off 16, len 32
}
inline bool MacroAssembler::is_compareandbranch_pcrelative_short(unsigned long inst) {
// Compare and branch relative, 16-bit offset.
return is_equal(inst, CRJ_ZOPC, CMPBRANCH_MASK) || is_equal(inst, CGRJ_ZOPC, CMPBRANCH_MASK) ||
is_equal(inst, CIJ_ZOPC, CMPBRANCH_MASK) || is_equal(inst, CGIJ_ZOPC, CMPBRANCH_MASK) ||
is_equal(inst, CLRJ_ZOPC, CMPBRANCH_MASK) || is_equal(inst, CLGRJ_ZOPC, CMPBRANCH_MASK) ||
is_equal(inst, CLIJ_ZOPC, CMPBRANCH_MASK) || is_equal(inst, CLGIJ_ZOPC, CMPBRANCH_MASK);
}
inline bool MacroAssembler::is_branchoncount_pcrelative_short(unsigned long inst) {
// Branch relative on count, 16-bit offset.
return is_equal(inst, BRCT_ZOPC) || is_equal(inst, BRCTG_ZOPC); // off 16, len 16
}
inline bool MacroAssembler::is_branchonindex32_pcrelative_short(unsigned long inst) {
// Branch relative on index (32bit), 16-bit offset.
return is_equal(inst, BRXH_ZOPC) || is_equal(inst, BRXLE_ZOPC); // off 16, len 16
}
inline bool MacroAssembler::is_branchonindex64_pcrelative_short(unsigned long inst) {
// Branch relative on index (64bit), 16-bit offset.
return is_equal(inst, BRXHG_ZOPC) || is_equal(inst, BRXLG_ZOPC); // off 16, len 16
}
inline bool MacroAssembler::is_branchonindex_pcrelative_short(unsigned long inst) {
return is_branchonindex32_pcrelative_short(inst) ||
is_branchonindex64_pcrelative_short(inst);
}
inline bool MacroAssembler::is_branch_pcrelative16(unsigned long inst) {
return is_branch_pcrelative_short(inst) ||
is_compareandbranch_pcrelative_short(inst) ||
is_branchoncount_pcrelative_short(inst) ||
is_branchonindex_pcrelative_short(inst);
}
inline bool MacroAssembler::is_branch_pcrelative32(unsigned long inst) {
return is_branch_pcrelative_long(inst);
}
inline bool MacroAssembler::is_branch_pcrelative(unsigned long inst) {
return is_branch_pcrelative16(inst) ||
is_branch_pcrelative32(inst);
}
inline bool MacroAssembler::is_load_pcrelative_long(unsigned long inst) {
// Load relative, 32-bit offset.
return is_equal(inst, LRL_ZOPC, REL_LONG_MASK) || is_equal(inst, LGRL_ZOPC, REL_LONG_MASK); // off 16, len 32
}
inline bool MacroAssembler::is_misc_pcrelative_long(unsigned long inst) {
// Load address, execute relative, 32-bit offset.
return is_equal(inst, LARL_ZOPC, REL_LONG_MASK) || is_equal(inst, EXRL_ZOPC, REL_LONG_MASK); // off 16, len 32
}
inline bool MacroAssembler::is_pcrelative_short(unsigned long inst) {
return is_branch_pcrelative16(inst) ||
is_call_pcrelative_short(inst);
}
inline bool MacroAssembler::is_pcrelative_long(unsigned long inst) {
return is_branch_pcrelative32(inst) ||
is_call_pcrelative_long(inst) ||
is_load_pcrelative_long(inst) ||
is_misc_pcrelative_long(inst);
}
inline bool MacroAssembler::is_load_pcrelative_long(address iLoc) {
unsigned long inst;
unsigned int len = get_instruction(iLoc, &inst);
return (len == 6) && is_load_pcrelative_long(inst);
}
inline bool MacroAssembler::is_pcrelative_short(address iLoc) {
unsigned long inst;
unsigned int len = get_instruction(iLoc, &inst);
return ((len == 4) || (len == 6)) && is_pcrelative_short(inst);
}
inline bool MacroAssembler::is_pcrelative_long(address iLoc) {
unsigned long inst;
unsigned int len = get_instruction(iLoc, &inst);
return (len == 6) && is_pcrelative_long(inst);
}
// Dynamic TOC. Test for any pc-relative instruction.
inline bool MacroAssembler::is_pcrelative_instruction(address iloc) {
unsigned long inst;
get_instruction(iloc, &inst);
return is_pcrelative_short(inst) ||
is_pcrelative_long(inst);
}
inline bool MacroAssembler::is_load_addr_pcrel(address a) {
return is_equal(a, LARL_ZOPC, LARL_MASK);
}
// Save the return pc in the register that should be stored as the return pc
// in the current frame (default is R14).
inline void MacroAssembler::save_return_pc(Register pc) {
z_stg(pc, _z_abi16(return_pc), Z_SP);
}
inline void MacroAssembler::restore_return_pc() {
z_lg(Z_R14, _z_abi16(return_pc), Z_SP);
}
// Call a function with given entry.
inline address MacroAssembler::call(Register function_entry) {
assert(function_entry != Z_R0, "function_entry cannot be Z_R0");
Assembler::z_basr(Z_R14, function_entry);
_last_calls_return_pc = pc();
return _last_calls_return_pc;
}
// Call a C function via a function entry.
inline address MacroAssembler::call_c(Register function_entry) {
return call(function_entry);
}
// Call a stub function via a function descriptor, but don't save TOC before
// call, don't set up TOC and ENV for the call, and don't restore TOC after the call.
inline address MacroAssembler::call_stub(Register function_entry) {
return call_c(function_entry);
}
inline address MacroAssembler::call_stub(address function_entry) {
return call_c(function_entry);
}
// Get the pc where the last emitted call will return to.
inline address MacroAssembler::last_calls_return_pc() {
return _last_calls_return_pc;
}
inline void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
set_last_Java_frame(last_Java_sp, last_Java_pc, true);
}
inline void MacroAssembler::set_last_Java_frame_static(Register last_Java_sp, Register last_Java_pc) {
set_last_Java_frame(last_Java_sp, last_Java_pc, false);
}
inline void MacroAssembler::reset_last_Java_frame(void) {
reset_last_Java_frame(true);
}
inline void MacroAssembler::reset_last_Java_frame_static(void) {
reset_last_Java_frame(false);
}
inline void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
set_top_ijava_frame_at_SP_as_last_Java_frame(sp, tmp1, true);
}
inline void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame_static(Register sp, Register tmp1) {
set_top_ijava_frame_at_SP_as_last_Java_frame(sp, tmp1, true);
}
#endif // CPU_S390_VM_MACROASSEMBLER_S390_INLINE_HPP

@ -0,0 +1,76 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->klass_part()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();.
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there need
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtbl_list_size' original Klass objects.
#undef __
#define __ masm->
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
*(intptr_t *)(*md_top) = vtable_bytes;
*md_top += sizeof(intptr_t);
void** dummy_vtable = (void**)*md_top;
*vtable = dummy_vtable;
*md_top += vtable_bytes;
// Get ready to generate dummy methods.
CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
MacroAssembler* masm = new MacroAssembler(&cb);
__ unimplemented();
}
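// Layout written to the misc-data area above:
//   [ vtable_bytes : intptr_t ][ dummy vtable : num_virtuals * vtbl_list_size void* ]
// The method-code area (*mc_top) would receive the generated dummy methods,
// but this port stops at __ unimplemented(), so the CDS vtable-patching path
// is effectively unsupported on s390 for now.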

@ -0,0 +1,635 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#ifdef PRODUCT
#define __ _masm->
#define BLOCK_COMMENT(str) /* nothing */
#else
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
static RegisterOrConstant constant(int value) {
return RegisterOrConstant(value);
}
void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg,
Register temp_reg, Register temp2_reg) {
if (VerifyMethodHandles) {
verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
temp_reg, temp2_reg, "MH argument is a Class");
}
__ z_lg(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
}
#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
assert(x != 0, "%s should be nonzero", xname);
return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else
#define NONZERO(x) (x)
#endif
#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj_reg, SystemDictionary::WKID klass_id,
Register temp_reg, Register temp2_reg,
const char* error_message) {
InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
assert(temp_reg != Z_R0 && // Is used as base register!
temp_reg != noreg && temp2_reg != noreg, "need valid registers!");
NearLabel L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj_reg);
__ compareU64_and_branch(obj_reg, (intptr_t)0L, Assembler::bcondEqual, L_bad);
__ load_klass(temp_reg, obj_reg);
// klass_addr is a Klass* slot in the AllStatic SystemDictionary; it can't get GCed.
__ load_const_optimized(temp2_reg, (address)klass_addr);
__ z_lg(temp2_reg, Address(temp2_reg));
__ compareU64_and_branch(temp_reg, temp2_reg, Assembler::bcondEqual, L_ok);
intptr_t super_check_offset = klass->super_check_offset();
__ z_lg(temp_reg, Address(temp_reg, super_check_offset));
__ compareU64_and_branch(temp_reg, temp2_reg, Assembler::bcondEqual, L_ok);
__ BIND(L_bad);
__ stop(error_message);
__ BIND(L_ok);
BLOCK_COMMENT("} verify_klass");
}
void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind,
Register member_reg, Register temp ) {
NearLabel L;
BLOCK_COMMENT("verify_ref_kind {");
__ z_llgf(temp,
Address(member_reg,
NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
__ z_srl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
__ z_nilf(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
__ compare32_and_branch(temp, constant(ref_kind), Assembler::bcondEqual, L);
{
char *buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
if (ref_kind == JVM_REF_invokeVirtual || ref_kind == JVM_REF_invokeSpecial) {
// Could do this for all ref_kinds, but would explode assembly code size.
trace_method_handle(_masm, buf);
}
__ stop(buf);
}
BLOCK_COMMENT("} verify_ref_kind");
__ bind(L);
}
#endif // ASSERT
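// The flags decoding in verify_ref_kind above is a plain shift-and-mask:
//   packed:    flags |= ref_kind << MN_REFERENCE_KIND_SHIFT;
//   extracted: ref_kind = (flags >> MN_REFERENCE_KIND_SHIFT) & MN_REFERENCE_KIND_MASK;
// (constants defined by java_lang_invoke_MemberName, mirroring the Java side).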
void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target,
Register temp, bool for_compiler_entry) {
assert(method == Z_method, "interpreter calling convention");
__ verify_method_ptr(method);
assert(target != method, "don't you kill the method reg!");
Label L_no_such_method;
if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
// JVMTI events, such as single-stepping, are implemented partly
// by avoiding running compiled code in threads for which the
// event is enabled. Check here for interp_only_mode if these
// events CAN be enabled.
__ verify_thread();
Label run_compiled_code;
__ load_and_test_int(temp, Address(Z_thread, JavaThread::interp_only_mode_offset()));
__ z_bre(run_compiled_code);
// Null method test is replicated below in compiled case,
// it might be able to address across the verify_thread().
__ z_ltgr(temp, method);
__ z_bre(L_no_such_method);
__ z_lg(target, Address(method, Method::interpreter_entry_offset()));
__ z_br(target);
__ bind(run_compiled_code);
}
// Compiled case, either static or fall-through from runtime conditional.
__ z_ltgr(temp, method);
__ z_bre(L_no_such_method);
ByteSize offset = for_compiler_entry ?
Method::from_compiled_offset() : Method::from_interpreted_offset();
Address method_from(method, offset);
__ z_lg(target, method_from);
__ z_br(target);
__ bind(L_no_such_method);
assert(StubRoutines::throw_AbstractMethodError_entry() != NULL, "not yet generated!");
__ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry());
__ z_br(target);
}
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
Register recv, Register method_temp,
Register temp2, Register temp3,
bool for_compiler_entry) {
// This is the initial entry point of a lazy method handle.
// After type checking, it picks up the invoker from the LambdaForm.
assert_different_registers(recv, method_temp, temp2, temp3);
assert(method_temp == Z_method, "required register for loading method");
BLOCK_COMMENT("jump_to_lambda_form {");
// Load the invoker, as MH -> MH.form -> LF.vmentry
__ verify_oop(recv);
__ load_heap_oop(method_temp,
Address(recv,
NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
__ verify_oop(method_temp);
__ load_heap_oop(method_temp,
Address(method_temp,
NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
__ verify_oop(method_temp);
// The following assumes that a method is normally compressed in the vmtarget field.
__ z_lg(method_temp,
Address(method_temp,
NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
if (VerifyMethodHandles && !for_compiler_entry) {
// Make sure recv is already on stack.
NearLabel L;
Address paramSize(temp2, ConstMethod::size_of_parameters_offset());
__ z_lg(temp2, Address(method_temp, Method::const_offset()));
__ load_sized_value(temp2, paramSize, sizeof(u2), /*is_signed*/ false);
// if (temp2 != recv) stop
__ z_lg(temp2, __ argument_address(temp2, temp2, 0));
__ compare64_and_branch(temp2, recv, Assembler::bcondEqual, L);
__ stop("receiver not on stack");
__ BIND(L);
}
jump_from_method_handle(_masm, method_temp, temp2, Z_R0, for_compiler_entry);
BLOCK_COMMENT("} jump_to_lambda_form");
}
// code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
vmIntrinsics::ID iid) {
const bool not_for_compiler_entry = false; // This is the interpreter entry.
assert(is_signature_polymorphic(iid), "expected invoke iid");
if (iid == vmIntrinsics::_invokeGeneric || iid == vmIntrinsics::_compiledLambdaForm) {
// Perhaps surprisingly, the symbolic references visible to Java
// are not directly used. They are linked to Java-generated
// adapters via MethodHandleNatives.linkMethod. They all allow an
// appendix argument.
__ should_not_reach_here(); // Empty stubs make SG sick.
return NULL;
}
// Z_R10: sender SP (must preserve; see prepare_to_jump_from_interpreted)
// Z_method: method
// Z_ARG1 (Gargs): incoming argument list (must preserve)
Register Z_R4_param_size = Z_R4; // size of parameters
address code_start = __ pc();
// Here is where control starts out:
__ align(CodeEntryAlignment);
address entry_point = __ pc();
if (VerifyMethodHandles) {
Label L;
BLOCK_COMMENT("verify_intrinsic_id {");
// Supplement to 8139891: _intrinsic_id exceeded 1-byte size limit.
if (Method::intrinsic_id_size_in_bytes() == 1) {
__ z_cli(Address(Z_method, Method::intrinsic_id_offset_in_bytes()), (int)iid);
} else {
assert(Method::intrinsic_id_size_in_bytes() == 2, "size error: check Method::_intrinsic_id");
__ z_lh(Z_R0_scratch, Address(Z_method, Method::intrinsic_id_offset_in_bytes()));
__ z_chi(Z_R0_scratch, (int)iid);
}
__ z_bre(L);
if (iid == vmIntrinsics::_linkToVirtual || iid == vmIntrinsics::_linkToSpecial) {
// Could do this for all kinds, but would explode assembly code size.
trace_method_handle(_masm, "bad Method::intrinsic_id");
}
__ stop("bad Method::intrinsic_id");
__ bind(L);
BLOCK_COMMENT("} verify_intrinsic_id");
}
// First task: Find out how big the argument list is.
Address Z_R4_first_arg_addr;
int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic,
"must be _invokeBasic or a linkTo intrinsic");
if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
Address paramSize(Z_R1_scratch, ConstMethod::size_of_parameters_offset());
__ z_lg(Z_R1_scratch, Address(Z_method, Method::const_offset()));
__ load_sized_value(Z_R4_param_size, paramSize, sizeof(u2), /*is_signed*/ false);
Z_R4_first_arg_addr = __ argument_address(Z_R4_param_size, Z_R4_param_size, 0);
} else {
DEBUG_ONLY(Z_R4_param_size = noreg);
}
Register Z_mh = noreg;
if (!is_signature_polymorphic_static(iid)) {
Z_mh = Z_ARG4;
__ z_lg(Z_mh, Z_R4_first_arg_addr);
DEBUG_ONLY(Z_R4_param_size = noreg);
}
// Z_R4_first_arg_addr is live!
trace_method_handle_interpreter_entry(_masm, iid);
if (iid == vmIntrinsics::_invokeBasic) {
__ pc(); // just for the block comment
generate_method_handle_dispatch(_masm, iid, Z_mh, noreg, not_for_compiler_entry);
} else {
// Adjust argument list by popping the trailing MemberName argument.
Register Z_recv = noreg;
if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
// Load the receiver (not the MH; the actual MemberName's receiver)
// up from the interpreter stack.
__ z_lg(Z_recv = Z_R5, Z_R4_first_arg_addr);
DEBUG_ONLY(Z_R4_param_size = noreg);
}
Register Z_member = Z_method; // MemberName ptr; incoming method ptr is dead now
__ z_lg(Z_member, __ argument_address(constant(1)));
__ add2reg(Z_esp, Interpreter::stackElementSize);
generate_method_handle_dispatch(_masm, iid, Z_recv, Z_member, not_for_compiler_entry);
}
return entry_point;
}
void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
vmIntrinsics::ID iid,
Register receiver_reg,
Register member_reg,
bool for_compiler_entry) {
assert(is_signature_polymorphic(iid), "expected invoke iid");
Register temp1 = for_compiler_entry ? Z_R10 : Z_R6;
Register temp2 = Z_R12;
Register temp3 = Z_R11;
Register temp4 = Z_R13;
if (for_compiler_entry) {
assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : Z_ARG1),
"only valid assignment");
}
if (receiver_reg != noreg) {
assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
}
if (member_reg != noreg) {
assert_different_registers(temp1, temp2, temp3, temp4, member_reg);
}
if (!for_compiler_entry) { // Don't trash last SP.
assert_different_registers(temp1, temp2, temp3, temp4, Z_R10);
}
if (iid == vmIntrinsics::_invokeBasic) {
__ pc(); // Just for the block comment.
// Indirect through MH.form.vmentry.vmtarget.
jump_to_lambda_form(_masm, receiver_reg, Z_method, Z_R1, temp3, for_compiler_entry);
return;
}
// The method is a member invoker used by direct method handles.
if (VerifyMethodHandles) {
// Make sure the trailing argument really is a MemberName (caller responsibility).
verify_klass(_masm, member_reg,
SystemDictionary::WK_KLASS_ENUM_NAME(MemberName_klass),
temp1, temp2,
"MemberName required for invokeVirtual etc.");
}
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
Address member_vmtarget(member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
__ verify_oop(receiver_reg);
if (iid == vmIntrinsics::_linkToSpecial) {
// Don't actually load the klass; just null-check the receiver.
__ null_check(receiver_reg);
} else {
// Load receiver klass itself.
__ null_check(receiver_reg, Z_R0, oopDesc::klass_offset_in_bytes());
__ load_klass(temp1_recv_klass, receiver_reg);
__ verify_klass_ptr(temp1_recv_klass);
}
BLOCK_COMMENT("check_receiver {");
// The receiver for the MemberName must be in receiver_reg.
// Check the receiver against the MemberName.clazz.
if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
// Did not load it above...
__ load_klass(temp1_recv_klass, receiver_reg);
__ verify_klass_ptr(temp1_recv_klass);
}
if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
NearLabel L_ok;
Register temp2_defc = temp2;
__ load_heap_oop(temp2_defc, member_clazz);
load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
__ verify_klass_ptr(temp2_defc);
__ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
// If we get here, the type check failed!
__ stop("receiver class disagrees with MemberName.clazz");
__ bind(L_ok);
}
BLOCK_COMMENT("} check_receiver");
}
if (iid == vmIntrinsics::_linkToSpecial || iid == vmIntrinsics::_linkToStatic) {
DEBUG_ONLY(temp1_recv_klass = noreg); // These guys didn't load the recv_klass.
}
// Live registers at this point:
// member_reg - MemberName that was the trailing argument.
// temp1_recv_klass - Klass of stacked receiver, if needed.
// Z_R10 - Interpreter linkage if interpreted.
bool method_is_live = false;
switch (iid) {
case vmIntrinsics::_linkToSpecial:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
__ z_lg(Z_method, member_vmtarget);
method_is_live = true;
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
__ z_lg(Z_method, member_vmtarget);
method_is_live = true;
break;
case vmIntrinsics::_linkToVirtual: {
// Same as TemplateTable::invokevirtual, minus the CP setup and profiling.
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
}
// Pick out the vtable index from the MemberName, and then we can discard it.
Register temp2_index = temp2;
__ z_lg(temp2_index, member_vmindex);
if (VerifyMethodHandles) {
// if (member_vmindex < 0) stop
NearLabel L_index_ok;
__ compare32_and_branch(temp2_index, constant(0), Assembler::bcondNotLow, L_index_ok);
__ stop("no virtual index");
__ BIND(L_index_ok);
}
// Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget
// at this point. And VerifyMethodHandles has already checked clazz, if needed.
// Get target method and entry point.
__ lookup_virtual_method(temp1_recv_klass, temp2_index, Z_method);
method_is_live = true;
break;
}
case vmIntrinsics::_linkToInterface: {
// Same as TemplateTable::invokeinterface, minus the CP setup
// and profiling, with different argument motion.
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
}
Register temp3_intf = temp3;
__ load_heap_oop(temp3_intf, member_clazz);
load_klass_from_Class(_masm, temp3_intf, temp2, temp4);
Register Z_index = Z_method;
__ z_lg(Z_index, member_vmindex);
if (VerifyMethodHandles) {
NearLabel L;
// if (member_vmindex < 0) stop
__ compare32_and_branch(Z_index, constant(0), Assembler::bcondNotLow, L);
__ stop("invalid vtable index for MH.invokeInterface");
__ bind(L);
}
// Given interface, index, and recv klass, dispatch to the implementation method.
Label L_no_such_interface;
__ lookup_interface_method(temp1_recv_klass, temp3_intf,
// Note: next two args must be the same:
Z_index, Z_method, temp2, noreg,
L_no_such_interface);
jump_from_method_handle(_masm, Z_method, temp2, Z_R0, for_compiler_entry);
__ bind(L_no_such_interface);
// Throw exception.
__ load_const_optimized(Z_R1, StubRoutines::throw_IncompatibleClassChangeError_entry());
__ z_br(Z_R1);
break;
}
default:
fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
break;
}
if (method_is_live) {
// Live at this point: Z_method, Z_R10 (sender SP, if interpreted).
// After figuring out which concrete method to call, jump into it.
// Note that this works in the interpreter with no data motion.
// But the compiled version will require that the receiver be shifted out.
jump_from_method_handle(_masm, Z_method, temp1, Z_R0, for_compiler_entry);
}
}
#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
oopDesc* mh,
intptr_t* sender_sp,
intptr_t* args,
intptr_t* tracing_fp) {
bool has_mh = (strstr(adaptername, "/static") == NULL &&
strstr(adaptername, "linkTo") == NULL); // Static linkers don't have MH.
const char* mh_reg_name = has_mh ? "Z_R4_mh" : "Z_R4";
tty->print_cr("MH %s %s=" INTPTR_FORMAT " sender_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT,
adaptername, mh_reg_name,
p2i(mh), p2i(sender_sp), p2i(args));
if (Verbose) {
// Dumping last frame with frame::describe.
JavaThread* p = JavaThread::active();
ResourceMark rm;
PRESERVE_EXCEPTION_MARK; // May not be needed, but safer and inexpensive here.
FrameValues values;
// Note: We want to allow trace_method_handle from any call site.
// While trace_method_handle creates a frame, it may be entered
// without a valid return PC in Z_R14 (e.g. not just after a call).
// Walking that frame could lead to failures due to that invalid PC.
// => carefully detect that frame when doing the stack walking.
// Walk up to the right frame using the "tracing_fp" argument.
frame cur_frame = os::current_frame(); // Current C frame.
while (cur_frame.fp() != tracing_fp) {
cur_frame = os::get_sender_for_C_frame(&cur_frame);
}
// Safely create a frame and call frame::describe.
intptr_t *dump_sp = cur_frame.sender_sp();
intptr_t *dump_fp = cur_frame.link();
bool walkable = has_mh; // Whether the traced frame should be walkable.
// The sender for cur_frame is the caller of trace_method_handle.
if (walkable) {
// The previous definition of walkable may have to be refined
// if new call sites cause the next frame constructor to start
// failing. Alternatively, frame constructors could be
// modified to support the current or future non walkable
// frames (but this is more intrusive and is not considered as
// part of this RFE, which will instead use a simpler output).
frame dump_frame = frame(dump_sp);
dump_frame.describe(values, 1);
} else {
// Robust dump for frames which cannot be constructed from sp/younger_sp
// Add descriptions without building a Java frame to avoid issues.
values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
values.describe(-1, dump_sp, "sp");
}
bool has_args = has_mh; // Whether Z_esp is meaningful.
// Mark args, if seems valid (may not be valid for some adapters).
if (has_args) {
if ((args >= dump_sp) && (args < dump_fp)) {
values.describe(-1, args, "*Z_esp");
}
}
// Note: the unextended_sp may not be correct.
tty->print_cr(" stack layout:");
values.print(p);
if (has_mh && mh->is_oop()) {
mh->print();
if (java_lang_invoke_MethodHandle::is_instance(mh)) {
if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0) {
java_lang_invoke_MethodHandle::form(mh)->print();
}
}
}
}
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) { return; }
BLOCK_COMMENT("trace_method_handle {");
// Save argument registers (they are used in raise exception stub).
__ z_stg(Z_ARG1, Address(Z_SP, 16));
__ z_stg(Z_ARG2, Address(Z_SP, 24));
__ z_stg(Z_ARG3, Address(Z_SP, 32));
__ z_stg(Z_ARG4, Address(Z_SP, 40));
__ z_stg(Z_ARG5, Address(Z_SP, 48));
// Setup arguments.
__ z_lgr(Z_ARG2, Z_ARG4); // mh, see generate_method_handle_interpreter_entry()
__ z_lgr(Z_ARG3, Z_R10); // sender_sp
__ z_lgr(Z_ARG4, Z_esp);
__ load_const_optimized(Z_ARG1, (void *)adaptername);
__ z_lgr(Z_ARG5, Z_SP); // tracing_fp
__ save_return_pc(); // saves Z_R14
__ push_frame_abi160(0);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub));
__ pop_frame();
__ restore_return_pc(); // restores to Z_R14
__ z_lg(Z_ARG1, Address(Z_SP, 16));
__ z_lg(Z_ARG2, Address(Z_SP, 24));
__ z_lg(Z_ARG3, Address(Z_SP, 32));
__ z_lg(Z_ARG4, Address(Z_SP, 40));
__ z_lg(Z_ARG5, Address(Z_SP, 48));
__ zap_from_to(Z_SP, Z_SP, Z_R0, Z_R1, 50, -1);
__ zap_from_to(Z_SP, Z_SP, Z_R0, Z_R1, -1, 5);
BLOCK_COMMENT("} trace_method_handle");
}
#endif // !PRODUCT

@ -0,0 +1,61 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// Platform-specific definitions for method handles.
// These definitions are inlined into class MethodHandles.
// Adapters
enum /* platform_dependent_constants */ {
adapter_code_size = NOT_LP64(23000 DEBUG_ONLY(+ 40000)) LP64_ONLY(35000 DEBUG_ONLY(+ 50000))
};
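// For illustration: on an LP64 product build the expression above evaluates
// to 35000 bytes; on an LP64 debug build, DEBUG_ONLY adds another 50000.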
// Additional helper methods for MethodHandles code generation:
public:
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg);
static void verify_klass(MacroAssembler* _masm,
Register obj_reg, SystemDictionary::WKID klass_id,
Register temp_reg, Register temp2_reg,
const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
static void verify_method_handle(MacroAssembler* _masm, Register mh_reg,
Register temp_reg, Register temp2_reg) {
verify_klass(_masm, mh_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MethodHandle),
temp_reg, temp2_reg,
"reference is a MH");
}
static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;
// Similar to InterpreterMacroAssembler::jump_from_interpreted.
// Takes care of special dispatch from single stepping too.
static void jump_from_method_handle(MacroAssembler* _masm, Register method,
Register temp, Register temp2,
bool for_compiler_entry);
static void jump_to_lambda_form(MacroAssembler* _masm,
Register recv, Register method_temp,
Register temp2, Register temp3,
bool for_compiler_entry);

@ -0,0 +1,690 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// Major contributions by JL, LS
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_s390.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#define LUCY_DBG
//-------------------------------------
// N a t i v e I n s t r u c t i o n
//-------------------------------------
// Define this switch to prevent identity updates.
// In high-concurrency scenarios, it is beneficial to prevent
// identity updates. It has a positive effect on cache line steals
// and invalidations.
// Test runs of JVM98, JVM2008, and JBB2005 show a very low frequency
// of identity updates. Detection is therefore disabled.
#undef SUPPRESS_IDENTITY_UPDATE
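// Sketch (illustration only): with SUPPRESS_IDENTITY_UPDATE defined, a
// self-store such as
//   set_word_at(0, word_at(0));
// would skip the memory write in the #ifdef variants further below,
// avoiding the cache line traffic an unconditional store would cause.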
void NativeInstruction::verify() {
// Make sure code pattern is actually an instruction address.
// Do not allow:
// - NULL
// - any address in first page (0x0000 .. 0x0fff)
// - odd address (will cause a "specification exception")
address addr = addr_at(0);
if ((addr == 0) || (((unsigned long)addr & ~0x0fff) == 0) || ((intptr_t)addr & 1) != 0) {
tty->print_cr(INTPTR_FORMAT ": bad instruction address", p2i(addr));
fatal("not an instruction address");
}
}
// Print location and value (hex representation) of current NativeInstruction
void NativeInstruction::print(const char* msg) const {
int len = Assembler::instr_len(addr_at(0));
if (msg == NULL) { // Output line without trailing blanks.
switch (len) {
case 2: tty->print_cr(INTPTR_FORMAT "(len=%d): %4.4x", p2i(addr_at(0)), len, halfword_at(0)); break;
case 4: tty->print_cr(INTPTR_FORMAT "(len=%d): %4.4x %4.4x", p2i(addr_at(0)), len, halfword_at(0), halfword_at(2)); break;
case 6: tty->print_cr(INTPTR_FORMAT "(len=%d): %4.4x %4.4x %4.4x", p2i(addr_at(0)), len, halfword_at(0), halfword_at(2), halfword_at(4)); break;
default: // Never reached. instr_len() always returns one of the above values. Keep the compiler happy.
ShouldNotReachHere();
break;
}
} else { // Output line with filler blanks to have msg aligned.
switch (len) {
case 2: tty->print_cr(INTPTR_FORMAT "(len=%d): %4.4x %s", p2i(addr_at(0)), len, halfword_at(0), msg); break;
case 4: tty->print_cr(INTPTR_FORMAT "(len=%d): %4.4x %4.4x %s", p2i(addr_at(0)), len, halfword_at(0), halfword_at(2), msg); break;
case 6: tty->print_cr(INTPTR_FORMAT "(len=%d): %4.4x %4.4x %4.4x %s", p2i(addr_at(0)), len, halfword_at(0), halfword_at(2), halfword_at(4), msg); break;
default: // Never reached. instr_len() always returns one of the above values. Keep the compiler happy.
ShouldNotReachHere();
break;
}
}
}
void NativeInstruction::print() const {
print(NULL);
}
// Hex-Dump of storage around current NativeInstruction. Also try disassembly.
void NativeInstruction::dump(const unsigned int range, const char* msg) const {
Assembler::dump_code_range(tty, addr_at(0), range, (msg == NULL) ? "" : msg);
}
void NativeInstruction::dump(const unsigned int range) const {
dump(range, NULL);
}
void NativeInstruction::dump() const {
dump(32, NULL);
}
void NativeInstruction::set_halfword_at(int offset, short i) {
address addr = addr_at(offset);
#ifndef SUPPRESS_IDENTITY_UPDATE
*(short*)addr = i;
#else
if (*(short*)addr != i) {
*(short*)addr = i;
}
#endif
ICache::invalidate_word(addr);
}
void NativeInstruction::set_word_at(int offset, int i) {
address addr = addr_at(offset);
#ifndef SUPPRESS_IDENTITY_UPDATE
*(int*)addr = i;
#else
if (*(int*)addr != i) {
*(int*)addr = i;
}
#endif
ICache::invalidate_word(addr);
}
void NativeInstruction::set_jlong_at(int offset, jlong i) {
address addr = addr_at(offset);
#ifndef SUPPRESS_IDENTITY_UPDATE
*(jlong*)addr = i;
#else
if (*(jlong*)addr != i) {
*(jlong*)addr = i;
}
#endif
// Don't need to invalidate 2 words here, because
// the flush instruction operates on doublewords.
ICache::invalidate_word(addr);
}
#undef SUPPRESS_IDENTITY_UPDATE
//------------------------------------------------------------
int NativeInstruction::illegal_instruction() {
return 0;
}
bool NativeInstruction::is_illegal() {
// An instruction with main opcode 0x00 (leftmost byte) is not a valid instruction
// (and will never be) and causes a SIGILL where the pc points to the next instruction.
// The caller of this method wants to know if such a situation exists at the current pc.
//
// The result of this method is unsharp with respect to the following facts:
// - Stepping backwards in the instruction stream is not possible on z/Architecture.
// - z/Architecture instructions are 2, 4, or 6 bytes in length.
// - The instruction length is coded in the leftmost two bits of the main opcode.
// - The result is exact if the caller knows by some other means that the
// instruction is of length 2.
//
// If this method returns false, then the 2-byte instruction at *-2 is not a 0x00 opcode.
// If this method returns true, then the 2-byte instruction at *-2 is a 0x00 opcode.
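// Example of the one-sided error: the two bytes at *-2 may be the tail of a
// longer instruction and still compare equal to 0x0000, so a 'true' result
// is only exact if the instruction is known (by other means) to be 2 bytes long.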
return halfword_at(-2) == illegal_instruction();
}
// We use an illtrap for marking a method as not_entrant or zombie.
bool NativeInstruction::is_sigill_zombie_not_entrant() {
if (!is_illegal()) return false; // Just a quick path.
// One-sided error of is_illegal tolerable here
// (see implementation of is_illegal() for details).
CodeBlob* cb = CodeCache::find_blob_unsafe(addr_at(0));
if (cb == NULL || !cb->is_nmethod()) {
return false;
}
nmethod *nm = (nmethod *)cb;
// This method is not_entrant or zombie if the illtrap instruction
// is located at the verified entry point.
// BE AWARE: the current pc (this) points to the instruction after the
// "illtrap" location.
address sig_addr = ((address) this) - 2;
return nm->verified_entry_point() == sig_addr;
}
bool NativeInstruction::is_jump() {
unsigned long inst;
Assembler::get_instruction((address)this, &inst);
return MacroAssembler::is_branch_pcrelative_long(inst);
}
//---------------------------------------------------
// N a t i v e I l l e g a l I n s t r u c t i o n
//---------------------------------------------------
void NativeIllegalInstruction::insert(address code_pos) {
NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
nii->set_halfword_at(0, illegal_instruction());
}
//-----------------------
// N a t i v e C a l l
//-----------------------
void NativeCall::verify() {
if (NativeCall::is_call_at(addr_at(0))) return;
fatal("this is not a `NativeCall' site");
}
address NativeCall::destination() const {
if (MacroAssembler::is_call_far_pcrelative(instruction_address())) {
address here = addr_at(MacroAssembler::nop_size());
return MacroAssembler::get_target_addr_pcrel(here);
}
return (address)((NativeMovConstReg *)this)->data();
}
// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. Thus, the displacement field must be
// 4-byte-aligned. We enforce this on z/Architecture by inserting a nop
// instruction in front of 'brasl' when needed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
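// Illustrative layout of the pc-relative form (a sketch, not emitted here):
//   offset 0: nop                  // 2-byte filler, present only if needed
//   offset 2: brasl r14,<disp32>   // 4-byte displacement starts at offset 4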
void NativeCall::set_destination_mt_safe(address dest) {
if (MacroAssembler::is_call_far_pcrelative(instruction_address())) {
address iaddr = addr_at(MacroAssembler::nop_size());
// Ensure that patching is atomic hence mt safe.
assert(((long)addr_at(MacroAssembler::call_far_pcrelative_size()) & (call_far_pcrelative_displacement_alignment-1)) == 0,
"constant must be 4-byte aligned");
set_word_at(MacroAssembler::call_far_pcrelative_size() - 4, Assembler::z_pcrel_off(dest, iaddr));
} else {
assert(MacroAssembler::is_load_const_from_toc(instruction_address()), "unsupported instruction");
nativeMovConstReg_at(instruction_address())->set_data(((intptr_t)dest));
}
}
//-----------------------------
// N a t i v e F a r C a l l
//-----------------------------
void NativeFarCall::verify() {
NativeInstruction::verify();
if (NativeFarCall::is_far_call_at(addr_at(0))) return;
fatal("not a NativeFarCall");
}
address NativeFarCall::destination() {
assert(MacroAssembler::is_call_far_patchable_at((address)this), "unexpected call type");
address ctable = NULL;
if (MacroAssembler::call_far_patchable_requires_alignment_nop((address)this)) {
return MacroAssembler::get_dest_of_call_far_patchable_at(((address)this)+MacroAssembler::nop_size(), ctable);
} else {
return MacroAssembler::get_dest_of_call_far_patchable_at((address)this, ctable);
}
}
// Handles both patterns of patchable far calls.
void NativeFarCall::set_destination(address dest, int toc_offset) {
address inst_addr = (address)this;
// Set new destination (implementation of call may change here).
assert(MacroAssembler::is_call_far_patchable_at(inst_addr), "unexpected call type");
if (!MacroAssembler::is_call_far_patchable_pcrelative_at(inst_addr)) {
address ctable = CodeCache::find_blob(inst_addr)->ctable_begin();
// Need distance of TOC entry from current instruction.
toc_offset = (ctable + toc_offset) - inst_addr;
// Call is via constant table entry.
MacroAssembler::set_dest_of_call_far_patchable_at(inst_addr, dest, toc_offset);
} else {
// Here, we have a pc-relative call (brasl).
// Be aware: dest may have moved in this case, so really patch the displacement,
// when necessary!
// This while loop will also consume the nop which always precedes a call_far_pcrelative.
// We need to revert this after the loop. Pc-relative calls are always assumed to have a leading nop.
unsigned int nop_sz = MacroAssembler::nop_size();
unsigned int nop_bytes = 0;
while (MacroAssembler::is_z_nop(inst_addr + nop_bytes)) {
nop_bytes += nop_sz;
}
if (nop_bytes > 0) {
inst_addr += nop_bytes - nop_sz;
}
assert(MacroAssembler::is_call_far_pcrelative(inst_addr), "not a pc-relative call");
address target = MacroAssembler::get_target_addr_pcrel(inst_addr + nop_sz);
if (target != dest) {
NativeCall *call = nativeCall_at(inst_addr);
call->set_destination_mt_safe(dest);
}
}
}
//-------------------------------------
// N a t i v e M o v C o n s t R e g
//-------------------------------------
// Do not use an assertion here. Let clients decide whether they only
// want this when assertions are enabled.
void NativeMovConstReg::verify() {
address loc = addr_at(0);
// This while loop will also consume the nop which always precedes a
// call_far_pcrelative. We need to revert this after the
// loop. Pc-relative calls are always assumed to have a leading nop.
unsigned int nop_sz = MacroAssembler::nop_size();
unsigned int nop_bytes = 0;
while (MacroAssembler::is_z_nop(loc + nop_bytes)) {
nop_bytes += nop_sz;
}
if (nop_bytes > 0) {
if (MacroAssembler::is_call_far_pcrelative(loc+nop_bytes-nop_sz)) return;
loc += nop_bytes;
}
if (!MacroAssembler::is_load_const_from_toc(loc) && // Load const from TOC.
!MacroAssembler::is_load_const(loc) && // Load const inline.
!MacroAssembler::is_load_narrow_oop(loc) && // Load narrow oop.
!MacroAssembler::is_load_narrow_klass(loc) && // Load narrow Klass ptr.
!MacroAssembler::is_compare_immediate_narrow_oop(loc) && // Compare immediate narrow.
!MacroAssembler::is_compare_immediate_narrow_klass(loc) && // Compare immediate narrow.
!MacroAssembler::is_pcrelative_instruction(loc)) { // Just to make it run.
tty->cr();
tty->print_cr("NativeMovConstReg::verify(): verifying addr %p(0x%x), %d leading nops", loc, *(uint*)loc, nop_bytes/nop_sz);
tty->cr();
((NativeMovConstReg*)loc)->dump(64, "NativeMovConstReg::verify()");
#ifdef LUCY_DBG
VM_Version::z_SIGSEGV();
#endif
fatal("this is not a `NativeMovConstReg' site");
}
}
address NativeMovConstReg::next_instruction_address(int offset) const {
address inst_addr = addr_at(offset);
// Load address (which is a constant) pc-relative.
if (MacroAssembler::is_load_addr_pcrel(inst_addr)) { return addr_at(offset+MacroAssembler::load_addr_pcrel_size()); }
// Load constant from TOC.
if (MacroAssembler::is_load_const_from_toc(inst_addr)) { return addr_at(offset+MacroAssembler::load_const_from_toc_size()); }
// Load constant inline.
if (MacroAssembler::is_load_const(inst_addr)) { return addr_at(offset+MacroAssembler::load_const_size()); }
// Load constant narrow inline.
if (MacroAssembler::is_load_narrow_oop(inst_addr)) { return addr_at(offset+MacroAssembler::load_narrow_oop_size()); }
if (MacroAssembler::is_load_narrow_klass(inst_addr)) { return addr_at(offset+MacroAssembler::load_narrow_klass_size()); }
// Compare constant narrow inline.
if (MacroAssembler::is_compare_immediate_narrow_oop(inst_addr)) { return addr_at(offset+MacroAssembler::compare_immediate_narrow_oop_size()); }
if (MacroAssembler::is_compare_immediate_narrow_klass(inst_addr)) { return addr_at(offset+MacroAssembler::compare_immediate_narrow_klass_size()); }
if (MacroAssembler::is_call_far_patchable_pcrelative_at(inst_addr)) { return addr_at(offset+MacroAssembler::call_far_patchable_size()); }
if (MacroAssembler::is_pcrelative_instruction(inst_addr)) { return addr_at(offset+Assembler::instr_len(inst_addr)); }
((NativeMovConstReg*)inst_addr)->dump(64, "NativeMovConstReg site is not recognized as such");
#ifdef LUCY_DBG
VM_Version::z_SIGSEGV();
#else
guarantee(false, "Not a NativeMovConstReg site");
#endif
return NULL;
}
intptr_t NativeMovConstReg::data() const {
address loc = addr_at(0);
if (MacroAssembler::is_load_const(loc)) {
return MacroAssembler::get_const(loc);
} else if (MacroAssembler::is_load_narrow_oop(loc) ||
MacroAssembler::is_compare_immediate_narrow_oop(loc) ||
MacroAssembler::is_load_narrow_klass(loc) ||
MacroAssembler::is_compare_immediate_narrow_klass(loc)) {
((NativeMovConstReg*)loc)->dump(32, "NativeMovConstReg::data(): cannot extract data from narrow ptr (oop or klass)");
#ifdef LUCY_DBG
VM_Version::z_SIGSEGV();
#else
ShouldNotReachHere();
#endif
return *(intptr_t *)NULL;
} else {
// Otherwise, assume data resides in TOC. Is asserted in called method.
return MacroAssembler::get_const_from_toc(loc);
}
}
// Patch in a new constant.
//
// There are situations where we have multiple (hopefully two at most)
// relocations connected to one instruction. Loading an oop from CP
// using pc-relative addressing would be one such example. Here we have an
// oop relocation, modifying the oop itself, and an internal word relocation,
// modifying the relative address.
//
// NativeMovConstReg::set_data is then called once for each relocation. To be
// able to distinguish between the relocations, we use a rather dirty hack:
//
// All calls that deal with an internal word relocation to fix their relative
// address are on a faked, odd instruction address. The instruction can be
// found on the next lower, even address.
//
// All other calls are "normal", i.e. on even addresses.
address NativeMovConstReg::set_data_plain(intptr_t src, CodeBlob *cb) {
unsigned long x = (unsigned long)src;
address loc = instruction_address();
address next_address;
if (MacroAssembler::is_load_addr_pcrel(loc)) {
MacroAssembler::patch_target_addr_pcrel(loc, (address)src);
ICache::invalidate_range(loc, MacroAssembler::load_addr_pcrel_size());
next_address = next_instruction_address();
} else if (MacroAssembler::is_load_const_from_toc(loc)) { // Load constant from TOC.
MacroAssembler::set_const_in_toc(loc, src, cb);
next_address = next_instruction_address();
} else if (MacroAssembler::is_load_const(loc)) {
// Not mt safe, ok in methods like CodeBuffer::copy_code().
MacroAssembler::patch_const(loc, x);
ICache::invalidate_range(loc, MacroAssembler::load_const_size());
next_address = next_instruction_address();
}
// cOops
else if (MacroAssembler::is_load_narrow_oop(loc)) {
MacroAssembler::patch_load_narrow_oop(loc, (oop) (void*) x);
ICache::invalidate_range(loc, MacroAssembler::load_narrow_oop_size());
next_address = next_instruction_address();
}
// compressed klass ptrs
else if (MacroAssembler::is_load_narrow_klass(loc)) {
MacroAssembler::patch_load_narrow_klass(loc, (Klass*)x);
ICache::invalidate_range(loc, MacroAssembler::load_narrow_klass_size());
next_address = next_instruction_address();
}
// cOops
else if (MacroAssembler::is_compare_immediate_narrow_oop(loc)) {
MacroAssembler::patch_compare_immediate_narrow_oop(loc, (oop) (void*) x);
ICache::invalidate_range(loc, MacroAssembler::compare_immediate_narrow_oop_size());
next_address = next_instruction_address();
}
// compressed klass ptrs
else if (MacroAssembler::is_compare_immediate_narrow_klass(loc)) {
MacroAssembler::patch_compare_immediate_narrow_klass(loc, (Klass*)x);
ICache::invalidate_range(loc, MacroAssembler::compare_immediate_narrow_klass_size());
next_address = next_instruction_address();
}
else if (MacroAssembler::is_call_far_patchable_pcrelative_at(loc)) {
assert(ShortenBranches, "Wait a minute! A pc-relative call w/o ShortenBranches?");
// This NativeMovConstReg site does not need to be patched. It was
// patched when it was converted to a call_pcrelative site
// before. The value of the src argument is not related to the
// branch target.
next_address = next_instruction_address();
}
else {
tty->print_cr("WARNING: detected an unrecognized code pattern at loc = %p -> 0x%8.8x %8.8x",
loc, *((unsigned int*)loc), *((unsigned int*)(loc+4)));
next_address = next_instruction_address(); // Failure should be handled in next_instruction_address().
#ifdef LUCY_DBG
VM_Version::z_SIGSEGV();
#endif
}
return next_address;
}
// Divided up into set_data_plain(), which patches the instruction in the
// code stream, and set_data(), which additionally patches the oop pool
// if necessary.
void NativeMovConstReg::set_data(intptr_t src) {
// Also store the value into an oop_Relocation cell, if any.
CodeBlob *cb = CodeCache::find_blob(instruction_address());
address next_address = set_data_plain(src, cb);
relocInfo::update_oop_pool(instruction_address(), next_address, (address)src, cb);
}
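// Usage sketch (hypothetical call site): patching a constant-load site goes
// through set_data(), which also keeps any oop_Relocation cell in sync:
//   nativeMovConstReg_at(site)->set_data((intptr_t)new_value);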
void NativeMovConstReg::set_narrow_oop(intptr_t data) {
const address start = addr_at(0);
int range = 0;
if (MacroAssembler::is_load_narrow_oop(start)) {
range = MacroAssembler::patch_load_narrow_oop(start, cast_to_oop<intptr_t>(data));
} else if (MacroAssembler::is_compare_immediate_narrow_oop(start)) {
range = MacroAssembler::patch_compare_immediate_narrow_oop(start, cast_to_oop<intptr_t>(data));
} else {
fatal("this is not a `NativeMovConstReg::narrow_oop' site");
}
ICache::invalidate_range(start, range);
}
// Compressed klass ptrs. patch narrow klass constant.
void NativeMovConstReg::set_narrow_klass(intptr_t data) {
const address start = addr_at(0);
int range = 0;
if (MacroAssembler::is_load_narrow_klass(start)) {
range = MacroAssembler::patch_load_narrow_klass(start, (Klass*)data);
} else if (MacroAssembler::is_compare_immediate_narrow_klass(start)) {
range = MacroAssembler::patch_compare_immediate_narrow_klass(start, (Klass*)data);
} else {
fatal("this is not a `NativeMovConstReg::narrow_klass' site");
}
ICache::invalidate_range(start, range);
}
void NativeMovConstReg::set_pcrel_addr(intptr_t newTarget, CompiledMethod *passed_nm /* = NULL */, bool copy_back_to_oop_pool) {
address next_address;
address loc = addr_at(0);
if (MacroAssembler::is_load_addr_pcrel(loc)) {
address oldTarget = MacroAssembler::get_target_addr_pcrel(loc);
MacroAssembler::patch_target_addr_pcrel(loc, (address)newTarget);
ICache::invalidate_range(loc, MacroAssembler::load_addr_pcrel_size());
next_address = loc + MacroAssembler::load_addr_pcrel_size();
} else if (MacroAssembler::is_load_const_from_toc_pcrelative(loc) ) { // Load constant from TOC.
address oldTarget = MacroAssembler::get_target_addr_pcrel(loc);
MacroAssembler::patch_target_addr_pcrel(loc, (address)newTarget);
ICache::invalidate_range(loc, MacroAssembler::load_const_from_toc_size());
next_address = loc + MacroAssembler::load_const_from_toc_size();
} else if (MacroAssembler::is_call_far_patchable_pcrelative_at(loc)) {
assert(ShortenBranches, "Wait a minute! A pc-relative call w/o ShortenBranches?");
next_address = next_instruction_address();
} else {
assert(false, "Not a NativeMovConstReg site for set_pcrel_addr");
next_address = next_instruction_address(); // Failure should be handled in next_instruction_address().
}
if (copy_back_to_oop_pool) {
if (relocInfo::update_oop_pool(instruction_address(), next_address, (address)newTarget, NULL)) {
((NativeMovConstReg*)instruction_address())->dump(64, "NativeMovConstReg::set_pcrel_addr(): found oop reloc for pcrel_addr");
#ifdef LUCY_DBG
VM_Version::z_SIGSEGV();
#else
assert(false, "Ooooops: found oop reloc for pcrel_addr");
#endif
}
}
}
void NativeMovConstReg::set_pcrel_data(intptr_t newData, CompiledMethod *passed_nm /* = NULL */, bool copy_back_to_oop_pool) {
address next_address;
address loc = addr_at(0);
if (MacroAssembler::is_load_const_from_toc(loc) ) { // Load constant from TOC.
// Offset is +/- 2**32 -> use long.
long offset = MacroAssembler::get_load_const_from_toc_offset(loc);
address target = MacroAssembler::get_target_addr_pcrel(loc);
intptr_t oldData = *(intptr_t*)target;
if (oldData != newData) { // Update only if data changes. Prevents cache invalidation.
*(intptr_t *)(target) = newData;
}
// ICache::invalidate_range(target, sizeof(unsigned long)); // No ICache invalidate for CP data.
next_address = loc + MacroAssembler::load_const_from_toc_size();
} else if (MacroAssembler::is_call_far_pcrelative(loc)) {
((NativeMovConstReg*)loc)->dump(64, "NativeMovConstReg::set_pcrel_data() has a problem: setting data for a pc-relative call?");
#ifdef LUCY_DBG
VM_Version::z_SIGSEGV();
#else
assert(false, "Ooooops: setting data for a pc-relative call");
#endif
next_address = next_instruction_address();
} else {
assert(false, "Not a NativeMovConstReg site for set_pcrel_data");
next_address = next_instruction_address(); // Failure should be handled in next_instruction_address().
}
if (copy_back_to_oop_pool) {
if (relocInfo::update_oop_pool(instruction_address(), next_address, (address)newData, NULL)) {
((NativeMovConstReg*)instruction_address())->dump(64, "NativeMovConstReg::set_pcrel_data(): found oop reloc for pcrel_data");
#ifdef LUCY_DBG
VM_Version::z_SIGSEGV();
#else
assert(false, "Ooooops: found oop reloc for pcrel_data");
#endif
}
}
}
#ifdef COMPILER1
//--------------------------------
// N a t i v e M o v R e g M e m
//--------------------------------
void NativeMovRegMem::verify() {
address l1 = addr_at(0);
address l2 = addr_at(MacroAssembler::load_const_size());
if (!MacroAssembler::is_load_const(l1)) {
tty->cr();
tty->print_cr("NativeMovRegMem::verify(): verifying addr " PTR_FORMAT, p2i(l1));
tty->cr();
((NativeMovRegMem*)l1)->dump(64, "NativeMovRegMem::verify()");
fatal("this is not a `NativeMovRegMem' site");
}
unsigned long inst1;
Assembler::get_instruction(l2, &inst1);
if (!Assembler::is_z_lb(inst1) &&
!Assembler::is_z_llgh(inst1) &&
!Assembler::is_z_lh(inst1) &&
!Assembler::is_z_l(inst1) &&
!Assembler::is_z_llgf(inst1) &&
!Assembler::is_z_lg(inst1) &&
!Assembler::is_z_le(inst1) &&
!Assembler::is_z_ld(inst1) &&
!Assembler::is_z_stc(inst1) &&
!Assembler::is_z_sth(inst1) &&
!Assembler::is_z_st(inst1) &&
!(Assembler::is_z_lgr(inst1) && UseCompressedOops) &&
!Assembler::is_z_stg(inst1) &&
!Assembler::is_z_ste(inst1) &&
!Assembler::is_z_std(inst1)) {
tty->cr();
tty->print_cr("NativeMovRegMem::verify(): verifying addr " PTR_FORMAT
": wrong or missing load or store at " PTR_FORMAT, p2i(l1), p2i(l2));
tty->cr();
((NativeMovRegMem*)l1)->dump(64, "NativeMovRegMem::verify()");
fatal("this is not a `NativeMovRegMem' site");
}
}
#endif // COMPILER1
//-----------------------
// N a t i v e J u m p
//-----------------------
void NativeJump::verify() {
if (NativeJump::is_jump_at(addr_at(0))) return;
fatal("this is not a `NativeJump' site");
}
// Patch atomically with an illtrap.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
ResourceMark rm;
int code_size = 2;
CodeBuffer cb(verified_entry, code_size + 1);
MacroAssembler* a = new MacroAssembler(&cb);
#ifdef COMPILER2
assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
#endif
a->z_illtrap();
ICache::invalidate_range(verified_entry, code_size);
}
#undef LUCY_DBG
//-------------------------------------
// N a t i v e G e n e r a l J u m p
//-------------------------------------
#ifndef PRODUCT
void NativeGeneralJump::verify() {
unsigned long inst;
Assembler::get_instruction((address)this, &inst);
assert(MacroAssembler::is_branch_pcrelative_long(inst), "not a general jump instruction");
}
#endif
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
uint64_t instr = BRCL_ZOPC |
Assembler::uimm4(Assembler::bcondAlways, 8, 48) |
Assembler::simm32(RelAddr::pcrel_off32(entry, code_pos), 16, 48);
*(uint64_t*) code_pos = (instr << 16); // Shift the 6-byte instruction into the big end; the brcl is then written starting at code_pos.
ICache::invalidate_range(code_pos, instruction_size);
}
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
assert(((intptr_t)instr_addr & (BytesPerWord-1)) == 0, "requirement for mt safe patching");
// Bytes_after_jump cannot change, because we own the Patching_lock.
assert(Patching_lock->owned_by_self(), "must hold lock to patch instruction");
intptr_t bytes_after_jump = (*(intptr_t*)instr_addr) & 0x000000000000ffffL; // 2 bytes after jump.
intptr_t load_const_bytes = (*(intptr_t*)code_buffer) & 0xffffffffffff0000L;
*(intptr_t*)instr_addr = load_const_bytes | bytes_after_jump;
ICache::invalidate_range(instr_addr, 6);
}

@ -0,0 +1,673 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// Major contributions by AHa, JL, LS
#ifndef CPU_S390_VM_NATIVEINST_S390_HPP
#define CPU_S390_VM_NATIVEINST_S390_HPP
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
class NativeCall;
class NativeFarCall;
class NativeMovConstReg;
class NativeJump;
#ifndef COMPILER2
class NativeGeneralJump;
class NativeMovRegMem;
#endif
class NativeInstruction;
NativeCall* nativeCall_before(address return_address);
NativeCall* nativeCall_at(address instr);
NativeFarCall* nativeFarCall_before(address return_address);
NativeFarCall* nativeFarCall_at(address instr);
NativeMovConstReg* nativeMovConstReg_at(address address);
NativeMovConstReg* nativeMovConstReg_before(address address);
NativeJump* nativeJump_at(address address);
#ifndef COMPILER2
NativeMovRegMem* nativeMovRegMem_at (address address);
NativeGeneralJump* nativeGeneralJump_at(address address);
#endif
NativeInstruction* nativeInstruction_at(address address);
// We provide interfaces for the following instructions:
// - NativeInstruction
// - NativeCall
// - NativeFarCall
// - NativeMovConstReg
// - NativeMovRegMem
// - NativeJump
// - NativeGeneralJump
// - NativeIllegalInstruction
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
//-------------------------------------
// N a t i v e I n s t r u c t i o n
//-------------------------------------
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
friend class Relocation;
public:
enum z_specific_constants {
nop_instruction_size = 2
};
bool is_illegal();
// Bcrl is currently the only accepted instruction here.
bool is_jump();
// We use an illtrap for marking a method as not_entrant or zombie.
bool is_sigill_zombie_not_entrant();
bool is_safepoint_poll() {
// Is the current instruction a POTENTIAL read access to the polling page?
// The instruction's current arguments are not checked!
return MacroAssembler::is_load_from_polling_page(addr_at(0));
}
address get_poll_address(void *ucontext) {
// Extract poll address from instruction and ucontext.
return MacroAssembler::get_poll_address(addr_at(0), ucontext);
}
uint get_poll_register() {
// Extract poll register from instruction.
return MacroAssembler::get_poll_register(addr_at(0));
}
bool is_memory_serialization(JavaThread *thread, void *ucontext) {
// Is the current instruction a write access of thread to the
// memory serialization page?
return MacroAssembler::is_memory_serialization(long_at(0), thread, ucontext);
}
public:
// The output of __ breakpoint_trap().
static int illegal_instruction();
// The address of the currently processed instruction.
address instruction_address() const { return addr_at(0); }
protected:
address addr_at(int offset) const { return address(this) + offset; }
// z/Architecture terminology
// halfword = 2 bytes
// word = 4 bytes
// doubleword = 8 bytes
unsigned short halfword_at(int offset) const { return *(unsigned short*)addr_at(offset); }
int word_at(int offset) const { return *(jint*)addr_at(offset); }
long long_at(int offset) const { return *(jlong*)addr_at(offset); }
void set_halfword_at(int offset, short i); // Deals with I-cache.
void set_word_at(int offset, int i); // Deals with I-cache.
void set_jlong_at(int offset, jlong i); // Deals with I-cache.
void set_addr_at(int offset, address x); // Deals with I-cache.
void print() const;
void print(const char* msg) const;
void dump() const;
void dump(const unsigned int range) const;
void dump(const unsigned int range, const char* msg) const;
public:
void verify();
// unit test stuff
static void test() {} // Override for testing.
friend NativeInstruction* nativeInstruction_at(address address) {
NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
inst->verify();
#endif
return inst;
}
};
//---------------------------------------------------
// N a t i v e I l l e g a l I n s t r u c t i o n
//---------------------------------------------------
class NativeIllegalInstruction: public NativeInstruction {
public:
enum z_specific_constants {
instruction_size = 2
};
// Insert illegal opcode at specific address.
static void insert(address code_pos);
};
//-----------------------
// N a t i v e C a l l
//-----------------------
// The NativeCall is an abstraction for accessing/manipulating call
// instructions. It is used to manipulate inline caches, primitive &
// dll calls, etc.
// A native call, as defined by this abstraction layer, consists of
// all instructions required to set up for and actually make the call.
//
// On z/Architecture, there exist three different forms of native calls:
// 1) Call with pc-relative address, 1 instruction
// The location of the target function is encoded as relative address
// in the call instruction. The short form (BRAS) allows for a
// 16-bit signed relative address (in 2-byte units). The long form
// (BRASL) allows for a 32-bit signed relative address (in 2-byte units).
// 2) Call with immediate address, 3 or 5 instructions.
// The location of the target function is given by an immediate
// constant which is loaded into a (scratch) register. Depending on
// the hardware capabilities, this takes 2 or 4 instructions.
// The call itself is then a "call by register"(BASR) instruction.
// 3) Call with address from constant pool, 2(3) instructions (with dynamic TOC)
// The location of the target function is stored in the constant pool
// during compilation. From there it is loaded into a (scratch) register.
// The call itself is then a "call by register"(BASR) instruction.
//
// When initially generating a call, the compiler uses form 2) (not
// patchable, target address constant, e.g. runtime calls) or 3) (patchable,
// target address might eventually get relocated). Later in the process,
// a call could be transformed into form 1) (also patchable) during ShortenBranches.
//
// If a call is/has to be patchable, the instruction sequence generated for it
// has to be constant in length. Excessive space, created e.g. by ShortenBranches,
// is allocated to lower addresses and filled with nops. That is necessary to
// keep the return address constant, no matter what form the call has.
// Methods dealing with such calls have "patchable" as part of their name.
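// Illustrative patterns for the three forms (a sketch; the authoritative
// emission code lives in MacroAssembler):
//   Form 1:  [nop]; brasl r14,<pcrel32>                 // pc-relative call
//   Form 2:  <load_const sequence, 2 or 4 insns>; basr r14,<reg>
//   Form 3:  larl+lg (or lgrl) <reg>,<TOC slot>; basr r14,<reg>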
class NativeCall: public NativeInstruction {
public:
static int get_IC_pos_in_java_to_interp_stub() {
return 0;
}
enum z_specific_constants {
instruction_size = 18, // Used in shared code for calls with reloc_info:
// value correct if !has_long_displacement_fast().
call_far_pcrelative_displacement_offset = 4, // Includes 2 bytes for the nop.
call_far_pcrelative_displacement_alignment = 4
};
// Maximum size (in bytes) of a call to an absolute address.
// Used when emitting call to deopt handler blob, which is a
// "load_const_call". The code pattern is:
// tmpReg := load_const(address); (* depends on CPU ArchLvl, but is otherwise constant *)
// call(tmpReg); (* basr, 2 bytes *)
static unsigned int max_instruction_size() {
return MacroAssembler::load_const_size() + MacroAssembler::call_byregister_size();
}
// address instruction_address() const { return addr_at(0); }
// For the ordering of the checks see note at nativeCall_before.
address next_instruction_address() const {
address iaddr = instruction_address();
if (MacroAssembler::is_load_const_call(iaddr)) {
// Form 2): load_const, BASR
return addr_at(MacroAssembler::load_const_call_size());
}
if (MacroAssembler::is_load_const_from_toc_call(iaddr)) {
// Form 3): load_const_from_toc (LARL+LG/LGRL), BASR.
return addr_at(MacroAssembler::load_const_from_toc_call_size());
}
if (MacroAssembler::is_call_far_pcrelative(iaddr)) {
// Form 1): NOP, BRASL
// The BRASL (Branch Relative And Save Long) is patched into the space created
// by the load_const_from_toc_call sequence (typically (LARL+LG)/LGRL - BASR).
// The BRASL must be positioned such that its end is FW (4-byte) aligned (for atomic patching).
// This is achieved by aligning the end of the entire sequence on a 4-byte boundary by inserting
// a nop, if required, at the very beginning of the instruction sequence. The nop needs to
// be accounted for when calculating the next instruction address. The alignment takes place
// already when generating the original instruction sequence. The alignment requirement
// makes the size depend on location.
// The return address of the call must always be at the end of the instruction sequence.
// Inserting the extra alignment nop (or anything else) at the end is not an option.
// The patched-in brasl instruction is prepended with a nop to make it easier to
// distinguish from a load_const_from_toc_call sequence.
return addr_at(MacroAssembler::call_far_pcrelative_size());
}
((NativeCall*)iaddr)->print();
guarantee(false, "Not a NativeCall site");
return NULL;
}
address return_address() const {
return next_instruction_address();
}
address destination() const;
void set_destination_mt_safe(address dest);
void verify_alignment() {} // Yet another real do nothing guy :)
void verify();
// unit test stuff
static void test();
// Creation.
friend NativeCall* nativeCall_at(address instr) {
NativeCall* call;
// Make sure not to return garbage.
if (NativeCall::is_call_at(instr)) {
call = (NativeCall*)instr;
} else {
call = (NativeCall*)instr;
call->print();
guarantee(false, "Not a NativeCall site");
}
#ifdef ASSERT
call->verify();
#endif
return call;
}
// This is a very tricky function to implement. It involves stepping
// backwards in the instruction stream. On architectures with variable
// instruction length, this is a risky endeavor. From the return address,
// you do not know how far to step back to be at a location (your starting
// point) that will eventually bring you back to the return address.
// Furthermore, it may happen that there are multiple starting points.
//
// With only a few possible (allowed) code patterns, the risk is lower but
// does not diminish completely. Experience shows that there are code patterns
// which look like a load_const_from_toc_call @(return address-8), but in
// fact are a call_far_pcrelative @(return address-6). The other way around
// is possible as well, but was not knowingly observed so far.
//
// The unpredictability is caused by the pc-relative address field in both
// the call_far_pcrelative (BRASL) and the load_const_from_toc (LGRL)
// instructions. This field can contain an arbitrary bit pattern.
//
// Here is a real-world example:
// Mnemonics: <not a valid sequence> LGRL r10,<addr> BASR r14,r10
// Hex code: eb01 9008 007a c498 ffff c4a8 c0e5 ffc1 0dea
// Mnemonics: AGSI <mem>,I8 LGRL r9,<addr> BRASL r14,<addr> correct
//
// If you first check for a load_const_from_toc_call @(-8), you will find
// a false positive. In this example, it is obviously false, because the
// preceding bytes do not form a valid instruction pattern. If you first
// check for call_far_pcrelative @(-6), you get a true positive - in this
// case.
//
// The following remedy has been implemented/enforced:
// 1) Everywhere, the permissible code patterns are checked in the same
// sequence: Form 2) - Form 3) - Form 1).
// 2) The call_far_pcrelative, which would ideally be just one BRASL
// instruction, is always prepended with a NOP. This measure avoids
// ambiguities with load_const_from_toc_call.
friend NativeCall* nativeCall_before(address return_address) {
NativeCall *call = NULL;
// Make sure not to return garbage
address instp = return_address - MacroAssembler::load_const_call_size();
if (MacroAssembler::is_load_const_call(instp)) { // Form 2)
call = (NativeCall*)(instp); // load_const + basr
} else {
instp = return_address - MacroAssembler::load_const_from_toc_call_size();
if (MacroAssembler::is_load_const_from_toc_call(instp)) { // Form 3)
call = (NativeCall*)(instp); // load_const_from_toc + basr
} else {
instp = return_address - MacroAssembler::call_far_pcrelative_size();
if (MacroAssembler::is_call_far_pcrelative(instp)) { // Form 1)
call = (NativeCall*)(instp); // brasl (or nop + brasl)
} else {
call = (NativeCall*)(instp);
call->print();
guarantee(false, "Not a NativeCall site");
}
}
}
#ifdef ASSERT
call->verify();
#endif
return call;
}
// Ordering of checks 2) 3) 1) is relevant!
static bool is_call_at(address a) {
// Check plain instruction sequence. Do not care about filler or alignment nops.
bool b = MacroAssembler::is_load_const_call(a) || // load_const + basr
MacroAssembler::is_load_const_from_toc_call(a) || // load_const_from_toc + basr
MacroAssembler::is_call_far_pcrelative(a); // nop + brasl
return b;
}
// Ordering of checks 2) 3) 1) is relevant!
static bool is_call_before(address a) {
// check plain instruction sequence. Do not care about filler or alignment nops.
bool b = MacroAssembler::is_load_const_call( a - MacroAssembler::load_const_call_size()) || // load_const + basr
MacroAssembler::is_load_const_from_toc_call(a - MacroAssembler::load_const_from_toc_call_size()) || // load_const_from_toc + basr
MacroAssembler::is_call_far_pcrelative( a - MacroAssembler::call_far_pcrelative_size()); // nop+brasl
return b;
}
static bool is_call_to(address instr, address target) {
// Check whether there is a `NativeCall' at the address `instr'
// calling to the address `target'.
return is_call_at(instr) && target == ((NativeCall *)instr)->destination();
}
bool is_pcrelative() {
return MacroAssembler::is_call_far_pcrelative((address)this);
}
};
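// Illustrative sketch (not part of the original change): how a stack walker
// might use the accessors above; 'some_return_pc' is a hypothetical address
// taken from a frame's saved return pc.
//
//   if (NativeCall::is_call_before(some_return_pc)) {
//     NativeCall* call = nativeCall_before(some_return_pc);
//     address dest = call->destination();
//     assert(call->return_address() == some_return_pc, "stepped back to the call site");
//   }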
//-----------------------------
// N a t i v e F a r C a l l
//-----------------------------
// The NativeFarCall is an abstraction for accessing/manipulating native
// call-anywhere instructions.
// Used to call native methods which may be loaded anywhere in the address
// space, possibly out of reach of a call instruction.
// Refer to NativeCall for a description of the supported call forms.
class NativeFarCall: public NativeInstruction {
public:
// We use MacroAssembler::call_far_patchable() for implementing a
// call-anywhere instruction.
static int instruction_size() { return MacroAssembler::call_far_patchable_size(); }
static int return_address_offset() { return MacroAssembler::call_far_patchable_ret_addr_offset(); }
// address instruction_address() const { return addr_at(0); }
address next_instruction_address() const {
return addr_at(instruction_size());
}
address return_address() const {
return addr_at(return_address_offset());
}
// Returns the NativeFarCall's destination.
address destination();
// Sets the NativeFarCall's destination, not necessarily mt-safe.
// Used when relocating code.
void set_destination(address dest, int toc_offset);
// Checks whether instr points at a NativeFarCall instruction.
static bool is_far_call_at(address instr) {
// Use compound inspection function which, in addition to instruction sequence,
// also checks for expected nops and for instruction alignment.
return MacroAssembler::is_call_far_patchable_at(instr);
}
// Does the NativeFarCall implementation use a pc-relative encoding
// of the call destination?
// Used when relocating code.
bool is_pcrelative() {
address iaddr = (address)this;
assert(is_far_call_at(iaddr), "unexpected call type");
return MacroAssembler::is_call_far_patchable_pcrelative_at(iaddr);
}
void verify();
// Unit tests
static void test();
// Instantiates a NativeFarCall object starting at the given instruction
// address and returns the NativeFarCall object.
inline friend NativeFarCall* nativeFarCall_at(address instr) {
NativeFarCall* call = (NativeFarCall*)instr;
#ifdef ASSERT
call->verify();
#endif
return call;
}
};
//-------------------------------------
// N a t i v e M o v C o n s t R e g
//-------------------------------------
// An interface for accessing/manipulating native set_oop imm, reg instructions.
// (Used to manipulate inlined data references, etc.)
// A native move of a constant into a register, as defined by this abstraction layer,
// deals with instruction sequences that load "quasi constant" oops into registers
// for addressing. Those "quasi constant" oops eventually need to be changed
// (i.e. patched), for a simple reason: objects might get moved around in storage.
// Pc-relative oop addresses also have to be patched when the reference location
// is moved, which happens whenever executable code is relocated.
class NativeMovConstReg: public NativeInstruction {
public:
enum z_specific_constants {
instruction_size = 10 // Used in shared code for calls with reloc_info.
};
// address instruction_address() const { return addr_at(0); }
// The current instruction might be located at an offset.
address next_instruction_address(int offset = 0) const;
// (The [set_]data accessor respects oop_type relocs also.)
intptr_t data() const;
// Patch data in code stream.
address set_data_plain(intptr_t x, CodeBlob *code);
// Patch data in code stream and oop pool if necessary.
void set_data(intptr_t x);
// Patch narrow oop constant in code stream.
void set_narrow_oop(intptr_t data);
void set_narrow_klass(intptr_t data);
void set_pcrel_addr(intptr_t addr, CompiledMethod *nm = NULL, bool copy_back_to_oop_pool=false);
void set_pcrel_data(intptr_t data, CompiledMethod *nm = NULL, bool copy_back_to_oop_pool=false);
void verify();
// unit test stuff
static void test();
// Creation.
friend NativeMovConstReg* nativeMovConstReg_at(address address) {
NativeMovConstReg* test = (NativeMovConstReg*)address;
#ifdef ASSERT
test->verify();
#endif
return test;
}
};
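// Illustrative sketch (hypothetical addresses/values): patching an inlined
// constant at a known instruction address 'p'. set_data() also updates the
// oop pool if necessary, as noted above.
//
//   NativeMovConstReg* mv = nativeMovConstReg_at(p);   // verifies in debug builds
//   intptr_t old_value = mv->data();
//   mv->set_data((intptr_t)new_value);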
#ifdef COMPILER1
//---------------------------------
// N a t i v e M o v R e g M e m
//---------------------------------
// Interface to manipulate a code sequence that performs a memory access (load/store).
// The code is the patchable version of memory accesses generated by
// LIR_Assembler::reg2mem() and LIR_Assembler::mem2reg().
//
// Loading the offset for the mem access is the target of the manipulation.
//
// The instruction sequence looks like this:
// iihf %r1,$bits1 ; load offset for mem access
// iilf %r1,$bits2
// [compress oop] ; optional, load only
// load/store %r2,0(%r1,%r2) ; memory access
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at (address address);
class NativeMovRegMem: public NativeInstruction {
public:
intptr_t offset() const {
return nativeMovConstReg_at(addr_at(0))->data();
}
void set_offset(intptr_t x) {
nativeMovConstReg_at(addr_at(0))->set_data(x);
}
void add_offset_in_bytes(intptr_t radd_offset) {
set_offset(offset() + radd_offset);
}
void verify();
private:
friend inline NativeMovRegMem* nativeMovRegMem_at(address address) {
NativeMovRegMem* test = (NativeMovRegMem*)address;
#ifdef ASSERT
test->verify();
#endif
return test;
}
};
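// Illustrative sketch (hypothetical values): adjusting the displacement of a
// patchable C1 memory access whose offset-loading sequence starts at 'p'.
//
//   NativeMovRegMem* access = nativeMovRegMem_at(p);
//   access->add_offset_in_bytes(BytesPerWord);   // e.g. step to the adjacent slot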
#endif // COMPILER1
//-----------------------
// N a t i v e J u m p
//-----------------------
// An interface for accessing/manipulating native jumps
class NativeJump: public NativeInstruction {
public:
enum z_constants {
instruction_size = 2 // Size of z_illtrap().
};
// Maximum size (in bytes) of a jump to an absolute address.
// Used when emitting a branch to an exception handler, which is a
// "load_const_optimized_branch". Using load_const thus yields a
// pessimistic (upper-bound) size estimate.
// The code pattern is:
//   tmpReg := load_const(address);   (* varying size *)
//   jumpTo(tmpReg);                  (* bcr, 2 bytes *)
//
static unsigned int max_instruction_size() {
return MacroAssembler::load_const_size() + MacroAssembler::jump_byregister_size();
}
// address instruction_address() const { return addr_at(0); }
address jump_destination() const {
return (address)nativeMovConstReg_at(instruction_address())->data();
}
void set_jump_destination(address dest) {
nativeMovConstReg_at(instruction_address())->set_data(((intptr_t)dest));
}
// Creation
friend NativeJump* nativeJump_at(address address) {
NativeJump* jump = (NativeJump*)address;
#ifdef ASSERT
jump->verify();
#endif
return jump;
}
static bool is_jump_at(address a) {
int off = 0;
bool b = (MacroAssembler::is_load_const_from_toc(a+off) &&
Assembler::is_z_br(*(short*)(a+off + MacroAssembler::load_const_from_toc_size())));
b = b || (MacroAssembler::is_load_const(a+off) &&
Assembler::is_z_br(*(short*)(a+off + MacroAssembler::load_const_size())));
return b;
}
void verify();
// Unit testing stuff
static void test();
// Insertion of native jump instruction.
static void insert(address code_pos, address entry);
// MT-safe insertion of native jump at verified method entry.
static void check_verified_entry_alignment(address entry, address verified_entry) { }
static void patch_verified_entry(address entry, address verified_entry, address dest);
};
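// Illustrative sketch (hypothetical addresses): redirecting a native jump whose
// instruction sequence starts at address 'p'. The destination lives in the
// constant loaded by the underlying NativeMovConstReg sequence.
//
//   NativeJump* jump = nativeJump_at(p);   // verifies in debug builds
//   if (jump->jump_destination() != new_dest) {
//     jump->set_jump_destination(new_dest);
//   }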
//-------------------------------------
// N a t i v e G e n e r a l J u m p
//-------------------------------------
// Despite the name, this class handles only simple branches:
// on ZARCH_64, a single BRCL instruction.
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);
class NativeGeneralJump: public NativeInstruction {
public:
enum ZARCH_specific_constants {
instruction_size = 6
};
address instruction_address() const { return addr_at(0); }
address jump_destination() const { return addr_at(0) + MacroAssembler::get_pcrel_offset(addr_at(0)); }
// Creation
friend inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
#ifdef ASSERT
jump->verify();
#endif
return jump;
}
// Insertion of native general jump instruction.
static void insert_unconditional(address code_pos, address entry);
void set_jump_destination(address dest) {
Unimplemented();
// set_word_at(MacroAssembler::call_far_pcrelative_size()-4, Assembler::z_pcrel_off(dest, addr_at(0)));
}
static void replace_mt_safe(address instr_addr, address code_buffer);
void verify() PRODUCT_RETURN;
};
#endif // CPU_S390_VM_NATIVEINST_S390_HPP

@ -0,0 +1,44 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_REGISTERMAP_S390_HPP
#define CPU_S390_VM_REGISTERMAP_S390_HPP
// Machine-dependent implementation for register maps.
friend class frame;
private:
// This is the hook for finding a register in a "well-known" location,
// such as a register block of a predetermined format.
// Since there is none, we just return NULL.
address pd_location(VMReg reg) const {return NULL;}
// No PD state to clear or copy.
void pd_clear() {}
void pd_initialize() {}
void pd_initialize_from(const RegisterMap* map) {}
#endif // CPU_S390_VM_REGISTERMAP_S390_HPP

@ -0,0 +1,82 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_REGISTERSAVER_S390_HPP
#define CPU_S390_VM_REGISTERSAVER_S390_HPP
class RegisterSaver {
// Used for saving volatile registers.
// Class declaration moved to separate file to make it available elsewhere.
// Implementation remains in sharedRuntime_s390.cpp
public:
// Set of registers to be saved.
typedef enum {
all_registers,
all_registers_except_r2,
all_integer_registers,
all_volatile_registers, // According to ABI calling convention.
arg_registers
} RegisterSet;
// Boolean flags to force only argument registers to be saved.
static int live_reg_save_size(RegisterSet reg_set);
static int live_reg_frame_size(RegisterSet reg_set);
// Specify the register that should be stored as the return pc in the current frame.
static OopMap* save_live_registers(MacroAssembler* masm, RegisterSet reg_set, Register return_pc = Z_R14);
static void restore_live_registers(MacroAssembler* masm, RegisterSet reg_set);
// Generate the OopMap (again; the registers were saved before).
static OopMap* generate_oop_map(MacroAssembler* masm, RegisterSet reg_set);
// During deoptimization only the result registers need to be restored;
// all the other values have already been extracted.
static void restore_result_registers(MacroAssembler* masm);
// Constants and data structures:
typedef enum {
int_reg = 0,
float_reg = 1,
excluded_reg = 2, // Not saved/restored.
} RegisterType;
typedef enum {
reg_size = 8,
half_reg_size = reg_size / 2,
} RegisterConstants;
// Remember type, number, and VMReg.
typedef struct {
RegisterType reg_type;
int reg_num;
VMReg vmreg;
} LiveRegType;
};
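// Illustrative sketch (hypothetical stub code): the save/restore bracket this
// interface suggests around a call into the VM runtime.
//
//   OopMap* map = RegisterSaver::save_live_registers(masm, RegisterSaver::all_volatile_registers);
//   // ... emit the runtime call; 'map' records where each register was saved ...
//   RegisterSaver::restore_live_registers(masm, RegisterSaver::all_volatile_registers);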
#endif // CPU_S390_VM_REGISTERSAVER_S390_HPP

@ -0,0 +1,37 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// Make sure the defines don't screw up the declarations later on in this file.
#define DONT_USE_REGISTER_DEFINES
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "register_s390.hpp"
#include "interp_masm_s390.hpp"
REGISTER_DEFINITION(Register, noreg);
REGISTER_DEFINITION(FloatRegister, fnoreg);

@ -0,0 +1,48 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "register_s390.hpp"
const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers * 2;
const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::max_gpr +
FloatRegisterImpl::number_of_registers * 2;
const char* RegisterImpl::name() const {
const char* names[number_of_registers] = {
"Z_R0", "Z_R1", "Z_R2", "Z_R3", "Z_R4", "Z_R5", "Z_R6", "Z_R7",
"Z_R8", "Z_R9", "Z_R10", "Z_R11", "Z_R12", "Z_R13", "Z_R14", "Z_R15"
};
return is_valid() ? names[encoding()] : "noreg";
}
const char* FloatRegisterImpl::name() const {
const char* names[number_of_registers] = {
"Z_F0", "Z_F1", "Z_F2", "Z_F3", "Z_F4", "Z_F5", "Z_F6", "Z_F7", "Z_F8", "Z_F9",
"Z_F10", "Z_F11", "Z_F12", "Z_F13", "Z_F14", "Z_F15"
};
return is_valid() ? names[encoding()] : "fnoreg";
}

@ -0,0 +1,427 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_REGISTER_S390_HPP
#define CPU_S390_VM_REGISTER_S390_HPP
#include "asm/register.hpp"
#include "vm_version_s390.hpp"
class Address;
class VMRegImpl;
typedef VMRegImpl* VMReg;
// Use Register as shortcut.
class RegisterImpl;
typedef RegisterImpl* Register;
// The implementation of integer registers for z/Architecture.
// z/Architecture registers, see "LINUX for zSeries ELF ABI Supplement", IBM March 2001
//
// r0-r1 General purpose (volatile)
// r2 Parameter and return value (volatile)
// r3 TOC pointer (volatile)
// r3-r5 Parameters (volatile)
// r6 Parameter (nonvolatile)
// r7-r11 Locals (nonvolatile)
// r12 Local, often used as GOT pointer (nonvolatile)
// r13 Local, often used as toc (nonvolatile)
// r14 return address (volatile)
// r15 stack pointer (nonvolatile)
//
// f0,f2,f4,f6 Parameters (volatile)
// f1,f3,f5,f7 General purpose (volatile)
// f8-f15 General purpose (nonvolatile)
inline Register as_Register(int encoding) {
return (Register)(long)encoding;
}
class RegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 16,
number_of_arg_registers = 5
};
// general construction
inline friend Register as_Register(int encoding);
inline VMReg as_VMReg();
// accessors
int encoding() const { assert(is_valid(), "invalid register"); return value(); }
const char* name() const;
// testers
bool is_valid() const { return (0 <= (value()&0x7F) && (value()&0x7F) < number_of_registers); }
bool is_even() const { return (encoding() & 1) == 0; }
bool is_volatile() const { return (0 <= (value()&0x7F) && (value()&0x7F) <= 5) || (value()&0x7F)==14; }
bool is_nonvolatile() const { return is_valid() && !is_volatile(); }
public:
// derived registers, offsets, and addresses
Register predecessor() const { return as_Register((encoding()-1) & (number_of_registers-1)); }
Register successor() const { return as_Register((encoding() + 1) & (number_of_registers-1)); }
};
// The integer registers of the z/Architecture.
CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
CONSTANT_REGISTER_DECLARATION(Register, Z_R0, (0));
CONSTANT_REGISTER_DECLARATION(Register, Z_R1, (1));
CONSTANT_REGISTER_DECLARATION(Register, Z_R2, (2));
CONSTANT_REGISTER_DECLARATION(Register, Z_R3, (3));
CONSTANT_REGISTER_DECLARATION(Register, Z_R4, (4));
CONSTANT_REGISTER_DECLARATION(Register, Z_R5, (5));
CONSTANT_REGISTER_DECLARATION(Register, Z_R6, (6));
CONSTANT_REGISTER_DECLARATION(Register, Z_R7, (7));
CONSTANT_REGISTER_DECLARATION(Register, Z_R8, (8));
CONSTANT_REGISTER_DECLARATION(Register, Z_R9, (9));
CONSTANT_REGISTER_DECLARATION(Register, Z_R10, (10));
CONSTANT_REGISTER_DECLARATION(Register, Z_R11, (11));
CONSTANT_REGISTER_DECLARATION(Register, Z_R12, (12));
CONSTANT_REGISTER_DECLARATION(Register, Z_R13, (13));
CONSTANT_REGISTER_DECLARATION(Register, Z_R14, (14));
CONSTANT_REGISTER_DECLARATION(Register, Z_R15, (15));
// Use ConditionRegister as shortcut
class ConditionRegisterImpl;
typedef ConditionRegisterImpl* ConditionRegister;
// The implementation of condition register(s) for the z/Architecture.
class ConditionRegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 1
};
// accessors
int encoding() const {
assert(is_valid(), "invalid register"); return value();
}
// testers
bool is_valid() const {
return (0 <= value() && value() < number_of_registers);
}
bool is_volatile() const {
return true;
}
bool is_nonvolatile() const {
return false;
}
// construction.
inline friend ConditionRegister as_ConditionRegister(int encoding);
inline VMReg as_VMReg();
};
inline ConditionRegister as_ConditionRegister(int encoding) {
assert(encoding >= 0 && encoding < ConditionRegisterImpl::number_of_registers, "bad condition register encoding");
return (ConditionRegister)(long)encoding;
}
// The condition register of the z/Architecture.
CONSTANT_REGISTER_DECLARATION(ConditionRegister, Z_CR, (0));
// Because z/Architecture has so many registers, #define'ing values for them is
// beneficial in code size and is worth the cost of some of the
// dangers of defines.
// If a particular file has a problem with these defines then it's possible
// to turn them off in that file by defining
// DONT_USE_REGISTER_DEFINES. register_definitions_s390.cpp does that
// so that it can provide real definitions of these registers
// for use in debuggers and such.
#ifndef DONT_USE_REGISTER_DEFINES
#define noreg ((Register)(noreg_RegisterEnumValue))
#define Z_R0 ((Register)(Z_R0_RegisterEnumValue))
#define Z_R1 ((Register)(Z_R1_RegisterEnumValue))
#define Z_R2 ((Register)(Z_R2_RegisterEnumValue))
#define Z_R3 ((Register)(Z_R3_RegisterEnumValue))
#define Z_R4 ((Register)(Z_R4_RegisterEnumValue))
#define Z_R5 ((Register)(Z_R5_RegisterEnumValue))
#define Z_R6 ((Register)(Z_R6_RegisterEnumValue))
#define Z_R7 ((Register)(Z_R7_RegisterEnumValue))
#define Z_R8 ((Register)(Z_R8_RegisterEnumValue))
#define Z_R9 ((Register)(Z_R9_RegisterEnumValue))
#define Z_R10 ((Register)(Z_R10_RegisterEnumValue))
#define Z_R11 ((Register)(Z_R11_RegisterEnumValue))
#define Z_R12 ((Register)(Z_R12_RegisterEnumValue))
#define Z_R13 ((Register)(Z_R13_RegisterEnumValue))
#define Z_R14 ((Register)(Z_R14_RegisterEnumValue))
#define Z_R15 ((Register)(Z_R15_RegisterEnumValue))
#define Z_CR ((ConditionRegister)(Z_CR_ConditionRegisterEnumValue))
#endif // DONT_USE_REGISTER_DEFINES
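// Illustrative sketch: a file that needs the real constant definitions opts
// out of the macros above before including this header, mirroring what
// register_definitions_s390.cpp (shown earlier in this change) does:
//
//   #define DONT_USE_REGISTER_DEFINES
//   #include "register_s390.hpp"
//   // Z_R2 now names the CONSTANT_REGISTER_DECLARATION symbol instead of a
//   // macro, so debuggers and symbol tables can refer to it.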
// Use FloatRegister as shortcut
class FloatRegisterImpl;
typedef FloatRegisterImpl* FloatRegister;
// The implementation of float registers for the z/Architecture.
inline FloatRegister as_FloatRegister(int encoding) {
return (FloatRegister)(long)encoding;
}
class FloatRegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 16,
number_of_arg_registers = 4
};
// construction
inline friend FloatRegister as_FloatRegister(int encoding);
inline VMReg as_VMReg();
// accessors
int encoding() const {
assert(is_valid(), "invalid register"); return value();
}
bool is_valid() const { return 0 <= value() && value() < number_of_registers; }
bool is_volatile() const { return (0 <= (value()&0x7F) && (value()&0x7F) <= 7); }
bool is_nonvolatile() const { return (8 <= (value()&0x7F) && (value()&0x7F) <= 15); }
const char* name() const;
FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }
};
// The float registers of z/Architecture.
CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg, (-1));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F0, (0));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F1, (1));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F2, (2));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F3, (3));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F4, (4));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F5, (5));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F6, (6));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F7, (7));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F8, (8));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F9, (9));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F10, (10));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F11, (11));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F12, (12));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F13, (13));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F14, (14));
CONSTANT_REGISTER_DECLARATION(FloatRegister, Z_F15, (15));
#ifndef DONT_USE_REGISTER_DEFINES
#define fnoreg ((FloatRegister)(fnoreg_FloatRegisterEnumValue))
#define Z_F0 ((FloatRegister)( Z_F0_FloatRegisterEnumValue))
#define Z_F1 ((FloatRegister)( Z_F1_FloatRegisterEnumValue))
#define Z_F2 ((FloatRegister)( Z_F2_FloatRegisterEnumValue))
#define Z_F3 ((FloatRegister)( Z_F3_FloatRegisterEnumValue))
#define Z_F4 ((FloatRegister)( Z_F4_FloatRegisterEnumValue))
#define Z_F5 ((FloatRegister)( Z_F5_FloatRegisterEnumValue))
#define Z_F6 ((FloatRegister)( Z_F6_FloatRegisterEnumValue))
#define Z_F7 ((FloatRegister)( Z_F7_FloatRegisterEnumValue))
#define Z_F8 ((FloatRegister)( Z_F8_FloatRegisterEnumValue))
#define Z_F9 ((FloatRegister)( Z_F9_FloatRegisterEnumValue))
#define Z_F10 ((FloatRegister)( Z_F10_FloatRegisterEnumValue))
#define Z_F11 ((FloatRegister)( Z_F11_FloatRegisterEnumValue))
#define Z_F12 ((FloatRegister)( Z_F12_FloatRegisterEnumValue))
#define Z_F13 ((FloatRegister)( Z_F13_FloatRegisterEnumValue))
#define Z_F14 ((FloatRegister)( Z_F14_FloatRegisterEnumValue))
#define Z_F15 ((FloatRegister)( Z_F15_FloatRegisterEnumValue))
#endif // DONT_USE_REGISTER_DEFINES
// Need to know the total number of registers of all sorts for SharedInfo.
// Define a class that exports it.
class ConcreteRegisterImpl : public AbstractRegisterImpl {
public:
enum {
number_of_registers =
(RegisterImpl::number_of_registers +
FloatRegisterImpl::number_of_registers)
* 2 // register halves
+ 1 // condition code register
};
static const int max_gpr;
static const int max_fpr;
};
// Single, Double and Quad fp reg classes. These exist to map the ADLC
// encoding for a floating point register, to the FloatRegister number
// desired by the macroassembler. A FloatRegister is a number between
// 0 and 31 passed around as a pointer. For ADLC, an fp register encoding
// is the actual bit encoding used by the z/Architecture hardware. When ADLC used
// the macroassembler to generate an instruction that references, e.g., a
// double fp reg, it passed the bit encoding to the macroassembler via
// as_FloatRegister, which, for double regs > 30, returns an illegal
// register number.
//
// Therefore we provide the following classes for use by ADLC. Their
// sole purpose is to convert from z/Architecture register encodings to FloatRegisters.
// At some future time, we might replace FloatRegister with these classes,
// hence the definitions of as_xxxFloatRegister as class methods rather
// than as external inline routines.
class SingleFloatRegisterImpl;
typedef SingleFloatRegisterImpl *SingleFloatRegister;
class SingleFloatRegisterImpl {
public:
friend FloatRegister as_SingleFloatRegister(int encoding) {
assert(encoding < 32, "bad single float register encoding");
return as_FloatRegister(encoding);
}
};
class DoubleFloatRegisterImpl;
typedef DoubleFloatRegisterImpl *DoubleFloatRegister;
class DoubleFloatRegisterImpl {
public:
friend FloatRegister as_DoubleFloatRegister(int encoding) {
assert(encoding < 32, "bad double float register encoding");
return as_FloatRegister(((encoding & 1) << 5) | (encoding & 0x1e));
}
};
class QuadFloatRegisterImpl;
typedef QuadFloatRegisterImpl *QuadFloatRegister;
class QuadFloatRegisterImpl {
public:
friend FloatRegister as_QuadFloatRegister(int encoding) {
assert(encoding < 32 && ((encoding & 2) == 0), "bad quad float register encoding");
return as_FloatRegister(((encoding & 1) << 5) | (encoding & 0x1c));
}
};
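// Worked example (derived from the conversions above): the odd ADLC encoding 3
// maps to as_FloatRegister(((3 & 1) << 5) | (3 & 0x1e)) == as_FloatRegister(34),
// while the even encoding 4 maps straight through to as_FloatRegister(4).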
// Common register declarations used in assembler code.
REGISTER_DECLARATION(Register, Z_EXC_OOP, Z_R2);
REGISTER_DECLARATION(Register, Z_EXC_PC, Z_R3);
REGISTER_DECLARATION(Register, Z_RET, Z_R2);
REGISTER_DECLARATION(Register, Z_ARG1, Z_R2);
REGISTER_DECLARATION(Register, Z_ARG2, Z_R3);
REGISTER_DECLARATION(Register, Z_ARG3, Z_R4);
REGISTER_DECLARATION(Register, Z_ARG4, Z_R5);
REGISTER_DECLARATION(Register, Z_ARG5, Z_R6);
REGISTER_DECLARATION(Register, Z_SP, Z_R15);
REGISTER_DECLARATION(FloatRegister, Z_FRET, Z_F0);
REGISTER_DECLARATION(FloatRegister, Z_FARG1, Z_F0);
REGISTER_DECLARATION(FloatRegister, Z_FARG2, Z_F2);
REGISTER_DECLARATION(FloatRegister, Z_FARG3, Z_F4);
REGISTER_DECLARATION(FloatRegister, Z_FARG4, Z_F6);
#ifndef DONT_USE_REGISTER_DEFINES
#define Z_EXC_OOP AS_REGISTER(Register, Z_R2)
#define Z_EXC_PC AS_REGISTER(Register, Z_R3)
#define Z_RET AS_REGISTER(Register, Z_R2)
#define Z_ARG1 AS_REGISTER(Register, Z_R2)
#define Z_ARG2 AS_REGISTER(Register, Z_R3)
#define Z_ARG3 AS_REGISTER(Register, Z_R4)
#define Z_ARG4 AS_REGISTER(Register, Z_R5)
#define Z_ARG5 AS_REGISTER(Register, Z_R6)
#define Z_SP AS_REGISTER(Register, Z_R15)
#define Z_FRET AS_REGISTER(FloatRegister, Z_F0)
#define Z_FARG1 AS_REGISTER(FloatRegister, Z_F0)
#define Z_FARG2 AS_REGISTER(FloatRegister, Z_F2)
#define Z_FARG3 AS_REGISTER(FloatRegister, Z_F4)
#define Z_FARG4 AS_REGISTER(FloatRegister, Z_F6)
#endif
// Register declarations to be used in frame manager assembly code.
// Use only non-volatile registers in order to keep values across C-calls.
// Register to cache the integer value on top of the operand stack.
REGISTER_DECLARATION(Register, Z_tos, Z_R2);
// Register to cache the fp value on top of the operand stack.
REGISTER_DECLARATION(FloatRegister, Z_ftos, Z_F0);
// Expression stack pointer in interpreted java frame.
REGISTER_DECLARATION(Register, Z_esp, Z_R7);
// Address of current thread.
REGISTER_DECLARATION(Register, Z_thread, Z_R8);
// Address of current method. Only valid in interpreter_entry.
REGISTER_DECLARATION(Register, Z_method, Z_R9);
// Inline cache register. Used by C1 and C2.
REGISTER_DECLARATION(Register, Z_inline_cache, Z_R9);
// Frame pointer of current interpreter frame. Only valid while
// executing bytecodes.
REGISTER_DECLARATION(Register, Z_fp, Z_R9);
// Address of the locals array in an interpreted java frame.
REGISTER_DECLARATION(Register, Z_locals, Z_R12);
// Bytecode pointer.
REGISTER_DECLARATION(Register, Z_bcp, Z_R13);
// Bytecode which is dispatched (short lived!).
REGISTER_DECLARATION(Register, Z_bytecode, Z_R14);
#ifndef DONT_USE_REGISTER_DEFINES
#define Z_tos AS_REGISTER(Register, Z_R2)
#define Z_ftos AS_REGISTER(FloatRegister, Z_F0)
#define Z_esp AS_REGISTER(Register, Z_R7)
#define Z_thread AS_REGISTER(Register, Z_R8)
#define Z_method AS_REGISTER(Register, Z_R9)
#define Z_inline_cache AS_REGISTER(Register, Z_R9)
#define Z_fp AS_REGISTER(Register, Z_R9)
#define Z_locals AS_REGISTER(Register, Z_R12)
#define Z_bcp AS_REGISTER(Register, Z_R13)
#define Z_bytecode AS_REGISTER(Register, Z_R14)
#endif
// Temporary registers to be used within frame manager. We can use
// the nonvolatiles because the call stub has saved them.
// Use only non-volatile registers in order to keep values across C-calls.
REGISTER_DECLARATION(Register, Z_tmp_1, Z_R10);
REGISTER_DECLARATION(Register, Z_tmp_2, Z_R11);
REGISTER_DECLARATION(Register, Z_tmp_3, Z_R12);
REGISTER_DECLARATION(Register, Z_tmp_4, Z_R13);
#ifndef DONT_USE_REGISTER_DEFINES
#define Z_tmp_1 AS_REGISTER(Register, Z_R10)
#define Z_tmp_2 AS_REGISTER(Register, Z_R11)
#define Z_tmp_3 AS_REGISTER(Register, Z_R12)
#define Z_tmp_4 AS_REGISTER(Register, Z_R13)
#endif
// Scratch registers are volatile.
REGISTER_DECLARATION(Register, Z_R0_scratch, Z_R0);
REGISTER_DECLARATION(Register, Z_R1_scratch, Z_R1);
REGISTER_DECLARATION(FloatRegister, Z_fscratch_1, Z_F1);
#ifndef DONT_USE_REGISTER_DEFINES
#define Z_R0_scratch AS_REGISTER(Register, Z_R0)
#define Z_R1_scratch AS_REGISTER(Register, Z_R1)
#define Z_fscratch_1 AS_REGISTER(FloatRegister, Z_F1)
#endif
#endif // CPU_S390_VM_REGISTER_S390_HPP

@ -0,0 +1,226 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_s390.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
// we don't support splitting of relocations, so o must be zero:
assert(o == 0, "tried to split relocations");
if (!verify_only) {
switch (format()) {
case relocInfo::uncompressed_format:
nativeMovConstReg_at(addr())->set_data_plain(((intptr_t)x) + o, code());
break;
case relocInfo::compressed_format:
if (type() == relocInfo::metadata_type)
nativeMovConstReg_at(addr())->set_narrow_klass(((intptr_t)x) + o);
else if (type() == relocInfo::oop_type)
nativeMovConstReg_at(addr())->set_narrow_oop(((intptr_t)x) + o);
else
guarantee(false, "bad relocInfo type for relocInfo::narrow_oop_format");
break;
case relocInfo::pcrel_addr_format: // patch target location
nativeMovConstReg_at(addr())->set_pcrel_addr(((intptr_t)x) + o, code());
break;
case relocInfo::pcrel_data_format: // patch data at target location
nativeMovConstReg_at(addr())->set_pcrel_data(((intptr_t)x) + o, code());
break;
default:
assert(false, "not a valid relocInfo format");
break;
}
} else {
// TODO: Reading of narrow oops out of code stream is not implemented
// (see nativeMovConstReg::data()). Implement this if you want to verify.
// assert(x == (address) nativeMovConstReg_at(addr())->data(), "Instructions must match");
switch (format()) {
case relocInfo::uncompressed_format:
break;
case relocInfo::compressed_format:
break;
case relocInfo::pcrel_addr_format:
break;
case relocInfo::pcrel_data_format:
break;
default:
assert(false, "not a valid relocInfo format");
break;
}
}
}
address Relocation::pd_call_destination(address orig_addr) {
address inst_addr = addr();
if (NativeFarCall::is_far_call_at(inst_addr)) {
if (!ShortenBranches) {
if (MacroAssembler::is_call_far_pcrelative(inst_addr)) {
address a1 = MacroAssembler::get_target_addr_pcrel(orig_addr+MacroAssembler::nop_size());
#ifdef ASSERT
address a2 = MacroAssembler::get_target_addr_pcrel(inst_addr+MacroAssembler::nop_size());
address a3 = nativeFarCall_at(orig_addr)->destination();
address a4 = nativeFarCall_at(inst_addr)->destination();
if ((a1 != a3) || (a2 != a4)) {
unsigned int range = 128;
Assembler::dump_code_range(tty, inst_addr, range, "pc-relative call w/o ShortenBranches?");
Assembler::dump_code_range(tty, orig_addr, range, "pc-relative call w/o ShortenBranches?");
assert(false, "pc-relative call w/o ShortenBranches?");
}
#endif
return a1;
}
return (address)(-1);
}
NativeFarCall* call;
if (orig_addr == NULL) {
call = nativeFarCall_at(inst_addr);
} else {
if (MacroAssembler::is_call_far_patchable_pcrelative_at(inst_addr)) {
call = nativeFarCall_at(orig_addr);
} else {
call = nativeFarCall_at(orig_addr); // must access location (in CP) where destination is stored in unmoved code, because load from CP is pc-relative
}
}
return call->destination();
}
if (NativeCall::is_call_at(inst_addr)) {
NativeCall* call = nativeCall_at(inst_addr);
if (call->is_pcrelative()) {
intptr_t off = inst_addr - orig_addr;
return (address) (call->destination()-off);
}
}
return (address) nativeMovConstReg_at(inst_addr)->data();
}
void Relocation::pd_set_call_destination(address x) {
address inst_addr = addr();
if (NativeFarCall::is_far_call_at(inst_addr)) {
if (!ShortenBranches) {
if (MacroAssembler::is_call_far_pcrelative(inst_addr)) {
address a1 = MacroAssembler::get_target_addr_pcrel(inst_addr+MacroAssembler::nop_size());
#ifdef ASSERT
address a3 = nativeFarCall_at(inst_addr)->destination();
if (a1 != a3) {
unsigned int range = 128;
Assembler::dump_code_range(tty, inst_addr, range, "pc-relative call w/o ShortenBranches?");
assert(false, "pc-relative call w/o ShortenBranches?");
}
#endif
nativeFarCall_at(inst_addr)->set_destination(x, 0);
return;
}
assert(x == (address)-1, "consistency check");
return;
}
int toc_offset = -1;
if (type() == relocInfo::runtime_call_w_cp_type) {
toc_offset = ((runtime_call_w_cp_Relocation *)this)->get_constant_pool_offset();
}
if (toc_offset>=0) {
NativeFarCall* call = nativeFarCall_at(inst_addr);
call->set_destination(x, toc_offset);
return;
}
}
if (NativeCall::is_call_at(inst_addr)) {
NativeCall* call = nativeCall_at(inst_addr);
if (call->is_pcrelative()) {
call->set_destination_mt_safe(x);
return;
}
}
// constant is absolute, must use x
nativeMovConstReg_at(inst_addr)->set_data(((intptr_t)x));
}
// Store the new target address into an oop_Relocation cell, if any.
// Return an indication of whether an update happened.
bool relocInfo::update_oop_pool(address begin, address end, address newTarget, CodeBlob* cb) {
// Try to find the CodeBlob, if not given by caller
if (cb == NULL) cb = CodeCache::find_blob(begin);
#ifdef ASSERT
else
assert(cb == CodeCache::find_blob(begin), "consistency");
#endif
// 'RelocIterator' requires an nmethod
nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
if (nm != NULL) {
RelocIterator iter(nm, begin, end);
oop* oop_addr = NULL;
Metadata** metadata_addr = NULL;
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation *r = iter.oop_reloc();
if (oop_addr == NULL) {
oop_addr = r->oop_addr();
*oop_addr = (oop)newTarget;
} else {
assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
}
}
if (iter.type() == relocInfo::metadata_type) {
metadata_Relocation *r = iter.metadata_reloc();
if (metadata_addr == NULL) {
metadata_addr = r->metadata_addr();
*metadata_addr = (Metadata*)newTarget;
} else {
assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
}
}
}
return oop_addr || metadata_addr;
}
return false;
}
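// Illustrative sketch (hypothetical caller): after patching a set-oop site
// spanning [begin, end), mirror the new value into the oop pool so GC keeps
// seeing a consistent root. Passing NULL lets the CodeBlob be looked up.
//
//   bool updated = relocInfo::update_oop_pool(begin, end, (address)new_obj, NULL);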
address* Relocation::pd_address_in_code() {
ShouldNotReachHere();
return 0;
}
address Relocation::pd_get_address_from_code() {
return (address) (nativeMovConstReg_at(addr())->data());
}
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}
void metadata_Relocation::pd_fix_value(address x) {
}

@ -0,0 +1,117 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_RELOCINFO_S390_HPP
#define CPU_S390_VM_RELOCINFO_S390_HPP
//----------------------------
// relocInfo layout
//----------------------------
// This description should be contained in code/relocInfo.hpp
// but was put here to minimize shared code diffs.
// Relocation information for a nmethod is stored in compressed
// form in an array of element type short int (16 bits).
// Each array element constitutes one relocInfo record.
// The layout of one such record is described here.
// +------------+---+---+------------------------------+
// |    type    |  fmt  |      offset/offset_unit      |
// +------------+---+---+------------------------------+
//
// |<-- value_width (16) ----------------------------->|
// |<type_width>|<-- nontype_width (12) -------------->|
//      (4)
// |            |<--+-->|<-- offset_width (10) ------->|
//                   /   \
//                  / (2) \
//                /<--format->\
//                |   width   |
//
// only for type == data_prefix_tag:
// +------------+---+---+------------------------------+
// |    type    |   |            data                  |
// +------------+---+---+------------------------------+
// |     15     |<->|<-- datalen_width (11) ---------->|
//                |
//                +--datalen_tag (1)
// relocType
// The type field holds a value of relocType (which is
// an enum of all possible relocation types). Currently,
// there are 16 distinct relocation types, requiring
// type_width to be (at least) 4.
// relocFormat
// The format field holds a value of relocFormat (which is
// an enum of all possible relocation formats). Currently,
// there are 4 distinct relocation formats, requiring
// format_width to be (at least) 2.
// offset
// Each relocInfo is related to one specific address in the CodeBlob.
// The address always points to the first byte of the target instruction.
// It does NOT refer directly to the relocation subfield or embedded constant.
// offset contains the distance of this relocInfo from the previous one.
// offset is scaled by offset_unit (the platform-specific instruction
// alignment requirement) to maximize the encodable distance.
// To obtain the absolute address in the CodeBlob the relocInfo is
// related to, you have to iterate over all relocInfos from the
// beginning, and then use RelocIterator::addr() to get the address.
// relocType == data_prefix_tag
// These are relocInfo records containing inline data that belongs to
// the next non-data relocInfo record. Usage of that inline data is
// specific and private to that relocInfo record.
// For details refer to code/relocInfo.hpp
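// Worked example (illustrative): with offset_unit == 2 (see the enum below),
// an offset field holding the value 5 refers to the instruction located
// 5 * offset_unit == 10 bytes past the address of the previous relocInfo.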
// machine-dependent parts of class relocInfo
private:
enum {
// Instructions are HW (2-byte) aligned on z/Architecture.
offset_unit = 2,
// Encodes Assembler::disp32_operand vs. Assembler::imm64_operand.
// (Assembler::call32_operand is used on call instructions only.)
format_width = 2
};
public:
enum relocFormat {
no_format = 0,
uncompressed_format = 0, // Relocation is for a regular oop.
compressed_format = 1, // Relocation is for a narrow (compressed) oop or klass.
// Similar to relocInfo::narrow_oop_in_const.
pcrel_addr_format = 2, // Relocation is for the target LOCATION of a pc-relative instruction.
pcrel_data_format = 3 // Relocation is for the target data of a pc-relative instruction.
};
// Store the new target address into an oop_Relocation cell, if any.
// Return indication if update happened.
static bool update_oop_pool(address begin, address end, address newTarget, CodeBlob* cb);
#endif // CPU_S390_VM_RELOCINFO_S390_HPP

@ -0,0 +1,152 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_s390.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vmreg_s390.inline.hpp"
#endif
#define __ masm->
//------------------------------generate_exception_blob---------------------------
// Creates the exception blob at the end.
// Using the exception blob, this code is jumped to from a compiled method.
// (See emit_exception_handler in the s390.ad file.)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee save registers), unwind the frame, and jump to the
// exception handler for the nmethod if there is no Java level handler
// for the nmethod.
//
// This code is entered with a branch.
//
// Arguments:
// Z_R2(=Z_ARG1): exception oop
// Z_R3(=Z_ARG2): exception pc
//
// Results:
// Z_R2: exception oop
// Z_R3: exception pc in caller
// destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
void OptoRuntime::generate_exception_blob() {
// Allocate space for the code
ResourceMark rm;
// Setup code generation tools
CodeBuffer buffer("exception_blob", 2048, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer);
Register handle_exception = Z_ARG5;
__ verify_thread();
__ z_stg(Z_ARG1/*exception oop*/, Address(Z_thread, JavaThread::exception_oop_offset()));
__ z_stg(Z_ARG2/*issuing pc*/, Address(Z_thread, JavaThread::exception_pc_offset()));
// Store the issuing pc as return pc into the
// caller's frame. Stack-walking needs it. R14 is not valid here,
// because this code gets entered with a jump.
__ z_stg(Z_ARG2/*issuing pc*/, _z_abi(return_pc), Z_SP);
// The following call to function OptoRuntime::handle_exception_C
// does all the hard work. It checks if an
// exception catch exists in the method. If so, it returns the
// handler address. If the nmethod has been deoptimized and it had
// a handler the handler address is the deopt blob's
// unpack_with_exception entry.
// Push a C frame for the exception blob. It is needed for the
// C call later on.
Register saved_sp = Z_R11;
__ z_lgr(saved_sp, Z_SP);
// Push a frame for the blob.
int frame_size = __ push_frame_abi160(0);
__ get_PC(Z_R1/*scratch*/);
__ set_last_Java_frame(/*sp=*/Z_SP, /*pc=*/Z_R1);
// This call can lead to deoptimization of the nmethod holding the handler.
__ z_lgr(Z_ARG1, Z_thread); // argument of C function
__ call_c(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C));
__ z_lgr(handle_exception, Z_RET);
__ reset_last_Java_frame();
// Pop the exception blob's C frame that has been pushed before.
__ z_lgr(Z_SP, saved_sp);
// [Z_RET]!=NULL was possible in hotspot5 but not in sapjvm6.
// C2I adapter extensions are now removed by a resize in the frame manager
// (unwind_initial_activation_pending_exception).
#ifdef ASSERT
__ z_ltgr(handle_exception, handle_exception);
__ asm_assert_ne("handler must not be NULL", 0x852);
#endif
// Handle_exception contains the handler address. If the associated frame
// has been deoptimized then the handler has been patched to jump to
// the deoptimization blob.
// If the exception handler jumps to the deoptimization blob, the
// exception pc will be read from there.
__ z_lg(Z_ARG2, Address(Z_thread, JavaThread::exception_pc_offset()));
__ z_lg(Z_ARG1, Address(Z_thread, JavaThread::exception_oop_offset()));
// Clear the exception oop so GC no longer processes it as a root.
__ clear_mem(Address(Z_thread, JavaThread::exception_oop_offset()), sizeof(intptr_t));
#ifdef ASSERT
__ clear_mem(Address(Z_thread, JavaThread::exception_handler_pc_offset()), sizeof(intptr_t));
__ clear_mem(Address(Z_thread, JavaThread::exception_pc_offset()), sizeof(intptr_t));
#endif
__ z_br(handle_exception);
// Make sure all code is generated.
masm->flush();
// Set exception blob.
OopMapSet *oop_maps = NULL;
_exception_blob = ExceptionBlob::create(&buffer, oop_maps, frame_size/wordSize);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -0,0 +1,569 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::zarch::_handler_for_unsafe_access_entry = NULL;
address StubRoutines::zarch::_partial_subtype_check = NULL;
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by the trot instruction.
address StubRoutines::zarch::_trot_table_addr = NULL;
int StubRoutines::zarch::_atomic_memory_operation_lock = StubRoutines::zarch::unlocked;
#define __ masm->
void StubRoutines::zarch::generate_load_crc_table_addr(MacroAssembler* masm, Register table) {
__ load_absolute_address(table, StubRoutines::_crc_table_adr);
#ifdef ASSERT
assert(_crc_table_adr != NULL, "CRC lookup table address must be initialized by now");
{
Label L;
__ load_const_optimized(Z_R0, StubRoutines::_crc_table_adr);
__ z_cgr(table, Z_R0); // safety net
__ z_bre(L);
__ z_illtrap();
__ asm_assert_eq("crc_table: external word relocation required for load_absolute_address", 0x33);
__ bind(L);
}
{
Label L;
__ load_const_optimized(Z_R0, 0x77073096UL);
__ z_cl(Z_R0, Address(table, 4)); // safety net
__ z_bre(L);
__ z_l(Z_R0, Address(table, 4)); // Load data from memory, we know the constant we compared against.
__ z_illtrap();
__ asm_assert_eq("crc_table: address or contents seems to be messed up", 0x22);
__ bind(L);
}
#endif
}
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by the trot instruction.
void StubRoutines::zarch::generate_load_trot_table_addr(MacroAssembler* masm, Register table) {
RelocationHolder rspec = external_word_Relocation::spec((address)_trot_table);
__ relocate(rspec);
__ load_absolute_address(table, _trot_table_addr);
#ifdef ASSERT
assert(_trot_table_addr != NULL, "Translate table address must be initialized by now");
assert((p2i(_trot_table_addr) & (TROT_ALIGNMENT-1)) == 0, "Translate table alignment error");
for (int i = 0; i < 256; i++) {
assert(i == *((jshort*)(_trot_table_addr+2*i)), "trot_table[%d] = %d", i, *((jshort*)(_trot_table_addr+2*i)));
}
{
Label L;
__ load_const_optimized(Z_R0, StubRoutines::zarch::_trot_table_addr);
__ z_cgr(table, Z_R0); // safety net
__ z_bre(L);
__ z_illtrap();
__ asm_assert_eq("crc_table: external word relocation does not work for load_absolute_address", 0x33);
__ bind(L);
}
{
Label L;
__ load_const_optimized(Z_R0, 0x0004000500060007UL);
__ z_clg(Z_R0, Address(table, 8)); // safety net
__ z_bre(L);
__ z_lg(Z_R0, Address(table, 8)); // Load data from memory, we know the constant we compared against.
__ z_illtrap();
__ asm_assert_eq("trot_table: address or contents seems to be messed up", 0x22);
__ bind(L);
}
#endif
}
/**
* trot_table[]
*/
jlong StubRoutines::zarch::_trot_table[TROT_COLUMN_SIZE] = {
0x0000000100020003UL, 0x0004000500060007UL, 0x00080009000a000bUL, 0x000c000d000e000fUL,
0x0010001100120013UL, 0x0014001500160017UL, 0x00180019001a001bUL, 0x001c001d001e001fUL,
0x0020002100220023UL, 0x0024002500260027UL, 0x00280029002a002bUL, 0x002c002d002e002fUL,
0x0030003100320033UL, 0x0034003500360037UL, 0x00380039003a003bUL, 0x003c003d003e003fUL,
0x0040004100420043UL, 0x0044004500460047UL, 0x00480049004a004bUL, 0x004c004d004e004fUL,
0x0050005100520053UL, 0x0054005500560057UL, 0x00580059005a005bUL, 0x005c005d005e005fUL,
0x0060006100620063UL, 0x0064006500660067UL, 0x00680069006a006bUL, 0x006c006d006e006fUL,
0x0070007100720073UL, 0x0074007500760077UL, 0x00780079007a007bUL, 0x007c007d007e007fUL,
0x0080008100820083UL, 0x0084008500860087UL, 0x00880089008a008bUL, 0x008c008d008e008fUL,
0x0090009100920093UL, 0x0094009500960097UL, 0x00980099009a009bUL, 0x009c009d009e009fUL,
0x00a000a100a200a3UL, 0x00a400a500a600a7UL, 0x00a800a900aa00abUL, 0x00ac00ad00ae00afUL,
0x00b000b100b200b3UL, 0x00b400b500b600b7UL, 0x00b800b900ba00bbUL, 0x00bc00bd00be00bfUL,
0x00c000c100c200c3UL, 0x00c400c500c600c7UL, 0x00c800c900ca00cbUL, 0x00cc00cd00ce00cfUL,
0x00d000d100d200d3UL, 0x00d400d500d600d7UL, 0x00d800d900da00dbUL, 0x00dc00dd00de00dfUL,
0x00e000e100e200e3UL, 0x00e400e500e600e7UL, 0x00e800e900ea00ebUL, 0x00ec00ed00ee00efUL,
0x00f000f100f200f3UL, 0x00f400f500f600f7UL, 0x00f800f900fa00fbUL, 0x00fc00fd00fe00ffUL
};
// crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.h
juint StubRoutines::zarch::_crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE] = {
{
0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
0x2d02ef8dUL
#ifdef CRC32_BYFOUR
},
{
0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL,
0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL,
0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL,
0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL,
0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL,
0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL,
0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL,
0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL,
0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL,
0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL,
0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL,
0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL,
0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL,
0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL,
0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL,
0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL,
0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL,
0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL,
0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL,
0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL,
0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL,
0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL,
0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL,
0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL,
0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL,
0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL,
0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL,
0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL,
0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL,
0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL,
0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL,
0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL,
0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL,
0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL,
0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL,
0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL,
0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL,
0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL,
0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL,
0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL,
0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL,
0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL,
0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL,
0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL,
0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL,
0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL,
0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL,
0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL,
0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL,
0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL,
0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL,
0x9324fd72UL
},
{
0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL,
0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL,
0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL,
0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL,
0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL,
0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL,
0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL,
0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL,
0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL,
0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL,
0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL,
0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL,
0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL,
0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL,
0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL,
0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL,
0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL,
0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL,
0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL,
0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL,
0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL,
0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL,
0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL,
0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL,
0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL,
0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL,
0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL,
0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL,
0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL,
0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL,
0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL,
0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL,
0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL,
0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL,
0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL,
0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL,
0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL,
0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL,
0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL,
0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL,
0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL,
0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL,
0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL,
0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL,
0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL,
0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL,
0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL,
0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL,
0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL,
0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL,
0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL,
0xbe9834edUL
},
{
0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL,
0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL,
0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL,
0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL,
0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL,
0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL,
0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL,
0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL,
0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL,
0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL,
0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL,
0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL,
0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL,
0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL,
0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL,
0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL,
0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL,
0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL,
0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL,
0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL,
0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL,
0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL,
0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL,
0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL,
0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL,
0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL,
0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL,
0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL,
0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL,
0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL,
0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL,
0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL,
0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL,
0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL,
0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL,
0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL,
0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL,
0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL,
0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL,
0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL,
0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL,
0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL,
0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL,
0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL,
0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL,
0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL,
0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL,
0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL,
0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL,
0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL,
0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL,
0xde0506f1UL
},
{
0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL,
0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL,
0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL,
0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL,
0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL,
0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL,
0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL,
0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL,
0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL,
0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL,
0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL,
0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL,
0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL,
0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL,
0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL,
0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL,
0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL,
0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL,
0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL,
0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL,
0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL,
0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL,
0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL,
0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL,
0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL,
0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL,
0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL,
0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL,
0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL,
0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL,
0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL,
0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL,
0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL,
0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL,
0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL,
0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL,
0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL,
0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL,
0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL,
0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL,
0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL,
0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL,
0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL,
0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL,
0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL,
0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL,
0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL,
0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL,
0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL,
0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL,
0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL,
0x8def022dUL
},
{
0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL,
0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL,
0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL,
0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL,
0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL,
0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL,
0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL,
0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL,
0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL,
0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL,
0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL,
0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL,
0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL,
0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL,
0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL,
0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL,
0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL,
0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL,
0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL,
0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL,
0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL,
0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL,
0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL,
0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL,
0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL,
0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL,
0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL,
0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL,
0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL,
0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL,
0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL,
0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL,
0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL,
0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL,
0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL,
0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL,
0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL,
0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL,
0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL,
0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL,
0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL,
0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL,
0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL,
0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL,
0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL,
0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL,
0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL,
0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL,
0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL,
0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL,
0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL,
0x72fd2493UL
},
{
0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL,
0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL,
0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL,
0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL,
0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL,
0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL,
0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL,
0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL,
0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL,
0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL,
0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL,
0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL,
0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL,
0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL,
0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL,
0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL,
0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL,
0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL,
0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL,
0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL,
0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL,
0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL,
0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL,
0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL,
0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL,
0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL,
0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL,
0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL,
0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL,
0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL,
0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL,
0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL,
0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL,
0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL,
0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL,
0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL,
0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL,
0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL,
0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL,
0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL,
0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL,
0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL,
0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL,
0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL,
0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL,
0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL,
0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL,
0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL,
0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL,
0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL,
0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL,
0xed3498beUL
},
{
0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL,
0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL,
0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL,
0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL,
0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL,
0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL,
0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL,
0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL,
0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL,
0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL,
0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL,
0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL,
0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL,
0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL,
0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL,
0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL,
0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL,
0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL,
0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL,
0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL,
0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL,
0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL,
0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL,
0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL,
0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL,
0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL,
0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL,
0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL,
0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL,
0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL,
0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL,
0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL,
0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL,
0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL,
0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL,
0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL,
0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL,
0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL,
0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL,
0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL,
0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL,
0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL,
0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL,
0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL,
0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL,
0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL,
0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL,
0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL,
0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL,
0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL,
0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL,
0xf10605deUL
#endif
}
};
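// Editor's sketch (assuming zlib-1.2.8's little-endian slicing-by-4 scheme,
// which uses tables 0..3): four input bytes are folded into the CRC in one
// step, replacing four dependent shift-and-lookup iterations:
//
//   static juint crc32_fold4(juint crc, juint four_bytes_le) {
//     juint c = crc ^ four_bytes_le;  // little-endian 4-byte load assumed
//     return _crc_table[3][ c        & 0xff] ^
//            _crc_table[2][(c >>  8) & 0xff] ^
//            _crc_table[1][(c >> 16) & 0xff] ^
//            _crc_table[0][(c >> 24) & 0xff];
//   }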

@@ -0,0 +1,104 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_STUBROUTINES_ZARCH_64_64_HPP
#define CPU_S390_VM_STUBROUTINES_ZARCH_64_64_HPP
// This file holds the platform-specific parts of the StubRoutines
// definition. See stubRoutines.hpp for a description of how to extend it.
static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
enum { // Platform dependent constants.
// TODO: May be able to shrink this a lot
code_size1 = 20000, // Simply increase if too small (the assembler will crash otherwise).
code_size2 = 20000 // Simply increase if too small (the assembler will crash otherwise).
};
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 5000
};
#define CRC32_COLUMN_SIZE 256
#define CRC32_BYFOUR
#ifdef CRC32_BYFOUR
#define CRC32_TABLES 8
#else
#define CRC32_TABLES 1
#endif
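// Following zlib's layout, tables 0..3 drive the little-endian slicing-by-4
// loop, while tables 4..7 hold the byte-swapped variants for big-endian processing.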
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
#define TROT_ALIGNMENT 8 // Required by instruction,
// guaranteed by jlong table element type.
#define TROT_COLUMN_SIZE (256*sizeof(jchar)/sizeof(jlong))
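// (256 * 2 bytes) / 8 bytes = 64 jlong entries, i.e. four jchar table entries packed per jlong.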
class zarch {
friend class StubGenerator;
public:
enum { nof_instance_allocators = 10 };
// allocator lock values
enum {
unlocked = 0,
locked = 1
};
private:
static address _handler_for_unsafe_access_entry;
static int _atomic_memory_operation_lock;
static address _partial_subtype_check;
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
static address _trot_table_addr;
static jlong _trot_table[TROT_COLUMN_SIZE];
public:
// Global lock for everyone who needs to use atomic_compare_and_exchange
// or atomic_increment -- should probably use more locks for better
// scalability -- for instance, one for each eden space or group of spaces.
// Address of the lock for atomic_compare_and_exchange.
static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; }
// Accessor and mutator for _atomic_memory_operation_lock.
static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
static address handler_for_unsafe_access_entry() { return _handler_for_unsafe_access_entry; }
static address partial_subtype_check() { return _partial_subtype_check; }
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
static void generate_load_trot_table_addr(MacroAssembler* masm, Register table);
};
#endif // CPU_S390_VM_STUBROUTINES_ZARCH_64_64_HPP

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,41 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_TEMPLATETABLE_S390_HPP
#define CPU_S390_VM_TEMPLATETABLE_S390_HPP
static void prepare_invoke(int byte_no,
Register method, // linked method (or i-klass)
Register index = noreg, // itable index, MethodType, etc.
Register recv = noreg, // If caller wants to see it.
Register flags = noreg); // If caller wants to test it.
static void invokevirtual_helper(Register index, Register recv,
Register flags);
// Helpers
static void index_check(Register array, Register index, unsigned int shift);
static void index_check_without_pop(Register array, Register index);
#endif // CPU_S390_VM_TEMPLATETABLE_S390_HPP

@@ -0,0 +1,41 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_VMSTRUCTS_S390_HPP
#define CPU_S390_VM_VMSTRUCTS_S390_HPP
// These are the CPU-specific fields, types and integer
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field)
#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)
#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
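// All four macros expand to nothing: s390 contributes no CPU-specific entries to the Serviceability Agent tables.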
#endif // CPU_S390_VM_VMSTRUCTS_S390_HPP

File diff suppressed because it is too large

@@ -0,0 +1,486 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_VM_VERSION_S390_HPP
#define CPU_S390_VM_VM_VERSION_S390_HPP
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
class VM_Version: public Abstract_VM_Version {
protected:
// The following list contains the (approximate) announcement/availability
// dates of the System z generations in existence as of now that
// implement the z/Architecture.
// z900: 2000-10
// z990: 2003-06
// z9: 2005-09
// z10: 2008-02
// z196: 2010-08
// ec12: 2012-09
// z13: 2015-03
//
// z/Architecture is the name of the 64-bit extension of the 31-bit s390
// architecture.
//
// ----------------------------------------------
// --- FeatureBitString Bits 0.. 63 (DW[0]) ---
// ----------------------------------------------
// Bit-position ruler, digits read vertically in steps of four (4, 8, ..., 64);
// the same scheme is used for the rulers below.
// 11222334445566
// 04826048260482604
#define StoreFacilityListExtendedMask 0x0100000000000000UL // z9
#define ETF2Mask 0x0000800000000000UL // z900
#define CryptoFacilityMask 0x0000400000000000UL // z990
#define LongDispFacilityMask 0x0000200000000000UL // z900 with microcode update
#define LongDispFacilityHighPerfMask 0x0000300000000000UL // z990
#define HFPMultiplyAndAddMask 0x0000080000000000UL // z990
#define ExtImmedFacilityMask 0x0000040000000000UL // z9
#define ETF3Mask 0x0000020000000000UL // z990/z9 (?)
#define HFPUnnormalizedMask 0x0000010000000000UL // z9
#define ETF2EnhancementMask 0x0000008000000000UL // z9
#define StoreClockFastMask 0x0000004000000000UL // z9
#define ParsingEnhancementsMask 0x0000002000000000UL // z10(?)
#define ETF3EnhancementMask 0x0000000200000000UL // z9
#define ExtractCPUTimeMask 0x0000000100000000UL // z10
#define CompareSwapStoreMask 0x00000000c0000000UL // z10
#define GnrlInstrExtFacilityMask 0x0000000020000000UL // z10
#define ExecuteExtensionsMask 0x0000000010000000UL // z10
#define FPExtensionsMask 0x0000000004000000UL // z196
#define FPSupportEnhancementsMask 0x0000000000400000UL // z10
#define DecimalFloatingPointMask 0x0000000000300000UL // z10
// z196 begin
#define DistinctOpndsMask 0x0000000000040000UL // z196
#define FastBCRSerializationMask DistinctOpndsMask
#define HighWordMask DistinctOpndsMask
#define LoadStoreConditionalMask DistinctOpndsMask
#define PopulationCountMask DistinctOpndsMask
#define InterlockedAccess1Mask DistinctOpndsMask
// z196 end
// EC12 begin
#define DFPZonedConversionMask 0x0000000000008000UL // ec12
#define MiscInstrExtMask 0x0000000000004000UL // ec12
#define ExecutionHintMask MiscInstrExtMask
#define LoadAndTrapMask MiscInstrExtMask
#define ProcessorAssistMask MiscInstrExtMask
#define ConstrainedTxExecutionMask 0x0000000000002000UL // ec12
#define InterlockedAccess2Mask 0x0000000000000800UL // ec12
// EC12 end
// z13 begin
#define LoadStoreConditional2Mask 0x0000000000000400UL // z13
#define CryptoExtension5Mask 0x0000000000000040UL // z13
// z13 end
// Feature-DW[0] starts to fill up. Use of these masks is risky.
#define TestFeature1ImplMask 0x0000000000000001UL
#define TestFeature2ImplMask 0x0000000000000002UL
#define TestFeature4ImplMask 0x0000000000000004UL
#define TestFeature8ImplMask 0x0000000000000008UL
// ----------------------------------------------
// --- FeatureBitString Bits 64..127 (DW[1]) ---
// ----------------------------------------------
// 11111111
// 66778889900011222
// 48260482604826048
#define TransactionalExecutionMask 0x0040000000000000UL // ec12
#define CryptoExtension3Mask 0x0008000000000000UL // z196
#define CryptoExtension4Mask 0x0004000000000000UL // z196
#define DFPPackedConversionMask 0x0000800000000000UL // z13
// ----------------------------------------------
// --- FeatureBitString Bits 128..192 (DW[2]) ---
// ----------------------------------------------
// 11111111111111111
// 23344455666778889
// 82604826048260482
#define VectorFacilityMask 0x4000000000000000UL // z13, not avail in VM guest mode!
enum {
_max_cache_levels = 8, // As limited by ECAG instruction.
_features_buffer_len = 4, // in DW
_code_buffer_len = 2*256 // For feature detection code.
};
static unsigned long _features[_features_buffer_len];
static unsigned long _cipher_features[_features_buffer_len];
static unsigned long _msgdigest_features[_features_buffer_len];
static unsigned int _nfeatures;
static unsigned int _ncipher_features;
static unsigned int _nmsgdigest_features;
static unsigned int _Dcache_lineSize;
static unsigned int _Icache_lineSize;
static bool _is_determine_features_test_running;
static bool test_feature_bit(unsigned long* featureBuffer, int featureNum, unsigned int bufLen);
static void set_features_string();
static void print_features_internal(const char* text, bool print_anyway=false);
static void determine_features();
static long call_getFeatures(unsigned long* buffer, int buflen, int functionCode);
static void set_getFeatures(address entryPoint);
static int calculate_ECAG_functionCode(unsigned int attributeIndication,
unsigned int levelIndication,
unsigned int typeIndication);
// Setting features via the march=z900|z990|z9|z10|z196|ec12|z13|ztest command-line option.
static void reset_features(bool reset);
static void set_features_z900(bool reset = true);
static void set_features_z990(bool reset = true);
static void set_features_z9(bool reset = true);
static void set_features_z10(bool reset = true);
static void set_features_z196(bool reset = true);
static void set_features_ec12(bool reset = true);
static void set_features_z13(bool reset = true);
static void set_features_from(const char* march);
// Get the CPU type from feature bit settings.
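// Each generation is identified by the newest facility it introduced together with the absence of the next generation's facility.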
static bool is_z900() { return has_long_displacement() && !has_long_displacement_fast(); }
static bool is_z990() { return has_long_displacement_fast() && !has_extended_immediate(); }
static bool is_z9() { return has_extended_immediate() && !has_GnrlInstrExtensions(); }
static bool is_z10() { return has_GnrlInstrExtensions() && !has_DistinctOpnds(); }
static bool is_z196() { return has_DistinctOpnds() && !has_MiscInstrExt(); }
static bool is_ec12() { return has_MiscInstrExt() && !has_CryptoExt5(); }
static bool is_z13() { return has_CryptoExt5(); }
// Get information about cache line sizes.
// As of now and for the foreseeable future, the line size of all cache levels is the same: 256 bytes.
static unsigned int Dcache_lineSize(unsigned int level = 0) { return _Dcache_lineSize; }
static unsigned int Icache_lineSize(unsigned int level = 0) { return _Icache_lineSize; }
public:
// Need to use a nested class with an unscoped enum;
// the C++11 declaration "enum class Cipher { ... }" is not supported.
class CipherMode {
public:
enum {
cipher = 0x00,
decipher = 0x80
};
};
class Cipher {
public:
enum { // KM only!!! KMC uses different parmBlk sizes.
_Query = 0,
_DEA = 1,
_TDEA128 = 2,
_TDEA192 = 3,
_EncryptedDEA = 9,
_EncryptedDEA128 = 10,
_EncryptedDEA192 = 11,
_AES128 = 18,
_AES192 = 19,
_AES256 = 20,
_EnccryptedAES128 = 26,
_EnccryptedAES192 = 27,
_EnccryptedAES256 = 28,
_XTSAES128 = 50,
_XTSAES256 = 52,
_EncryptedXTSAES128 = 58,
_EncryptedXTSAES256 = 60,
_PRNG = 67,
_featureBits = 128,
// Parameter block sizes (in bytes) for KM instruction.
_Query_parmBlk = 16,
_DEA_parmBlk = 8,
_TDEA128_parmBlk = 16,
_TDEA192_parmBlk = 24,
_EncryptedDEA_parmBlk = 32,
_EncryptedDEA128_parmBlk = 40,
_EncryptedDEA192_parmBlk = 48,
_AES128_parmBlk = 16,
_AES192_parmBlk = 24,
_AES256_parmBlk = 32,
_EnccryptedAES128_parmBlk = 48,
_EnccryptedAES192_parmBlk = 56,
_EnccryptedAES256_parmBlk = 64,
_XTSAES128_parmBlk = 32,
_XTSAES256_parmBlk = 48,
_EncryptedXTSAES128_parmBlk = 64,
_EncryptedXTSAES256_parmBlk = 80,
// Parameter block sizes (in bytes) for KMC instruction.
_Query_parmBlk_C = 16,
_DEA_parmBlk_C = 16,
_TDEA128_parmBlk_C = 24,
_TDEA192_parmBlk_C = 32,
_EncryptedDEA_parmBlk_C = 40,
_EncryptedDEA128_parmBlk_C = 48,
_EncryptedDEA192_parmBlk_C = 56,
_AES128_parmBlk_C = 32,
_AES192_parmBlk_C = 40,
_AES256_parmBlk_C = 48,
_EnccryptedAES128_parmBlk_C = 64,
_EnccryptedAES192_parmBlk_C = 72,
_EnccryptedAES256_parmBlk_C = 80,
_XTSAES128_parmBlk_C = 32,
_XTSAES256_parmBlk_C = 48,
_EncryptedXTSAES128_parmBlk_C = 64,
_EncryptedXTSAES256_parmBlk_C = 80,
_PRNG_parmBlk_C = 32,
// Data block sizes (in bytes).
_Query_dataBlk = 0,
_DEA_dataBlk = 8,
_TDEA128_dataBlk = 8,
_TDEA192_dataBlk = 8,
_EncryptedDEA_dataBlk = 8,
_EncryptedDEA128_dataBlk = 8,
_EncryptedDEA192_dataBlk = 8,
_AES128_dataBlk = 16,
_AES192_dataBlk = 16,
_AES256_dataBlk = 16,
_EnccryptedAES128_dataBlk = 16,
_EnccryptedAES192_dataBlk = 16,
_EnccryptedAES256_dataBlk = 16,
_XTSAES128_dataBlk = 16,
_XTSAES256_dataBlk = 16,
_EncryptedXTSAES128_dataBlk = 16,
_EncryptedXTSAES256_dataBlk = 16,
_PRNG_dataBlk = 8,
};
};
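// Editor's sketch (hypothetical helper, not part of this class): selecting the
// KM parameter block size for a given function code follows the table above:
//
//   static int km_parmBlk_size(int fc) {
//     switch (fc) {
//       case Cipher::_AES128: return Cipher::_AES128_parmBlk;  // 16 bytes
//       case Cipher::_AES192: return Cipher::_AES192_parmBlk;  // 24 bytes
//       case Cipher::_AES256: return Cipher::_AES256_parmBlk;  // 32 bytes
//       default:              return -1;  // extend with further codes as needed
//     }
//   }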
class MsgDigest {
public:
enum {
_Query = 0,
_SHA1 = 1,
_SHA256 = 2,
_SHA512 = 3,
_GHASH = 65,
_featureBits = 128,
// Parameter block sizes (in bytes) for KIMD.
_Query_parmBlk_I = 16,
_SHA1_parmBlk_I = 20,
_SHA256_parmBlk_I = 32,
_SHA512_parmBlk_I = 64,
_GHASH_parmBlk_I = 32,
// Parameter block sizes (in bytes) for KLMD.
_Query_parmBlk_L = 16,
_SHA1_parmBlk_L = 28,
_SHA256_parmBlk_L = 40,
_SHA512_parmBlk_L = 80,
// Data block sizes (in bytes).
_Query_dataBlk = 0,
_SHA1_dataBlk = 64,
_SHA256_dataBlk = 64,
_SHA512_dataBlk = 128,
_GHASH_dataBlk = 16
};
};
class MsgAuthent {
public:
enum {
_Query = 0,
_DEA = 1,
_TDEA128 = 2,
_TDEA192 = 3,
_EncryptedDEA = 9,
_EncryptedDEA128 = 10,
_EncryptedDEA192 = 11,
_AES128 = 18,
_AES192 = 19,
_AES256 = 20,
_EnccryptedAES128 = 26,
_EnccryptedAES192 = 27,
_EnccryptedAES256 = 28,
_featureBits = 128,
_Query_parmBlk = 16,
_DEA_parmBlk = 16,
_TDEA128_parmBlk = 24,
_TDEA192_parmBlk = 32,
_EncryptedDEA_parmBlk = 40,
_EncryptedDEA128_parmBlk = 48,
_EncryptedDEA192_parmBlk = 56,
_AES128_parmBlk = 32,
_AES192_parmBlk = 40,
_AES256_parmBlk = 48,
_EnccryptedAES128_parmBlk = 64,
_EnccryptedAES192_parmBlk = 72,
_EnccryptedAES256_parmBlk = 80,
_Query_dataBlk = 0,
_DEA_dataBlk = 8,
_TDEA128_dataBlk = 8,
_TDEA192_dataBlk = 8,
_EncryptedDEA_dataBlk = 8,
_EncryptedDEA128_dataBlk = 8,
_EncryptedDEA192_dataBlk = 8,
_AES128_dataBlk = 16,
_AES192_dataBlk = 16,
_AES256_dataBlk = 16,
_EnccryptedAES128_dataBlk = 16,
_EnccryptedAES192_dataBlk = 16,
_EnccryptedAES256_dataBlk = 16
};
};
// Initialization
static void initialize();
static void print_features();
static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
// CPU feature query functions
static bool has_StoreFacilityListExtended() { return (_features[0] & StoreFacilityListExtendedMask) == StoreFacilityListExtendedMask; }
static bool has_Crypto() { return (_features[0] & CryptoFacilityMask) == CryptoFacilityMask; }
static bool has_ETF2() { return (_features[0] & ETF2Mask) == ETF2Mask; }
static bool has_ETF3() { return (_features[0] & ETF3Mask) == ETF3Mask; }
static bool has_ETF2Enhancements() { return (_features[0] & ETF2EnhancementMask) == ETF2EnhancementMask; }
static bool has_ETF3Enhancements() { return (_features[0] & ETF3EnhancementMask) == ETF3EnhancementMask; }
static bool has_ParsingEnhancements() { return (_features[0] & ParsingEnhancementsMask) == ParsingEnhancementsMask; }
static bool has_long_displacement() { return (_features[0] & LongDispFacilityMask) == LongDispFacilityMask; }
static bool has_long_displacement_fast() { return (_features[0] & LongDispFacilityHighPerfMask) == LongDispFacilityHighPerfMask; }
static bool has_extended_immediate() { return (_features[0] & ExtImmedFacilityMask) == ExtImmedFacilityMask; }
static bool has_StoreClockFast() { return (_features[0] & StoreClockFastMask) == StoreClockFastMask; }
static bool has_ExtractCPUtime() { return (_features[0] & ExtractCPUTimeMask) == ExtractCPUTimeMask; }
static bool has_CompareSwapStore() { return (_features[0] & CompareSwapStoreMask) == CompareSwapStoreMask; }
static bool has_HFPMultiplyAndAdd() { return (_features[0] & HFPMultiplyAndAddMask) == HFPMultiplyAndAddMask; }
static bool has_HFPUnnormalized() { return (_features[0] & HFPUnnormalizedMask) == HFPUnnormalizedMask; }
// Make sure we don't run on anything older ...
static bool has_GnrlInstrExtensions() { guarantee((_features[0] & GnrlInstrExtFacilityMask) == GnrlInstrExtFacilityMask, "We no longer support CPUs older than z10."); return true; }
static bool has_CompareBranch() { return has_GnrlInstrExtensions() && is_z10(); } // Only z10 benefits from these.
static bool has_CompareTrap() { return has_GnrlInstrExtensions(); }
static bool has_RelativeLoadStore() { return has_GnrlInstrExtensions(); }
static bool has_MultiplySingleImm32() { return has_GnrlInstrExtensions(); }
static bool has_Prefetch() { return has_GnrlInstrExtensions() && (AllocatePrefetchStyle > 0); }
static bool has_PrefetchRaw() { return has_GnrlInstrExtensions(); }
static bool has_MoveImmToMem() { return has_GnrlInstrExtensions(); }
static bool has_ExtractCPUAttributes() { return has_GnrlInstrExtensions(); }
static bool has_ExecuteExtensions() { return (_features[0] & ExecuteExtensionsMask) == ExecuteExtensionsMask; }
// Memory-immediate arithmetic instructions. There is no performance penalty in using them.
// Moreover, these memory-immediate instructions are quasi-atomic (>99.99%) on z10
// and 100% atomic from z196 onwards, thanks to the operand serialization introduced with z196.
static bool has_MemWithImmALUOps() { return has_GnrlInstrExtensions(); }
static bool has_AtomicMemWithImmALUOps() { return has_MemWithImmALUOps() && has_InterlockedAccessV1(); }
static bool has_FPExtensions() { return (_features[0] & FPExtensionsMask) == FPExtensionsMask; }
static bool has_FPSupportEnhancements() { return (_features[0] & FPSupportEnhancementsMask) == FPSupportEnhancementsMask; }
static bool has_DecimalFloatingPoint() { return (_features[0] & DecimalFloatingPointMask) == DecimalFloatingPointMask; }
static bool has_InterlockedAccessV1() { return (_features[0] & InterlockedAccess1Mask) == InterlockedAccess1Mask; }
static bool has_LoadAndALUAtomicV1() { return (_features[0] & InterlockedAccess1Mask) == InterlockedAccess1Mask; }
static bool has_PopCount() { return (_features[0] & PopulationCountMask) == PopulationCountMask; }
static bool has_LoadStoreConditional() { return (_features[0] & LoadStoreConditionalMask) == LoadStoreConditionalMask; }
static bool has_HighWordInstr() { return (_features[0] & HighWordMask) == HighWordMask; }
static bool has_FastSync() { return (_features[0] & FastBCRSerializationMask) == FastBCRSerializationMask; }
static bool has_DistinctOpnds() { return (_features[0] & DistinctOpndsMask) == DistinctOpndsMask; }
static bool has_CryptoExt3() { return (_features[1] & CryptoExtension3Mask) == CryptoExtension3Mask; }
static bool has_CryptoExt4() { return (_features[1] & CryptoExtension4Mask) == CryptoExtension4Mask; }
static bool has_DFPZonedConversion() { return (_features[0] & DFPZonedConversionMask) == DFPZonedConversionMask; }
static bool has_DFPPackedConversion() { return (_features[1] & DFPPackedConversionMask) == DFPPackedConversionMask; }
static bool has_MiscInstrExt() { return (_features[0] & MiscInstrExtMask) == MiscInstrExtMask; }
static bool has_ExecutionHint() { return (_features[0] & ExecutionHintMask) == ExecutionHintMask; }
static bool has_LoadAndTrap() { return (_features[0] & LoadAndTrapMask) == LoadAndTrapMask; }
static bool has_ProcessorAssist() { return (_features[0] & ProcessorAssistMask) == ProcessorAssistMask; }
static bool has_InterlockedAccessV2() { return (_features[0] & InterlockedAccess2Mask) == InterlockedAccess2Mask; }
static bool has_LoadAndALUAtomicV2() { return (_features[0] & InterlockedAccess2Mask) == InterlockedAccess2Mask; }
static bool has_TxMem() { return ((_features[1] & TransactionalExecutionMask) == TransactionalExecutionMask) &&
((_features[0] & ConstrainedTxExecutionMask) == ConstrainedTxExecutionMask); }
static bool has_CryptoExt5() { return (_features[0] & CryptoExtension5Mask) == CryptoExtension5Mask; }
static bool has_LoadStoreConditional2() { return (_features[0] & LoadStoreConditional2Mask) == LoadStoreConditional2Mask; }
static bool has_VectorFacility() { return (_features[2] & VectorFacilityMask) == VectorFacilityMask; }
static bool has_TestFeatureImpl() { return (_features[0] & TestFeature1ImplMask) == TestFeature1ImplMask; }
static bool has_TestFeature1Impl() { return (_features[0] & TestFeature1ImplMask) == TestFeature1ImplMask; }
static bool has_TestFeature2Impl() { return (_features[0] & TestFeature2ImplMask) == TestFeature2ImplMask; }
static bool has_TestFeature4Impl() { return (_features[0] & TestFeature4ImplMask) == TestFeature4ImplMask; }
static bool has_TestFeature8Impl() { return (_features[0] & TestFeature8ImplMask) == TestFeature8ImplMask; }
static bool has_TestFeaturesImpl() { return has_TestFeature1Impl() || has_TestFeature2Impl() || has_TestFeature4Impl() || has_TestFeature8Impl(); }
// Crypto features query functions.
static bool has_Crypto_AES128() { return has_Crypto() && test_feature_bit(&_cipher_features[0], Cipher::_AES128, Cipher::_featureBits); }
static bool has_Crypto_AES192() { return has_Crypto() && test_feature_bit(&_cipher_features[0], Cipher::_AES192, Cipher::_featureBits); }
static bool has_Crypto_AES256() { return has_Crypto() && test_feature_bit(&_cipher_features[0], Cipher::_AES256, Cipher::_featureBits); }
static bool has_Crypto_AES() { return has_Crypto_AES128() || has_Crypto_AES192() || has_Crypto_AES256(); }
static bool has_Crypto_SHA1() { return has_Crypto() && test_feature_bit(&_msgdigest_features[0], MsgDigest::_SHA1, MsgDigest::_featureBits); }
static bool has_Crypto_SHA256() { return has_Crypto() && test_feature_bit(&_msgdigest_features[0], MsgDigest::_SHA256, MsgDigest::_featureBits); }
static bool has_Crypto_SHA512() { return has_Crypto() && test_feature_bit(&_msgdigest_features[0], MsgDigest::_SHA512, MsgDigest::_featureBits); }
static bool has_Crypto_GHASH() { return has_Crypto() && test_feature_bit(&_msgdigest_features[0], MsgDigest::_GHASH, MsgDigest::_featureBits); }
static bool has_Crypto_SHA() { return has_Crypto_SHA1() || has_Crypto_SHA256() || has_Crypto_SHA512() || has_Crypto_GHASH(); }
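// Typical use (editor's sketch; UseAES is the usual HotSpot flag): callers
// gate intrinsic setup on these queries, e.g.
//   if (VM_Version::has_Crypto_AES() && UseAES) {
//     // install the AES stub routines
//   }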
// CPU feature setters (to force model-specific behaviour). Test/debugging only.
static void set_has_TestFeature1Impl() { _features[0] |= TestFeature1ImplMask; }
static void set_has_TestFeature2Impl() { _features[0] |= TestFeature2ImplMask; }
static void set_has_TestFeature4Impl() { _features[0] |= TestFeature4ImplMask; }
static void set_has_TestFeature8Impl() { _features[0] |= TestFeature8ImplMask; }
static void set_has_DecimalFloatingPoint() { _features[0] |= DecimalFloatingPointMask; }
static void set_has_FPSupportEnhancements() { _features[0] |= FPSupportEnhancementsMask; }
static void set_has_ExecuteExtensions() { _features[0] |= ExecuteExtensionsMask; }
static void set_has_MemWithImmALUOps() { _features[0] |= GnrlInstrExtFacilityMask; }
static void set_has_MoveImmToMem() { _features[0] |= GnrlInstrExtFacilityMask; }
static void set_has_Prefetch() { _features[0] |= GnrlInstrExtFacilityMask; }
static void set_has_MultiplySingleImm32() { _features[0] |= GnrlInstrExtFacilityMask; }
static void set_has_CompareBranch() { _features[0] |= GnrlInstrExtFacilityMask; }
static void set_has_CompareTrap() { _features[0] |= GnrlInstrExtFacilityMask; }
static void set_has_RelativeLoadStore() { _features[0] |= GnrlInstrExtFacilityMask; }
static void set_has_GnrlInstrExtensions() { _features[0] |= GnrlInstrExtFacilityMask; }
static void set_has_CompareSwapStore() { _features[0] |= CompareSwapStoreMask; }
static void set_has_HFPMultiplyAndAdd() { _features[0] |= HFPMultiplyAndAddMask; }
static void set_has_HFPUnnormalized() { _features[0] |= HFPUnnormalizedMask; }
static void set_has_ExtractCPUtime() { _features[0] |= ExtractCPUTimeMask; }
static void set_has_StoreClockFast() { _features[0] |= StoreClockFastMask; }
static void set_has_extended_immediate() { _features[0] |= ExtImmedFacilityMask; }
static void set_has_long_displacement_fast() { _features[0] |= LongDispFacilityHighPerfMask; }
static void set_has_long_displacement() { _features[0] |= LongDispFacilityMask; }
static void set_has_ETF2() { _features[0] |= ETF2Mask; }
static void set_has_ETF3() { _features[0] |= ETF3Mask; }
static void set_has_ETF2Enhancements() { _features[0] |= ETF2EnhancementMask; }
static void set_has_ETF3Enhancements() { _features[0] |= ETF3EnhancementMask; }
static void set_has_Crypto() { _features[0] |= CryptoFacilityMask; }
static void set_has_StoreFacilityListExtended() { _features[0] |= StoreFacilityListExtendedMask; }
static void set_has_InterlockedAccessV1() { _features[0] |= InterlockedAccess1Mask; }
static void set_has_PopCount() { _features[0] |= PopulationCountMask; }
static void set_has_LoadStoreConditional() { _features[0] |= LoadStoreConditionalMask; }
static void set_has_HighWordInstr() { _features[0] |= HighWordMask; }
static void set_has_FastSync() { _features[0] |= FastBCRSerializationMask; }
static void set_has_DistinctOpnds() { _features[0] |= DistinctOpndsMask; }
static void set_has_FPExtensions() { _features[0] |= FPExtensionsMask; }
static void set_has_MiscInstrExt() { _features[0] |= MiscInstrExtMask; }
static void set_has_ProcessorAssist() { _features[0] |= ProcessorAssistMask; }
static void set_has_InterlockedAccessV2() { _features[0] |= InterlockedAccess2Mask; }
static void set_has_LoadAndALUAtomicV2() { _features[0] |= InterlockedAccess2Mask; }
static void set_has_TxMem() { _features[0] |= ConstrainedTxExecutionMask; _features[1] |= TransactionalExecutionMask; }
static void set_has_CryptoExt3() { _features[1] |= CryptoExtension3Mask; }
static void set_has_CryptoExt4() { _features[1] |= CryptoExtension4Mask; }
static void set_has_LoadStoreConditional2() { _features[0] |= LoadStoreConditional2Mask; }
static void set_has_CryptoExt5() { _features[0] |= CryptoExtension5Mask; }
static void set_has_VectorFacility() { _features[2] |= VectorFacilityMask; }
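// Note: the setters from set_has_MemWithImmALUOps() through
// set_has_GnrlInstrExtensions() all map onto the same
// GnrlInstrExtFacilityMask; those capabilities belong to one facility
// and cannot be forced independently.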
// Assembler testing.
static void allow_all();
static void revert();
// Generate trapping instructions into C code.
// Sometimes helpful for debugging.
static unsigned long z_SIGILL();
static unsigned long z_SIGSEGV();
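// Example (sketch): calling z_SIGILL() at a point of interest yields a
// reproducible illegal-instruction trap right there, which is handy when a
// debugger breakpoint is impractical.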
};
#endif // CPU_S390_VM_VM_VERSION_S390_HPP

@ -0,0 +1,49 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "code/vmreg.hpp"
void VMRegImpl::set_regName() {
// Each 64-bit register occupies two adjacent VMReg slots, so every name is entered twice.
Register reg = ::as_Register(0);
int i;
for (i = 0; i < ConcreteRegisterImpl::max_gpr;) {
regName[i++] = reg->name();
regName[i++] = reg->name();
reg = reg->successor();
}
FloatRegister freg = ::as_FloatRegister(0);
for (; i < ConcreteRegisterImpl::max_fpr;) {
regName[i++] = freg->name();
regName[i++] = freg->name();
freg = freg->successor();
}
for (; i < ConcreteRegisterImpl::number_of_registers; i++) {
regName[i] = "NON-GPR-XMM";
}
}
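// Resulting table layout (derived from the loops above): each 64-bit register
// occupies two adjacent VMReg slots carrying the same name, e.g.
//   regName[0] == regName[1] == ::as_Register(0)->name()
//   regName[2] == regName[3] == ::as_Register(1)->name()
// with the float registers following analogously from index max_gpr.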

@ -0,0 +1,53 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_S390_VM_VMREG_S390_HPP
#define CPU_S390_VM_VMREG_S390_HPP
inline bool is_Register() {
return (unsigned int)value() < (unsigned int)ConcreteRegisterImpl::max_gpr;
}
inline bool is_FloatRegister() {
return value() >= ConcreteRegisterImpl::max_gpr &&
value() < ConcreteRegisterImpl::max_fpr;
}
inline Register as_Register() {
assert(is_Register() && is_even(value()), "even-aligned GPR name");
return ::as_Register(value() >> 1);
}
inline FloatRegister as_FloatRegister() {
assert(is_FloatRegister() && is_even(value()), "even-aligned FPR name");
return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) >> 1);
}
inline bool is_concrete() {
assert(is_reg(), "must be");
return is_even(value());
}
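// Round-trip sketch (assuming the usual encoding in which GPR k maps to the
// even slot 2k, matching the >> 1 conversions above):
//   Register r5 = ::as_Register(5);
//   VMReg    v  = r5->as_VMReg();       // slot 10: even, hence concrete
//   assert(v->as_Register() == r5, "round trip");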
#endif // CPU_S390_VM_VMREG_S390_HPP
