8019972: PPC64 (part 9): platform files for interpreter only VM
With this change the HotSpot core build works on Linux/PPC64. The VM successfully executes simple test programs. Reviewed-by: kvn
This commit is contained in:
parent
f4c4fd7903
commit
cf98cb05ca
hotspot/src
cpu/ppc/vm
assembler_ppc.cppassembler_ppc.hppassembler_ppc.inline.hppbytecodeInterpreter_ppc.hppbytecodeInterpreter_ppc.inline.hppbytecodes_ppc.cppbytecodes_ppc.hppbytes_ppc.hppcodeBuffer_ppc.hppcompiledIC_ppc.cppcopy_ppc.hppcppInterpreterGenerator_ppc.hppcppInterpreter_ppc.cppcppInterpreter_ppc.hppdebug_ppc.cppdepChecker_ppc.hppdisassembler_ppc.hppframe_ppc.cppframe_ppc.hppframe_ppc.inline.hppglobalDefinitions_ppc.hppglobals_ppc.hppicBuffer_ppc.cppicache_ppc.cppicache_ppc.hppinterp_masm_ppc_64.cppinterp_masm_ppc_64.hppinterpreterGenerator_ppc.hppinterpreterRT_ppc.cppinterpreterRT_ppc.hppinterpreter_ppc.cppinterpreter_ppc.hppjavaFrameAnchor_ppc.hppjniFastGetField_ppc.cppjniTypes_ppc.hppjni_ppc.hmacroAssembler_ppc.cppmacroAssembler_ppc.hppmacroAssembler_ppc.inline.hppmetaspaceShared_ppc.cppmethodHandles_ppc.cppmethodHandles_ppc.hppnativeInst_ppc.cppnativeInst_ppc.hppregisterMap_ppc.hppregister_definitions_ppc.cppregister_ppc.cppregister_ppc.hpprelocInfo_ppc.cpprelocInfo_ppc.hppsharedRuntime_ppc.cppstubGenerator_ppc.cppstubRoutines_ppc_64.cppstubRoutines_ppc_64.hppvmStructs_ppc.hppvm_version_ppc.cppvm_version_ppc.hppvmreg_ppc.cppvmreg_ppc.hppvmreg_ppc.inline.hppvtableStubs_ppc_64.cpp
os_cpu/linux_ppc/vm
atomic_linux_ppc.inline.hppglobals_linux_ppc.hpporderAccess_linux_ppc.inline.hppos_linux_ppc.cppos_linux_ppc.hppprefetch_linux_ppc.inline.hppthreadLS_linux_ppc.cppthreadLS_linux_ppc.hppthread_linux_ppc.cppthread_linux_ppc.hppvmStructs_linux_ppc.hpp
share/vm/runtime
699
hotspot/src/cpu/ppc/vm/assembler_ppc.cpp
Normal file
699
hotspot/src/cpu/ppc/vm/assembler_ppc.cpp
Normal file
@ -0,0 +1,699 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/assembler.hpp"
|
||||
#include "asm/assembler.inline.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/cardTableModRefBS.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/interfaceSupport.hpp"
|
||||
#include "runtime/objectMonitor.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
|
||||
#include "gc_implementation/g1/heapRegion.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) // nothing
|
||||
#else
|
||||
#define BLOCK_COMMENT(str) block_comment(str)
|
||||
#endif
|
||||
|
||||
// Byte used to pad code buffers. 0x00000000 is an illegal PPC
// instruction, so accidentally executed padding traps immediately.
int AbstractAssembler::code_fill_byte() {
  return 0x00; // illegal instruction 0x00000000
}
|
||||
|
||||
// Disassemble and print a single instruction word. Not implemented
// for PPC yet.
void Assembler::print_instruction(int inst) {
  Unimplemented();
}
|
||||
|
||||
// Patch instruction `inst' at offset `inst_pos' to refer to
|
||||
// `dest_pos' and return the resulting instruction. We should have
|
||||
// pcs, not offsets, but since all is relative, it will work out fine.
|
||||
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
|
||||
int m = 0; // mask for displacement field
|
||||
int v = 0; // new value for displacement field
|
||||
|
||||
switch (inv_op_ppc(inst)) {
|
||||
case b_op: m = li(-1); v = li(disp(dest_pos, inst_pos)); break;
|
||||
case bc_op: m = bd(-1); v = bd(disp(dest_pos, inst_pos)); break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
return inst & ~m | v;
|
||||
}
|
||||
|
||||
// Return the offset, relative to _code_begin, of the destination of
|
||||
// the branch inst at offset pos.
|
||||
int Assembler::branch_destination(int inst, int pos) {
|
||||
int r = 0;
|
||||
switch (inv_op_ppc(inst)) {
|
||||
case b_op: r = bxx_destination_offset(inst, pos); break;
|
||||
case bc_op: r = inv_bd_field(inst, pos); break;
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
// Low-level andi-one-instruction-macro.
|
||||
void Assembler::andi(Register a, Register s, const int ui16) {
|
||||
assert(is_uimm(ui16, 16), "must be 16-bit unsigned immediate");
|
||||
if (is_power_of_2_long(((jlong) ui16)+1)) {
|
||||
// pow2minus1
|
||||
clrldi(a, s, 64-log2_long((((jlong) ui16)+1)));
|
||||
} else if (is_power_of_2_long((jlong) ui16)) {
|
||||
// pow2
|
||||
rlwinm(a, s, 0, 31-log2_long((jlong) ui16), 31-log2_long((jlong) ui16));
|
||||
} else if (is_power_of_2_long((jlong)-ui16)) {
|
||||
// negpow2
|
||||
clrrdi(a, s, log2_long((jlong)-ui16));
|
||||
} else {
|
||||
andi_(a, s, ui16);
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterOrConstant version.
|
||||
void Assembler::ld(Register d, RegisterOrConstant roc, Register s1) {
|
||||
if (roc.is_constant()) {
|
||||
if (s1 == noreg) {
|
||||
int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
|
||||
Assembler::ld(d, simm16_rest, d);
|
||||
} else if (is_simm(roc.as_constant(), 16)) {
|
||||
Assembler::ld(d, roc.as_constant(), s1);
|
||||
} else {
|
||||
load_const_optimized(d, roc.as_constant());
|
||||
Assembler::ldx(d, d, s1);
|
||||
}
|
||||
} else {
|
||||
if (s1 == noreg)
|
||||
Assembler::ld(d, 0, roc.as_register());
|
||||
else
|
||||
Assembler::ldx(d, roc.as_register(), s1);
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterOrConstant version: load word algebraic from [s1 + roc].
void Assembler::lwa(Register d, RegisterOrConstant roc, Register s1) {
  if (!roc.is_constant()) {
    if (s1 == noreg) {
      Assembler::lwa(d, 0, roc.as_register());
    } else {
      Assembler::lwax(d, roc.as_register(), s1);
    }
    return;
  }
  if (s1 == noreg) {
    // Fold the low 16 bits of the constant address into the displacement.
    int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
    Assembler::lwa(d, simm16_rest, d);
  } else if (is_simm(roc.as_constant(), 16)) {
    Assembler::lwa(d, roc.as_constant(), s1);
  } else {
    // Large offset: materialize in d, use indexed form.
    load_const_optimized(d, roc.as_constant());
    Assembler::lwax(d, d, s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: load word zero-extended from [s1 + roc].
void Assembler::lwz(Register d, RegisterOrConstant roc, Register s1) {
  if (!roc.is_constant()) {
    if (s1 == noreg) {
      Assembler::lwz(d, 0, roc.as_register());
    } else {
      Assembler::lwzx(d, roc.as_register(), s1);
    }
    return;
  }
  if (s1 == noreg) {
    // Fold the low 16 bits of the constant address into the displacement.
    int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
    Assembler::lwz(d, simm16_rest, d);
  } else if (is_simm(roc.as_constant(), 16)) {
    Assembler::lwz(d, roc.as_constant(), s1);
  } else {
    // Large offset: materialize in d, use indexed form.
    load_const_optimized(d, roc.as_constant());
    Assembler::lwzx(d, d, s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: load halfword algebraic from [s1 + roc].
void Assembler::lha(Register d, RegisterOrConstant roc, Register s1) {
  if (!roc.is_constant()) {
    if (s1 == noreg) {
      Assembler::lha(d, 0, roc.as_register());
    } else {
      Assembler::lhax(d, roc.as_register(), s1);
    }
    return;
  }
  if (s1 == noreg) {
    // Fold the low 16 bits of the constant address into the displacement.
    int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
    Assembler::lha(d, simm16_rest, d);
  } else if (is_simm(roc.as_constant(), 16)) {
    Assembler::lha(d, roc.as_constant(), s1);
  } else {
    // Large offset: materialize in d, use indexed form.
    load_const_optimized(d, roc.as_constant());
    Assembler::lhax(d, d, s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: load halfword zero-extended from [s1 + roc].
void Assembler::lhz(Register d, RegisterOrConstant roc, Register s1) {
  if (!roc.is_constant()) {
    if (s1 == noreg) {
      Assembler::lhz(d, 0, roc.as_register());
    } else {
      Assembler::lhzx(d, roc.as_register(), s1);
    }
    return;
  }
  if (s1 == noreg) {
    // Fold the low 16 bits of the constant address into the displacement.
    int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
    Assembler::lhz(d, simm16_rest, d);
  } else if (is_simm(roc.as_constant(), 16)) {
    Assembler::lhz(d, roc.as_constant(), s1);
  } else {
    // Large offset: materialize in d, use indexed form.
    load_const_optimized(d, roc.as_constant());
    Assembler::lhzx(d, d, s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: load byte zero-extended from [s1 + roc].
void Assembler::lbz(Register d, RegisterOrConstant roc, Register s1) {
  if (!roc.is_constant()) {
    if (s1 == noreg) {
      Assembler::lbz(d, 0, roc.as_register());
    } else {
      Assembler::lbzx(d, roc.as_register(), s1);
    }
    return;
  }
  if (s1 == noreg) {
    // Fold the low 16 bits of the constant address into the displacement.
    int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
    Assembler::lbz(d, simm16_rest, d);
  } else if (is_simm(roc.as_constant(), 16)) {
    Assembler::lbz(d, roc.as_constant(), s1);
  } else {
    // Large offset: materialize in d, use indexed form.
    load_const_optimized(d, roc.as_constant());
    Assembler::lbzx(d, d, s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: store doubleword d to [s1 + roc].
// A tmp register is required whenever a large constant offset must be
// materialized (d holds the value to store and cannot be clobbered).
void Assembler::std(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (!roc.is_constant()) {
    if (s1 == noreg) {
      Assembler::std(d, 0, roc.as_register());
    } else {
      Assembler::stdx(d, roc.as_register(), s1);
    }
    return;
  }
  if (s1 == noreg) {
    guarantee(tmp != noreg, "Need tmp reg to encode large constants");
    // Build the address in tmp, folding the low 16 bits into the displacement.
    int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
    Assembler::std(d, simm16_rest, tmp);
  } else if (is_simm(roc.as_constant(), 16)) {
    Assembler::std(d, roc.as_constant(), s1);
  } else {
    guarantee(tmp != noreg, "Need tmp reg to encode large constants");
    // Large offset: materialize it in tmp, then use the indexed form.
    load_const_optimized(tmp, roc.as_constant());
    Assembler::stdx(d, tmp, s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: store word d to [s1 + roc].
// tmp is required when a large constant offset must be materialized.
void Assembler::stw(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (!roc.is_constant()) {
    if (s1 == noreg) {
      Assembler::stw(d, 0, roc.as_register());
    } else {
      Assembler::stwx(d, roc.as_register(), s1);
    }
    return;
  }
  if (s1 == noreg) {
    guarantee(tmp != noreg, "Need tmp reg to encode large constants");
    // Build the address in tmp, folding the low 16 bits into the displacement.
    int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
    Assembler::stw(d, simm16_rest, tmp);
  } else if (is_simm(roc.as_constant(), 16)) {
    Assembler::stw(d, roc.as_constant(), s1);
  } else {
    guarantee(tmp != noreg, "Need tmp reg to encode large constants");
    // Large offset: materialize it in tmp, then use the indexed form.
    load_const_optimized(tmp, roc.as_constant());
    Assembler::stwx(d, tmp, s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: store halfword d to [s1 + roc].
// tmp is required when a large constant offset must be materialized.
void Assembler::sth(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (!roc.is_constant()) {
    if (s1 == noreg) {
      Assembler::sth(d, 0, roc.as_register());
    } else {
      Assembler::sthx(d, roc.as_register(), s1);
    }
    return;
  }
  if (s1 == noreg) {
    guarantee(tmp != noreg, "Need tmp reg to encode large constants");
    // Build the address in tmp, folding the low 16 bits into the displacement.
    int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
    Assembler::sth(d, simm16_rest, tmp);
  } else if (is_simm(roc.as_constant(), 16)) {
    Assembler::sth(d, roc.as_constant(), s1);
  } else {
    guarantee(tmp != noreg, "Need tmp reg to encode large constants");
    // Large offset: materialize it in tmp, then use the indexed form.
    load_const_optimized(tmp, roc.as_constant());
    Assembler::sthx(d, tmp, s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: store byte d to [s1 + roc].
// tmp is required when a large constant offset must be materialized.
void Assembler::stb(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (!roc.is_constant()) {
    if (s1 == noreg) {
      Assembler::stb(d, 0, roc.as_register());
    } else {
      Assembler::stbx(d, roc.as_register(), s1);
    }
    return;
  }
  if (s1 == noreg) {
    guarantee(tmp != noreg, "Need tmp reg to encode large constants");
    // Build the address in tmp, folding the low 16 bits into the displacement.
    int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
    Assembler::stb(d, simm16_rest, tmp);
  } else if (is_simm(roc.as_constant(), 16)) {
    Assembler::stb(d, roc.as_constant(), s1);
  } else {
    guarantee(tmp != noreg, "Need tmp reg to encode large constants");
    // Large offset: materialize it in tmp, then use the indexed form.
    load_const_optimized(tmp, roc.as_constant());
    Assembler::stbx(d, tmp, s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: d = s1 + roc. A constant must fit simm16.
void Assembler::add(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    const intptr_t c = roc.as_constant();
    assert(is_simm(c, 16), "too big");
    addi(d, s1, (int)c);
  } else {
    add(d, roc.as_register(), s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: d = s1 - roc. A constant is handled as
// addi with the negated value, so -c must fit simm16.
void Assembler::subf(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    const intptr_t c = roc.as_constant();
    assert(is_simm(-c, 16), "too big");
    addi(d, s1, (int)-c);
  } else {
    subf(d, roc.as_register(), s1);
  }
}
|
||||
|
||||
// RegisterOrConstant version: compare s1 with roc into CR field d.
void Assembler::cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    const intptr_t c = roc.as_constant();
    assert(is_simm(c, 16), "too big");
    cmpdi(d, s1, (int)c);
  } else {
    cmpd(d, roc.as_register(), s1);
  }
}
|
||||
|
||||
// Load a 64 bit constant. Patchable.
|
||||
void Assembler::load_const(Register d, long x, Register tmp) {
|
||||
// 64-bit value: x = xa xb xc xd
|
||||
int xa = (x >> 48) & 0xffff;
|
||||
int xb = (x >> 32) & 0xffff;
|
||||
int xc = (x >> 16) & 0xffff;
|
||||
int xd = (x >> 0) & 0xffff;
|
||||
if (tmp == noreg) {
|
||||
Assembler::lis( d, (int)(short)xa);
|
||||
Assembler::ori( d, d, (unsigned int)xb);
|
||||
Assembler::sldi(d, d, 32);
|
||||
Assembler::oris(d, d, (unsigned int)xc);
|
||||
Assembler::ori( d, d, (unsigned int)xd);
|
||||
} else {
|
||||
// exploit instruction level parallelism if we have a tmp register
|
||||
assert_different_registers(d, tmp);
|
||||
Assembler::lis(tmp, (int)(short)xa);
|
||||
Assembler::lis(d, (int)(short)xc);
|
||||
Assembler::ori(tmp, tmp, (unsigned int)xb);
|
||||
Assembler::ori(d, d, (unsigned int)xd);
|
||||
Assembler::insrdi(d, tmp, 32, 0);
|
||||
}
|
||||
}
|
||||
|
||||
// Load a 64 bit constant, optimized, not identifyable.
// Tmp can be used to increase ILP. Set return_simm16_rest=true to get a
// 16 bit immediate offset.
//
// Emits the shortest sequence for the given value (1..5 instructions).
// Returns 0, or — when return_simm16_rest is set — the low 16 bits of x
// that were NOT added into d, to be folded into a following
// instruction's 16-bit displacement field. The sequence length depends
// on the value, so the result is NOT patchable (unlike load_const).
int Assembler::load_const_optimized(Register d, long x, Register tmp, bool return_simm16_rest) {
  // Avoid accidentally trying to use R0 for indexed addressing.
  assert(d != R0, "R0 not allowed");
  assert_different_registers(d, tmp);

  short xa, xb, xc, xd; // Four 16-bit chunks of const.
  long rem = x; // Remaining part of const.

  // Chunks are taken as SIGNED 16-bit values (addi/lis sign-extend), so
  // when a chunk's top bit is set the next-higher chunk is incremented
  // by 1 to compensate for the sign extension.
  xd = rem & 0xFFFF; // Lowest 16-bit chunk.
  rem = (rem >> 16) + ((unsigned short)xd >> 15); // Compensation for sign extend.

  if (rem == 0) { // opt 1: simm16
    li(d, xd);
    return 0;
  }

  xc = rem & 0xFFFF; // Next 16-bit chunk.
  rem = (rem >> 16) + ((unsigned short)xc >> 15); // Compensation for sign extend.

  if (rem == 0) { // opt 2: simm32
    lis(d, xc);
  } else { // High 32 bits needed.

    if (tmp != noreg) { // opt 3: We have a temp reg.
      // Upper half built in tmp, lower half in d, merged with insrdi.
      // No carry propagation between xc and higher chunks here (use logical instructions).
      xa = (x >> 48) & 0xffff;
      xb = (x >> 32) & 0xffff; // No sign compensation, we use lis+ori or li to allow usage of R0.
      bool load_xa = (xa != 0) || (xb < 0);
      bool return_xd = false;

      if (load_xa) lis(tmp, xa);
      if (xc) lis(d, xc);
      if (load_xa) {
        if (xb) ori(tmp, tmp, xb); // No addi, we support tmp == R0.
      } else {
        li(tmp, xb); // non-negative
      }
      if (xc) {
        if (return_simm16_rest && xd >= 0) { return_xd = true; } // >= 0 to avoid carry propagation after insrdi/rldimi.
        else if (xd) { addi(d, d, xd); }
      } else {
        li(d, xd);
      }
      insrdi(d, tmp, 32, 0);
      return return_xd ? xd : 0; // non-negative
    }

    // No temp reg: build sequentially in d (longer dependency chain).
    xb = rem & 0xFFFF; // Next 16-bit chunk.
    rem = (rem >> 16) + ((unsigned short)xb >> 15); // Compensation for sign extend.

    xa = rem & 0xFFFF; // Highest 16-bit chunk.

    // opt 4: avoid adding 0
    if (xa) { // Highest 16-bit needed?
      lis(d, xa);
      if (xb) addi(d, d, xb);
    } else {
      li(d, xb);
    }
    sldi(d, d, 32);
    if (xc) addis(d, d, xc);
  }

  // opt 5: Return offset to be inserted into following instruction.
  if (return_simm16_rest) return xd;

  if (xd) addi(d, d, xd);
  return 0;
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// Test of ppc assembler.
|
||||
void Assembler::test_asm() {
|
||||
// PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
|
||||
addi( R0, R1, 10);
|
||||
addis( R5, R2, 11);
|
||||
addic_( R3, R31, 42);
|
||||
subfic( R21, R12, 2112);
|
||||
add( R3, R2, R1);
|
||||
add_( R11, R22, R30);
|
||||
subf( R7, R6, R5);
|
||||
subf_( R8, R9, R4);
|
||||
addc( R11, R12, R13);
|
||||
addc_( R14, R14, R14);
|
||||
subfc( R15, R16, R17);
|
||||
subfc_( R18, R20, R19);
|
||||
adde( R20, R22, R24);
|
||||
adde_( R29, R27, R26);
|
||||
subfe( R28, R1, R0);
|
||||
subfe_( R21, R11, R29);
|
||||
neg( R21, R22);
|
||||
neg_( R13, R23);
|
||||
mulli( R0, R11, -31);
|
||||
mulld( R1, R18, R21);
|
||||
mulld_( R2, R17, R22);
|
||||
mullw( R3, R16, R23);
|
||||
mullw_( R4, R15, R24);
|
||||
divd( R5, R14, R25);
|
||||
divd_( R6, R13, R26);
|
||||
divw( R7, R12, R27);
|
||||
divw_( R8, R11, R28);
|
||||
|
||||
li( R3, -4711);
|
||||
|
||||
// PPC 1, section 3.3.9, Fixed-Point Compare Instructions
|
||||
cmpi( CCR7, 0, R27, 4711);
|
||||
cmp( CCR0, 1, R14, R11);
|
||||
cmpli( CCR5, 1, R17, 45);
|
||||
cmpl( CCR3, 0, R9, R10);
|
||||
|
||||
cmpwi( CCR7, R27, 4711);
|
||||
cmpw( CCR0, R14, R11);
|
||||
cmplwi( CCR5, R17, 45);
|
||||
cmplw( CCR3, R9, R10);
|
||||
|
||||
cmpdi( CCR7, R27, 4711);
|
||||
cmpd( CCR0, R14, R11);
|
||||
cmpldi( CCR5, R17, 45);
|
||||
cmpld( CCR3, R9, R10);
|
||||
|
||||
// PPC 1, section 3.3.11, Fixed-Point Logical Instructions
|
||||
andi_( R4, R5, 0xff);
|
||||
andis_( R12, R13, 0x7b51);
|
||||
ori( R1, R4, 13);
|
||||
oris( R3, R5, 177);
|
||||
xori( R7, R6, 51);
|
||||
xoris( R29, R0, 1);
|
||||
andr( R17, R21, R16);
|
||||
and_( R3, R5, R15);
|
||||
orr( R2, R1, R9);
|
||||
or_( R17, R15, R11);
|
||||
xorr( R19, R18, R10);
|
||||
xor_( R31, R21, R11);
|
||||
nand( R5, R7, R3);
|
||||
nand_( R3, R1, R0);
|
||||
nor( R2, R3, R5);
|
||||
nor_( R3, R6, R8);
|
||||
andc( R25, R12, R11);
|
||||
andc_( R24, R22, R21);
|
||||
orc( R20, R10, R12);
|
||||
orc_( R22, R2, R13);
|
||||
|
||||
nop();
|
||||
|
||||
// PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
|
||||
sld( R5, R6, R8);
|
||||
sld_( R3, R5, R9);
|
||||
slw( R2, R1, R10);
|
||||
slw_( R6, R26, R16);
|
||||
srd( R16, R24, R8);
|
||||
srd_( R21, R14, R7);
|
||||
srw( R22, R25, R29);
|
||||
srw_( R5, R18, R17);
|
||||
srad( R7, R11, R0);
|
||||
srad_( R9, R13, R1);
|
||||
sraw( R7, R15, R2);
|
||||
sraw_( R4, R17, R3);
|
||||
sldi( R3, R18, 63);
|
||||
sldi_( R2, R20, 30);
|
||||
slwi( R1, R21, 30);
|
||||
slwi_( R7, R23, 8);
|
||||
srdi( R0, R19, 2);
|
||||
srdi_( R12, R24, 5);
|
||||
srwi( R13, R27, 6);
|
||||
srwi_( R14, R29, 7);
|
||||
sradi( R15, R30, 9);
|
||||
sradi_( R16, R31, 19);
|
||||
srawi( R17, R31, 15);
|
||||
srawi_( R18, R31, 12);
|
||||
|
||||
clrrdi( R3, R30, 5);
|
||||
clrldi( R9, R10, 11);
|
||||
|
||||
rldicr( R19, R20, 13, 15);
|
||||
rldicr_(R20, R20, 16, 14);
|
||||
rldicl( R21, R21, 30, 33);
|
||||
rldicl_(R22, R1, 20, 25);
|
||||
rlwinm( R23, R2, 25, 10, 11);
|
||||
rlwinm_(R24, R3, 12, 13, 14);
|
||||
|
||||
// PPC 1, section 3.3.2 Fixed-Point Load Instructions
|
||||
lwzx( R3, R5, R7);
|
||||
lwz( R11, 0, R1);
|
||||
lwzu( R31, -4, R11);
|
||||
|
||||
lwax( R3, R5, R7);
|
||||
lwa( R31, -4, R11);
|
||||
lhzx( R3, R5, R7);
|
||||
lhz( R31, -4, R11);
|
||||
lhzu( R31, -4, R11);
|
||||
|
||||
|
||||
lhax( R3, R5, R7);
|
||||
lha( R31, -4, R11);
|
||||
lhau( R11, 0, R1);
|
||||
|
||||
lbzx( R3, R5, R7);
|
||||
lbz( R31, -4, R11);
|
||||
lbzu( R11, 0, R1);
|
||||
|
||||
ld( R31, -4, R11);
|
||||
ldx( R3, R5, R7);
|
||||
ldu( R31, -4, R11);
|
||||
|
||||
// PPC 1, section 3.3.3 Fixed-Point Store Instructions
|
||||
stwx( R3, R5, R7);
|
||||
stw( R31, -4, R11);
|
||||
stwu( R11, 0, R1);
|
||||
|
||||
sthx( R3, R5, R7 );
|
||||
sth( R31, -4, R11);
|
||||
sthu( R31, -4, R11);
|
||||
|
||||
stbx( R3, R5, R7);
|
||||
stb( R31, -4, R11);
|
||||
stbu( R31, -4, R11);
|
||||
|
||||
std( R31, -4, R11);
|
||||
stdx( R3, R5, R7);
|
||||
stdu( R31, -4, R11);
|
||||
|
||||
// PPC 1, section 3.3.13 Move To/From System Register Instructions
|
||||
mtlr( R3);
|
||||
mflr( R3);
|
||||
mtctr( R3);
|
||||
mfctr( R3);
|
||||
mtcrf( 0xff, R15);
|
||||
mtcr( R15);
|
||||
mtcrf( 0x03, R15);
|
||||
mtcr( R15);
|
||||
mfcr( R15);
|
||||
|
||||
// PPC 1, section 2.4.1 Branch Instructions
|
||||
Label lbl1, lbl2, lbl3;
|
||||
bind(lbl1);
|
||||
|
||||
b(pc());
|
||||
b(pc() - 8);
|
||||
b(lbl1);
|
||||
b(lbl2);
|
||||
b(lbl3);
|
||||
|
||||
bl(pc() - 8);
|
||||
bl(lbl1);
|
||||
bl(lbl2);
|
||||
|
||||
bcl(4, 10, pc() - 8);
|
||||
bcl(4, 10, lbl1);
|
||||
bcl(4, 10, lbl2);
|
||||
|
||||
bclr( 4, 6, 0);
|
||||
bclrl(4, 6, 0);
|
||||
|
||||
bind(lbl2);
|
||||
|
||||
bcctr( 4, 6, 0);
|
||||
bcctrl(4, 6, 0);
|
||||
|
||||
blt(CCR0, lbl2);
|
||||
bgt(CCR1, lbl2);
|
||||
beq(CCR2, lbl2);
|
||||
bso(CCR3, lbl2);
|
||||
bge(CCR4, lbl2);
|
||||
ble(CCR5, lbl2);
|
||||
bne(CCR6, lbl2);
|
||||
bns(CCR7, lbl2);
|
||||
|
||||
bltl(CCR0, lbl2);
|
||||
bgtl(CCR1, lbl2);
|
||||
beql(CCR2, lbl2);
|
||||
bsol(CCR3, lbl2);
|
||||
bgel(CCR4, lbl2);
|
||||
blel(CCR5, lbl2);
|
||||
bnel(CCR6, lbl2);
|
||||
bnsl(CCR7, lbl2);
|
||||
blr();
|
||||
|
||||
sync();
|
||||
icbi( R1, R2);
|
||||
dcbst(R2, R3);
|
||||
|
||||
// FLOATING POINT instructions ppc.
|
||||
// PPC 1, section 4.6.2 Floating-Point Load Instructions
|
||||
lfs( F1, -11, R3);
|
||||
lfsu(F2, 123, R4);
|
||||
lfsx(F3, R5, R6);
|
||||
lfd( F4, 456, R7);
|
||||
lfdu(F5, 789, R8);
|
||||
lfdx(F6, R10, R11);
|
||||
|
||||
// PPC 1, section 4.6.3 Floating-Point Store Instructions
|
||||
stfs( F7, 876, R12);
|
||||
stfsu( F8, 543, R13);
|
||||
stfsx( F9, R14, R15);
|
||||
stfd( F10, 210, R16);
|
||||
stfdu( F11, 111, R17);
|
||||
stfdx( F12, R18, R19);
|
||||
|
||||
// PPC 1, section 4.6.4 Floating-Point Move Instructions
|
||||
fmr( F13, F14);
|
||||
fmr_( F14, F15);
|
||||
fneg( F16, F17);
|
||||
fneg_( F18, F19);
|
||||
fabs( F20, F21);
|
||||
fabs_( F22, F23);
|
||||
fnabs( F24, F25);
|
||||
fnabs_(F26, F27);
|
||||
|
||||
// PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic
|
||||
// Instructions
|
||||
fadd( F28, F29, F30);
|
||||
fadd_( F31, F0, F1);
|
||||
fadds( F2, F3, F4);
|
||||
fadds_(F5, F6, F7);
|
||||
fsub( F8, F9, F10);
|
||||
fsub_( F11, F12, F13);
|
||||
fsubs( F14, F15, F16);
|
||||
fsubs_(F17, F18, F19);
|
||||
fmul( F20, F21, F22);
|
||||
fmul_( F23, F24, F25);
|
||||
fmuls( F26, F27, F28);
|
||||
fmuls_(F29, F30, F31);
|
||||
fdiv( F0, F1, F2);
|
||||
fdiv_( F3, F4, F5);
|
||||
fdivs( F6, F7, F8);
|
||||
fdivs_(F9, F10, F11);
|
||||
|
||||
// PPC 1, section 4.6.6 Floating-Point Rounding and Conversion
|
||||
// Instructions
|
||||
frsp( F12, F13);
|
||||
fctid( F14, F15);
|
||||
fctidz(F16, F17);
|
||||
fctiw( F18, F19);
|
||||
fctiwz(F20, F21);
|
||||
fcfid( F22, F23);
|
||||
|
||||
// PPC 1, section 4.6.7 Floating-Point Compare Instructions
|
||||
fcmpu( CCR7, F24, F25);
|
||||
|
||||
tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", code()->insts_begin(), code()->insts_end());
|
||||
code()->decode();
|
||||
}
|
||||
#endif // !PRODUCT
|
1963
hotspot/src/cpu/ppc/vm/assembler_ppc.hpp
Normal file
1963
hotspot/src/cpu/ppc/vm/assembler_ppc.hpp
Normal file
File diff suppressed because it is too large
Load Diff
792
hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp
Normal file
792
hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp
Normal file
@ -0,0 +1,792 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP
|
||||
#define CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP
|
||||
|
||||
#include "asm/assembler.inline.hpp"
|
||||
#include "asm/codeBuffer.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
|
||||
// Emit one 32-bit instruction/data word into the code buffer.
inline void Assembler::emit_int32(int x) {
  AbstractAssembler::emit_int32(x);
}

// Emit a 32-bit data word, without relocation info.
inline void Assembler::emit_data(int x) {
  emit_int32(x);
}

// Emit a 32-bit data word with relocation of the given type.
inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
  relocate(rtype);
  emit_int32(x);
}

// Emit a 32-bit data word with the given relocation spec.
inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
  relocate(rspec);
  emit_int32(x);
}
|
||||
|
||||
// Emit an address (one pointer-sized word) and return its location
// in the code buffer.
inline address Assembler::emit_addr(const address addr) {
  address start = pc();
  emit_address(addr);
  return start;
}
|
||||
|
||||
// Emit a function descriptor with the specified entry point, TOC, and
// ENV. If the entry point is NULL, the descriptor will point just
// past the descriptor.
inline address Assembler::emit_fd(address entry, address toc, address env) {
  FunctionDescriptor* fd = (FunctionDescriptor*)pc();

  assert(sizeof(FunctionDescriptor) == 3*sizeof(address), "function descriptor size");

  // Reserve the three descriptor slots first; they are patched below.
  // Ordering matters: after these emits, pc() points just past the
  // descriptor, which is exactly the entry used when `entry' is NULL.
  (void)emit_addr();
  (void)emit_addr();
  (void)emit_addr();

  fd->set_entry(entry == NULL ? pc() : entry);
  fd->set_toc(toc);
  fd->set_env(env);

  return (address)fd;
}
|
||||
|
||||
// Issue an illegal instruction. 0 is guaranteed to be an illegal instruction.
inline void Assembler::illtrap() { Assembler::emit_int32(0); }
// Recognize the illtrap encoding emitted above.
inline bool Assembler::is_illtrap(int x) { return x == 0; }
|
||||
|
||||
// PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
|
||||
inline void Assembler::addi( Register d, Register a, int si16) { assert(a != R0, "r0 not allowed"); addi_r0ok( d, a, si16); }
|
||||
inline void Assembler::addis( Register d, Register a, int si16) { assert(a != R0, "r0 not allowed"); addis_r0ok(d, a, si16); }
|
||||
inline void Assembler::addi_r0ok(Register d,Register a,int si16) { emit_int32(ADDI_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
|
||||
inline void Assembler::addis_r0ok(Register d,Register a,int si16) { emit_int32(ADDIS_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
|
||||
inline void Assembler::addic_( Register d, Register a, int si16) { emit_int32(ADDIC__OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
|
||||
inline void Assembler::subfic( Register d, Register a, int si16) { emit_int32(SUBFIC_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
|
||||
inline void Assembler::add( Register d, Register a, Register b) { emit_int32(ADD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
|
||||
inline void Assembler::add_( Register d, Register a, Register b) { emit_int32(ADD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
|
||||
inline void Assembler::subf( Register d, Register a, Register b) { emit_int32(SUBF_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
|
||||
inline void Assembler::sub( Register d, Register a, Register b) { subf(d, b, a); }
|
||||
inline void Assembler::subf_( Register d, Register a, Register b) { emit_int32(SUBF_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
|
||||
inline void Assembler::addc( Register d, Register a, Register b) { emit_int32(ADDC_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
|
||||
inline void Assembler::addc_( Register d, Register a, Register b) { emit_int32(ADDC_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
|
||||
inline void Assembler::subfc( Register d, Register a, Register b) { emit_int32(SUBFC_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
|
||||
inline void Assembler::subfc_( Register d, Register a, Register b) { emit_int32(SUBFC_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
|
||||
inline void Assembler::adde( Register d, Register a, Register b) { emit_int32(ADDE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
|
||||
inline void Assembler::adde_( Register d, Register a, Register b) { emit_int32(ADDE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
|
||||
inline void Assembler::subfe( Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
|
||||
inline void Assembler::subfe_( Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
|
||||
inline void Assembler::neg( Register d, Register a) { emit_int32(NEG_OPCODE | rt(d) | ra(a) | oe(0) | rc(0)); }
|
||||
inline void Assembler::neg_( Register d, Register a) { emit_int32(NEG_OPCODE | rt(d) | ra(a) | oe(0) | rc(1)); }
|
||||
inline void Assembler::mulli( Register d, Register a, int si16) { emit_int32(MULLI_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
|
||||
inline void Assembler::mulld( Register d, Register a, Register b) { emit_int32(MULLD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
|
||||
inline void Assembler::mulld_( Register d, Register a, Register b) { emit_int32(MULLD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
|
||||
inline void Assembler::mullw( Register d, Register a, Register b) { emit_int32(MULLW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
|
||||
inline void Assembler::mullw_( Register d, Register a, Register b) { emit_int32(MULLW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
|
||||
inline void Assembler::mulhw( Register d, Register a, Register b) { emit_int32(MULHW_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
|
||||
inline void Assembler::mulhw_( Register d, Register a, Register b) { emit_int32(MULHW_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
|
||||
inline void Assembler::mulhd( Register d, Register a, Register b) { emit_int32(MULHD_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
|
||||
inline void Assembler::mulhd_( Register d, Register a, Register b) { emit_int32(MULHD_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
|
||||
inline void Assembler::mulhdu( Register d, Register a, Register b) { emit_int32(MULHDU_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
|
||||
inline void Assembler::mulhdu_(Register d, Register a, Register b) { emit_int32(MULHDU_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
|
||||
inline void Assembler::divd( Register d, Register a, Register b) { emit_int32(DIVD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
|
||||
inline void Assembler::divd_( Register d, Register a, Register b) { emit_int32(DIVD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
|
||||
inline void Assembler::divw( Register d, Register a, Register b) { emit_int32(DIVW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
|
||||
inline void Assembler::divw_( Register d, Register a, Register b) { emit_int32(DIVW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
|
||||
|
||||
// extended mnemonics
// li/lis load a 16-bit immediate (lis: shifted into the upper halfword) by
// encoding addi/addis with R0 as the base, which the ISA reads as the constant
// zero — hence the _r0ok encoders that explicitly permit R0 here.
inline void Assembler::li(   Register d, int si16)             { Assembler::addi_r0ok( d, R0, si16); }
inline void Assembler::lis(  Register d, int si16)             { Assembler::addis_r0ok(d, R0, si16); }
// addir is plain addi with the (d, imm, base) operand order.
inline void Assembler::addir(Register d, int si16, Register a) { Assembler::addi(d, a, si16); }
// PPC 1, section 3.3.9, Fixed-Point Compare Instructions
// f selects the target CR field; l picks the comparison width (l10 field:
// 0 = 32-bit, 1 = 64-bit). cmpli/cmpl are the logical (unsigned) variants.
inline void Assembler::cmpi(  ConditionRegister f, int l, Register a, int si16)   { emit_int32( CMPI_OPCODE  | bf(f) | l10(l) | ra(a) | simm(si16,16)); }
inline void Assembler::cmp(   ConditionRegister f, int l, Register a, Register b) { emit_int32( CMP_OPCODE   | bf(f) | l10(l) | ra(a) | rb(b)); }
inline void Assembler::cmpli( ConditionRegister f, int l, Register a, int ui16)   { emit_int32( CMPLI_OPCODE | bf(f) | l10(l) | ra(a) | uimm(ui16,16)); }
inline void Assembler::cmpl(  ConditionRegister f, int l, Register a, Register b) { emit_int32( CMPL_OPCODE  | bf(f) | l10(l) | ra(a) | rb(b)); }
// extended mnemonics of Compare Instructions
// 'w' forms compare 32 bits (l = 0), 'd' forms 64 bits (l = 1); the extra 'l'
// in cmplwi/cmpldi/cmplw/cmpld selects the logical (unsigned) comparison.
inline void Assembler::cmpwi( ConditionRegister crx, Register a, int si16)   { Assembler::cmpi( crx, 0, a, si16); }
inline void Assembler::cmpdi( ConditionRegister crx, Register a, int si16)   { Assembler::cmpi( crx, 1, a, si16); }
inline void Assembler::cmpw(  ConditionRegister crx, Register a, Register b) { Assembler::cmp(  crx, 0, a, b); }
inline void Assembler::cmpd(  ConditionRegister crx, Register a, Register b) { Assembler::cmp(  crx, 1, a, b); }
inline void Assembler::cmplwi(ConditionRegister crx, Register a, int ui16)   { Assembler::cmpli(crx, 0, a, ui16); }
inline void Assembler::cmpldi(ConditionRegister crx, Register a, int ui16)   { Assembler::cmpli(crx, 1, a, ui16); }
inline void Assembler::cmplw( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 0, a, b); }
inline void Assembler::cmpld( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 1, a, b); }

// isel: conditional register select controlled by CR bit c (bc() encodes the
// controlling bit position).
inline void Assembler::isel(Register d, Register a, Register b, int c) { emit_int32(ISEL_OPCODE | rt(d) | ra(a) | rb(b) | bc(c)); }
// PPC 1, section 3.3.11, Fixed-Point Logical Instructions
// Logical ops write their result to register 'a' (rta field) from source 's'.
// Names with a trailing 'r' (andr/orr/xorr) avoid the C++ alternative operator
// keywords and/or/xor; a trailing '_' is the record form (Rc=1, sets CR0).
inline void Assembler::andi_(   Register a, Register s, int ui16)      { emit_int32(ANDI_OPCODE  | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::andis_(  Register a, Register s, int ui16)      { emit_int32(ANDIS_OPCODE | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::ori(     Register a, Register s, int ui16)      { emit_int32(ORI_OPCODE   | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::oris(    Register a, Register s, int ui16)      { emit_int32(ORIS_OPCODE  | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::xori(    Register a, Register s, int ui16)      { emit_int32(XORI_OPCODE  | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::xoris(   Register a, Register s, int ui16)      { emit_int32(XORIS_OPCODE | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::andr(    Register a, Register s, Register b)    { emit_int32(AND_OPCODE   | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::and_(    Register a, Register s, Register b)    { emit_int32(AND_OPCODE   | rta(a) | rs(s) | rb(b) | rc(1)); }

// or_unchecked emits "or" even when all three registers are identical; that
// encoding doubles as an SMT thread-priority hint (see smt_prio_* below), so
// orr() substitutes a nop in the a==s==b case to avoid emitting a hint by
// accident.
inline void Assembler::or_unchecked(Register a, Register s, Register b){ emit_int32(OR_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::orr(     Register a, Register s, Register b)    { if (a==s && s==b) { Assembler::nop(); } else { Assembler::or_unchecked(a,s,b); } }
inline void Assembler::or_(     Register a, Register s, Register b)    { emit_int32(OR_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::xorr(    Register a, Register s, Register b)    { emit_int32(XOR_OPCODE   | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::xor_(    Register a, Register s, Register b)    { emit_int32(XOR_OPCODE   | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::nand(    Register a, Register s, Register b)    { emit_int32(NAND_OPCODE  | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::nand_(   Register a, Register s, Register b)    { emit_int32(NAND_OPCODE  | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::nor(     Register a, Register s, Register b)    { emit_int32(NOR_OPCODE   | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::nor_(    Register a, Register s, Register b)    { emit_int32(NOR_OPCODE   | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::andc(    Register a, Register s, Register b)    { emit_int32(ANDC_OPCODE  | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::andc_(   Register a, Register s, Register b)    { emit_int32(ANDC_OPCODE  | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::orc(     Register a, Register s, Register b)    { emit_int32(ORC_OPCODE   | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::orc_(    Register a, Register s, Register b)    { emit_int32(ORC_OPCODE   | rta(a) | rs(s) | rb(b) | rc(1)); }
// Sign-extend byte / halfword / word into a.
inline void Assembler::extsb(   Register a, Register s)                { emit_int32(EXTSB_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::extsh(   Register a, Register s)                { emit_int32(EXTSH_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::extsw(   Register a, Register s)                { emit_int32(EXTSW_OPCODE | rta(a) | rs(s) | rc(0)); }
// extended mnemonics
// Canonical no-op: ori r0,r0,0.
inline void Assembler::nop() { Assembler::ori(R0, R0, 0); }
// NOP for FP and BR units (different versions to allow them to be in one group)
inline void Assembler::fpnop0() { Assembler::fmr(F30, F30); }
inline void Assembler::fpnop1() { Assembler::fmr(F31, F31); }
inline void Assembler::brnop0() { Assembler::mcrf(CCR2, CCR2); }
inline void Assembler::brnop1() { Assembler::mcrf(CCR3, CCR3); }
inline void Assembler::brnop2() { Assembler::mcrf(CCR4, CCR4); }

// Register move. Goes through orr(), so mr(d, d) degenerates to a plain nop
// rather than an SMT priority hint.
inline void Assembler::mr(      Register d, Register s) { Assembler::orr(d, s, s); }
// *_opt: OR an immediate into d, but emit nothing when the immediate is zero.
inline void Assembler::ori_opt( Register d, int ui16) { if (ui16!=0) Assembler::ori( d, d, ui16); }
inline void Assembler::oris_opt(Register d, int ui16) { if (ui16!=0) Assembler::oris(d, d, ui16); }

// Group-ending no-op, encoded as ori r1,r1,0 (distinct from nop() so the two
// can be told apart; used to terminate a dispatch group).
inline void Assembler::endgroup() { Assembler::ori(R1, R1, 0); }
// count instructions
// Count leading zeros, word (w) / doubleword (d); '_' forms also set CR0.
inline void Assembler::cntlzw(  Register a, Register s)              { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::cntlzw_( Register a, Register s)              { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(1)); }
inline void Assembler::cntlzd(  Register a, Register s)              { emit_int32(CNTLZD_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::cntlzd_( Register a, Register s)              { emit_int32(CNTLZD_OPCODE | rta(a) | rs(s) | rc(1)); }
// PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
// Register-amount shifts: sl=shift left, sr=shift right, sra=shift right
// algebraic (arithmetic); 'd' = 64-bit, 'w' = 32-bit; '_' = record form.
inline void Assembler::sld(     Register a, Register s, Register b) { emit_int32(SLD_OPCODE  | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::sld_(    Register a, Register s, Register b) { emit_int32(SLD_OPCODE  | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::slw(     Register a, Register s, Register b) { emit_int32(SLW_OPCODE  | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::slw_(    Register a, Register s, Register b) { emit_int32(SLW_OPCODE  | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::srd(     Register a, Register s, Register b) { emit_int32(SRD_OPCODE  | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::srd_(    Register a, Register s, Register b) { emit_int32(SRD_OPCODE  | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::srw(     Register a, Register s, Register b) { emit_int32(SRW_OPCODE  | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::srw_(    Register a, Register s, Register b) { emit_int32(SRW_OPCODE  | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::srad(    Register a, Register s, Register b) { emit_int32(SRAD_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::srad_(   Register a, Register s, Register b) { emit_int32(SRAD_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::sraw(    Register a, Register s, Register b) { emit_int32(SRAW_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::sraw_(   Register a, Register s, Register b) { emit_int32(SRAW_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
// Immediate-amount arithmetic shifts. The 6-bit doubleword amount is split
// across the instruction (sh162030 field); the 5-bit word amount is contiguous
// (sh1620 field).
inline void Assembler::sradi(   Register a, Register s, int sh6)    { emit_int32(SRADI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | rc(0)); }
inline void Assembler::sradi_(  Register a, Register s, int sh6)    { emit_int32(SRADI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | rc(1)); }
inline void Assembler::srawi(   Register a, Register s, int sh5)    { emit_int32(SRAWI_OPCODE | rta(a) | rs(s) | sh1620(sh5)   | rc(0)); }
inline void Assembler::srawi_(  Register a, Register s, int sh5)    { emit_int32(SRAWI_OPCODE | rta(a) | rs(s) | sh1620(sh5)   | rc(1)); }
// extended mnemonics for Shift Instructions
// Immediate logical shifts, implemented as rotate-and-mask per the standard
// Power extended-mnemonic expansions.
inline void Assembler::sldi(    Register a, Register s, int sh6)    { Assembler::rldicr(a, s, sh6, 63-sh6); }
inline void Assembler::sldi_(   Register a, Register s, int sh6)    { Assembler::rldicr_(a, s, sh6, 63-sh6); }
inline void Assembler::slwi(    Register a, Register s, int sh5)    { Assembler::rlwinm(a, s, sh5, 0, 31-sh5); }
inline void Assembler::slwi_(   Register a, Register s, int sh5)    { Assembler::rlwinm_(a, s, sh5, 0, 31-sh5); }
inline void Assembler::srdi(    Register a, Register s, int sh6)    { Assembler::rldicl(a, s, 64-sh6, sh6); }
inline void Assembler::srdi_(   Register a, Register s, int sh6)    { Assembler::rldicl_(a, s, 64-sh6, sh6); }
inline void Assembler::srwi(    Register a, Register s, int sh5)    { Assembler::rlwinm(a, s, 32-sh5, sh5, 31); }
inline void Assembler::srwi_(   Register a, Register s, int sh5)    { Assembler::rlwinm_(a, s, 32-sh5, sh5, 31); }

// Clear the rightmost (clrrdi) / leftmost (clrldi) ui6 bits of s into a;
// clrlsldi clears clrl6 high bits then shifts left by shl6.
inline void Assembler::clrrdi(  Register a, Register s, int ui6)    { Assembler::rldicr(a, s, 0, 63-ui6); }
inline void Assembler::clrrdi_( Register a, Register s, int ui6)    { Assembler::rldicr_(a, s, 0, 63-ui6); }
inline void Assembler::clrldi(  Register a, Register s, int ui6)    { Assembler::rldicl(a, s, 0, ui6); }
inline void Assembler::clrldi_( Register a, Register s, int ui6)    { Assembler::rldicl_(a, s, 0, ui6); }
inline void Assembler::clrlsldi( Register a, Register s, int clrl6, int shl6) { Assembler::rldic( a, s, shl6, clrl6-shl6); }
inline void Assembler::clrlsldi_(Register a, Register s, int clrl6, int shl6) { Assembler::rldic_(a, s, shl6, clrl6-shl6); }
// Extract an n-bit field starting at bit b into the low bits of a.
inline void Assembler::extrdi(  Register a, Register s, int n, int b){ Assembler::rldicl(a, s, b+n, 64-n); }
// testbit with condition register.
// Rotates the tested bit of s into the sign position of scratch register a
// (clobbering a!) and compares against zero, leaving the outcome in cr.
// Pair with btrue()/bfalse() below to branch on the bit's value.
inline void Assembler::testbitdi(ConditionRegister cr, Register a, Register s, int ui6) {
  Assembler::rldicr(a, s, 63-ui6, 0);
  Assembler::cmpdi(cr, a, 0);
}
// rotate instructions
// Rotate left/right by an immediate amount (right rotates are encoded as
// complementary left rotates), doubleword and word variants.
inline void Assembler::rotldi(  Register a, Register s, int n) { Assembler::rldicl(a, s, n, 0); }
inline void Assembler::rotrdi(  Register a, Register s, int n) { Assembler::rldicl(a, s, 64-n, 0); }
inline void Assembler::rotlwi(  Register a, Register s, int n) { Assembler::rlwinm(a, s, n, 0, 31); }
inline void Assembler::rotrwi(  Register a, Register s, int n) { Assembler::rlwinm(a, s, 32-n, 0, 31); }

// Raw rotate-and-mask encodings. rld* take a 6-bit shift (split sh162030
// field) and a 6-bit mask-begin (mb2126) or mask-end (me2126) field; rlwinm
// takes 5-bit shift plus contiguous mb/me mask bounds. rldimi/rlwimi are the
// insert forms (result merged under the mask instead of replaced).
inline void Assembler::rldic(   Register a, Register s, int sh6, int mb6)          { emit_int32(RLDIC_OPCODE  | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
inline void Assembler::rldic_(  Register a, Register s, int sh6, int mb6)          { emit_int32(RLDIC_OPCODE  | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
inline void Assembler::rldicr(  Register a, Register s, int sh6, int mb6)          { emit_int32(RLDICR_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
inline void Assembler::rldicr_( Register a, Register s, int sh6, int mb6)          { emit_int32(RLDICR_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
inline void Assembler::rldicl(  Register a, Register s, int sh6, int me6)          { emit_int32(RLDICL_OPCODE | rta(a) | rs(s) | sh162030(sh6) | me2126(me6) | rc(0)); }
inline void Assembler::rldicl_( Register a, Register s, int sh6, int me6)          { emit_int32(RLDICL_OPCODE | rta(a) | rs(s) | sh162030(sh6) | me2126(me6) | rc(1)); }
inline void Assembler::rlwinm(  Register a, Register s, int sh5, int mb5, int me5) { emit_int32(RLWINM_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(0)); }
inline void Assembler::rlwinm_( Register a, Register s, int sh5, int mb5, int me5) { emit_int32(RLWINM_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(1)); }
inline void Assembler::rldimi(  Register a, Register s, int sh6, int mb6)          { emit_int32(RLDIMI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
inline void Assembler::rlwimi(  Register a, Register s, int sh5, int mb5, int me5) { emit_int32(RLWIMI_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(0)); }
inline void Assembler::rldimi_( Register a, Register s, int sh6, int mb6)          { emit_int32(RLDIMI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
// Insert the low n bits of s into a at bit position b.
inline void Assembler::insrdi(  Register a, Register s, int n, int b)              { Assembler::rldimi(a, s, 64-(b+n), b); }
inline void Assembler::insrwi(  Register a, Register s, int n, int b)              { Assembler::rlwimi(a, s, 32-(b+n), b, b+n-1); }
// PPC 1, section 3.3.2 Fixed-Point Load Instructions
// Naming: l{b,h,w,d} = byte/halfword/word/doubleword, z = zero-extended,
// a = algebraic (sign-extended), x = indexed (base+register), u = with update
// (base register rewritten with the effective address). ra0mem disallows R0
// as base (R0 would be read as constant zero); rta0mem additionally marks the
// base as written for the update forms. lwa/ld/ldu use the DS displacement
// field; the others use the plain D field (d1).
inline void Assembler::lwzx( Register d, Register s1, Register s2) { emit_int32(LWZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lwz(  Register d, int si16,    Register s1) { emit_int32(LWZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
// Update forms must not load into their own base register (IBM manual rule).
inline void Assembler::lwzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LWZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}

inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lwa(  Register d, int si16,    Register s1) { emit_int32(LWA_OPCODE  | rt(d) | ds(si16)   | ra0mem(s1));}

inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lhz(  Register d, int si16,    Register s1) { emit_int32(LHZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
inline void Assembler::lhzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}

inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lha(  Register d, int si16,    Register s1) { emit_int32(LHA_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
inline void Assembler::lhau( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}

inline void Assembler::lbzx( Register d, Register s1, Register s2) { emit_int32(LBZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lbz(  Register d, int si16,    Register s1) { emit_int32(LBZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
inline void Assembler::lbzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LBZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}

inline void Assembler::ld(   Register d, int si16,    Register s1) { emit_int32(LD_OPCODE  | rt(d) | ds(si16)   | ra0mem(s1));}
inline void Assembler::ldx(  Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::ldu(  Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
// PPC 1, section 3.3.3 Fixed-Point Store Instructions
// Same naming scheme as the loads above; the stored register goes into the
// RS field, and std/stdu use the DS displacement form.
inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stw(  Register d, int si16,    Register s1) { emit_int32(STW_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
inline void Assembler::stwu( Register d, int si16,    Register s1) { emit_int32(STWU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}

inline void Assembler::sthx( Register d, Register s1, Register s2) { emit_int32(STHX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::sth(  Register d, int si16,    Register s1) { emit_int32(STH_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
inline void Assembler::sthu( Register d, int si16,    Register s1) { emit_int32(STHU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}

inline void Assembler::stbx( Register d, Register s1, Register s2) { emit_int32(STBX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stb(  Register d, int si16,    Register s1) { emit_int32(STB_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
inline void Assembler::stbu( Register d, int si16,    Register s1) { emit_int32(STBU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}

inline void Assembler::std(  Register d, int si16,    Register s1) { emit_int32(STD_OPCODE  | rs(d) | ds(si16)   | ra0mem(s1));}
inline void Assembler::stdx( Register d, Register s1, Register s2) { emit_int32(STDX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stdu( Register d, int si16,    Register s1) { emit_int32(STDU_OPCODE | rs(d) | ds(si16)   | rta0mem(s1));}
inline void Assembler::stdux(Register s, Register a,  Register b)  { emit_int32(STDUX_OPCODE| rs(s) | rta0mem(a) | rb(b));}
// PPC 1, section 3.3.13 Move To/From System Register Instructions
// mt* move a GPR into a special register, mf* read one back.
inline void Assembler::mtlr( Register s1)         { emit_int32(MTLR_OPCODE  | rs(s1)); }
inline void Assembler::mflr( Register d )         { emit_int32(MFLR_OPCODE  | rt(d)); }
inline void Assembler::mtctr(Register s1)         { emit_int32(MTCTR_OPCODE | rs(s1)); }
inline void Assembler::mfctr(Register d )         { emit_int32(MFCTR_OPCODE | rt(d)); }
// mtcrf writes only the CR fields selected by the fxm mask.
inline void Assembler::mtcrf(int afxm, Register s){ emit_int32(MTCRF_OPCODE | fxm(afxm) | rs(s)); }
inline void Assembler::mfcr( Register d )         { emit_int32(MFCR_OPCODE  | rt(d)); }
// Copy one 4-bit CR field into another.
inline void Assembler::mcrf( ConditionRegister crd, ConditionRegister cra)
                                                  { emit_int32(MCRF_OPCODE | bf(crd) | bfa(cra)); }
// Write all eight CR fields (fxm mask 0xff).
inline void Assembler::mtcr( Register s)          { Assembler::mtcrf(0xff, s); }
// SAP JVM 2006-02-13 PPC branch instruction.
// PPC 1, section 2.4.1 Branch Instructions
// Unconditional (b/bl) and conditional (bc) relative branches. disp()
// computes the PC-relative displacement to target 'a'; aa(0) selects
// relative addressing, lk(1) records the return address in LR. emit_data()
// attaches relocation info of type 'rt' to the emitted instruction.
inline void Assembler::b( address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(0), rt); }
inline void Assembler::b( Label& L)                           { b( target(L)); }
inline void Assembler::bl(address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(1), rt); }
inline void Assembler::bl(Label& L)                           { bl(target(L)); }
// bc: conditional branch; boint/biint are the raw BO/BI condition fields.
inline void Assembler::bc( int boint, int biint, address a, relocInfo::relocType rt) { emit_data(BCXX_OPCODE| bo(boint) | bi(biint) | bd(disp( intptr_t(a), intptr_t(pc()))) | aa(0) | lk(0), rt); }
inline void Assembler::bc( int boint, int biint, Label& L)                           { bc(boint, biint, target(L)); }
inline void Assembler::bcl(int boint, int biint, address a, relocInfo::relocType rt) { emit_data(BCXX_OPCODE| bo(boint) | bi(biint) | bd(disp( intptr_t(a), intptr_t(pc()))) | aa(0)|lk(1)); }
|
||||
inline void Assembler::bcl(int boint, int biint, Label& L) { bcl(boint, biint, target(L)); }

// Conditional branches through LR (bclr) and CTR (bcctr); 'l' suffix sets
// LK=1 to record the return address. bhint carries the BH branch-usage hint
// (return vs. computed jump) for the hardware predictor.
inline void Assembler::bclr(  int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCLR_OPCODE | bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(0), rt); }
inline void Assembler::bclrl( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCLR_OPCODE | bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(1), rt); }
inline void Assembler::bcctr( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(0), rt); }
inline void Assembler::bcctrl(int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(1), rt); }
// helper function for b
|
||||
inline bool Assembler::is_within_range_of_b(address a, address pc) {
|
||||
// Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
|
||||
if ((((uint64_t)a) & 0x3) != 0) return false;
|
||||
|
||||
const int range = 1 << (29-6); // li field is from bit 6 to bit 29.
|
||||
int value = disp(intptr_t(a), intptr_t(pc));
|
||||
bool result = -range <= value && value < range-1;
|
||||
#ifdef ASSERT
|
||||
if (result) li(value); // Assert that value is in correct range.
|
||||
#endif
|
||||
return result;
|
||||
}
|
||||
|
||||
// helper functions for bcxx.
|
||||
inline bool Assembler::is_within_range_of_bcxx(address a, address pc) {
|
||||
// Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
|
||||
if ((((uint64_t)a) & 0x3) != 0) return false;
|
||||
|
||||
const int range = 1 << (29-16); // bd field is from bit 16 to bit 29.
|
||||
int value = disp(intptr_t(a), intptr_t(pc));
|
||||
bool result = -range <= value && value < range-1;
|
||||
#ifdef ASSERT
|
||||
if (result) bd(value); // Assert that value is in correct range.
|
||||
#endif
|
||||
return result;
|
||||
}
|
||||
|
||||
// Get the destination of a bxx branch (b, bl, ba, bla).
// These decode an already-emitted instruction: the li displacement field is
// extracted (inv_li_field) and added to the branch's own address.
// Note: defined out-of-line (not 'inline') unlike the emitters above.
address  Assembler::bxx_destination(address baddr) { return bxx_destination(*(int*)baddr, baddr); }
address  Assembler::bxx_destination(int instr, address pc) { return (address)bxx_destination_offset(instr, (intptr_t)pc); }
intptr_t Assembler::bxx_destination_offset(int instr, intptr_t bxx_pos) {
  intptr_t displ = inv_li_field(instr);
  return bxx_pos + displ;
}
// Extended mnemonics for Branch Instructions
// bcondCRbiIs1 branches when the named CR bit is set; the complementary
// conditions (bge = "not less", ble = "not greater", bne = "not equal",
// bns = "not summary overflow") branch when it is clear (bcondCRbiIs0).
inline void Assembler::blt(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, less), L); }
inline void Assembler::bgt(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, greater), L); }
inline void Assembler::beq(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, equal), L); }
inline void Assembler::bso(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, summary_overflow), L); }
inline void Assembler::bge(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, less), L); }
inline void Assembler::ble(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, greater), L); }
inline void Assembler::bne(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, equal), L); }
inline void Assembler::bns(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, summary_overflow), L); }
// Branch instructions with static prediction hints.
// Same conditions as above, but the BO variant additionally encodes a static
// taken / not-taken hint for the branch predictor.
inline void Assembler::blt_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, less), L); }
inline void Assembler::bgt_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, greater), L); }
inline void Assembler::beq_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, equal), L); }
inline void Assembler::bso_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, summary_overflow), L); }
inline void Assembler::bge_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, less), L); }
inline void Assembler::ble_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, greater), L); }
inline void Assembler::bne_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, equal), L); }
inline void Assembler::bns_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, summary_overflow), L); }
inline void Assembler::blt_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, less), L); }
inline void Assembler::bgt_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, greater), L); }
inline void Assembler::beq_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, equal), L); }
inline void Assembler::bso_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, summary_overflow), L); }
inline void Assembler::bge_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, less), L); }
inline void Assembler::ble_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, greater), L); }
inline void Assembler::bne_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, equal), L); }
inline void Assembler::bns_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, summary_overflow), L); }
||||
// For use in conjunction with testbitdi:
// testbitdi() leaves EQ set when the tested bit was clear, so "bit set"
// corresponds to "not equal" (btrue = bne) and "bit clear" to beq.
inline void Assembler::btrue( ConditionRegister crx, Label& L) { Assembler::bne(crx, L); }
inline void Assembler::bfalse(ConditionRegister crx, Label& L) { Assembler::beq(crx, L); }

// Linked (LK=1) variants of the conditional branches above; LR receives the
// return address.
inline void Assembler::bltl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, less), L); }
inline void Assembler::bgtl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, greater), L); }
inline void Assembler::beql(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, equal), L); }
inline void Assembler::bsol(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, summary_overflow), L); }
inline void Assembler::bgel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, less), L); }
inline void Assembler::blel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, greater), L); }
inline void Assembler::bnel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, equal), L); }
inline void Assembler::bnsl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, summary_overflow), L); }
||||
// Extended mnemonics for Branch Instructions via LR.
// We use `blr' for returns; the BH hint tells the hardware this is a
// function return.
inline void Assembler::blr(relocInfo::relocType rt) { Assembler::bclr(bcondAlways, 0, bhintbhBCLRisReturn, rt); }

// Extended mnemonics for Branch Instructions with CTR.
// Bdnz means `decrement CTR and jump to L if CTR is not zero'.
// (BO=16: decrement CTR, branch if CTR != 0.)
inline void Assembler::bdnz(Label& L) { Assembler::bc(16, 0, L); }
// Decrement and branch if result is zero. (BO=18: branch if CTR == 0.)
inline void Assembler::bdz(Label& L)  { Assembler::bc(18, 0, L); }
// We use `bctr[l]' for jumps/calls in function descriptor glue
// code, e.g. for calls to runtime functions.
inline void Assembler::bctr( relocInfo::relocType rt) { Assembler::bcctr(bcondAlways, 0, bhintbhBCCTRisNotReturnButSame, rt); }
inline void Assembler::bctrl(relocInfo::relocType rt) { Assembler::bcctrl(bcondAlways, 0, bhintbhBCCTRisNotReturnButSame, rt); }
// Conditional jumps/branches via CTR, on the EQ bit of crx (bne* variants
// branch when EQ is clear).
inline void Assembler::beqctr( ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctr( bcondCRbiIs1, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
inline void Assembler::beqctrl(ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctrl(bcondCRbiIs1, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
inline void Assembler::bnectr( ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctr( bcondCRbiIs0, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
inline void Assembler::bnectrl(ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctrl(bcondCRbiIs0, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
||||
// condition register logic instructions
// Bit-level boolean operations on individual CR bits: CR[d] = CR[s1] op CR[s2].
inline void Assembler::crand( int d, int s1, int s2) { emit_int32(CRAND_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::crnand(int d, int s1, int s2) { emit_int32(CRNAND_OPCODE | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::cror(  int d, int s1, int s2) { emit_int32(CROR_OPCODE   | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::crxor( int d, int s1, int s2) { emit_int32(CRXOR_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::crnor( int d, int s1, int s2) { emit_int32(CRNOR_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::creqv( int d, int s1, int s2) { emit_int32(CREQV_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::crandc(int d, int s1, int s2) { emit_int32(CRANDC_OPCODE | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::crorc( int d, int s1, int s2) { emit_int32(CRORC_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
||||
// PPC 2, section 3.2.1 Instruction Cache Instructions
// All cache ops address memory via base s1 (R0 disallowed) + index s2.
inline void Assembler::icbi(    Register s1, Register s2)         { emit_int32( ICBI_OPCODE   | ra0mem(s1) | rb(s2)           ); }
// PPC 2, section 3.2.2 Data Cache Instructions
//inline void Assembler::dcba(  Register s1, Register s2)         { emit_int32( DCBA_OPCODE   | ra0mem(s1) | rb(s2)           ); }
inline void Assembler::dcbz(    Register s1, Register s2)         { emit_int32( DCBZ_OPCODE   | ra0mem(s1) | rb(s2)           ); }
inline void Assembler::dcbst(   Register s1, Register s2)         { emit_int32( DCBST_OPCODE  | ra0mem(s1) | rb(s2)           ); }
inline void Assembler::dcbf(    Register s1, Register s2)         { emit_int32( DCBF_OPCODE   | ra0mem(s1) | rb(s2)           ); }
// dcache read hint
// The *ct/*ds variants carry a TH touch hint (cache target / data stream).
inline void Assembler::dcbt(    Register s1, Register s2)         { emit_int32( DCBT_OPCODE   | ra0mem(s1) | rb(s2)           ); }
inline void Assembler::dcbtct(  Register s1, Register s2, int ct) { emit_int32( DCBT_OPCODE   | ra0mem(s1) | rb(s2) | thct(ct)); }
inline void Assembler::dcbtds(  Register s1, Register s2, int ds) { emit_int32( DCBT_OPCODE   | ra0mem(s1) | rb(s2) | thds(ds)); }
// dcache write hint
inline void Assembler::dcbtst(  Register s1, Register s2)         { emit_int32( DCBTST_OPCODE | ra0mem(s1) | rb(s2)           ); }
inline void Assembler::dcbtstct(Register s1, Register s2, int ct) { emit_int32( DCBTST_OPCODE | ra0mem(s1) | rb(s2) | thct(ct)); }
||||
// machine barrier instructions:
// The l910 field selects the sync variant: 0 = full (heavyweight) sync,
// 1 = lwsync, 2 = ptesync.
inline void Assembler::sync(int a) { emit_int32( SYNC_OPCODE | l910(a)); }
inline void Assembler::sync()      { Assembler::sync(0); }
inline void Assembler::lwsync()    { Assembler::sync(1); }
inline void Assembler::ptesync()   { Assembler::sync(2); }
inline void Assembler::eieio()     { emit_int32( EIEIO_OPCODE); }
inline void Assembler::isync()     { emit_int32( ISYNC_OPCODE); }

// JMM-style barriers: release/acquire map to the cheaper lwsync, a full
// fence needs the heavyweight sync.
inline void Assembler::release()   { Assembler::lwsync(); }
inline void Assembler::acquire()   { Assembler::lwsync(); }
inline void Assembler::fence()     { Assembler::sync(); }
|
||||
// atomics
|
||||
// Use ra0mem to disallow R0 as base.
|
||||
inline void Assembler::lwarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LWARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
|
||||
inline void Assembler::ldarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LDARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
|
||||
inline bool Assembler::lxarx_hint_exclusive_access() { return VM_Version::has_lxarxeh(); }
|
||||
inline void Assembler::lwarx( Register d, Register a, Register b, bool hint_exclusive_access) { lwarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
|
||||
inline void Assembler::ldarx( Register d, Register a, Register b, bool hint_exclusive_access) { ldarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
|
||||
inline void Assembler::stwcx_(Register s, Register a, Register b) { emit_int32( STWCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
|
||||
inline void Assembler::stdcx_(Register s, Register a, Register b) { emit_int32( STDCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
|
||||
|
||||
// Instructions for adjusting thread priority
// for simultaneous multithreading (SMT) on POWER5.
// Priority is signalled with special no-op forms "or Rx,Rx,Rx"; the register
// number selects the priority level.
inline void Assembler::smt_prio_very_low()    { Assembler::or_unchecked(R31, R31, R31); }
inline void Assembler::smt_prio_low()         { Assembler::or_unchecked(R1,  R1,  R1); }
inline void Assembler::smt_prio_medium_low()  { Assembler::or_unchecked(R6,  R6,  R6); }
inline void Assembler::smt_prio_medium()      { Assembler::or_unchecked(R2,  R2,  R2); }
inline void Assembler::smt_prio_medium_high() { Assembler::or_unchecked(R5,  R5,  R5); }
inline void Assembler::smt_prio_high()        { Assembler::or_unchecked(R3,  R3,  R3); }

// twi with TO bits = 0 and immediate 0 (conditions under which it traps
// are fully determined by the TO field).
inline void Assembler::twi_0(Register a)      { twi_unchecked(0, a, 0);}

// trap instructions
// The "_unchecked" forms emit unconditionally; the checked forms assert that
// trap-based checks are enabled (UseSIGTRAP) before emitting.
inline void Assembler::tdi_unchecked(int tobits, Register a, int si16){         emit_int32( TDI_OPCODE | to(tobits) | ra(a) | si(si16)); }
inline void Assembler::twi_unchecked(int tobits, Register a, int si16){         emit_int32( TWI_OPCODE | to(tobits) | ra(a) | si(si16)); }
inline void Assembler::tdi(int tobits, Register a, int si16)          { assert(UseSIGTRAP, "precondition"); tdi_unchecked(tobits, a, si16);                      }
inline void Assembler::twi(int tobits, Register a, int si16)          { assert(UseSIGTRAP, "precondition"); twi_unchecked(tobits, a, si16);                      }
inline void Assembler::td( int tobits, Register a, Register b)        { assert(UseSIGTRAP, "precondition"); emit_int32( TD_OPCODE  | to(tobits) | ra(a) | rb(b)); }
inline void Assembler::tw( int tobits, Register a, Register b)        { assert(UseSIGTRAP, "precondition"); emit_int32( TW_OPCODE  | to(tobits) | ra(a) | rb(b)); }
// FLOATING POINT instructions ppc.
// PPC 1, section 4.6.2 Floating-Point Load Instructions
// Use ra0mem instead of ra in some instructions below.
// The update forms (lfsu/lfdu) encode ra() rather than ra0mem(): the base
// register is written back with the effective address, so R0 is not a valid
// base there.
inline void Assembler::lfs( FloatRegister d, int si16, Register a)   { emit_int32( LFS_OPCODE  | frt(d) | ra0mem(a) | simm(si16,16)); }
inline void Assembler::lfsu(FloatRegister d, int si16, Register a)   { emit_int32( LFSU_OPCODE | frt(d) | ra(a)     | simm(si16,16)); }
inline void Assembler::lfsx(FloatRegister d, Register a, Register b) { emit_int32( LFSX_OPCODE | frt(d) | ra0mem(a) | rb(b)); }
inline void Assembler::lfd( FloatRegister d, int si16, Register a)   { emit_int32( LFD_OPCODE  | frt(d) | ra0mem(a) | simm(si16,16)); }
inline void Assembler::lfdu(FloatRegister d, int si16, Register a)   { emit_int32( LFDU_OPCODE | frt(d) | ra(a)     | simm(si16,16)); }
inline void Assembler::lfdx(FloatRegister d, Register a, Register b) { emit_int32( LFDX_OPCODE | frt(d) | ra0mem(a) | rb(b)); }

// PPC 1, section 4.6.3 Floating-Point Store Instructions
// Use ra0mem instead of ra in some instructions below.
inline void Assembler::stfs( FloatRegister s, int si16, Register a)  { emit_int32( STFS_OPCODE  | frs(s) | ra0mem(a) | simm(si16,16)); }
inline void Assembler::stfsu(FloatRegister s, int si16, Register a)  { emit_int32( STFSU_OPCODE | frs(s) | ra(a)     | simm(si16,16)); }
inline void Assembler::stfsx(FloatRegister s, Register a, Register b){ emit_int32( STFSX_OPCODE | frs(s) | ra0mem(a) | rb(b)); }
inline void Assembler::stfd( FloatRegister s, int si16, Register a)  { emit_int32( STFD_OPCODE  | frs(s) | ra0mem(a) | simm(si16,16)); }
inline void Assembler::stfdu(FloatRegister s, int si16, Register a)  { emit_int32( STFDU_OPCODE | frs(s) | ra(a)     | simm(si16,16)); }
inline void Assembler::stfdx(FloatRegister s, Register a, Register b){ emit_int32( STFDX_OPCODE | frs(s) | ra0mem(a) | rb(b)); }
// PPC 1, section 4.6.4 Floating-Point Move Instructions
// Throughout this file a trailing underscore on the mnemonic means the
// record form: rc(1) is set so CR1 is updated.
inline void Assembler::fmr( FloatRegister d, FloatRegister b) { emit_int32( FMR_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fmr_(FloatRegister d, FloatRegister b) { emit_int32( FMR_OPCODE | frt(d) | frb(b) | rc(1)); }

// These are special Power6 opcodes, reused for "lfdepx" and "stfdepx"
// on Power7. Do not use.
//inline void Assembler::mffgpr( FloatRegister d, Register b)   { emit_int32( MFFGPR_OPCODE | frt(d) | rb(b) | rc(0)); }
//inline void Assembler::mftgpr( Register d, FloatRegister b)   { emit_int32( MFTGPR_OPCODE | rt(d) | frb(b) | rc(0)); }

// add cmpb and popcntb to detect ppc power version.
// Note: the stray ';' after the popcnt* bodies is harmless at namespace scope.
inline void Assembler::cmpb(   Register a, Register s, Register b) { emit_int32( CMPB_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::popcntb(Register a, Register s)             { emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); };
inline void Assembler::popcntw(Register a, Register s)             { emit_int32( POPCNTW_OPCODE | rta(a) | rs(s)); };
inline void Assembler::popcntd(Register a, Register s)             { emit_int32( POPCNTD_OPCODE | rta(a) | rs(s)); };

// Floating-point negate / absolute value / negated absolute value,
// each with a non-record (rc(0)) and record (rc(1)) form.
inline void Assembler::fneg(  FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE  | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fneg_( FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE  | frt(d) | frb(b) | rc(1)); }
inline void Assembler::fabs(  FloatRegister d, FloatRegister b) { emit_int32( FABS_OPCODE  | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fabs_( FloatRegister d, FloatRegister b) { emit_int32( FABS_OPCODE  | frt(d) | frb(b) | rc(1)); }
inline void Assembler::fnabs( FloatRegister d, FloatRegister b) { emit_int32( FNABS_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fnabs_(FloatRegister d, FloatRegister b) { emit_int32( FNABS_OPCODE | frt(d) | frb(b) | rc(1)); }
// PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic Instructions
// Double-precision forms plus single-precision ("s" suffix) forms; the
// trailing underscore selects the record form (rc(1), CR1 updated).
// fmul/fmuls encode the second operand in the FRC field, not FRB.
inline void Assembler::fadd(  FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADD_OPCODE  | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fadd_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADD_OPCODE  | frt(d) | fra(a) | frb(b) | rc(1)); }
inline void Assembler::fadds( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADDS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fadds_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADDS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
inline void Assembler::fsub(  FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUB_OPCODE  | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fsub_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUB_OPCODE  | frt(d) | fra(a) | frb(b) | rc(1)); }
inline void Assembler::fsubs( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUBS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fsubs_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUBS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
inline void Assembler::fmul(  FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMUL_OPCODE  | frt(d) | fra(a) | frc(c) | rc(0)); }
inline void Assembler::fmul_( FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMUL_OPCODE  | frt(d) | fra(a) | frc(c) | rc(1)); }
inline void Assembler::fmuls( FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMULS_OPCODE | frt(d) | fra(a) | frc(c) | rc(0)); }
inline void Assembler::fmuls_(FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMULS_OPCODE | frt(d) | fra(a) | frc(c) | rc(1)); }
inline void Assembler::fdiv(  FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIV_OPCODE  | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fdiv_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIV_OPCODE  | frt(d) | fra(a) | frb(b) | rc(1)); }
inline void Assembler::fdivs( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIVS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fdivs_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIVS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
// PPC 1, section 4.6.6 Floating-Point Rounding and Conversion Instructions
// frsp rounds to single precision; fcti* convert float->integer (the "z"
// variants truncate toward zero); fcfid* convert integer->float.
// Only the non-record (rc(0)) forms are provided here.
inline void Assembler::frsp(  FloatRegister d, FloatRegister b) { emit_int32( FRSP_OPCODE   | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fctid( FloatRegister d, FloatRegister b) { emit_int32( FCTID_OPCODE  | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fctidz(FloatRegister d, FloatRegister b) { emit_int32( FCTIDZ_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fctiw( FloatRegister d, FloatRegister b) { emit_int32( FCTIW_OPCODE  | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fctiwz(FloatRegister d, FloatRegister b) { emit_int32( FCTIWZ_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fcfid( FloatRegister d, FloatRegister b) { emit_int32( FCFID_OPCODE  | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fcfids(FloatRegister d, FloatRegister b) { emit_int32( FCFIDS_OPCODE | frt(d) | frb(b) | rc(0)); }

// PPC 1, section 4.6.7 Floating-Point Compare Instructions
// Result goes to the condition register field selected by bf(crx).
inline void Assembler::fcmpu( ConditionRegister crx, FloatRegister a, FloatRegister b) { emit_int32( FCMPU_OPCODE | bf(crx) | fra(a) | frb(b)); }

// PPC 1, section 5.2.1 Floating-Point Arithmetic Instructions
inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { emit_int32( FSQRT_OPCODE  | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
// Vector instructions for >= Power6.
// VMX/AltiVec loads and stores: element (lve*x/stve*x), full quadword
// (lvx/stvx, with "l" = LRU-hint variants), and permute-control vector
// loads (lvsl/lvsr). Addressing is (RA|0) + RB via ra0mem()/rb().
inline void Assembler::lvebx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEBX_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvehx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEHX_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvewx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEWX_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvx(   VectorRegister d, Register s1, Register s2) { emit_int32( LVX_OPCODE    | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvxl(  VectorRegister d, Register s1, Register s2) { emit_int32( LVXL_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stvebx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEBX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stvehx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEHX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stvewx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEWX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stvx(  VectorRegister d, Register s1, Register s2) { emit_int32( STVX_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stvxl( VectorRegister d, Register s1, Register s2) { emit_int32( STVXL_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvsl(  VectorRegister d, Register s1, Register s2) { emit_int32( LVSL_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvsr(  VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
// Vector pack (vpk*: narrow two source vectors into one; "s"/"u" signed or
// unsigned, trailing "s" saturating, "m" modulo), unpack (vupk*: widen the
// high/"h" or low/"l" half), and merge (vmrg*: interleave high/low elements
// of byte/halfword/word granularity).
inline void Assembler::vpkpx(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKPX_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkshss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkswss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSWSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkshus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkswus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSWUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkuhum( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUHUM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkuwum( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUWUM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkuhus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUHUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkuwus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUWUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vupkhpx( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKHPX_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vupkhsb( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKHSB_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vupkhsh( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKHSH_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vupklpx( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKLPX_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vupklsb( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKLSB_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vupklsh( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKLSH_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vmrghb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHB_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmrghw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHW_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmrghh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHH_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmrglb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLB_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmrglw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLW_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmrglh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLH_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
// Vector splat: replicate the element selected by the unsigned immediate
// across the whole vector. The immediate width matches the element count:
// 4 bits for byte, 3 for halfword, 2 for word lanes.
// NOTE(review): the ISA mnemonic for the byte form is "vspltb"; "vsplt"
// here looks like a shortened/typo'd name — verify against the declaration
// and VSPLT_OPCODE in the header before renaming anything.
inline void Assembler::vsplt(   VectorRegister d, int ui4,          VectorRegister b) { emit_int32( VSPLT_OPCODE   | vrt(d) | vsplt_uim(uimm(ui4,4)) | vrb(b)); }
inline void Assembler::vsplth(  VectorRegister d, int ui3,          VectorRegister b) { emit_int32( VSPLTH_OPCODE  | vrt(d) | vsplt_uim(uimm(ui3,3)) | vrb(b)); }
inline void Assembler::vspltw(  VectorRegister d, int ui2,          VectorRegister b) { emit_int32( VSPLTW_OPCODE  | vrt(d) | vsplt_uim(uimm(ui2,2)) | vrb(b)); }
// Splat a 5-bit signed immediate into every byte/halfword/word lane.
inline void Assembler::vspltisb(VectorRegister d, int si5)                            { emit_int32( VSPLTISB_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
inline void Assembler::vspltish(VectorRegister d, int si5)                            { emit_int32( VSPLTISH_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
inline void Assembler::vspltisw(VectorRegister d, int si5)                            { emit_int32( VSPLTISW_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
// Permute / select take a third vector operand encoded in the VRC field.
inline void Assembler::vperm(   VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VPERM_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
inline void Assembler::vsel(    VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VSEL_OPCODE  | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
// Whole-vector shifts; vsldoi shifts a double-quadword left by a 4-bit
// immediate byte count (SHB field).
inline void Assembler::vsl(     VectorRegister d, VectorRegister a, VectorRegister b)                  { emit_int32( VSL_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsldoi(  VectorRegister d, VectorRegister a, VectorRegister b, int si4)         { emit_int32( VSLDOI_OPCODE| vrt(d) | vra(a) | vrb(b) | vsldoi_shb(simm(si4,4))); }
inline void Assembler::vslo(    VectorRegister d, VectorRegister a, VectorRegister b)                  { emit_int32( VSLO_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsr(     VectorRegister d, VectorRegister a, VectorRegister b)                  { emit_int32( VSR_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsro(    VectorRegister d, VectorRegister a, VectorRegister b)                  { emit_int32( VSRO_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
// Vector integer add/subtract. Naming: "c" carry-out, "s"/"u" signed or
// unsigned elements, b/h/w byte/halfword/word lanes, trailing "s"
// saturating, trailing "m" modulo (wraparound) arithmetic.
inline void Assembler::vaddcuw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDCUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddshs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddsbs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddubm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddubs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubcuw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBCUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubshs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubsbs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsububm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubuwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubuhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsububs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubuws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubuhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
// Vector multiply (even/odd element lanes, signed/unsigned byte/halfword),
// multiply-add / multiply-sum families (three vector operands, the third
// encoded in the VRC field), and cross-element sum instructions.
inline void Assembler::vmulesb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULESB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmuleub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULEUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmulesh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULESH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmuleuh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULEUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmulosb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOSB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmuloub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmulosh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOSH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmulouh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmhaddshs(VectorRegister d,VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMHADDSHS_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmhraddshs(VectorRegister d,VectorRegister a,VectorRegister b, VectorRegister c) { emit_int32( VMHRADDSHS_OPCODE| vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmladduhm(VectorRegister d,VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMLADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsubuhm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUBUHM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsummbm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMMBM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsumshm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMSHM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsumshs(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMSHS_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsumuhm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMUHM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsumuhs(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMUHS_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vsumsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUMSWS_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsum2sws(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM2SWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsum4sbs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4SBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsum4ubs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4UBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsum4shs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4SHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
// Vector element-wise average, maximum, and minimum, for signed ("s") and
// unsigned ("u") byte/halfword/word lanes.
inline void Assembler::vavgsb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavgsw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavgsh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavgub(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavguw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavguh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxsb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxsw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxsh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxub(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxuw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxuh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminsb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminsw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminsh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminub(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminuw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminuh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
// Vector compares: equal (vcmpequ*) and greater-than (vcmpgts*/vcmpgtu* for
// signed/unsigned lanes). Plain forms use vcmp_rc(0); the trailing-underscore
// forms set vcmp_rc(1) (record bit) so the result summary is reflected in CR6.
inline void Assembler::vcmpequb(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpequh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpequw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtsh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtsb(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtsw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtub(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtuh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtuw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpequb_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpequh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpequw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtsh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtsb_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtsw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtub_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtuh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtuw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
// Vector logical operations, element rotates (vrl*), element shifts
// (vsl*/vsr*, arithmetic right vsra*), and vector status/control register
// moves (mtvscr/mfvscr).
inline void Assembler::vand(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAND_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vandc(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VANDC_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vnor(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vor(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vxor(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VXOR_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlb(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLB_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlw(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLW_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlh(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLH_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vslb(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLB_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
// NOTE(review): "vskw"/VSKW_OPCODE is not a Power ISA mnemonic; by position
// in the vslb/vslh family it looks like a typo for "vslw" (Vector Shift Left
// Word). The name matches the declaration elsewhere, so it is kept here —
// verify against the opcode table in the header before renaming.
inline void Assembler::vskw(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSKW_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vslh(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLH_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsrb(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRB_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsrw(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRW_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsrh(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRH_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsrab(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsraw(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsrah(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
// Move to/from the vector status and control register (VSCR).
inline void Assembler::mtvscr(  VectorRegister b)                                     { emit_int32( MTVSCR_OPCODE | vrb(b)); }
inline void Assembler::mfvscr(  VectorRegister d)                                     { emit_int32( MFVSCR_OPCODE | vrt(d)); }
||||
// ra0 version
|
||||
inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE | rt(d) | rb(s2));}
|
||||
inline void Assembler::lwz( Register d, int si16 ) { emit_int32( LWZ_OPCODE | rt(d) | d1(si16));}
|
||||
inline void Assembler::lwax( Register d, Register s2) { emit_int32( LWAX_OPCODE | rt(d) | rb(s2));}
|
||||
inline void Assembler::lwa( Register d, int si16 ) { emit_int32( LWA_OPCODE | rt(d) | ds(si16));}
|
||||
inline void Assembler::lhzx( Register d, Register s2) { emit_int32( LHZX_OPCODE | rt(d) | rb(s2));}
|
||||
inline void Assembler::lhz( Register d, int si16 ) { emit_int32( LHZ_OPCODE | rt(d) | d1(si16));}
|
||||
inline void Assembler::lhax( Register d, Register s2) { emit_int32( LHAX_OPCODE | rt(d) | rb(s2));}
|
||||
inline void Assembler::lha( Register d, int si16 ) { emit_int32( LHA_OPCODE | rt(d) | d1(si16));}
|
||||
inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));}
|
||||
inline void Assembler::lbz( Register d, int si16 ) { emit_int32( LBZ_OPCODE | rt(d) | d1(si16));}
|
||||
inline void Assembler::ld( Register d, int si16 ) { emit_int32( LD_OPCODE | rt(d) | ds(si16));}
|
||||
inline void Assembler::ldx( Register d, Register s2) { emit_int32( LDX_OPCODE | rt(d) | rb(s2));}
|
||||
inline void Assembler::stwx( Register d, Register s2) { emit_int32( STWX_OPCODE | rs(d) | rb(s2));}
|
||||
inline void Assembler::stw( Register d, int si16 ) { emit_int32( STW_OPCODE | rs(d) | d1(si16));}
|
||||
inline void Assembler::sthx( Register d, Register s2) { emit_int32( STHX_OPCODE | rs(d) | rb(s2));}
|
||||
inline void Assembler::sth( Register d, int si16 ) { emit_int32( STH_OPCODE | rs(d) | d1(si16));}
|
||||
inline void Assembler::stbx( Register d, Register s2) { emit_int32( STBX_OPCODE | rs(d) | rb(s2));}
|
||||
inline void Assembler::stb( Register d, int si16 ) { emit_int32( STB_OPCODE | rs(d) | d1(si16));}
|
||||
inline void Assembler::std( Register d, int si16 ) { emit_int32( STD_OPCODE | rs(d) | ds(si16));}
|
||||
inline void Assembler::stdx( Register d, Register s2) { emit_int32( STDX_OPCODE | rs(d) | rb(s2));}
|
||||
|
||||
// ra0 version
|
||||
inline void Assembler::icbi( Register s2) { emit_int32( ICBI_OPCODE | rb(s2) ); }
|
||||
//inline void Assembler::dcba( Register s2) { emit_int32( DCBA_OPCODE | rb(s2) ); }
|
||||
inline void Assembler::dcbz( Register s2) { emit_int32( DCBZ_OPCODE | rb(s2) ); }
|
||||
inline void Assembler::dcbst( Register s2) { emit_int32( DCBST_OPCODE | rb(s2) ); }
|
||||
inline void Assembler::dcbf( Register s2) { emit_int32( DCBF_OPCODE | rb(s2) ); }
|
||||
inline void Assembler::dcbt( Register s2) { emit_int32( DCBT_OPCODE | rb(s2) ); }
|
||||
inline void Assembler::dcbtct( Register s2, int ct) { emit_int32( DCBT_OPCODE | rb(s2) | thct(ct)); }
|
||||
inline void Assembler::dcbtds( Register s2, int ds) { emit_int32( DCBT_OPCODE | rb(s2) | thds(ds)); }
|
||||
inline void Assembler::dcbtst( Register s2) { emit_int32( DCBTST_OPCODE | rb(s2) ); }
|
||||
inline void Assembler::dcbtstct(Register s2, int ct) { emit_int32( DCBTST_OPCODE | rb(s2) | thct(ct)); }
|
||||
|
||||
// ra0 version
|
||||
inline void Assembler::lwarx_unchecked(Register d, Register b, int eh1) { emit_int32( LWARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
|
||||
inline void Assembler::ldarx_unchecked(Register d, Register b, int eh1) { emit_int32( LDARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
|
||||
inline void Assembler::lwarx( Register d, Register b, bool hint_exclusive_access){ lwarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
|
||||
inline void Assembler::ldarx( Register d, Register b, bool hint_exclusive_access){ ldarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
|
||||
inline void Assembler::stwcx_(Register s, Register b) { emit_int32( STWCX_OPCODE | rs(s) | rb(b) | rc(1)); }
|
||||
inline void Assembler::stdcx_(Register s, Register b) { emit_int32( STDCX_OPCODE | rs(s) | rb(b) | rc(1)); }
|
||||
|
||||
// ra0 version
|
||||
inline void Assembler::lfs( FloatRegister d, int si16) { emit_int32( LFS_OPCODE | frt(d) | simm(si16,16)); }
|
||||
inline void Assembler::lfsx(FloatRegister d, Register b) { emit_int32( LFSX_OPCODE | frt(d) | rb(b)); }
|
||||
inline void Assembler::lfd( FloatRegister d, int si16) { emit_int32( LFD_OPCODE | frt(d) | simm(si16,16)); }
|
||||
inline void Assembler::lfdx(FloatRegister d, Register b) { emit_int32( LFDX_OPCODE | frt(d) | rb(b)); }
|
||||
|
||||
// ra0 version
|
||||
inline void Assembler::stfs( FloatRegister s, int si16) { emit_int32( STFS_OPCODE | frs(s) | simm(si16, 16)); }
|
||||
inline void Assembler::stfsx(FloatRegister s, Register b) { emit_int32( STFSX_OPCODE | frs(s) | rb(b)); }
|
||||
inline void Assembler::stfd( FloatRegister s, int si16) { emit_int32( STFD_OPCODE | frs(s) | simm(si16, 16)); }
|
||||
inline void Assembler::stfdx(FloatRegister s, Register b) { emit_int32( STFDX_OPCODE | frs(s) | rb(b)); }
|
||||
|
||||
// ra0 version
|
||||
inline void Assembler::lvebx( VectorRegister d, Register s2) { emit_int32( LVEBX_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::lvehx( VectorRegister d, Register s2) { emit_int32( LVEHX_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::lvewx( VectorRegister d, Register s2) { emit_int32( LVEWX_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::lvx( VectorRegister d, Register s2) { emit_int32( LVX_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::lvxl( VectorRegister d, Register s2) { emit_int32( LVXL_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::stvebx(VectorRegister d, Register s2) { emit_int32( STVEBX_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::stvehx(VectorRegister d, Register s2) { emit_int32( STVEHX_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::stvewx(VectorRegister d, Register s2) { emit_int32( STVEWX_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::stvx( VectorRegister d, Register s2) { emit_int32( STVX_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::stvxl( VectorRegister d, Register s2) { emit_int32( STVXL_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::lvsl( VectorRegister d, Register s2) { emit_int32( LVSL_OPCODE | vrt(d) | rb(s2)); }
|
||||
inline void Assembler::lvsr( VectorRegister d, Register s2) { emit_int32( LVSR_OPCODE | vrt(d) | rb(s2)); }
|
||||
|
||||
|
||||
inline void Assembler::load_const(Register d, void* x, Register tmp) {
|
||||
load_const(d, (long)x, tmp);
|
||||
}
|
||||
|
||||
// Load a 64 bit constant encoded by a `Label'. This works for bound
|
||||
// labels as well as unbound ones. For unbound labels, the code will
|
||||
// be patched as soon as the label gets bound.
|
||||
inline void Assembler::load_const(Register d, Label& L, Register tmp) {
|
||||
load_const(d, target(L), tmp);
|
||||
}
|
||||
|
||||
// Load a 64 bit constant encoded by an AddressLiteral. patchable.
|
||||
inline void Assembler::load_const(Register d, AddressLiteral& a, Register tmp) {
|
||||
assert(d != R0, "R0 not allowed");
|
||||
// First relocate (we don't change the offset in the RelocationHolder,
|
||||
// just pass a.rspec()), then delegate to load_const(Register, long).
|
||||
relocate(a.rspec());
|
||||
load_const(d, (long)a.value(), tmp);
|
||||
}
|
||||
|
||||
|
||||
#endif // CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP
|
105
hotspot/src/cpu/ppc/vm/bytecodeInterpreter_ppc.hpp
Normal file
105
hotspot/src/cpu/ppc/vm/bytecodeInterpreter_ppc.hpp
Normal file
@ -0,0 +1,105 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP
|
||||
#define CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP
|
||||
|
||||
// Platform specific for C++ based Interpreter
|
||||
#define LOTS_OF_REGS /* Lets interpreter use plenty of registers */
|
||||
|
||||
private:
|
||||
|
||||
// Save the bottom of the stack after frame manager setup. For ease of restoration after return
|
||||
// from recursive interpreter call.
|
||||
intptr_t* _frame_bottom; // Saved bottom of frame manager frame.
|
||||
address _last_Java_pc; // Pc to return to in frame manager.
|
||||
intptr_t* _last_Java_fp; // frame pointer
|
||||
intptr_t* _last_Java_sp; // stack pointer
|
||||
interpreterState _self_link; // Previous interpreter state // sometimes points to self???
|
||||
double _native_fresult; // Save result of native calls that might return floats.
|
||||
intptr_t _native_lresult; // Save result of native calls that might return handle/longs.
|
||||
|
||||
public:
|
||||
address last_Java_pc(void) { return _last_Java_pc; }
|
||||
intptr_t* last_Java_fp(void) { return _last_Java_fp; }
|
||||
|
||||
static ByteSize native_lresult_offset() {
|
||||
return byte_offset_of(BytecodeInterpreter, _native_lresult);
|
||||
}
|
||||
|
||||
static ByteSize native_fresult_offset() {
|
||||
return byte_offset_of(BytecodeInterpreter, _native_fresult);
|
||||
}
|
||||
|
||||
static void pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp);
|
||||
|
||||
#define SET_LAST_JAVA_FRAME() THREAD->frame_anchor()->set(istate->_last_Java_sp, istate->_last_Java_pc);
|
||||
#define RESET_LAST_JAVA_FRAME() THREAD->frame_anchor()->clear();
|
||||
|
||||
|
||||
// Macros for accessing the stack.
|
||||
#undef STACK_INT
|
||||
#undef STACK_FLOAT
|
||||
#undef STACK_ADDR
|
||||
#undef STACK_OBJECT
|
||||
#undef STACK_DOUBLE
|
||||
#undef STACK_LONG
|
||||
|
||||
// JavaStack Implementation
|
||||
#define STACK_SLOT(offset) ((address) &topOfStack[-(offset)])
|
||||
#define STACK_INT(offset) (*((jint*) &topOfStack[-(offset)]))
|
||||
#define STACK_FLOAT(offset) (*((jfloat *) &topOfStack[-(offset)]))
|
||||
#define STACK_OBJECT(offset) (*((oop *) &topOfStack [-(offset)]))
|
||||
#define STACK_DOUBLE(offset) (((VMJavaVal64*) &topOfStack[-(offset)])->d)
|
||||
#define STACK_LONG(offset) (((VMJavaVal64 *) &topOfStack[-(offset)])->l)
|
||||
|
||||
#define SET_STACK_SLOT(value, offset) (*(intptr_t*)&topOfStack[-(offset)] = *(intptr_t*)(value))
|
||||
#define SET_STACK_ADDR(value, offset) (*((address *)&topOfStack[-(offset)]) = (value))
|
||||
#define SET_STACK_INT(value, offset) (*((jint *)&topOfStack[-(offset)]) = (value))
|
||||
#define SET_STACK_FLOAT(value, offset) (*((jfloat *)&topOfStack[-(offset)]) = (value))
|
||||
#define SET_STACK_OBJECT(value, offset) (*((oop *)&topOfStack[-(offset)]) = (value))
|
||||
#define SET_STACK_DOUBLE(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = (value))
|
||||
#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = \
|
||||
((VMJavaVal64*)(addr))->d)
|
||||
#define SET_STACK_LONG(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->l = (value))
|
||||
#define SET_STACK_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->l = \
|
||||
((VMJavaVal64*)(addr))->l)
|
||||
// JavaLocals implementation
|
||||
|
||||
#define LOCALS_SLOT(offset) ((intptr_t*)&locals[-(offset)])
|
||||
#define LOCALS_ADDR(offset) ((address)locals[-(offset)])
|
||||
#define LOCALS_INT(offset) (*(jint*)&(locals[-(offset)]))
|
||||
#define LOCALS_OBJECT(offset) ((oop)locals[-(offset)])
|
||||
#define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
|
||||
#define LOCALS_DOUBLE_AT(offset) (((address)&locals[-((offset) + 1)]))
|
||||
|
||||
#define SET_LOCALS_SLOT(value, offset) (*(intptr_t*)&locals[-(offset)] = *(intptr_t *)(value))
|
||||
#define SET_LOCALS_INT(value, offset) (*((jint *)&locals[-(offset)]) = (value))
|
||||
#define SET_LOCALS_DOUBLE(value, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = (value))
|
||||
#define SET_LOCALS_LONG(value, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = (value))
|
||||
#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = \
|
||||
|
||||
|
||||
#endif // CPU_PPC_VM_BYTECODEINTERPRETER_PPC_PP
|
290
hotspot/src/cpu/ppc/vm/bytecodeInterpreter_ppc.inline.hpp
Normal file
290
hotspot/src/cpu/ppc/vm/bytecodeInterpreter_ppc.inline.hpp
Normal file
@ -0,0 +1,290 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP
#define CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP

#ifdef CC_INTERP

// Inline interpreter functions for ppc.

#include <math.h>

// Float arithmetic; semantics come directly from the C operators.
inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) { return op1 + op2; }
inline jfloat BytecodeInterpreter::VMfloatSub(jfloat op1, jfloat op2) { return op1 - op2; }
inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) { return op1 * op2; }
inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) { return op1 / op2; }
inline jfloat BytecodeInterpreter::VMfloatRem(jfloat op1, jfloat op2) { return (jfloat)fmod((double)op1, (double)op2); }
inline jfloat BytecodeInterpreter::VMfloatNeg(jfloat op)              { return -op; }

// Three-way float compare. When the operands are unordered (NaN), returns
// 'direction' if it is +/-1, otherwise 0.
inline int32_t BytecodeInterpreter::VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) {
  return ( op1 <  op2 ? -1 :
           op1 >  op2 ?  1 :
           op1 == op2 ?  0 :
           (direction == -1 || direction == 1) ? direction : 0);
}

// Copy a 64-bit value as two 32-bit halves.
inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2], const uint32_t from[2]) {
  to[0] = from[0]; to[1] = from[1];
}

// The long operations depend on compiler support for "long long" on ppc.

inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) { return op1 + op2; }
inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) { return op1 & op2; }

// min_jlong / -1 would overflow; return min_jlong (op1) for that case.
inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) {
  if (op1 == min_jlong && op2 == -1) return op1;
  return op1 / op2;
}

inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) { return op1 * op2; }
inline jlong BytecodeInterpreter::VMlongOr (jlong op1, jlong op2) { return op1 | op2; }
inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) { return op1 - op2; }
inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) { return op1 ^ op2; }

// min_jlong % -1 is defined to be 0 (the division special case above).
inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) {
  if (op1 == min_jlong && op2 == -1) return 0;
  return op1 % op2;
}

// 64-bit shifts mask the count to 6 bits.
inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) { return ((uint64_t) op1) >> (op2 & 0x3F); }
inline jlong BytecodeInterpreter::VMlongShr (jlong op1, jint op2) { return op1 >> (op2 & 0x3F); }
inline jlong BytecodeInterpreter::VMlongShl (jlong op1, jint op2) { return op1 << (op2 & 0x3F); }
inline jlong BytecodeInterpreter::VMlongNeg (jlong op)            { return -op; }
inline jlong BytecodeInterpreter::VMlongNot (jlong op)            { return ~op; }

// NOTE(review): Ltz/Gez test <= 0 and >= 0 respectively (not strict
// comparisons); this mirrors the other platform ports -- confirm against the
// interpreter's call sites before changing.
inline int32_t BytecodeInterpreter::VMlongLtz(jlong op) { return (op <= 0); }
inline int32_t BytecodeInterpreter::VMlongGez(jlong op) { return (op >= 0); }
inline int32_t BytecodeInterpreter::VMlongEqz(jlong op) { return (op == 0); }

inline int32_t BytecodeInterpreter::VMlongEq(jlong op1, jlong op2) { return (op1 == op2); }
inline int32_t BytecodeInterpreter::VMlongNe(jlong op1, jlong op2) { return (op1 != op2); }
inline int32_t BytecodeInterpreter::VMlongGe(jlong op1, jlong op2) { return (op1 >= op2); }
inline int32_t BytecodeInterpreter::VMlongLe(jlong op1, jlong op2) { return (op1 <= op2); }
inline int32_t BytecodeInterpreter::VMlongLt(jlong op1, jlong op2) { return (op1 <  op2); }
inline int32_t BytecodeInterpreter::VMlongGt(jlong op1, jlong op2) { return (op1 >  op2); }

inline int32_t BytecodeInterpreter::VMlongCompare(jlong op1, jlong op2) {
  return (VMlongLt(op1, op2) ? -1 : VMlongGt(op1, op2) ? 1 : 0);
}

// Long conversions

inline jdouble BytecodeInterpreter::VMlong2Double(jlong val) { return (jdouble) val; }
inline jfloat  BytecodeInterpreter::VMlong2Float (jlong val) { return (jfloat)  val; }
inline jint    BytecodeInterpreter::VMlong2Int   (jlong val) { return (jint)    val; }

// Double Arithmetic

inline jdouble BytecodeInterpreter::VMdoubleAdd(jdouble op1, jdouble op2) { return op1 + op2; }
inline jdouble BytecodeInterpreter::VMdoubleDiv(jdouble op1, jdouble op2) { return op1 / op2; }
inline jdouble BytecodeInterpreter::VMdoubleMul(jdouble op1, jdouble op2) { return op1 * op2; }
inline jdouble BytecodeInterpreter::VMdoubleNeg(jdouble op)               { return -op; }
inline jdouble BytecodeInterpreter::VMdoubleRem(jdouble op1, jdouble op2) { return fmod(op1, op2); }
inline jdouble BytecodeInterpreter::VMdoubleSub(jdouble op1, jdouble op2) { return op1 - op2; }

// Three-way double compare; 'direction' handles unordered (NaN) operands,
// as in VMfloatCompare above.
inline int32_t BytecodeInterpreter::VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction) {
  return ( op1 <  op2 ? -1 :
           op1 >  op2 ?  1 :
           op1 == op2 ?  0 :
           (direction == -1 || direction == 1) ? direction : 0);
}

// Double Conversions

inline jfloat BytecodeInterpreter::VMdouble2Float(jdouble val) { return (jfloat) val; }

// Float Conversions

inline jdouble BytecodeInterpreter::VMfloat2Double(jfloat op) { return (jdouble) op; }

// Integer Arithmetic

inline jint BytecodeInterpreter::VMintAdd(jint op1, jint op2) { return op1 + op2; }
inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) { return op1 & op2; }

inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
  /* it's possible we could catch this special case implicitly */
  if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
  else return op1 / op2;
}

inline jint BytecodeInterpreter::VMintMul(jint op1, jint op2) { return op1 * op2; }
inline jint BytecodeInterpreter::VMintNeg(jint op)            { return -op; }
inline jint BytecodeInterpreter::VMintOr (jint op1, jint op2) { return op1 | op2; }

inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
  /* it's possible we could catch this special case implicitly */
  if ((juint)op1 == 0x80000000 && op2 == -1) return 0;
  else return op1 % op2;
}

// 32-bit shifts mask the count to 5 bits.
inline jint  BytecodeInterpreter::VMintShl (jint op1, jint op2) { return op1 << (op2 & 0x1f); }
inline jint  BytecodeInterpreter::VMintShr (jint op1, jint op2) { return op1 >> (op2 & 0x1f); }
inline jint  BytecodeInterpreter::VMintSub (jint op1, jint op2) { return op1 - op2; }
inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) { return ((juint) op1) >> (op2 & 0x1f); }
inline jint  BytecodeInterpreter::VMintXor (jint op1, jint op2) { return op1 ^ op2; }

// Integer conversions; narrowing is a plain C cast (truncation).

inline jdouble BytecodeInterpreter::VMint2Double(jint val) { return (jdouble) val; }
inline jfloat  BytecodeInterpreter::VMint2Float (jint val) { return (jfloat)  val; }
inline jlong   BytecodeInterpreter::VMint2Long  (jint val) { return (jlong)   val; }
inline jchar   BytecodeInterpreter::VMint2Char  (jint val) { return (jchar)   val; }
inline jshort  BytecodeInterpreter::VMint2Short (jint val) { return (jshort)  val; }
inline jbyte   BytecodeInterpreter::VMint2Byte  (jint val) { return (jbyte)   val; }

#endif // CC_INTERP

#endif // CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP
|
31
hotspot/src/cpu/ppc/vm/bytecodes_ppc.cpp
Normal file
31
hotspot/src/cpu/ppc/vm/bytecodes_ppc.cpp
Normal file
@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "interpreter/bytecodes.hpp"
|
||||
|
||||
// Platform-specific hook invoked from Bytecodes::initialize().
// PPC defines no platform-specific bytecodes, so there is nothing to do.
void Bytecodes::pd_initialize() {
  // No ppc specific initialization.
}
|
31
hotspot/src/cpu/ppc/vm/bytecodes_ppc.hpp
Normal file
31
hotspot/src/cpu/ppc/vm/bytecodes_ppc.hpp
Normal file
@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_BYTECODES_PPC_HPP
|
||||
#define CPU_PPC_VM_BYTECODES_PPC_HPP
|
||||
|
||||
// No ppc64 specific bytecodes
|
||||
|
||||
#endif // CPU_PPC_VM_BYTECODES_PPC_HPP
|
156
hotspot/src/cpu/ppc/vm/bytes_ppc.hpp
Normal file
156
hotspot/src/cpu/ppc/vm/bytes_ppc.hpp
Normal file
@ -0,0 +1,156 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_BYTES_PPC_HPP
|
||||
#define CPU_PPC_VM_BYTES_PPC_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
class Bytes: AllStatic {
|
||||
public:
|
||||
// Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
|
||||
// PowerPC needs to check for alignment.
|
||||
|
||||
// can I count on address always being a pointer to an unsigned char? Yes
|
||||
|
||||
// Returns true, if the byte ordering used by Java is different from the nativ byte ordering
|
||||
// of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
|
||||
static inline bool is_Java_byte_ordering_different() { return false; }
|
||||
|
||||
// Thus, a swap between native and Java ordering is always a no-op:
|
||||
static inline u2 swap_u2(u2 x) { return x; }
|
||||
static inline u4 swap_u4(u4 x) { return x; }
|
||||
static inline u8 swap_u8(u8 x) { return x; }
|
||||
|
||||
static inline u2 get_native_u2(address p) {
|
||||
return (intptr_t(p) & 1) == 0
|
||||
? *(u2*)p
|
||||
: ( u2(p[0]) << 8 )
|
||||
| ( u2(p[1]) );
|
||||
}
|
||||
|
||||
static inline u4 get_native_u4(address p) {
|
||||
switch (intptr_t(p) & 3) {
|
||||
case 0: return *(u4*)p;
|
||||
|
||||
case 2: return ( u4( ((u2*)p)[0] ) << 16 )
|
||||
| ( u4( ((u2*)p)[1] ) );
|
||||
|
||||
default: return ( u4(p[0]) << 24 )
|
||||
| ( u4(p[1]) << 16 )
|
||||
| ( u4(p[2]) << 8 )
|
||||
| u4(p[3]);
|
||||
}
|
||||
}
|
||||
|
||||
static inline u8 get_native_u8(address p) {
|
||||
switch (intptr_t(p) & 7) {
|
||||
case 0: return *(u8*)p;
|
||||
|
||||
case 4: return ( u8( ((u4*)p)[0] ) << 32 )
|
||||
| ( u8( ((u4*)p)[1] ) );
|
||||
|
||||
case 2: return ( u8( ((u2*)p)[0] ) << 48 )
|
||||
| ( u8( ((u2*)p)[1] ) << 32 )
|
||||
| ( u8( ((u2*)p)[2] ) << 16 )
|
||||
| ( u8( ((u2*)p)[3] ) );
|
||||
|
||||
default: return ( u8(p[0]) << 56 )
|
||||
| ( u8(p[1]) << 48 )
|
||||
| ( u8(p[2]) << 40 )
|
||||
| ( u8(p[3]) << 32 )
|
||||
| ( u8(p[4]) << 24 )
|
||||
| ( u8(p[5]) << 16 )
|
||||
| ( u8(p[6]) << 8 )
|
||||
| u8(p[7]);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
static inline void put_native_u2(address p, u2 x) {
|
||||
if ( (intptr_t(p) & 1) == 0 ) { *(u2*)p = x; }
|
||||
else {
|
||||
p[0] = x >> 8;
|
||||
p[1] = x;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void put_native_u4(address p, u4 x) {
|
||||
switch ( intptr_t(p) & 3 ) {
|
||||
case 0: *(u4*)p = x;
|
||||
break;
|
||||
|
||||
case 2: ((u2*)p)[0] = x >> 16;
|
||||
((u2*)p)[1] = x;
|
||||
break;
|
||||
|
||||
default: ((u1*)p)[0] = x >> 24;
|
||||
((u1*)p)[1] = x >> 16;
|
||||
((u1*)p)[2] = x >> 8;
|
||||
((u1*)p)[3] = x;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void put_native_u8(address p, u8 x) {
|
||||
switch ( intptr_t(p) & 7 ) {
|
||||
case 0: *(u8*)p = x;
|
||||
break;
|
||||
|
||||
case 4: ((u4*)p)[0] = x >> 32;
|
||||
((u4*)p)[1] = x;
|
||||
break;
|
||||
|
||||
case 2: ((u2*)p)[0] = x >> 48;
|
||||
((u2*)p)[1] = x >> 32;
|
||||
((u2*)p)[2] = x >> 16;
|
||||
((u2*)p)[3] = x;
|
||||
break;
|
||||
|
||||
default: ((u1*)p)[0] = x >> 56;
|
||||
((u1*)p)[1] = x >> 48;
|
||||
((u1*)p)[2] = x >> 40;
|
||||
((u1*)p)[3] = x >> 32;
|
||||
((u1*)p)[4] = x >> 24;
|
||||
((u1*)p)[5] = x >> 16;
|
||||
((u1*)p)[6] = x >> 8;
|
||||
((u1*)p)[7] = x;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
|
||||
// (no byte-order reversal is needed since Power CPUs are big-endian oriented).
|
||||
static inline u2 get_Java_u2(address p) { return get_native_u2(p); }
|
||||
static inline u4 get_Java_u4(address p) { return get_native_u4(p); }
|
||||
static inline u8 get_Java_u8(address p) { return get_native_u8(p); }
|
||||
|
||||
static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, x); }
|
||||
static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, x); }
|
||||
static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, x); }
|
||||
};
|
||||
|
||||
#endif // CPU_PPC_VM_BYTES_PPC_HPP
|
35
hotspot/src/cpu/ppc/vm/codeBuffer_ppc.hpp
Normal file
35
hotspot/src/cpu/ppc/vm/codeBuffer_ppc.hpp
Normal file
@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_CODEBUFFER_PPC_HPP
|
||||
#define CPU_PPC_VM_CODEBUFFER_PPC_HPP
|
||||
|
||||
private:
|
||||
void pd_initialize() {}  // No platform-dependent CodeBuffer state to set up on PPC.
|
||||
|
||||
public:
|
||||
void flush_bundle(bool start_new_bundle) {}  // No-op: no instruction bundling on PPC.
|
||||
|
||||
#endif // CPU_PPC_VM_CODEBUFFER_PPC_HPP
|
261
hotspot/src/cpu/ppc/vm/compiledIC_ppc.cpp
Normal file
261
hotspot/src/cpu/ppc/vm/compiledIC_ppc.cpp
Normal file
@ -0,0 +1,261 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "code/compiledIC.hpp"
|
||||
#include "code/icBuffer.hpp"
|
||||
#include "code/nmethod.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#ifdef COMPILER2
|
||||
#include "opto/matcher.hpp"
|
||||
#endif
|
||||
|
||||
// Release the CompiledICHolder* associated with this call site if there is one.
|
||||
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    // The cached value is a CompiledICHolder; queue it with the
    // InlineCacheBuffer for (deferred) release.
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}
|
||||
|
||||
// Returns true if the call at this site currently targets an
// icholder entry (i.e. is in the megamorphic/transitional state).
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  return is_icholder_entry(call->destination());
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// High-level access to an inline cache. Guaranteed to be MT-safe.
|
||||
|
||||
// Construct a CompiledIC for the given call instruction inside nm.
// Determines from the relocation whether this is a normal virtual
// call (with a cached value) or an optimized (monomorphic) one.
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // Search for the ic_call at the given address.
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");
  if (iter.type() == relocInfo::virtual_call_type) {
    // Normal virtual call: remember the register-load of the cached value.
    virtual_call_Relocation* r = iter.virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    // Optimized virtual call: no cached value to track.
    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// A PPC CompiledStaticCall looks like this:
|
||||
//
|
||||
// >>>> consts
|
||||
//
|
||||
// [call target1]
|
||||
// [IC cache]
|
||||
// [call target2]
|
||||
//
|
||||
// <<<< consts
|
||||
// >>>> insts
|
||||
//
|
||||
// bl offset16 -+ -+ ??? // How many bits available?
|
||||
// | |
|
||||
// <<<< insts | |
|
||||
// >>>> stubs | |
|
||||
// | |- trampoline_stub_Reloc
|
||||
// trampoline stub: | <-+
|
||||
// r2 = toc |
|
||||
// r2 = [r2 + offset] | // Load call target1 from const section
|
||||
// mtctr r2 |
|
||||
// bctr |- static_stub_Reloc
|
||||
// comp_to_interp_stub: <---+
|
||||
// r1 = toc
|
||||
// ICreg = [r1 + IC_offset] // Load IC from const section
|
||||
// r1 = [r1 + offset] // Load call target2 from const section
|
||||
// mtctr r1
|
||||
// bctr
|
||||
//
|
||||
// <<<< stubs
|
||||
//
|
||||
// The call instruction in the code either
|
||||
// - branches directly to a compiled method if offset encodable in instruction
|
||||
// - branches to the trampoline stub if offset to compiled method not encodable
|
||||
// - branches to the compiled_to_interp stub if target interpreted
|
||||
//
|
||||
// Further there are three relocations from the loads to the constants in
|
||||
// the constant section.
|
||||
//
|
||||
// Usage of r1 and r2 in the stubs allows to distinguish them.
|
||||
|
||||
const int IC_pos_in_java_to_interp_stub = 8;
|
||||
#define __ _masm.
|
||||
// Emit the compiled-to-interpreter adapter stub for the static call
// whose mark was just set in cbuf (see the stub-layout diagram above).
// The IC load and call target are left as placeholders ((address)-1 /
// NULL) to be patched later by set_to_interpreted().
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
#ifdef COMPILER2
  // Get the mark within main instrs section which is set to the address of the call.
  address call_addr = cbuf.insts_mark();

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  // Start the stub.
  address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
  if (stub == NULL) {
    // Stub space exhausted: report bailout instead of crashing.
    Compile::current()->env()->record_out_of_memory_failure();
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  __ relocate(static_stub_Relocation::spec(call_addr));
  const int stub_start_offset = __ offset();

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()), ic, reg_scratch);

  if (ReoptimizeCallSequences) {
    // Patchable direct branch to the (future) interpreter entry.
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    // Indirect branch via CTR, target loaded from the constant pool.
    AddressLiteral a((address)-1);
    __ load_const_from_method_toc(reg_scratch, a, reg_scratch);
    __ mtctr(reg_scratch);
    __ bctr();
  }

  // FIXME: Assert that the stub can be identified and patched.

  // Java_to_interp_stub_size should be good.
  assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
         "should be good size");
  assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
         "must not confuse java_to_interp with trampoline stubs");

  // End the stub.
  __ end_a_stub();
#else
  ShouldNotReachHere();
#endif
}
|
||||
#undef __
|
||||
|
||||
// Size of java_to_interp stub, this doesn't need to be accurate but it must
|
||||
// be larger or equal to the real size of the stub.
|
||||
// Used for optimization in Compile::Shorten_branches.
|
||||
int CompiledStaticCall::to_interp_stub_size() {
  return 12 * BytesPerInstWord;  // Conservative upper bound: 12 instructions.
}
|
||||
|
||||
// Relocation entries for call stub, compiled java to interpreter.
|
||||
// Used for optimization in Compile::Shorten_branches.
|
||||
int CompiledStaticCall::reloc_to_interp_stub() {
  return 5;  // Upper bound on relocation entries emitted by emit_to_interp_stub().
}
|
||||
|
||||
// Patch this static call site to dispatch to the interpreter: fill in
// the callee Method* and interpreter entry in the java_to_interp stub,
// then redirect the call instruction to the stub.
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  guarantee(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(),
                  callee->name_and_sig_as_C_string());
  }

  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

  // The stub must either still be clean (0 / -1 sentinels from
  // emit_to_interp_stub) or already hold exactly these values.
  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
         "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
         "b) MT-unsafe modification of inline cache");

  // Update stub.
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call.
  set_destination_mt_safe(stub);
}
|
||||
|
||||
// Return the java_to_interp stub to its freshly-emitted state
// (Method* = 0, jump target = -1), matching emit_to_interp_stub().
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset stub.
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Non-product mode code
|
||||
#ifndef PRODUCT
|
||||
|
||||
// Sanity-check this call site and its java_to_interp stub.
void CompiledStaticCall::verify() {
  // Verify call (non-virtual qualified call on the NativeCall base).
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub.
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  // Creation also verifies the object (the locals are only needed for
  // that constructor-time verification).
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

  // Verify state.
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
|
||||
|
||||
#endif // !PRODUCT
|
167
hotspot/src/cpu/ppc/vm/copy_ppc.hpp
Normal file
167
hotspot/src/cpu/ppc/vm/copy_ppc.hpp
Normal file
@ -0,0 +1,167 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_COPY_PPC_HPP
|
||||
#define CPU_PPC_VM_COPY_PPC_HPP
|
||||
|
||||
#ifndef PPC64
|
||||
#error "copy currently only implemented for PPC64"
|
||||
#endif
|
||||
|
||||
// Inline functions for memory copy and fill.
|
||||
|
||||
// Copy 'count' heap words between possibly overlapping regions.
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
  (void)memmove(to, from, count * HeapWordSize);  // memmove handles overlap
}
|
||||
|
||||
// Copy 'count' heap words between non-overlapping regions. Counts up
// to 8 are unrolled via deliberate case fall-through; larger counts
// defer to memcpy.
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
  switch (count) {
  case 8:  to[7] = from[7];  // fall through
  case 7:  to[6] = from[6];  // fall through
  case 6:  to[5] = from[5];  // fall through
  case 5:  to[4] = from[4];  // fall through
  case 4:  to[3] = from[3];  // fall through
  case 3:  to[2] = from[2];  // fall through
  case 2:  to[1] = from[1];  // fall through
  case 1:  to[0] = from[0];  // fall through
  case 0:  break;
  default: (void)memcpy(to, from, count * HeapWordSize);
           break;
  }
}
|
||||
|
||||
// As pd_disjoint_words, but the large-count path copies word-by-word
// instead of calling memcpy — presumably to guarantee each heap word
// is transferred with a single (word-sized) access; confirm against
// the shared copy.hpp contract.
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
  switch (count) {
  case 8:  to[7] = from[7];  // fall through
  case 7:  to[6] = from[6];  // fall through
  case 6:  to[5] = from[5];  // fall through
  case 5:  to[4] = from[4];  // fall through
  case 4:  to[3] = from[3];  // fall through
  case 3:  to[2] = from[2];  // fall through
  case 2:  to[1] = from[1];  // fall through
  case 1:  to[0] = from[0];  // fall through
  case 0:  break;
  default: while (count-- > 0) {
             *to++ = *from++;
           }
           break;
  }
}
|
||||
|
||||
// Aligned variant: no special handling needed, memmove is overlap-safe.
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
  (void)memmove(to, from, count * HeapWordSize);
}
|
||||
|
||||
// Aligned variant: same as the unaligned disjoint copy.
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
  pd_disjoint_words(from, to, count);
}
|
||||
|
||||
// Copy 'count' bytes between possibly overlapping regions.
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
  // memmove is overlap-safe, which is all "conjoint" requires.
  memmove(to, from, count);
}
|
||||
|
||||
// Byte copies need no per-element atomicity handling: defer to memmove.
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
  (void)memmove(to, from, count);
}
|
||||
|
||||
// Template for atomic, element-wise copy.
|
||||
template <class T>
|
||||
static void copy_conjoint_atomic(T* from, T* to, size_t count) {
|
||||
if (from > to) {
|
||||
while (count-- > 0) {
|
||||
// Copy forwards
|
||||
*to++ = *from++;
|
||||
}
|
||||
} else {
|
||||
from += count - 1;
|
||||
to += count - 1;
|
||||
while (count-- > 0) {
|
||||
// Copy backwards
|
||||
*to-- = *from--;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Overlap-safe copy; each jshort moves with one 16-bit load/store.
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
  copy_conjoint_atomic<jshort>(from, to, count);
}
|
||||
|
||||
// Overlap-safe copy; each jint moves with one 32-bit load/store.
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
  copy_conjoint_atomic<jint>(from, to, count);
}
|
||||
|
||||
// Overlap-safe copy; each jlong moves with one 64-bit load/store (PPC64).
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
  copy_conjoint_atomic<jlong>(from, to, count);
}
|
||||
|
||||
// Overlap-safe copy; each oop moves with a single pointer-sized access.
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
  copy_conjoint_atomic<oop>(from, to, count);
}
|
||||
|
||||
// Array-copy entry point: forwards to the plain atomic byte copy.
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_bytes_atomic(from, to, count);
}
|
||||
|
||||
// Array-copy entry point for jshort arrays.
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count);
}
|
||||
|
||||
// Array-copy entry point for jint arrays.
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
}
|
||||
|
||||
// Array-copy entry point for jlong arrays.
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
}
|
||||
|
||||
// Array-copy entry point for oop arrays.
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
  pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
}
|
||||
|
||||
// Fill 'count' heap words with 'value'. The 32-bit value is replicated
// into both halves of a 64-bit pattern so each (64-bit) heap word is
// written with a single store.
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
  julong* to = (julong*)tohw;
  julong v = ((julong)value << 32) | value;
  while (count-- > 0) {
    *to++ = v;
  }
}
|
||||
|
||||
// Aligned variant: the generic word fill already assumes word alignment.
static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
  pd_fill_to_words(tohw, count, value);
}
|
||||
|
||||
// Fill 'count' bytes at 'to' with 'value'.
static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
  (void)memset(to, value, count);
}
|
||||
|
||||
// Zero 'count' heap words.
static void pd_zero_to_words(HeapWord* tohw, size_t count) {
  pd_fill_to_words(tohw, count, 0);
}
|
||||
|
||||
// Zero 'count' bytes starting at 'to'.
static void pd_zero_to_bytes(void* to, size_t count) {
  memset(to, 0, count);
}
|
||||
|
||||
#endif // CPU_PPC_VM_COPY_PPC_HPP
|
43
hotspot/src/cpu/ppc/vm/cppInterpreterGenerator_ppc.hpp
Normal file
43
hotspot/src/cpu/ppc/vm/cppInterpreterGenerator_ppc.hpp
Normal file
@ -0,0 +1,43 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
|
||||
#define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
|
||||
|
||||
address generate_normal_entry(void);
|
||||
address generate_native_entry(void);
|
||||
|
||||
void lock_method(void);
|
||||
void unlock_method(void);
|
||||
|
||||
void generate_counter_incr(Label& overflow);
|
||||
void generate_counter_overflow(Label& do_continue);
|
||||
|
||||
void generate_more_monitors();
|
||||
void generate_deopt_handling(Register result_index);
|
||||
|
||||
void generate_compute_interpreter_state(Label& exception_return);
|
||||
|
||||
#endif // CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
|
3044
hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp
Normal file
3044
hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp
Normal file
File diff suppressed because it is too large
Load Diff
39
hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.hpp
Normal file
39
hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.hpp
Normal file
@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
|
||||
#define CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
|
||||
|
||||
protected:
|
||||
|
||||
// Size of interpreter code. Increase if too small. Interpreter will
|
||||
// fail with a guarantee ("not enough space for interpreter generation")
|
||||
// if too small.
|
||||
// Run with +PrintInterpreter to get the VM to print out the size.
|
||||
// Max size with JVMTI
|
||||
|
||||
const static int InterpreterCodeSize = 12*K;
|
||||
|
||||
#endif // CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
|
35
hotspot/src/cpu/ppc/vm/debug_ppc.cpp
Normal file
35
hotspot/src/cpu/ppc/vm/debug_ppc.cpp
Normal file
@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "code/nmethod.hpp"
|
||||
#include "runtime/frame.hpp"
|
||||
#include "runtime/init.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/top.hpp"
|
||||
|
||||
void pd_ps(frame f) {}  // Platform hook for the debug 'ps' command; no extra output on PPC.
|
31
hotspot/src/cpu/ppc/vm/depChecker_ppc.hpp
Normal file
31
hotspot/src/cpu/ppc/vm/depChecker_ppc.hpp
Normal file
@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_DEPCHECKER_PPC_HPP
|
||||
#define CPU_PPC_VM_DEPCHECKER_PPC_HPP
|
||||
|
||||
// Nothing to do on ppc64
|
||||
|
||||
#endif // CPU_PPC_VM_DEPCHECKER_PPC_HPP
|
37
hotspot/src/cpu/ppc/vm/disassembler_ppc.hpp
Normal file
37
hotspot/src/cpu/ppc/vm/disassembler_ppc.hpp
Normal file
@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_DISASSEMBLER_PPC_HPP
|
||||
#define CPU_PPC_VM_DISASSEMBLER_PPC_HPP
|
||||
|
||||
// Every PPC instruction is one 32-bit word, so disassembly may begin
// at any 4-byte boundary.
static int pd_instruction_alignment() {
  const int instruction_bytes = sizeof(int);
  return instruction_bytes;
}
|
||||
|
||||
// Architecture option string handed to the disassembler library.
static const char* pd_cpu_opts() {
  static const char* const cpu_opts = "ppc64";
  return cpu_opts;
}
|
||||
|
||||
#endif // CPU_PPC_VM_DISASSEMBLER_PPC_HPP
|
306
hotspot/src/cpu/ppc/vm/frame_ppc.cpp
Normal file
306
hotspot/src/cpu/ppc/vm/frame_ppc.cpp
Normal file
@ -0,0 +1,306 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/markOop.hpp"
|
||||
#include "oops/method.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/javaCalls.hpp"
|
||||
#include "runtime/monitorChunk.hpp"
|
||||
#include "runtime/signature.hpp"
|
||||
#include "runtime/stubCodeGenerator.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "vmreg_ppc.inline.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_Runtime1.hpp"
|
||||
#include "runtime/vframeArray.hpp"
|
||||
#endif
|
||||
|
||||
#ifndef CC_INTERP
|
||||
#error "CC_INTERP must be defined on PPC64"
|
||||
#endif
|
||||
|
||||
#ifdef ASSERT
|
||||
void RegisterMap::check_location_valid() {
  // Nothing to validate on PPC.
}
|
||||
#endif // ASSERT
|
||||
|
||||
// Minimal sanity check before walking to the sender: SP and FP must be
// non-NULL and lie within this thread's stack bounds. No further
// well-formedness of the frame itself is verified here.
bool frame::safe_for_sender(JavaThread *thread) {
  bool safe = false;
  address cursp = (address)sp();
  address curfp = (address)fp();
  if ((cursp != NULL && curfp != NULL &&
      (cursp <= thread->stack_base() && cursp >= thread->stack_base() - thread->stack_size())) &&
      (curfp <= thread->stack_base() && curfp >= thread->stack_base() - thread->stack_size())) {
    safe = true;
  }
  return safe;
}
|
||||
|
||||
// A frame is interpreted iff its PC lies in the interpreter's code range.
bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}
|
||||
|
||||
frame frame::sender_for_entry_frame(RegisterMap *map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender.
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");

  if (jfa->last_Java_pc() != NULL) {
    // The anchor recorded the PC: construct the sender from SP and PC.
    frame fr(jfa->last_Java_sp(), jfa->last_Java_pc());
    return fr;
  }
  // Last_java_pc is not set if we come here from compiled code. The
  // constructor retrieves the PC from the stack.
  frame fr(jfa->last_Java_sp());
  return fr;
}
|
||||
|
||||
// Sender of an interpreter frame: read the caller's original SP out of
// the parent frame's ABI area.
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
  // Pass callers initial_caller_sp as unextended_sp.
  return frame(sender_sp(), sender_pc(), (intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp);
}
|
||||
|
||||
frame frame::sender_for_compiled_frame(RegisterMap *map) const {
  assert(map != NULL, "map must be set");

  // Frame owned by compiler.
  address pc = *compiled_sender_pc_addr(_cb);
  frame caller(compiled_sender_sp(_cb), pc);

  // Now adjust the map.

  // Get the rest.
  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
    if (_cb->oop_maps() != NULL) {
      // Record where this code blob saved registers, for later oop lookup.
      OopMapSet::update_register_map(this, map);
    }
  }

  return caller;
}
|
||||
|
||||
// The code blob is not needed to locate the sender sp on PPC64; it
// is always reachable through the backlink.
intptr_t* frame::compiled_sender_sp(CodeBlob* cb) const {
  intptr_t* caller_sp = sender_sp();
  return caller_sp;
}
|
||||
|
||||
// The code blob is not needed to locate the sender pc slot on PPC64;
// it is always the LR save slot in the caller's ABI area.
address* frame::compiled_sender_pc_addr(CodeBlob* cb) const {
  address* caller_pc_slot = sender_pc_addr();
  return caller_pc_slot;
}
|
||||
|
||||
// Compute this frame's sender, dispatching on the frame kind.
frame frame::sender(RegisterMap* map) const {
  // Start out assuming no argument oops; the sender_for_xxx routines
  // update the flag as required.
  map->set_include_argument_oops(false);

  if (is_entry_frame()) {
    return sender_for_entry_frame(map);
  }
  if (is_interpreted_frame()) {
    return sender_for_interpreter_frame(map);
  }

  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");
  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }

  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), sender_pc());
}
|
||||
|
||||
// Patch the return pc of this frame, i.e. the pc the sender will
// continue at. On PPC64 this lives in the LR save slot of this
// frame's own ABI area (not at sp[-1] as on x86). Also refreshes the
// frame's code blob and deoptimization state.
void frame::patch_pc(Thread* thread, address pc) {
  if (TracePcPatching) {
    // Report the location that is actually patched: the LR save slot
    // in the own abi. (The previous trace printed sp[-1], which is
    // the x86 location and does not match what is written below.)
    tty->print_cr("patch_pc at address " PTR_FORMAT " [" PTR_FORMAT " -> " PTR_FORMAT "]",
                  &(own_abi()->lr), (address)own_abi()->lr, pc);
  }
  own_abi()->lr = (uint64_t)pc;
  _cb = CodeCache::find_blob(pc);
  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
    // Patching over a deopt pc: keep _pc (the original pc) and mark
    // the frame deoptimized.
    address orig = (((nmethod*)_cb)->get_original_pc(this));
    assert(orig == _pc, "expected original to be stored before patching");
    _deopt_state = is_deoptimized;
    // Leave _pc as is.
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
|
||||
|
||||
void frame::pd_gc_epilog() {
|
||||
if (is_interpreted_frame()) {
|
||||
// Set constant pool cache entry for interpreter.
|
||||
Method* m = interpreter_frame_method();
|
||||
|
||||
*interpreter_frame_cpoolcache_addr() = m->constants()->cache();
|
||||
}
|
||||
}
|
||||
|
||||
// No platform-specific validity checks are performed on PPC64: every
// interpreted frame is reported as valid.
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  return true;
}
|
||||
|
||||
// Extract the call result of an interpreted method from this frame:
// object results go to *oop_result, primitive results to the
// matching member of *value_result. Returns the method's result type.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

#ifdef CC_INTERP
  if (method->is_native()) {
    // Prior to calling into the runtime to notify the method exit the possible
    // result value is saved into the interpreter frame.
    interpreterState istate = get_interpreterState();
    address lresult = (address)istate + in_bytes(BytecodeInterpreter::native_lresult_offset());
    address fresult = (address)istate + in_bytes(BytecodeInterpreter::native_fresult_offset());

    switch (method->result_type()) {
      case T_OBJECT:
      case T_ARRAY: {
        oop* obj_p = *(oop**)lresult;
        oop obj = (obj_p == NULL) ? NULL : *obj_p;
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
        break;
      }
      // We use std/stfd to store the values.
      case T_BOOLEAN : value_result->z = (jboolean) *(unsigned long*)lresult; break;
      case T_INT     : value_result->i = (jint)     *(long*)lresult;          break;
      case T_CHAR    : value_result->c = (jchar)    *(unsigned long*)lresult; break;
      case T_SHORT   : value_result->s = (jshort)   *(long*)lresult;          break;
      // Fixed: a byte result belongs in jvalue's 'b' member, not 'z'
      // (which is the jboolean member).
      case T_BYTE    : value_result->b = (jbyte)    *(long*)lresult;          break;
      case T_LONG    : value_result->j = (jlong)    *(long*)lresult;          break;
      case T_FLOAT   : value_result->f = (jfloat)   *(double*)fresult;        break;
      case T_DOUBLE  : value_result->d = (jdouble)  *(double*)fresult;        break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  } else {
    // Non-native method: the result sits on top of the expression stack.
    intptr_t* tos_addr = interpreter_frame_tos_address();
    switch (method->result_type()) {
      case T_OBJECT:
      case T_ARRAY: {
        oop obj = *(oop*)tos_addr;
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
        break; // Fixed: was falling through into the T_BOOLEAN case,
               // clobbering value_result->z for object results.
      }
      case T_BOOLEAN : value_result->z = (jboolean) *(jint*)tos_addr; break;
      case T_BYTE    : value_result->b = (jbyte)    *(jint*)tos_addr; break;
      case T_CHAR    : value_result->c = (jchar)    *(jint*)tos_addr; break;
      case T_SHORT   : value_result->s = (jshort)   *(jint*)tos_addr; break;
      case T_INT     : value_result->i = *(jint*)tos_addr;            break;
      case T_LONG    : value_result->j = *(jlong*)tos_addr;           break;
      case T_FLOAT   : value_result->f = *(jfloat*)tos_addr;          break;
      case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr;         break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  }
#else
  Unimplemented();
#endif
  return type;
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
// Print platform-dependent frame details into 'values' for the
// debugging frame describer. For interpreted frames this dumps the
// individual fields of the C++ interpreter state object.
void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
#ifdef CC_INTERP
    interpreterState istate = get_interpreterState();
    values.describe(frame_no, (intptr_t*)istate, "istate");
    values.describe(frame_no, (intptr_t*)&(istate->_thread), " thread");
    values.describe(frame_no, (intptr_t*)&(istate->_bcp), " bcp");
    values.describe(frame_no, (intptr_t*)&(istate->_locals), " locals");
    values.describe(frame_no, (intptr_t*)&(istate->_constants), " constants");
    values.describe(frame_no, (intptr_t*)&(istate->_method), err_msg(" method = %s", istate->_method->name_and_sig_as_C_string()));
    values.describe(frame_no, (intptr_t*)&(istate->_mdx), " mdx");
    values.describe(frame_no, (intptr_t*)&(istate->_stack), " stack");
    values.describe(frame_no, (intptr_t*)&(istate->_msg), err_msg(" msg = %s", BytecodeInterpreter::C_msg(istate->_msg)));
    values.describe(frame_no, (intptr_t*)&(istate->_result), " result");
    values.describe(frame_no, (intptr_t*)&(istate->_prev_link), " prev_link");
    values.describe(frame_no, (intptr_t*)&(istate->_oop_temp), " oop_temp");
    values.describe(frame_no, (intptr_t*)&(istate->_stack_base), " stack_base");
    values.describe(frame_no, (intptr_t*)&(istate->_stack_limit), " stack_limit");
    values.describe(frame_no, (intptr_t*)&(istate->_monitor_base), " monitor_base");
    values.describe(frame_no, (intptr_t*)&(istate->_frame_bottom), " frame_bottom");
    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_pc), " last_Java_pc");
    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_fp), " last_Java_fp");
    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_sp), " last_Java_sp");
    values.describe(frame_no, (intptr_t*)&(istate->_self_link), " self_link");
    values.describe(frame_no, (intptr_t*)&(istate->_native_fresult), " native_fresult");
    values.describe(frame_no, (intptr_t*)&(istate->_native_lresult), " native_lresult");
#else
    Unimplemented();
#endif
  }
}
|
||||
#endif
|
||||
|
||||
void frame::adjust_unextended_sp() {
  // If we are returning to a compiled MethodHandle call site, the
  // saved_fp will in fact be a saved value of the unextended SP. The
  // simplest way to tell whether we are returning to such a call site
  // is as follows:
  // NOTE(review): the whole check is currently disabled ('&& false')
  // until MethodHandle call sites are supported by the PPC port, so
  // this function is effectively a no-op at the moment.

  if (is_compiled_frame() && false /*is_at_mh_callsite()*/) {  // TODO PPC port
    // If the sender PC is a deoptimization point, get the original
    // PC. For MethodHandle call site the unextended_sp is stored in
    // saved_fp.
    _unextended_sp = _fp - _cb->frame_size();

#ifdef ASSERT
    nmethod *sender_nm = _cb->as_nmethod_or_null();
    assert(sender_nm && *_sp == *_unextended_sp, "backlink changed");

    intptr_t* sp = _unextended_sp;  // check if stack can be walked from here
    for (int x = 0; x < 5; ++x) {   // check up to a couple of backlinks
      intptr_t* prev_sp = *(intptr_t**)sp;
      if (prev_sp == 0) break;      // end of stack
      // Backlinks must point towards higher addresses (stack grows down).
      assert(prev_sp>sp, "broken stack");
      sp = prev_sp;
    }

    if (sender_nm->is_deopt_mh_entry(_pc)) {  // checks for deoptimization
      address original_pc = sender_nm->get_original_pc(this);
      assert(sender_nm->insts_contains(original_pc), "original PC must be in nmethod");
      assert(sender_nm->is_method_handle_return(original_pc), "must be");
    }
#endif
  }
}
|
||||
|
||||
// Unused on PPC64; returning fp() merely minimizes the changes
// introduced by 7087445.
intptr_t *frame::initial_deoptimization_info() {
  intptr_t* info = fp();
  return info;
}
|
449
hotspot/src/cpu/ppc/vm/frame_ppc.hpp
Normal file
449
hotspot/src/cpu/ppc/vm/frame_ppc.hpp
Normal file
@ -0,0 +1,449 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_FRAME_PPC_HPP
|
||||
#define CPU_PPC_VM_FRAME_PPC_HPP
|
||||
|
||||
#include "runtime/synchronizer.hpp"
|
||||
#include "utilities/top.hpp"
|
||||
|
||||
#ifndef CC_INTERP
|
||||
#error "CC_INTERP must be defined on PPC64"
|
||||
#endif
|
||||
|
||||
// C frame layout on PPC-64.
|
||||
//
|
||||
// In this figure the stack grows upwards, while memory grows
|
||||
// downwards. See "64-bit PowerPC ELF ABI Supplement Version 1.7",
|
||||
// IBM Corp. (2003-10-29)
|
||||
// (http://math-atlas.sourceforge.net/devel/assembly/PPC-elf64abi-1.7.pdf).
|
||||
//
|
||||
// Square brackets denote stack regions possibly larger
|
||||
// than a single 64 bit slot.
|
||||
//
|
||||
// STACK:
|
||||
// 0 [C_FRAME] <-- SP after prolog (mod 16 = 0)
|
||||
// [C_FRAME] <-- SP before prolog
|
||||
// ...
|
||||
// [C_FRAME]
|
||||
//
|
||||
// C_FRAME:
|
||||
// 0 [ABI_112]
|
||||
// 112 CARG_9: outgoing arg 9 (arg_1 ... arg_8 via gpr_3 ... gpr_{10})
|
||||
// ...
|
||||
// 40+M*8 CARG_M: outgoing arg M (M is the maximum of outgoing args taken over all call sites in the procedure)
|
||||
// local 1
|
||||
// ...
|
||||
// local N
|
||||
// spill slot for vector reg (16 bytes aligned)
|
||||
// ...
|
||||
// spill slot for vector reg
|
||||
// alignment (4 or 12 bytes)
|
||||
// V SR_VRSAVE
|
||||
// V+4 spill slot for GR
|
||||
// ... ...
|
||||
// spill slot for GR
|
||||
// spill slot for FR
|
||||
// ...
|
||||
// spill slot for FR
|
||||
//
|
||||
// ABI_48:
|
||||
// 0 caller's SP
|
||||
// 8 space for condition register (CR) for next call
|
||||
// 16 space for link register (LR) for next call
|
||||
// 24 reserved
|
||||
// 32 reserved
|
||||
// 40 space for TOC (=R2) register for next call
|
||||
//
|
||||
// ABI_112:
|
||||
// 0 [ABI_48]
|
||||
// 48 CARG_1: spill slot for outgoing arg 1. used by next callee.
|
||||
// ... ...
|
||||
// 104 CARG_8: spill slot for outgoing arg 8. used by next callee.
|
||||
//
|
||||
|
||||
public:
|
||||
|
||||
// C frame layout
|
||||
|
||||
enum {
|
||||
// stack alignment
|
||||
alignment_in_bytes = 16,
|
||||
// log_2(16*8 bits) = 7.
|
||||
log_2_of_alignment_in_bits = 7
|
||||
};
|
||||
|
||||
// ABI_48:
|
||||
struct abi_48 {
|
||||
uint64_t callers_sp;
|
||||
uint64_t cr; //_16
|
||||
uint64_t lr;
|
||||
uint64_t reserved1; //_16
|
||||
uint64_t reserved2;
|
||||
uint64_t toc; //_16
|
||||
// nothing to add here!
|
||||
// aligned to frame::alignment_in_bytes (16)
|
||||
};
|
||||
|
||||
enum {
|
||||
abi_48_size = sizeof(abi_48)
|
||||
};
|
||||
|
||||
struct abi_112 : abi_48 {
|
||||
uint64_t carg_1;
|
||||
uint64_t carg_2; //_16
|
||||
uint64_t carg_3;
|
||||
uint64_t carg_4; //_16
|
||||
uint64_t carg_5;
|
||||
uint64_t carg_6; //_16
|
||||
uint64_t carg_7;
|
||||
uint64_t carg_8; //_16
|
||||
// aligned to frame::alignment_in_bytes (16)
|
||||
};
|
||||
|
||||
enum {
|
||||
abi_112_size = sizeof(abi_112)
|
||||
};
|
||||
|
||||
#define _abi(_component) \
|
||||
(offset_of(frame::abi_112, _component))
|
||||
|
||||
struct abi_112_spill : abi_112 {
|
||||
// additional spill slots
|
||||
uint64_t spill_ret;
|
||||
uint64_t spill_fret; //_16
|
||||
// aligned to frame::alignment_in_bytes (16)
|
||||
};
|
||||
|
||||
enum {
|
||||
abi_112_spill_size = sizeof(abi_112_spill)
|
||||
};
|
||||
|
||||
#define _abi_112_spill(_component) \
|
||||
(offset_of(frame::abi_112_spill, _component))
|
||||
|
||||
// non-volatile GPRs:
|
||||
|
||||
struct spill_nonvolatiles {
|
||||
uint64_t r14;
|
||||
uint64_t r15; //_16
|
||||
uint64_t r16;
|
||||
uint64_t r17; //_16
|
||||
uint64_t r18;
|
||||
uint64_t r19; //_16
|
||||
uint64_t r20;
|
||||
uint64_t r21; //_16
|
||||
uint64_t r22;
|
||||
uint64_t r23; //_16
|
||||
uint64_t r24;
|
||||
uint64_t r25; //_16
|
||||
uint64_t r26;
|
||||
uint64_t r27; //_16
|
||||
uint64_t r28;
|
||||
uint64_t r29; //_16
|
||||
uint64_t r30;
|
||||
uint64_t r31; //_16
|
||||
|
||||
double f14;
|
||||
double f15;
|
||||
double f16;
|
||||
double f17;
|
||||
double f18;
|
||||
double f19;
|
||||
double f20;
|
||||
double f21;
|
||||
double f22;
|
||||
double f23;
|
||||
double f24;
|
||||
double f25;
|
||||
double f26;
|
||||
double f27;
|
||||
double f28;
|
||||
double f29;
|
||||
double f30;
|
||||
double f31;
|
||||
|
||||
// aligned to frame::alignment_in_bytes (16)
|
||||
};
|
||||
|
||||
enum {
|
||||
spill_nonvolatiles_size = sizeof(spill_nonvolatiles)
|
||||
};
|
||||
|
||||
#define _spill_nonvolatiles_neg(_component) \
|
||||
(int)(-frame::spill_nonvolatiles_size + offset_of(frame::spill_nonvolatiles, _component))
|
||||
|
||||
// Frame layout for the Java interpreter on PPC64.
|
||||
//
|
||||
// This frame layout provides a C-like frame for every Java frame.
|
||||
//
|
||||
// In these figures the stack grows upwards, while memory grows
|
||||
// downwards. Square brackets denote regions possibly larger than
|
||||
// single 64 bit slots.
|
||||
//
|
||||
// STACK (no JNI, no compiled code, no library calls,
|
||||
// interpreter-loop is active):
|
||||
// 0 [InterpretMethod]
|
||||
// [TOP_IJAVA_FRAME]
|
||||
// [PARENT_IJAVA_FRAME]
|
||||
// ...
|
||||
// [PARENT_IJAVA_FRAME]
|
||||
// [ENTRY_FRAME]
|
||||
// [C_FRAME]
|
||||
// ...
|
||||
// [C_FRAME]
|
||||
//
|
||||
// TOP_IJAVA_FRAME:
|
||||
// 0 [TOP_IJAVA_FRAME_ABI]
|
||||
// alignment (optional)
|
||||
// [operand stack]
|
||||
// [monitors] (optional)
|
||||
// [cInterpreter object]
|
||||
// result, locals, and arguments are in parent frame!
|
||||
//
|
||||
// PARENT_IJAVA_FRAME:
|
||||
// 0 [PARENT_IJAVA_FRAME_ABI]
|
||||
// alignment (optional)
|
||||
// [callee's Java result]
|
||||
// [callee's locals w/o arguments]
|
||||
// [outgoing arguments]
|
||||
// [used part of operand stack w/o arguments]
|
||||
// [monitors] (optional)
|
||||
// [cInterpreter object]
|
||||
//
|
||||
// ENTRY_FRAME:
|
||||
// 0 [PARENT_IJAVA_FRAME_ABI]
|
||||
// alignment (optional)
|
||||
// [callee's Java result]
|
||||
// [callee's locals w/o arguments]
|
||||
// [outgoing arguments]
|
||||
// [ENTRY_FRAME_LOCALS]
|
||||
//
|
||||
// PARENT_IJAVA_FRAME_ABI:
|
||||
// 0 [ABI_48]
|
||||
// top_frame_sp
|
||||
// initial_caller_sp
|
||||
//
|
||||
// TOP_IJAVA_FRAME_ABI:
|
||||
// 0 [PARENT_IJAVA_FRAME_ABI]
|
||||
// carg_3_unused
|
||||
// carg_4_unused
|
||||
// carg_5_unused
|
||||
// carg_6_unused
|
||||
// carg_7_unused
|
||||
// frame_manager_lr
|
||||
//
|
||||
|
||||
// PARENT_IJAVA_FRAME_ABI
|
||||
|
||||
struct parent_ijava_frame_abi : abi_48 {
|
||||
// SOE registers.
|
||||
// C2i adapters spill their top-frame stack-pointer here.
|
||||
uint64_t top_frame_sp; // carg_1
|
||||
// Sp of calling compiled frame before it was resized by the c2i
|
||||
// adapter or sp of call stub. Does not contain a valid value for
|
||||
// non-initial frames.
|
||||
uint64_t initial_caller_sp; // carg_2
|
||||
// aligned to frame::alignment_in_bytes (16)
|
||||
};
|
||||
|
||||
enum {
|
||||
parent_ijava_frame_abi_size = sizeof(parent_ijava_frame_abi)
|
||||
};
|
||||
|
||||
#define _parent_ijava_frame_abi(_component) \
|
||||
(offset_of(frame::parent_ijava_frame_abi, _component))
|
||||
|
||||
// TOP_IJAVA_FRAME_ABI
|
||||
|
||||
// ABI area of the top interpreter frame. Extends the parent ABI by
// the remaining outgoing-argument slots so that its total size
// matches abi_112 (see comment on frame_manager_lr below).
struct top_ijava_frame_abi : parent_ijava_frame_abi {
  uint64_t carg_3_unused;     // carg_3
  // NOTE(review): member name looks like a typo for 'carg_4_unused';
  // kept as-is since the name may be referenced elsewhere via the
  // _top_ijava_frame_abi() offset macro.
  uint64_t card_4_unused;     //_16 carg_4
  uint64_t carg_5_unused;     // carg_5
  uint64_t carg_6_unused;     //_16 carg_6
  uint64_t carg_7_unused;     // carg_7
  // Use arg8 for storing frame_manager_lr. The size of
  // top_ijava_frame_abi must match abi_112.
  uint64_t frame_manager_lr;  //_16 carg_8
  // nothing to add here!
  // aligned to frame::alignment_in_bytes (16)
};
|
||||
|
||||
enum {
|
||||
top_ijava_frame_abi_size = sizeof(top_ijava_frame_abi)
|
||||
};
|
||||
|
||||
#define _top_ijava_frame_abi(_component) \
|
||||
(offset_of(frame::top_ijava_frame_abi, _component))
|
||||
|
||||
// ENTRY_FRAME
|
||||
|
||||
struct entry_frame_locals {
|
||||
uint64_t call_wrapper_address;
|
||||
uint64_t result_address; //_16
|
||||
uint64_t result_type;
|
||||
uint64_t arguments_tos_address; //_16
|
||||
// aligned to frame::alignment_in_bytes (16)
|
||||
uint64_t r[spill_nonvolatiles_size/sizeof(uint64_t)];
|
||||
};
|
||||
|
||||
enum {
|
||||
entry_frame_locals_size = sizeof(entry_frame_locals)
|
||||
};
|
||||
|
||||
#define _entry_frame_locals_neg(_component) \
|
||||
(int)(-frame::entry_frame_locals_size + offset_of(frame::entry_frame_locals, _component))
|
||||
|
||||
|
||||
// Frame layout for JIT generated methods
|
||||
//
|
||||
// In these figures the stack grows upwards, while memory grows
|
||||
// downwards. Square brackets denote regions possibly larger than single
|
||||
// 64 bit slots.
|
||||
//
|
||||
// STACK (interpreted Java calls JIT generated Java):
|
||||
// [JIT_FRAME] <-- SP (mod 16 = 0)
|
||||
// [TOP_IJAVA_FRAME]
|
||||
// ...
|
||||
//
|
||||
// JIT_FRAME (is a C frame according to PPC-64 ABI):
|
||||
// [out_preserve]
|
||||
// [out_args]
|
||||
// [spills]
|
||||
// [pad_1]
|
||||
// [monitor] (optional)
|
||||
// ...
|
||||
// [monitor] (optional)
|
||||
// [pad_2]
|
||||
// [in_preserve] added / removed by prolog / epilog
|
||||
//
|
||||
|
||||
// JIT_ABI (TOP and PARENT)
|
||||
|
||||
struct jit_abi {
|
||||
uint64_t callers_sp;
|
||||
uint64_t cr;
|
||||
uint64_t lr;
|
||||
uint64_t toc;
|
||||
// Nothing to add here!
|
||||
// NOT ALIGNED to frame::alignment_in_bytes (16).
|
||||
};
|
||||
|
||||
struct jit_out_preserve : jit_abi {
|
||||
// Nothing to add here!
|
||||
};
|
||||
|
||||
struct jit_in_preserve {
|
||||
// Nothing to add here!
|
||||
};
|
||||
|
||||
enum {
|
||||
jit_out_preserve_size = sizeof(jit_out_preserve),
|
||||
jit_in_preserve_size = sizeof(jit_in_preserve)
|
||||
};
|
||||
|
||||
struct jit_monitor {
|
||||
uint64_t monitor[1];
|
||||
};
|
||||
|
||||
enum {
|
||||
jit_monitor_size = sizeof(jit_monitor),
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
// STACK:
|
||||
// ...
|
||||
// [THIS_FRAME] <-- this._sp (stack pointer for this frame)
|
||||
// [CALLER_FRAME] <-- this.fp() (_sp of caller's frame)
|
||||
// ...
|
||||
//
|
||||
|
||||
// frame pointer for this frame
|
||||
intptr_t* _fp;
|
||||
|
||||
// The frame's stack pointer before it has been extended by a c2i adapter;
|
||||
// needed by deoptimization
|
||||
intptr_t* _unextended_sp;
|
||||
void adjust_unextended_sp();
|
||||
|
||||
public:
|
||||
|
||||
// Accessors for fields
|
||||
intptr_t* fp() const { return _fp; }
|
||||
|
||||
// Accessors for ABIs
|
||||
inline abi_48* own_abi() const { return (abi_48*) _sp; }
|
||||
inline abi_48* callers_abi() const { return (abi_48*) _fp; }
|
||||
|
||||
private:
|
||||
|
||||
// Find codeblob and set deopt_state.
|
||||
inline void find_codeblob_and_set_pc_and_deopt_state(address pc);
|
||||
|
||||
public:
|
||||
|
||||
// Constructors
|
||||
inline frame(intptr_t* sp);
|
||||
frame(intptr_t* sp, address pc);
|
||||
inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
|
||||
|
||||
private:
|
||||
|
||||
intptr_t* compiled_sender_sp(CodeBlob* cb) const;
|
||||
address* compiled_sender_pc_addr(CodeBlob* cb) const;
|
||||
address* sender_pc_addr(void) const;
|
||||
|
||||
public:
|
||||
|
||||
#ifdef CC_INTERP
|
||||
// Additional interface for interpreter frames:
|
||||
inline interpreterState get_interpreterState() const;
|
||||
#endif
|
||||
|
||||
// Size of a monitor in bytes.
|
||||
static int interpreter_frame_monitor_size_in_bytes();
|
||||
|
||||
// The size of a cInterpreter object.
|
||||
static inline int interpreter_frame_cinterpreterstate_size_in_bytes();
|
||||
|
||||
private:
|
||||
|
||||
// PPC port: permgen stuff
|
||||
ConstantPoolCache** interpreter_frame_cpoolcache_addr() const;
|
||||
|
||||
public:
|
||||
|
||||
// Additional interface for entry frames:
|
||||
inline entry_frame_locals* get_entry_frame_locals() const {
|
||||
return (entry_frame_locals*) (((address) fp()) - entry_frame_locals_size);
|
||||
}
|
||||
|
||||
enum {
|
||||
// normal return address is 1 bundle past PC
|
||||
pc_return_offset = 0
|
||||
};
|
||||
|
||||
#endif // CPU_PPC_VM_FRAME_PPC_HPP
|
239
hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp
Normal file
239
hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp
Normal file
@ -0,0 +1,239 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
|
||||
#define CPU_PPC_VM_FRAME_PPC_INLINE_HPP
|
||||
|
||||
#ifndef CC_INTERP
|
||||
#error "CC_INTERP must be defined on PPC64"
|
||||
#endif
|
||||
|
||||
// Inline functions for ppc64 frames:
|
||||
|
||||
// Find codeblob and set deopt_state.
|
||||
// Shared constructor helper: look up the code blob for 'pc', set
// _pc/_fp, adjust _unextended_sp, and classify the frame's
// deoptimization state. The initialization order below matters:
// _cb and _pc must be set before adjust_unextended_sp() and
// get_deopt_original_pc() are called.
inline void frame::find_codeblob_and_set_pc_and_deopt_state(address pc) {
  assert(pc != NULL, "precondition: must have PC");

  _cb = CodeCache::find_blob(pc);
  _pc = pc;   // Must be set for get_deopt_original_pc()

  // The caller's sp stored in the backlink serves as this frame's fp.
  _fp = (intptr_t*)own_abi()->callers_sp;
  // Use _fp - frame_size, needs to be done between _cb and _pc initialization
  // and get_deopt_original_pc.
  adjust_unextended_sp();

  // If pc is a deopt stub entry, recover the original pc and mark
  // the frame deoptimized.
  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }

  assert(((uint64_t)_sp & 0xf) == 0, "SP must be 16-byte aligned");
}
|
||||
|
||||
// Constructors
|
||||
|
||||
// Initialize all fields, _unextended_sp will be adjusted in find_codeblob_and_set_pc_and_deopt_state.
|
||||
inline frame::frame() : _sp(NULL), _unextended_sp(NULL), _fp(NULL), _cb(NULL), _pc(NULL), _deopt_state(unknown) {}
|
||||
|
||||
inline frame::frame(intptr_t* sp) : _sp(sp), _unextended_sp(sp) {
|
||||
find_codeblob_and_set_pc_and_deopt_state((address)own_abi()->lr); // also sets _fp and adjusts _unextended_sp
|
||||
}
|
||||
|
||||
inline frame::frame(intptr_t* sp, address pc) : _sp(sp), _unextended_sp(sp) {
|
||||
find_codeblob_and_set_pc_and_deopt_state(pc); // also sets _fp and adjusts _unextended_sp
|
||||
}
|
||||
|
||||
inline frame::frame(intptr_t* sp, address pc, intptr_t* unextended_sp) : _sp(sp), _unextended_sp(unextended_sp) {
|
||||
find_codeblob_and_set_pc_and_deopt_state(pc); // also sets _fp and adjusts _unextended_sp
|
||||
}
|
||||
|
||||
// Accessors
|
||||
|
||||
// Return unique id for this frame. The id must have a value where we
|
||||
// can distinguish identity and younger/older relationship. NULL
|
||||
// represents an invalid (incomparable) frame.
|
||||
inline intptr_t* frame::id(void) const {
  // Use the _unextended_sp as the frame's ID. (The old comment said
  // "_unextended_pc", which does not match the code.) Because we have
  // no adapters, but resized compiled frames, some of the new code
  // (e.g. JVMTI) wouldn't work if we return the (current) SP of the
  // frame.
  return _unextended_sp;
}
|
||||
|
||||
// Return true if this frame is older (less recent activation) than
|
||||
// the frame represented by id.
|
||||
inline bool frame::is_older(intptr_t* id) const {
|
||||
assert(this->id() != NULL && id != NULL, "NULL frame id");
|
||||
// Stack grows towards smaller addresses on ppc64.
|
||||
return this->id() > id;
|
||||
}
|
||||
|
||||
inline int frame::frame_size(RegisterMap* map) const {
|
||||
// Stack grows towards smaller addresses on PPC64: sender is at a higher address.
|
||||
return sender_sp() - sp();
|
||||
}
|
||||
|
||||
// Return the frame's stack pointer before it has been extended by a
|
||||
// c2i adapter. This is needed by deoptimization for ignoring c2i adapter
|
||||
// frames.
|
||||
inline intptr_t* frame::unextended_sp() const {
|
||||
return _unextended_sp;
|
||||
}
|
||||
|
||||
// All frames have this field.
|
||||
inline address frame::sender_pc() const {
|
||||
return (address)callers_abi()->lr;
|
||||
}
|
||||
inline address* frame::sender_pc_addr() const {
|
||||
return (address*)&(callers_abi()->lr);
|
||||
}
|
||||
|
||||
// All frames have this field.
|
||||
inline intptr_t* frame::sender_sp() const {
|
||||
return (intptr_t*)callers_abi();
|
||||
}
|
||||
|
||||
// All frames have this field.
|
||||
inline intptr_t* frame::link() const {
|
||||
return (intptr_t*)callers_abi()->callers_sp;
|
||||
}
|
||||
|
||||
inline intptr_t* frame::real_fp() const {
|
||||
return fp();
|
||||
}
|
||||
|
||||
#ifdef CC_INTERP
|
||||
|
||||
inline interpreterState frame::get_interpreterState() const {
|
||||
return (interpreterState)(((address)callers_abi())
|
||||
- frame::interpreter_frame_cinterpreterstate_size_in_bytes());
|
||||
}
|
||||
|
||||
inline intptr_t** frame::interpreter_frame_locals_addr() const {
|
||||
interpreterState istate = get_interpreterState();
|
||||
return (intptr_t**)&istate->_locals;
|
||||
}
|
||||
|
||||
inline intptr_t* frame::interpreter_frame_bcx_addr() const {
|
||||
interpreterState istate = get_interpreterState();
|
||||
return (intptr_t*)&istate->_bcp;
|
||||
}
|
||||
|
||||
inline intptr_t* frame::interpreter_frame_mdx_addr() const {
|
||||
interpreterState istate = get_interpreterState();
|
||||
return (intptr_t*)&istate->_mdx;
|
||||
}
|
||||
|
||||
inline intptr_t* frame::interpreter_frame_expression_stack() const {
|
||||
return (intptr_t*)interpreter_frame_monitor_end() - 1;
|
||||
}
|
||||
|
||||
inline jint frame::interpreter_frame_expression_stack_direction() {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// top of expression stack
|
||||
inline intptr_t* frame::interpreter_frame_tos_address() const {
|
||||
interpreterState istate = get_interpreterState();
|
||||
return istate->_stack + 1;
|
||||
}
|
||||
|
||||
inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
|
||||
return &interpreter_frame_tos_address()[offset];
|
||||
}
|
||||
|
||||
// monitor elements
|
||||
|
||||
// in keeping with Intel side: end is lower in memory than begin;
|
||||
// and beginning element is oldest element
|
||||
// Also begin is one past last monitor.
|
||||
|
||||
inline BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
|
||||
return get_interpreterState()->monitor_base();
|
||||
}
|
||||
|
||||
inline BasicObjectLock* frame::interpreter_frame_monitor_end() const {
|
||||
return (BasicObjectLock*)get_interpreterState()->stack_base();
|
||||
}
|
||||
|
||||
inline int frame::interpreter_frame_cinterpreterstate_size_in_bytes() {
|
||||
// Size of an interpreter object. Not aligned with frame size.
|
||||
return round_to(sizeof(BytecodeInterpreter), 8);
|
||||
}
|
||||
|
||||
inline Method** frame::interpreter_frame_method_addr() const {
|
||||
interpreterState istate = get_interpreterState();
|
||||
return &istate->_method;
|
||||
}
|
||||
|
||||
// Constant pool cache
|
||||
|
||||
inline ConstantPoolCache** frame::interpreter_frame_cpoolcache_addr() const {
|
||||
interpreterState istate = get_interpreterState();
|
||||
return &istate->_constants; // should really use accessor
|
||||
}
|
||||
|
||||
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
|
||||
interpreterState istate = get_interpreterState();
|
||||
return &istate->_constants;
|
||||
}
|
||||
#endif // CC_INTERP
|
||||
|
||||
inline int frame::interpreter_frame_monitor_size() {
|
||||
// Number of stack slots for a monitor.
|
||||
return round_to(BasicObjectLock::size(), // number of stack slots
|
||||
WordsPerLong); // number of stack slots for a Java long
|
||||
}
|
||||
|
||||
inline int frame::interpreter_frame_monitor_size_in_bytes() {
|
||||
return frame::interpreter_frame_monitor_size() * wordSize;
|
||||
}
|
||||
|
||||
// entry frames
|
||||
|
||||
inline intptr_t* frame::entry_frame_argument_at(int offset) const {
|
||||
// Since an entry frame always calls the interpreter first, the
|
||||
// parameters are on the stack and relative to known register in the
|
||||
// entry frame.
|
||||
intptr_t* tos = (intptr_t*)get_entry_frame_locals()->arguments_tos_address;
|
||||
return &tos[offset + 1]; // prepushed tos
|
||||
}
|
||||
|
||||
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
|
||||
return (JavaCallWrapper**)&get_entry_frame_locals()->call_wrapper_address;
|
||||
}
|
||||
|
||||
inline oop frame::saved_oop_result(RegisterMap* map) const {
|
||||
return *((oop*)map->location(R3->as_VMReg()));
|
||||
}
|
||||
|
||||
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
|
||||
*((oop*)map->location(R3->as_VMReg())) = obj;
|
||||
}
|
||||
|
||||
#endif // CPU_PPC_VM_FRAME_PPC_INLINE_HPP
|
34
hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp
Normal file
34
hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp
Normal file
@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
#define CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP

// Size of PPC Instructions: all PPC instructions are a fixed 4 bytes wide.
const int BytesPerInstWord = 4;

// Required stack alignment in bytes.
// NOTE(review): presumably mandated by the PPC64 ABI — confirm.
const int StackAlignmentInBytes = 16;

#endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
|
116
hotspot/src/cpu/ppc/vm/globals_ppc.hpp
Normal file
116
hotspot/src/cpu/ppc/vm/globals_ppc.hpp
Normal file
@ -0,0 +1,116 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_GLOBALS_PPC_HPP
#define CPU_PPC_VM_GLOBALS_PPC_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)

define_pd_global(bool, ConvertSleepToYield,   true);
define_pd_global(bool, ShareVtableStubs,      false); // Improves performance markedly for mtrt and compress.
define_pd_global(bool, NeedsDeoptSuspend,     false); // Only register window machines need this.


define_pd_global(bool, ImplicitNullChecks,    true);  // Generate code for implicit null checks.
define_pd_global(bool, UncommonNullCast,      true);  // Uncommon-trap NULLs passed to check cast.

// Use large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment,    128);
define_pd_global(intx, OptoLoopAlignment,     16);
define_pd_global(intx, InlineFrequencyCount,  100);
define_pd_global(intx, InlineSmallCode,       1500);

define_pd_global(intx, PreInflateSpin,        10);

// Flags for template interpreter.
define_pd_global(bool, RewriteBytecodes,      true);
define_pd_global(bool, RewriteFrequentPairs,  true);

define_pd_global(bool, UseMembar,             false);

// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker,  16*M);  // Default max size of CMS young gen, per GC worker thread.


// Platform dependent flag handling: flags only defined on this platform.
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)  \
  product(uintx, PowerArchitecturePPC64, 0,                                 \
          "CPU Version: x for PowerX. Currently recognizes Power5 to "      \
          "Power7. Default is 0. CPUs newer than Power7 will be "           \
          "recognized as Power7.")                                          \
                                                                            \
  /* Reoptimize code-sequences of calls at runtime, e.g. replace an */      \
  /* indirect call by a direct call.                                */      \
  product(bool, ReoptimizeCallSequences, true,                              \
          "Reoptimize code-sequences of calls at runtime.")                 \
                                                                            \
  product(bool, UseLoadInstructionsForStackBangingPPC64, false,             \
          "Use load instructions for stack banging.")                       \
                                                                            \
  /* special instructions */                                                \
                                                                            \
  product(bool, UseCountLeadingZerosInstructionsPPC64, true,                \
          "Use count leading zeros instructions.")                          \
                                                                            \
  product(bool, UseExtendedLoadAndReserveInstructionsPPC64, false,          \
          "Use extended versions of load-and-reserve instructions.")        \
                                                                            \
  product(bool, UseRotateAndMaskInstructionsPPC64, true,                    \
          "Use rotate and mask instructions.")                              \
                                                                            \
  product(bool, UseStaticBranchPredictionInCompareAndSwapPPC64, true,       \
          "Use static branch prediction hints in CAS operations.")          \
                                                                            \
  /* Trap based checks.                                                */   \
  /* Trap based checks use the ppc trap instructions to check certain  */   \
  /* conditions. This instruction raises a SIGTRAP caught by the       */   \
  /* exception handler of the VM.                                      */   \
  product(bool, UseSIGTRAP, false,                                          \
          "Allow trap instructions that make use of SIGTRAP. Use this to "  \
          "switch off all optimizations requiring SIGTRAP.")                \
  product(bool, TrapBasedICMissChecks, true,                                \
          "Raise and handle SIGTRAP if inline cache miss detected.")        \
  product(bool, TrapBasedNotEntrantChecks, true,                            \
          "Raise and handle SIGTRAP if calling not entrant or zombie"       \
          " method.")                                                       \
  product(bool, TrapBasedNullChecks, true,                                  \
          "Generate code for null checks that uses a cmp and trap "         \
          "instruction raising SIGTRAP. This is only used if an access to " \
          "null (+offset) will not raise a SIGSEGV.")                       \
  product(bool, TrapBasedRangeChecks, true,                                 \
          "Raise and handle SIGTRAP if array out of bounds check fails.")   \
  product(bool, TraceTraps, false, "Trace all traps the signal handler "    \
          "handles.")                                                       \
                                                                            \
  product(bool, ZapMemory, false, "Write 0x0101... to empty memory."        \
          " Use this to ease debugging.")                                   \



#endif // CPU_PPC_VM_GLOBALS_PPC_HPP
|
71
hotspot/src/cpu/ppc/vm/icBuffer_ppc.cpp
Normal file
71
hotspot/src/cpu/ppc/vm/icBuffer_ppc.cpp
Normal file
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/assembler.hpp"
|
||||
#include "assembler_ppc.inline.hpp"
|
||||
#include "code/icBuffer.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "interpreter/bytecodes.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "nativeInst_ppc.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/oop.inline2.hpp"
|
||||
|
||||
#define __ masm.
|
||||
|
||||
int InlineCacheBuffer::ic_stub_code_size() {
|
||||
return MacroAssembler::load_const_size + MacroAssembler::b64_patchable_size;
|
||||
}
|
||||
|
||||
// Emit an ic stub at code_begin: load cached_value into R19_method, then
// branch to entry_point.
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
  ResourceMark rm;
  CodeBuffer code(code_begin, ic_stub_code_size());
  MacroAssembler masm(&code);
  // Note: even though the code contains an embedded metadata, we do not need reloc info
  // because
  // (1) the metadata is old (i.e., doesn't matter for scavenges)
  // (2) these ICStubs are removed *before* a GC happens, so the roots disappear.

  // Load the oop ...
  __ load_const(R19_method, (address) cached_value, R0);
  // ... and jump to entry point.
  __ b64_patchable((address) entry_point, relocInfo::none);

  // Make the freshly emitted stub visible to the instruction fetcher.
  __ flush();
}
|
||||
|
||||
// Decode the branch target of an ic stub: the jump immediately follows the
// constant load emitted by assemble_ic_buffer_code.
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);   // creation also verifies the object
  NativeJump* jump = nativeJump_at(move->next_instruction_address());
  return jump->jump_destination();
}
|
||||
|
||||
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
  // The cached value is the immediate of the constant load at the start of
  // the stub; nativeMovConstReg_at also verifies the instruction.
  NativeMovConstReg* load = nativeMovConstReg_at(code_begin);
  return (void*)load->data();
}
|
||||
|
77
hotspot/src/cpu/ppc/vm/icache_ppc.cpp
Normal file
77
hotspot/src/cpu/ppc/vm/icache_ppc.cpp
Normal file
@ -0,0 +1,77 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "assembler_ppc.inline.hpp"
|
||||
#include "runtime/icache.hpp"
|
||||
|
||||
// Use inline assembler to implement icache flush.
|
||||
// Use inline assembler to implement icache flush.
//
// Flushes [start, start + lines*line_size) with the classic PPC sequence:
// dcbst each line (push data cache to memory), sync, icbi each line
// (invalidate icache copies), isync (discard prefetched instructions).
// Returns magic unchanged, as expected by the flush-stub calling convention.
// NOTE(review): the magic return presumably lets callers verify the stub
// ran — confirm against ICache::invalidate_range.
int ppc64_flush_icache(address start, int lines, int magic) {
  address end = start + (unsigned int)lines*ICache::line_size;
  assert(start <= end, "flush_icache parms");

  // store modified cache lines from data cache
  for (address a = start; a < end; a += ICache::line_size) {
    __asm__ __volatile__(
     "dcbst 0, %0  \n"
     :
     : "r" (a)
     : "memory");
  }

  // sync instruction: order the dcbst stores before the invalidations below
  __asm__ __volatile__(
     "sync  \n"
     :
     :
     : "memory");

  // invalidate respective cache lines in instruction cache
  for (address a = start; a < end; a += ICache::line_size) {
    __asm__ __volatile__(
     "icbi 0, %0  \n"
     :
     : "r" (a)
     : "memory");
  }

  // discard fetched instructions
  __asm__ __volatile__(
     "isync  \n"
     :
     :
     : "memory");

  return magic;
}
|
||||
|
||||
// Install ppc64_flush_icache as the VM's icache-flush stub. Unlike other
// platforms no code is generated; the C function is used directly.
void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
  StubCodeMark mark(this, "ICache", "flush_icache_stub");

  *flush_icache_stub = (ICache::flush_icache_stub_t)ppc64_flush_icache;

  // First call to flush itself (a zero-length range exercises the stub once).
  ICache::invalidate_range((address)(*flush_icache_stub), 0);
}
|
44
hotspot/src/cpu/ppc/vm/icache_ppc.hpp
Normal file
44
hotspot/src/cpu/ppc/vm/icache_ppc.hpp
Normal file
@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_ICACHE_PPC_HPP
#define CPU_PPC_VM_ICACHE_PPC_HPP

// Interface for updating the instruction cache. Whenever the VM modifies
// code, part of the processor instruction cache potentially has to be flushed.

class ICache : public AbstractICache {
 public:
  enum {
    // On PowerPC the cache line size is 32 bytes.
    stub_size      = 160, // Size of the icache flush stub in bytes.
    line_size      = 32,  // Flush instruction affects 32 bytes.
    log2_line_size = 5    // log2(line_size)
  };

  // Use default implementation
};

#endif // CPU_PPC_VM_ICACHE_PPC_HPP
|
504
hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp
Normal file
504
hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp
Normal file
@ -0,0 +1,504 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/assembler.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "interp_masm_ppc_64.hpp"
|
||||
#include "interpreter/interpreterRuntime.hpp"
|
||||
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) // nothing
|
||||
#else
|
||||
#define BLOCK_COMMENT(str) block_comment(str)
|
||||
#endif
|
||||
|
||||
// Lock object
|
||||
//
|
||||
// Registers alive
|
||||
// monitor - Address of the BasicObjectLock to be used for locking,
|
||||
// which must be initialized with the object to lock.
|
||||
// object - Address of the object to be locked.
|
||||
//
|
||||
void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
  if (UseHeavyMonitors) {
    // Always take the slow path through the runtime.
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            monitor, /*check_for_exceptions=*/false);
  } else {
    // template code:
    //
    // markOop displaced_header = obj->mark().set_unlocked();
    // monitor->lock()->set_displaced_header(displaced_header);
    // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
    //   // We stored the monitor address into the object's mark word.
    // } else if (THREAD->is_lock_owned((address)displaced_header))
    //   // Simple recursive case.
    //   monitor->lock()->set_displaced_header(NULL);
    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorenter(THREAD, monitor);
    // }

    const Register displaced_header = R7_ARG5;
    const Register object_mark_addr = R8_ARG6;
    const Register current_header   = R9_ARG7;
    const Register tmp              = R10_ARG8;

    Label done;
    Label slow_case;

    assert_different_registers(displaced_header, object_mark_addr, current_header, tmp);


    // markOop displaced_header = obj->mark().set_unlocked();

    // Load markOop from object into displaced_header.
    ld(displaced_header, oopDesc::mark_offset_in_bytes(), object);

    if (UseBiasedLocking) {
      // May fall through to done (bias acquired) or branch to slow_case.
      biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case);
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    ori(displaced_header, displaced_header, markOopDesc::unlocked_value);


    // monitor->lock()->set_displaced_header(displaced_header);

    // Initialize the box (Must happen before we update the object mark!).
    std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
        BasicLock::displaced_header_offset_in_bytes(), monitor);

    // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {

    // Store stack address of the BasicObjectLock (this is monitor) into object.
    addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());

    // Must fence, otherwise, preceding store(s) may float below cmpxchg.
    // CmpxchgX sets CCR0 to cmpX(current, displaced).
    fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
    cmpxchgd(/*flag=*/CCR0,
             /*current_value=*/current_header,
             /*compare_value=*/displaced_header, /*exchange_value=*/monitor,
             /*where=*/object_mark_addr,
             MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
             MacroAssembler::cmpxchgx_hint_acquire_lock());

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and we have now locked it.
    beq(CCR0, done);


    // } else if (THREAD->is_lock_owned((address)displaced_header))
    //   // Simple recursive case.
    //   monitor->lock()->set_displaced_header(NULL);

    // We did not see an unlocked object so try the fast recursive case.

    // Check if owner is self by comparing the value in the markOop of object
    // (current_header) with the stack pointer.
    sub(current_header, current_header, R1_SP);

    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    load_const_optimized(tmp,
                         (address) (~(os::vm_page_size()-1) |
                                    markOopDesc::lock_mask_in_place));

    and_(R0/*==0?*/, current_header, tmp);
    // If condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock.
    bne(CCR0, slow_case);
    release();
    std(R0/*==0!*/, BasicObjectLock::lock_offset_in_bytes() +
        BasicLock::displaced_header_offset_in_bytes(), monitor);
    b(done);


    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorenter(THREAD, monitor);

    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter.
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            monitor, /*check_for_exceptions=*/false);
    // }

    bind(done);
  }
}
|
||||
|
||||
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
|
||||
//
|
||||
// Registers alive
|
||||
// monitor - Address of the BasicObjectLock to be used for locking,
|
||||
// which must be initialized with the object to lock.
|
||||
//
|
||||
// Throw IllegalMonitorException if object is not locked by current thread.
|
||||
void InterpreterMacroAssembler::unlock_object(Register monitor) {
  if (UseHeavyMonitors) {
    // Always take the slow path through the runtime.
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            monitor, /*check_for_exceptions=*/false);
  } else {

    // template code:
    //
    // if ((displaced_header = monitor->displaced_header()) == NULL) {
    //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
    //   monitor->set_obj(NULL);
    // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
    //   // We swapped the unlocked mark in displaced_header into the object's mark word.
    //   monitor->set_obj(NULL);
    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorexit(THREAD, monitor);
    // }

    const Register object           = R7_ARG5;
    const Register displaced_header = R8_ARG6;
    const Register object_mark_addr = R9_ARG7;
    const Register current_header   = R10_ARG8;

    Label no_recursive_unlock;
    Label slow_case;
    Label done;

    assert_different_registers(object, displaced_header, object_mark_addr, current_header);

    if (UseBiasedLocking) {
      // The object address from the monitor is in object.
      ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
      assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
      biased_locking_exit(CCR0, object, displaced_header, done);
    }

    // Test first if we are in the fast recursive case.
    ld(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
       BasicLock::displaced_header_offset_in_bytes(), monitor);

    // If the displaced header is zero, we have a recursive unlock.
    cmpdi(CCR0, displaced_header, 0);
    bne(CCR0, no_recursive_unlock);
    // Release in recursive unlock is not necessary.
    // release();
    std(displaced_header/*==0!*/, BasicObjectLock::obj_offset_in_bytes(), monitor);
    b(done);

    bind(no_recursive_unlock);

    // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
    //   // We swapped the unlocked mark in displaced_header into the object's mark word.
    //   monitor->set_obj(NULL);

    // If we still have a lightweight lock, unlock the object and be done.

    // The object address from the monitor is in object.
    ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
    addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());

    // We have the displaced header in displaced_header. If the lock is still
    // lightweight, it will contain the monitor address and we'll store the
    // displaced header back into the object's mark word.
    // CmpxchgX sets CCR0 to cmpX(current, monitor).
    cmpxchgd(/*flag=*/CCR0,
             /*current_value=*/current_header,
             /*compare_value=*/monitor, /*exchange_value=*/displaced_header,
             /*where=*/object_mark_addr,
             MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
             MacroAssembler::cmpxchgx_hint_release_lock());
    bne(CCR0, slow_case);

    // Exchange worked, do monitor->set_obj(NULL).
    li(R0, 0);
    // Must release earlier (see cmpxchgd above).
    // release();
    std(R0, BasicObjectLock::obj_offset_in_bytes(), monitor);
    b(done);


    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorexit(THREAD, monitor);

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case.
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            monitor, /*check_for_exceptions=*/false);
    // }

    bind(done);
  }
}
|
||||
|
||||
// Load the MethodCounters* of 'method' into Rcounters, allocating it via
// the runtime on first use. Branches to 'skip' if allocation fails
// (OutOfMemory), in which case Rcounters is 0.
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
                                                    Label& skip) {
  BLOCK_COMMENT("Load and ev. allocate counter object {");
  Label has_counters;
  ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
  cmpdi(CCR0, Rcounters, 0);
  bne(CCR0, has_counters);
  // Counters not yet allocated: ask the runtime to build them.
  call_VM(noreg, CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::build_method_counters), method, false);
  // Reload; a NULL result means the allocation failed.
  ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
  cmpdi(CCR0, Rcounters, 0);
  beq(CCR0, skip); // No MethodCounters, OutOfMemory.
  BLOCK_COMMENT("} Load and ev. allocate counter object");

  bind(has_counters);
}
|
||||
|
||||
// Increment the invocation counter in the MethodCounters pointed to by
// Rcounters and leave (masked backedge count + new invocation count) in
// iv_be_count. Rtmp_r0 is clobbered as a scratch register.
void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register iv_be_count, Register Rtmp_r0) {
  assert(UseCompiler, "incrementing must be useful");
  Register invocation_count = iv_be_count;
  Register backedge_count   = Rtmp_r0;
  int delta = InvocationCounter::count_increment;

  // Load each counter in a register.
  //  ld(inv_counter, Rtmp);
  //  ld(be_counter, Rtmp2);
  int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() +
                                    InvocationCounter::counter_offset());
  int be_counter_offset  = in_bytes(MethodCounters::backedge_counter_offset() +
                                    InvocationCounter::counter_offset());

  BLOCK_COMMENT("Increment profiling counters {");

  // Load the backedge counter.
  lwz(backedge_count, be_counter_offset, Rcounters); // is unsigned int
  // Mask the backedge counter.
  Register tmp = invocation_count;
  li(tmp, InvocationCounter::count_mask_value);
  andr(backedge_count, tmp, backedge_count); // Cannot use andi, need sign extension of count_mask_value.

  // Load the invocation counter.
  lwz(invocation_count, inv_counter_offset, Rcounters); // is unsigned int
  // Add the delta to the invocation counter and store the result.
  addi(invocation_count, invocation_count, delta);
  // Store value.
  stw(invocation_count, inv_counter_offset, Rcounters);

  // Add invocation counter + backedge counter.
  add(iv_be_count, backedge_count, invocation_count);

  // Note that this macro must leave the backedge_count + invocation_count in
  // register iv_be_count!
  BLOCK_COMMENT("} Increment profiling counters");
}
|
||||
|
||||
void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
  // Only an object reference on the tos can be verified as an oop.
  if (state != atos) return;
  MacroAssembler::verify_oop(reg);
}
|
||||
|
||||
// Inline assembly for:
|
||||
//
|
||||
// if (thread is in interp_only_mode) {
|
||||
// InterpreterRuntime::post_method_entry();
|
||||
// }
|
||||
// if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ) ||
|
||||
// *jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2) ) {
|
||||
// SharedRuntime::jvmpi_method_entry(method, receiver);
|
||||
// }
|
||||
void InterpreterMacroAssembler::notify_method_entry() {
  // JVMTI
  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label jvmti_post_done;

    // Skip the runtime call unless the thread is in interp_only_mode.
    lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
    cmpwi(CCR0, R0, 0);
    beq(CCR0, jvmti_post_done);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry),
            /*check_exceptions=*/false);

    bind(jvmti_post_done);
  }
}
|
||||
|
||||
|
||||
// Inline assembly for:
|
||||
//
|
||||
// if (thread is in interp_only_mode) {
|
||||
// // save result
|
||||
// InterpreterRuntime::post_method_exit();
|
||||
// // restore result
|
||||
// }
|
||||
// if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT)) {
|
||||
// // save result
|
||||
// SharedRuntime::jvmpi_method_exit();
|
||||
// // restore result
|
||||
// }
|
||||
//
|
||||
// Native methods have their result stored in d_tmp and l_tmp.
|
||||
// Java methods have their result stored in the expression stack.
|
||||
void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosState state) {
  // JVMTI
  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  //
  // NOTE(review): is_native_method and state are unused here — presumably
  // reserved for saving/restoring the method result around the callback
  // (cf. the comment above this function); confirm against other platforms.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label jvmti_post_done;

    // Skip the runtime call unless the thread is in interp_only_mode.
    lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
    cmpwi(CCR0, R0, 0);
    beq(CCR0, jvmti_post_done);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit),
            /*check_exceptions=*/false);

    bind(jvmti_post_done);
  }
}
|
||||
|
||||
// Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME
// (using parent_frame_resize) and push a new interpreter
// TOP_IJAVA_FRAME (using frame_size).
void InterpreterMacroAssembler::push_interpreter_frame(Register top_frame_size, Register parent_frame_resize,
                                                       Register tmp1, Register tmp2, Register tmp3,
                                                       Register tmp4, Register pc) {
  assert_different_registers(top_frame_size, parent_frame_resize, tmp1, tmp2, tmp3, tmp4);
  // Save the old top frame's return address and SP before resizing.
  ld(tmp1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  mr(tmp2/*top_frame_sp*/, R1_SP);
  // Move initial_caller_sp.
  ld(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
  // Grow the current frame by parent_frame_resize bytes (resize_frame
  // takes the signed delta, hence the negation).
  neg(parent_frame_resize, parent_frame_resize);
  resize_frame(parent_frame_resize/*-parent_frame_resize*/, tmp3);

  // Set LR in new parent frame.
  std(tmp1, _abi(lr), R1_SP);
  // Set top_frame_sp info for new parent frame.
  std(tmp2, _parent_ijava_frame_abi(top_frame_sp), R1_SP);
  std(tmp4, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);

  // Push new TOP_IJAVA_FRAME.
  push_frame(top_frame_size, tmp2);

  // Record the current PC as the frame manager return address of the
  // new top frame. Note: get_PC_trash_LR clobbers LR.
  get_PC_trash_LR(tmp3);
  std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  // Used for non-initial callers by unextended_sp().
  std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
  // NOTE(review): parameter 'pc' is unused here — confirm whether callers
  // expect it to be consumed.
}
|
||||
|
||||
// Pop the topmost TOP_IJAVA_FRAME and convert the previous
// PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
void InterpreterMacroAssembler::pop_interpreter_frame(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
  assert_different_registers(tmp1, tmp2, tmp3, tmp4);

  // tmp1 = caller's frame; fetch the LR stored in it.
  ld(tmp1/*caller's sp*/, _abi(callers_sp), R1_SP);
  ld(tmp3, _abi(lr), tmp1);

  ld(tmp4, _parent_ijava_frame_abi(initial_caller_sp), tmp1);

  ld(tmp2/*caller's caller's sp*/, _abi(callers_sp), tmp1);
  // Merge top frame.
  std(tmp2, _abi(callers_sp), R1_SP);

  ld(tmp2, _parent_ijava_frame_abi(top_frame_sp), tmp1);

  // Update C stack pointer to caller's top_abi.
  resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/);

  // Update LR in top_frame.
  std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);

  std(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP);

  // Store the top-frame stack-pointer for c2i adapters.
  std(R1_SP, _top_ijava_frame_abi(top_frame_sp), R1_SP);
}
|
||||
|
||||
#ifdef CC_INTERP
|
||||
// Turn state's interpreter frame into the current TOP_IJAVA_FRAME.
|
||||
void InterpreterMacroAssembler::pop_interpreter_frame_to_state(Register state, Register tmp1, Register tmp2, Register tmp3) {
|
||||
assert_different_registers(R14_state, R15_prev_state, tmp1, tmp2, tmp3);
|
||||
|
||||
if (state == R14_state) {
|
||||
ld(tmp1/*state's fp*/, state_(_last_Java_fp));
|
||||
ld(tmp2/*state's sp*/, state_(_last_Java_sp));
|
||||
} else if (state == R15_prev_state) {
|
||||
ld(tmp1/*state's fp*/, prev_state_(_last_Java_fp));
|
||||
ld(tmp2/*state's sp*/, prev_state_(_last_Java_sp));
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
// Merge top frames.
|
||||
std(tmp1, _abi(callers_sp), R1_SP);
|
||||
|
||||
// Tmp2 is new SP.
|
||||
// Tmp1 is parent's SP.
|
||||
resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/);
|
||||
|
||||
// Update LR in top_frame.
|
||||
// Must be interpreter frame.
|
||||
get_PC_trash_LR(tmp3);
|
||||
std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
|
||||
// Used for non-initial callers by unextended_sp().
|
||||
std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
|
||||
}
|
||||
#endif // CC_INTERP
|
||||
|
||||
// Set SP to initial caller's sp, but before fix the back chain.
void InterpreterMacroAssembler::resize_frame_to_initial_caller(Register tmp1, Register tmp2) {
  // tmp1 = initial caller's SP, tmp2 = current back-chain entry.
  ld(tmp1, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
  ld(tmp2, _parent_ijava_frame_abi(callers_sp), R1_SP);
  std(tmp2, _parent_ijava_frame_abi(callers_sp), tmp1); // Fix back chain ...
  mr(R1_SP, tmp1); // ... and resize to initial caller.
}
|
||||
|
||||
#ifdef CC_INTERP
|
||||
// Pop the current interpreter state (without popping the correspoding
|
||||
// frame) and restore R14_state and R15_prev_state accordingly.
|
||||
// Use prev_state_may_be_0 to indicate whether prev_state may be 0
|
||||
// in order to generate an extra check before retrieving prev_state_(_prev_link).
|
||||
void InterpreterMacroAssembler::pop_interpreter_state(bool prev_state_may_be_0)
|
||||
{
|
||||
// Move prev_state to state and restore prev_state from state_(_prev_link).
|
||||
Label prev_state_is_0;
|
||||
mr(R14_state, R15_prev_state);
|
||||
|
||||
// Don't retrieve /*state==*/prev_state_(_prev_link)
|
||||
// if /*state==*/prev_state is 0.
|
||||
if (prev_state_may_be_0) {
|
||||
cmpdi(CCR0, R15_prev_state, 0);
|
||||
beq(CCR0, prev_state_is_0);
|
||||
}
|
||||
|
||||
ld(R15_prev_state, /*state==*/prev_state_(_prev_link));
|
||||
bind(prev_state_is_0);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::restore_prev_state() {
|
||||
// _prev_link is private, but cInterpreter is a friend.
|
||||
ld(R15_prev_state, state_(_prev_link));
|
||||
}
|
||||
#endif // CC_INTERP
|
89
hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp
Normal file
89
hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp
Normal file
@ -0,0 +1,89 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
|
||||
#define CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
|
||||
|
||||
#include "assembler_ppc.inline.hpp"
|
||||
#include "interpreter/invocationCounter.hpp"
|
||||
|
||||
// This file specializes the assembler with interpreter-specific macros
|
||||
|
||||
|
||||
// Specializes MacroAssembler with macros used by the (CC_)interpreter
// on PPC64: frame pushing/popping, locking, and JVMTI notification.
class InterpreterMacroAssembler: public MacroAssembler {

 public:
  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}

  // Handy address generation macros.
  // Each expands to an (offset, base-register) pair suitable for the
  // load/store helpers; the base registers are the dedicated interpreter
  // registers (R16_thread, R19_method, R14_state, R15_prev_state).
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method

#ifdef CC_INTERP
#define state_(field_name) in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R14_state
#define prev_state_(field_name) in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R15_prev_state
#endif

  void get_method_counters(Register method, Register Rcounters, Label& skip);
  void increment_invocation_counter(Register iv_be_count, Register Rtmp1, Register Rtmp2_r0);

  // Object locking
  void lock_object  (Register lock_reg, Register obj_reg);
  void unlock_object(Register lock_reg);

  // Debugging
  void verify_oop(Register reg, TosState state = atos); // only if +VerifyOops && state == atos

  // support for jvmdi/jvmpi
  void notify_method_entry();
  void notify_method_exit(bool save_result, TosState state);

  // Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME
  // (using parent_frame_resize) and push a new interpreter
  // TOP_IJAVA_FRAME (using frame_size).
  void push_interpreter_frame(Register top_frame_size, Register parent_frame_resize,
                              Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register pc=noreg);

  // Pop the topmost TOP_IJAVA_FRAME and convert the previous
  // PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
  void pop_interpreter_frame(Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  // Turn state's interpreter frame into the current TOP_IJAVA_FRAME.
  void pop_interpreter_frame_to_state(Register state, Register tmp1, Register tmp2, Register tmp3);

  // Set SP to initial caller's sp, but before fix the back chain.
  void resize_frame_to_initial_caller(Register tmp1, Register tmp2);

  // Pop the current interpreter state (without popping the
  // corresponding frame) and restore R14_state and R15_prev_state
  // accordingly. Use prev_state_may_be_0 to indicate whether
  // prev_state may be 0 in order to generate an extra check before
  // retrieving prev_state_(_prev_link).
  void pop_interpreter_state(bool prev_state_may_be_0);

  void restore_prev_state();
};
|
||||
|
||||
#endif // CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
|
37
hotspot/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp
Normal file
37
hotspot/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp
Normal file
@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
#define CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP

// NOTE: this header is textually #included inside a class declaration
// (shared interpreterGenerator.hpp), hence the bare access specifier
// and member declarations without an enclosing class here.

 friend class AbstractInterpreterGenerator;

 private:

  // Platform-specific interpreter entry points.
  address generate_abstract_entry(void);
  address generate_accessor_entry(void);
  address generate_Reference_get_entry(void);

#endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
|
150
hotspot/src/cpu/ppc/vm/interpreterRT_ppc.cpp
Normal file
150
hotspot/src/cpu/ppc/vm/interpreterRT_ppc.cpp
Normal file
@ -0,0 +1,150 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "interpreter/interpreterRuntime.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/universe.inline.hpp"
|
||||
#include "oops/method.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/icache.hpp"
|
||||
#include "runtime/interfaceSupport.hpp"
|
||||
#include "runtime/signature.hpp"
|
||||
|
||||
#define __ _masm->
|
||||
|
||||
// Access macros for Java and C arguments.
|
||||
// The first Java argument is at index -1.
|
||||
#define locals_j_arg_at(index) (Interpreter::local_offset_in_bytes(index)), R18_locals
|
||||
// The first C argument is at index 0.
|
||||
#define sp_c_arg_at(index) ((index)*wordSize + _abi(carg_1)), R1_SP
|
||||
|
||||
// Implementation of SignatureHandlerGenerator
|
||||
|
||||
// Pass one Java int argument to the native call: load it from the Java
// locals (sign-extended to 64 bit) into its C argument register, or into
// its stack slot if all argument registers are taken.
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
  Argument jni_arg(jni_offset());
  Register r = jni_arg.is_register() ? jni_arg.as_register() : R0;

  __ lwa(r, locals_j_arg_at(offset())); // sign extension of integer
  // In debug builds the value is always also stored to the C stack slot
  // (DEBUG_ONLY makes the condition unconditionally true).
  if (DEBUG_ONLY(true ||) !jni_arg.is_register()) {
    __ std(r, sp_c_arg_at(jni_arg.number()));
  }
}
|
||||
|
||||
// Pass one Java long argument to the native call: load the 64-bit value
// from the Java locals into its C argument register, or into its stack
// slot if all argument registers are taken.
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
  Argument jni_arg(jni_offset());
  Register r = jni_arg.is_register() ? jni_arg.as_register() : R0;

  __ ld(r, locals_j_arg_at(offset()+1)); // long resides in upper slot
  // In debug builds the value is always also stored to the C stack slot.
  if (DEBUG_ONLY(true ||) !jni_arg.is_register()) {
    __ std(r, sp_c_arg_at(jni_arg.number()));
  }
}
|
||||
|
||||
// Pass one Java float argument to the native call in the next free FP
// argument register (F1..F13), or via F0 and the stack slot once the 13
// FP argument registers are exhausted.
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
  FloatRegister fp_reg = (_num_used_fp_arg_regs < 13/*max_fp_register_arguments*/)
                         ? as_FloatRegister((_num_used_fp_arg_regs++) + F1_ARG1->encoding())
                         : F0;

  __ lfs(fp_reg, locals_j_arg_at(offset()));
  // In debug builds the value is always also stored to the C stack slot
  // (DEBUG_ONLY makes the condition unconditionally true).
  if (DEBUG_ONLY(true ||) jni_offset() > 8) {
    __ stfs(fp_reg, sp_c_arg_at(jni_offset()));
  }
}
|
||||
|
||||
// Pass one Java double argument to the native call in the next free FP
// argument register (F1..F13), or via F0 and the stack slot once the 13
// FP argument registers are exhausted.
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
  FloatRegister fp_reg = (_num_used_fp_arg_regs < 13/*max_fp_register_arguments*/)
                         ? as_FloatRegister((_num_used_fp_arg_regs++) + F1_ARG1->encoding())
                         : F0;

  __ lfd(fp_reg, locals_j_arg_at(offset()+1)); // double resides in upper slot
  // In debug builds the value is always also stored to the C stack slot.
  if (DEBUG_ONLY(true ||) jni_offset() > 8) {
    __ stfd(fp_reg, sp_c_arg_at(jni_offset()));
  }
}
|
||||
|
||||
// Pass one Java object argument to the native call as a JNI handle:
// the address of the Java stack slot (or NULL if the slot holds NULL).
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
  Argument jni_arg(jni_offset());
  Register r = jni_arg.is_register() ? jni_arg.as_register() : R11_scratch1;

  // The handle for a receiver will never be null.
  bool do_NULL_check = offset() != 0 || is_static();

  Label do_null;
  if (do_NULL_check) {
    // Pass a NULL handle (0) if the slot itself contains NULL.
    __ ld(R0, locals_j_arg_at(offset()));
    __ cmpdi(CCR0, R0, 0);
    __ li(r, 0);
    __ beq(CCR0, do_null);
  }
  // Box the object: pass the address of the Java stack slot.
  __ addir(r, locals_j_arg_at(offset()));
  __ bind(do_null);
  // In debug builds the value is always also stored to the C stack slot.
  if (DEBUG_ONLY(true ||) !jni_arg.is_register()) {
    __ std(r, sp_c_arg_at(jni_arg.number()));
  }
}
|
||||
|
||||
// Generate the complete signature handler stub for the method's
// fingerprint: a function descriptor, per-argument passing code, and a
// tail that returns the result handler in R3_RET.
void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
  // Emit fd for current codebuffer. Needs patching!
  // (SignatureHandlerLibrary::pd_set_handler fills in the entry point later.)
  __ emit_fd();

  // Generate code to handle arguments (calls pass_int/pass_object/... per
  // signature entry).
  iterate(fingerprint);

  // Return the result handler.
  __ load_const(R3_RET, AbstractInterpreter::result_handler(method()->result_type()));
  __ blr();

  __ flush();
}
|
||||
|
||||
#undef __
|
||||
|
||||
// Implementation of SignatureHandlerLibrary
|
||||
|
||||
// Patch the function descriptor emitted by SignatureHandlerGenerator::
// generate(): set its entry point to the code following the descriptor.
void SignatureHandlerLibrary::pd_set_handler(address handler) {
  // patch fd here.
  FunctionDescriptor* fd = (FunctionDescriptor*) handler;

  fd->set_entry(handler + (int)sizeof(FunctionDescriptor));
  // The placeholder TOC written by emit_fd must still be untouched.
  assert(fd->toc() == (address)0xcafe, "need to adjust TOC here");
}
|
||||
|
||||
|
||||
// Access function to get the signature.
|
||||
// Access function to get the signature.
// Returns a pointer to the raw signature symbol bytes of a native method.
IRT_ENTRY(address, InterpreterRuntime::get_signature(JavaThread* thread, Method* method))
  methodHandle m(thread, method);
  assert(m->is_native(), "sanity check");
  Symbol *s = m->signature();
  return (address) s->base();
IRT_END
|
||||
|
||||
// Returns the interpreter result handler matching the native method's
// return type.
IRT_ENTRY(address, InterpreterRuntime::get_result_handler(JavaThread* thread, Method* method))
  methodHandle m(thread, method);
  assert(m->is_native(), "sanity check");
  return AbstractInterpreter::result_handler(m->result_type());
IRT_END
|
62
hotspot/src/cpu/ppc/vm/interpreterRT_ppc.hpp
Normal file
62
hotspot/src/cpu/ppc/vm/interpreterRT_ppc.hpp
Normal file
@ -0,0 +1,62 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_INTERPRETERRT_PPC_HPP
|
||||
#define CPU_PPC_VM_INTERPRETERRT_PPC_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
// native method calls
|
||||
|
||||
// Generates the per-method stub that moves Java arguments from the
// interpreter's locals into the PPC C calling convention for a native
// call. (Declared inside InterpreterRuntime via textual inclusion.)
class SignatureHandlerGenerator: public NativeSignatureIterator {
 private:
  MacroAssembler* _masm;
  // number of already used floating-point argument registers
  int _num_used_fp_arg_regs;

  // Per-type argument passing, invoked by iterate().
  void pass_int();
  void pass_long();
  void pass_double();
  void pass_float();
  void pass_object();

 public:
  // Creation
  SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
    _masm = new MacroAssembler(buffer);
    _num_used_fp_arg_regs = 0;
  }

  // Code generation
  void generate(uint64_t fingerprint);
};

// Support for generate_slow_signature_handler.
static address get_result_handler(JavaThread* thread, Method* method);

// A function to get the signature.
static address get_signature(JavaThread* thread, Method* method);
|
||||
|
||||
#endif // CPU_PPC_VM_INTERPRETERRT_PPC_HPP
|
736
hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
Normal file
736
hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp
Normal file
@ -0,0 +1,736 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/assembler.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "interpreter/bytecodeHistogram.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "interpreter/interpreterGenerator.hpp"
|
||||
#include "interpreter/interpreterRuntime.hpp"
|
||||
#include "interpreter/templateTable.hpp"
|
||||
#include "oops/arrayOop.hpp"
|
||||
#include "oops/methodData.hpp"
|
||||
#include "oops/method.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "prims/jvmtiExport.hpp"
|
||||
#include "prims/jvmtiThreadState.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/deoptimization.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "runtime/synchronizer.hpp"
|
||||
#include "runtime/timer.hpp"
|
||||
#include "runtime/vframeArray.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_Runtime1.hpp"
|
||||
#endif
|
||||
|
||||
#ifndef CC_INTERP
|
||||
#error "CC_INTERP must be defined on PPC"
|
||||
#endif
|
||||
|
||||
#define __ _masm->
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) // nothing
|
||||
#else
|
||||
#define BLOCK_COMMENT(str) __ block_comment(str)
|
||||
#endif
|
||||
|
||||
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
|
||||
|
||||
// Map a BasicType to its slot in the interpreter's result handler table.
// T_OBJECT and T_ARRAY share one handler; any other type is a bug.
int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int index = 0;
  switch (type) {
    case T_BOOLEAN: index = 0; break;
    case T_CHAR   : index = 1; break;
    case T_BYTE   : index = 2; break;
    case T_SHORT  : index = 3; break;
    case T_INT    : index = 4; break;
    case T_LONG   : index = 5; break;
    case T_VOID   : index = 6; break;
    case T_FLOAT  : index = 7; break;
    case T_DOUBLE : index = 8; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : index = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= index && index < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return index;
}
|
||||
|
||||
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
|
||||
// Slow_signature handler that respects the PPC C calling conventions.
|
||||
//
|
||||
// We get called by the native entry code with our output register
|
||||
// area == 8. First we call InterpreterRuntime::get_result_handler
|
||||
// to copy the pointer to the signature string temporarily to the
|
||||
// first C-argument and to return the result_handler in
|
||||
// R3_RET. Since native_entry will copy the jni-pointer to the
|
||||
// first C-argument slot later on, it is OK to occupy this slot
|
||||
// temporarilly. Then we copy the argument list on the java
|
||||
// expression stack into native varargs format on the native stack
|
||||
// and load arguments into argument registers. Integer arguments in
|
||||
// the varargs vector will be sign-extended to 8 bytes.
|
||||
//
|
||||
// On entry:
|
||||
// R3_ARG1 - intptr_t* Address of java argument list in memory.
|
||||
// R15_prev_state - BytecodeInterpreter* Address of interpreter state for
|
||||
// this method
|
||||
// R19_method
|
||||
//
|
||||
// On exit (just before return instruction):
|
||||
// R3_RET - contains the address of the result_handler.
|
||||
// R4_ARG2 - is not updated for static methods and contains "this" otherwise.
|
||||
// R5_ARG3-R10_ARG8: - When the (i-2)th Java argument is not of type float or double,
|
||||
// ARGi contains this argument. Otherwise, ARGi is not updated.
|
||||
// F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.
|
||||
|
||||
const int LogSizeOfTwoInstructions = 3;
|
||||
|
||||
// FIXME: use Argument:: GL: Argument names different numbers!
|
||||
const int max_fp_register_arguments = 13;
|
||||
const int max_int_register_arguments = 6; // first 2 are reserved
|
||||
|
||||
const Register arg_java = R21_tmp1;
|
||||
const Register arg_c = R22_tmp2;
|
||||
const Register signature = R23_tmp3; // is string
|
||||
const Register sig_byte = R24_tmp4;
|
||||
const Register fpcnt = R25_tmp5;
|
||||
const Register argcnt = R26_tmp6;
|
||||
const Register intSlot = R27_tmp7;
|
||||
const Register target_sp = R28_tmp8;
|
||||
const FloatRegister floatSlot = F0;
|
||||
|
||||
address entry = __ emit_fd();
|
||||
|
||||
__ save_LR_CR(R0);
|
||||
__ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
|
||||
// We use target_sp for storing arguments in the C frame.
|
||||
__ mr(target_sp, R1_SP);
|
||||
__ push_frame_abi112_nonvolatiles(0, R11_scratch1);
|
||||
|
||||
__ mr(arg_java, R3_ARG1);
|
||||
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);
|
||||
|
||||
// Signature is in R3_RET. Signature is callee saved.
|
||||
__ mr(signature, R3_RET);
|
||||
|
||||
// Reload method, it may have moved.
|
||||
#ifdef CC_INTERP
|
||||
__ ld(R19_method, state_(_method));
|
||||
#else
|
||||
__ unimplemented("slow signature handler 1");
|
||||
#endif
|
||||
|
||||
// Get the result handler.
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
|
||||
|
||||
// Reload method, it may have moved.
|
||||
#ifdef CC_INTERP
|
||||
__ ld(R19_method, state_(_method));
|
||||
#else
|
||||
__ unimplemented("slow signature handler 2");
|
||||
#endif
|
||||
|
||||
{
|
||||
Label L;
|
||||
// test if static
|
||||
// _access_flags._flags must be at offset 0.
|
||||
// TODO PPC port: requires change in shared code.
|
||||
//assert(in_bytes(AccessFlags::flags_offset()) == 0,
|
||||
// "MethodOopDesc._access_flags == MethodOopDesc._access_flags._flags");
|
||||
// _access_flags must be a 32 bit value.
|
||||
assert(sizeof(AccessFlags) == 4, "wrong size");
|
||||
__ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
|
||||
// testbit with condition register.
|
||||
__ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
|
||||
__ btrue(CCR0, L);
|
||||
// For non-static functions, pass "this" in R4_ARG2 and copy it
|
||||
// to 2nd C-arg slot.
|
||||
// We need to box the Java object here, so we use arg_java
|
||||
// (address of current Java stack slot) as argument and don't
|
||||
// dereference it as in case of ints, floats, etc.
|
||||
__ mr(R4_ARG2, arg_java);
|
||||
__ addi(arg_java, arg_java, -BytesPerWord);
|
||||
__ std(R4_ARG2, _abi(carg_2), target_sp);
|
||||
__ bind(L);
|
||||
}
|
||||
|
||||
// Will be incremented directly after loop_start. argcnt=0
|
||||
// corresponds to 3rd C argument.
|
||||
__ li(argcnt, -1);
|
||||
// arg_c points to 3rd C argument
|
||||
__ addi(arg_c, target_sp, _abi(carg_3));
|
||||
// no floating-point args parsed so far
|
||||
__ li(fpcnt, 0);
|
||||
|
||||
Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
|
||||
Label loop_start, loop_end;
|
||||
Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;
|
||||
|
||||
// signature points to '(' at entry
|
||||
#ifdef ASSERT
|
||||
__ lbz(sig_byte, 0, signature);
|
||||
__ cmplwi(CCR0, sig_byte, '(');
|
||||
__ bne(CCR0, do_dontreachhere);
|
||||
#endif
|
||||
|
||||
__ bind(loop_start);
|
||||
|
||||
__ addi(argcnt, argcnt, 1);
|
||||
__ lbzu(sig_byte, 1, signature);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, ')'); // end of signature
|
||||
__ beq(CCR0, loop_end);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'B'); // byte
|
||||
__ beq(CCR0, do_int);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'C'); // char
|
||||
__ beq(CCR0, do_int);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'D'); // double
|
||||
__ beq(CCR0, do_double);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'F'); // float
|
||||
__ beq(CCR0, do_float);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'I'); // int
|
||||
__ beq(CCR0, do_int);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'J'); // long
|
||||
__ beq(CCR0, do_long);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'S'); // short
|
||||
__ beq(CCR0, do_int);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'Z'); // boolean
|
||||
__ beq(CCR0, do_int);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'L'); // object
|
||||
__ beq(CCR0, do_object);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, '['); // array
|
||||
__ beq(CCR0, do_array);
|
||||
|
||||
// __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
|
||||
// __ beq(CCR0, do_void);
|
||||
|
||||
__ bind(do_dontreachhere);
|
||||
|
||||
__ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);
|
||||
|
||||
__ bind(do_array);
|
||||
|
||||
{
|
||||
Label start_skip, end_skip;
|
||||
|
||||
__ bind(start_skip);
|
||||
__ lbzu(sig_byte, 1, signature);
|
||||
__ cmplwi(CCR0, sig_byte, '[');
|
||||
__ beq(CCR0, start_skip); // skip further brackets
|
||||
__ cmplwi(CCR0, sig_byte, '9');
|
||||
__ bgt(CCR0, end_skip); // no optional size
|
||||
__ cmplwi(CCR0, sig_byte, '0');
|
||||
__ bge(CCR0, start_skip); // skip optional size
|
||||
__ bind(end_skip);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'L');
|
||||
__ beq(CCR0, do_object); // for arrays of objects, the name of the object must be skipped
|
||||
__ b(do_boxed); // otherwise, go directly to do_boxed
|
||||
}
|
||||
|
||||
__ bind(do_object);
|
||||
{
|
||||
Label L;
|
||||
__ bind(L);
|
||||
__ lbzu(sig_byte, 1, signature);
|
||||
__ cmplwi(CCR0, sig_byte, ';');
|
||||
__ bne(CCR0, L);
|
||||
}
|
||||
// Need to box the Java object here, so we use arg_java (address of
|
||||
// current Java stack slot) as argument and don't dereference it as
|
||||
// in case of ints, floats, etc.
|
||||
Label do_null;
|
||||
__ bind(do_boxed);
|
||||
__ ld(R0,0, arg_java);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ li(intSlot,0);
|
||||
__ beq(CCR0, do_null);
|
||||
__ mr(intSlot, arg_java);
|
||||
__ bind(do_null);
|
||||
__ std(intSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, -BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CCR0, move_intSlot_to_ARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_int);
|
||||
__ lwa(intSlot, 0, arg_java);
|
||||
__ std(intSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, -BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CCR0, move_intSlot_to_ARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_long);
|
||||
__ ld(intSlot, -BytesPerWord, arg_java);
|
||||
__ std(intSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CCR0, move_intSlot_to_ARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_float);
|
||||
__ lfs(floatSlot, 0, arg_java);
|
||||
#if defined(LINUX)
|
||||
__ stfs(floatSlot, 4, arg_c);
|
||||
#elif defined(AIX)
|
||||
__ stfs(floatSlot, 0, arg_c);
|
||||
#else
|
||||
#error "unknown OS"
|
||||
#endif
|
||||
__ addi(arg_java, arg_java, -BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
|
||||
__ blt(CCR0, move_floatSlot_to_FARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_double);
|
||||
__ lfd(floatSlot, - BytesPerWord, arg_java);
|
||||
__ stfd(floatSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
|
||||
__ blt(CCR0, move_floatSlot_to_FARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(loop_end);
|
||||
|
||||
__ pop_frame();
|
||||
__ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
|
||||
__ restore_LR_CR(R0);
|
||||
|
||||
__ blr();
|
||||
|
||||
Label move_int_arg, move_float_arg;
|
||||
__ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
|
||||
__ mr(R5_ARG3, intSlot); __ b(loop_start);
|
||||
__ mr(R6_ARG4, intSlot); __ b(loop_start);
|
||||
__ mr(R7_ARG5, intSlot); __ b(loop_start);
|
||||
__ mr(R8_ARG6, intSlot); __ b(loop_start);
|
||||
__ mr(R9_ARG7, intSlot); __ b(loop_start);
|
||||
__ mr(R10_ARG8, intSlot); __ b(loop_start);
|
||||
|
||||
__ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
|
||||
__ fmr(F1_ARG1, floatSlot); __ b(loop_start);
|
||||
__ fmr(F2_ARG2, floatSlot); __ b(loop_start);
|
||||
__ fmr(F3_ARG3, floatSlot); __ b(loop_start);
|
||||
__ fmr(F4_ARG4, floatSlot); __ b(loop_start);
|
||||
__ fmr(F5_ARG5, floatSlot); __ b(loop_start);
|
||||
__ fmr(F6_ARG6, floatSlot); __ b(loop_start);
|
||||
__ fmr(F7_ARG7, floatSlot); __ b(loop_start);
|
||||
__ fmr(F8_ARG8, floatSlot); __ b(loop_start);
|
||||
__ fmr(F9_ARG9, floatSlot); __ b(loop_start);
|
||||
__ fmr(F10_ARG10, floatSlot); __ b(loop_start);
|
||||
__ fmr(F11_ARG11, floatSlot); __ b(loop_start);
|
||||
__ fmr(F12_ARG12, floatSlot); __ b(loop_start);
|
||||
__ fmr(F13_ARG13, floatSlot); __ b(loop_start);
|
||||
|
||||
__ bind(move_intSlot_to_ARG);
|
||||
__ sldi(R0, argcnt, LogSizeOfTwoInstructions);
|
||||
__ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
|
||||
__ add(R11_scratch1, R0, R11_scratch1);
|
||||
__ mtctr(R11_scratch1/*branch_target*/);
|
||||
__ bctr();
|
||||
__ bind(move_floatSlot_to_FARG);
|
||||
__ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
|
||||
__ addi(fpcnt, fpcnt, 1);
|
||||
__ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
|
||||
__ add(R11_scratch1, R0, R11_scratch1);
|
||||
__ mtctr(R11_scratch1/*branch_target*/);
|
||||
__ bctr();
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
// Generate a result handler stub for the given Java return type.
// The stub canonicalizes the raw return value found in R3_RET
// (sign-/zero-extension for sub-word integral types, unboxing of the
// handle for objects) and returns to the caller via LR.
address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type) {
  //
  // Registers alive
  //   R3_RET
  //   LR
  //
  // Registers updated
  //   R3_RET
  //

  Label done;
  Label is_false;

  address entry = __ pc();

  switch (type) {
  case T_BOOLEAN:
    // Canonicalize: any non-zero value becomes 1, zero stays 0.
    __ cmpwi(CCR0, R3_RET, 0);
    __ beq(CCR0, is_false);
    __ li(R3_RET, 1);
    __ b(done);
    __ bind(is_false);
    __ li(R3_RET, 0);
    break;
  case T_BYTE:
    // sign extend 8 bits
    __ extsb(R3_RET, R3_RET);
    break;
  case T_CHAR:
    // zero extend 16 bits
    __ clrldi(R3_RET, R3_RET, 48);
    break;
  case T_SHORT:
    // sign extend 16 bits
    __ extsh(R3_RET, R3_RET);
    break;
  case T_INT:
    // sign extend 32 bits
    __ extsw(R3_RET, R3_RET);
    break;
  case T_LONG:
    // Already a full 64-bit value; nothing to do.
    break;
  case T_OBJECT:
    // unbox result if not null
    __ cmpdi(CCR0, R3_RET, 0);
    __ beq(CCR0, done);
    __ ld(R3_RET, 0, R3_RET);
    __ verify_oop(R3_RET);
    break;
  case T_FLOAT:
    // Floating-point results need no canonicalization here.
    break;
  case T_DOUBLE:
    break;
  case T_VOID:
    break;
  default: ShouldNotReachHere();
  }

  __ BIND(done);
  __ blr();

  return entry;
}
|
||||
|
||||
// Abstract method entry.
//
// Entry point used when an abstract method is invoked: calls into the
// VM to raise an AbstractMethodError and returns to the frame manager,
// which dispatches the pending exception.
address InterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();

  //
  // Registers alive
  //   R16_thread     - JavaThread*
  //   R19_method     - callee's methodOop (method to be invoked)
  //   R1_SP          - SP prepared such that caller's outgoing args are near top
  //   LR             - return address to caller
  //
  // Stack layout at this point:
  //
  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
  //           alignment (optional)
  //           [outgoing Java arguments]
  //           ...
  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
  //           ...
  //

  // Can't use call_VM here because we have not set up a new
  // interpreter state. Make the call to the vm and make it look like
  // our caller set up the JavaFrameAnchor.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Push a new C frame and save LR.
  __ save_LR_CR(R0);
  __ push_frame_abi112_nonvolatiles(0, R11_scratch1);

  // This is not a leaf but we have a JavaFrameAnchor now and we will
  // check (create) exceptions afterward so this is ok.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  // Pop the C frame and restore LR.
  __ pop_frame();
  __ restore_LR_CR(R0);

  // Reset JavaFrameAnchor from call_VM_leaf above.
  __ reset_last_Java_frame();

  // Return to frame manager, it will handle the pending exception.
  __ blr();

  return entry;
}
|
||||
|
||||
// Call an accessor method (assuming it is resolved, otherwise drop into
|
||||
// vanilla (slow path) entry.
|
||||
address InterpreterGenerator::generate_accessor_entry(void) {
|
||||
if(!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods)))
|
||||
return NULL;
|
||||
|
||||
Label Ldone, Lslow_path;
|
||||
|
||||
const Register Rthis = R3_ARG1,
|
||||
Rconst_method = R4_ARG2,
|
||||
Rcodes = Rconst_method,
|
||||
Rcpool_cache = R5_ARG3,
|
||||
Rscratch = R11_scratch1,
|
||||
Rjvmti_mode = Rscratch,
|
||||
Roffset = R12_scratch2,
|
||||
Rflags = R6_ARG4;
|
||||
|
||||
address entry = __ pc();
|
||||
|
||||
// Check for safepoint:
|
||||
// Ditch this, real man don't need safepoint checks.
|
||||
|
||||
// Also check for JVMTI mode
|
||||
// Check for null obj, take slow path if so.
|
||||
#ifdef CC_INTERP
|
||||
__ ld(Rthis, Interpreter::stackElementSize, R17_tos);
|
||||
#else
|
||||
Unimplemented()
|
||||
#endif
|
||||
__ lwz(Rjvmti_mode, thread_(interp_only_mode));
|
||||
__ cmpdi(CCR1, Rthis, 0);
|
||||
__ cmpwi(CCR0, Rjvmti_mode, 0);
|
||||
__ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
|
||||
__ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
|
||||
|
||||
// Do 2 things in parallel:
|
||||
// 1. Load the index out of the first instruction word, which looks like this:
|
||||
// <0x2a><0xb4><index (2 byte, native endianess)>.
|
||||
// 2. Load constant pool cache base.
|
||||
__ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
|
||||
__ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
|
||||
|
||||
__ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
|
||||
__ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
|
||||
|
||||
// Get the const pool entry by means of <index>.
|
||||
const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
|
||||
__ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
|
||||
__ add(Rcpool_cache, Rscratch, Rcpool_cache);
|
||||
|
||||
// Check if cpool cache entry is resolved.
|
||||
// We are resolved if the indices offset contains the current bytecode.
|
||||
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
|
||||
// Big Endian:
|
||||
__ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
|
||||
__ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
|
||||
__ bne(CCR0, Lslow_path);
|
||||
__ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
|
||||
|
||||
// Finally, start loading the value: Get cp cache entry into regs.
|
||||
__ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
|
||||
__ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
|
||||
|
||||
// Get field type.
|
||||
// (Rflags>>ConstantPoolCacheEntry::tos_state_shift)&((1<<ConstantPoolCacheEntry::tos_state_bits)-1)
|
||||
__ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
|
||||
|
||||
#ifdef ASSERT
|
||||
__ ld(R9_ARG7, 0, R1_SP);
|
||||
__ ld(R10_ARG8, 0, R21_sender_SP);
|
||||
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
|
||||
__ asm_assert_eq("backlink", 0x543);
|
||||
#endif // ASSERT
|
||||
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
|
||||
|
||||
// Load the return value according to field type.
|
||||
Label Litos, Lltos, Lbtos, Lctos, Lstos;
|
||||
__ cmpdi(CCR1, Rflags, itos);
|
||||
__ cmpdi(CCR0, Rflags, ltos);
|
||||
__ beq(CCR1, Litos);
|
||||
__ beq(CCR0, Lltos);
|
||||
__ cmpdi(CCR1, Rflags, btos);
|
||||
__ cmpdi(CCR0, Rflags, ctos);
|
||||
__ beq(CCR1, Lbtos);
|
||||
__ beq(CCR0, Lctos);
|
||||
__ cmpdi(CCR1, Rflags, stos);
|
||||
__ beq(CCR1, Lstos);
|
||||
#ifdef ASSERT
|
||||
__ cmpdi(CCR0, Rflags, atos);
|
||||
__ asm_assert_eq("what type is this?", 0x432);
|
||||
#endif
|
||||
// fallthru: __ bind(Latos);
|
||||
__ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rthis);
|
||||
__ blr();
|
||||
|
||||
__ bind(Litos);
|
||||
__ lwax(R3_RET, Rthis, Roffset);
|
||||
__ blr();
|
||||
|
||||
__ bind(Lltos);
|
||||
__ ldx(R3_RET, Rthis, Roffset);
|
||||
__ blr();
|
||||
|
||||
__ bind(Lbtos);
|
||||
__ lbzx(R3_RET, Rthis, Roffset);
|
||||
__ extsb(R3_RET, R3_RET);
|
||||
__ blr();
|
||||
|
||||
__ bind(Lctos);
|
||||
__ lhzx(R3_RET, Rthis, Roffset);
|
||||
__ blr();
|
||||
|
||||
__ bind(Lstos);
|
||||
__ lhax(R3_RET, Rthis, Roffset);
|
||||
__ blr();
|
||||
|
||||
__ bind(Lslow_path);
|
||||
assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
|
||||
__ load_const_optimized(Rscratch, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
|
||||
__ mtctr(Rscratch);
|
||||
__ bctr();
|
||||
__ flush();
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
//    into R8 and return quickly
// 2. If G1 is active we *must* execute this intrinsic for correctness:
//    It contains a GC barrier which puts the reference into the satb buffer
//    to indicate that someone holds a strong reference to the object the
//    weak ref points to!
address InterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. the "intrinsified" code for G1 (or any SATB based GC),
  //    2. the slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // If the receiver is null then it is OK to jump to the slow path.
#ifdef CC_INTERP
     __ ld(R3_RET, Interpreter::stackElementSize, R17_tos); // get receiver
#else
     Unimplemented();
#endif

    // Check if receiver == NULL and go the slow path.
    __ cmpdi(CCR0, R3_RET, 0);
    __ beq(CCR0, slow_path);

    // Load the value of the referent field.
    __ load_heap_oop_not_null(R3_RET, referent_offset, R3_RET);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value.

    // Restore caller sp for c2i case.
#ifdef ASSERT
    // Verify that R1_SP's backlink really is the caller's SP.
    __ ld(R9_ARG7, 0, R1_SP);
    __ ld(R10_ARG8, 0, R21_sender_SP);
    __ cmpd(CCR0, R9_ARG7, R10_ARG8);
    __ asm_assert_eq("backlink", 0x544);
#endif // ASSERT
    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

    __ g1_write_barrier_pre(noreg,         // obj
                            noreg,         // offset
                            R3_RET,        // pre_val
                            R11_scratch1,  // tmp
                            R12_scratch2,  // tmp
                            true);         // needs_frame

    __ blr();

    // Generate regular method entry.
    __ bind(slow_path);
    assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
    __ load_const_optimized(R11_scratch1, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
    __ mtctr(R11_scratch1);
    __ bctr();
    __ flush();

    return entry;
  } else {
    // Without G1 the plain accessor entry suffices.
    return generate_accessor_entry();
  }
}
|
||||
|
||||
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
  // Historically (back when adapter frames existed, cf.
  // C2IAdapter::setup_stack_frame) the oldest interpreter frame created
  // during deoptimization had to restore the callee-saved registers that
  // a compiled caller expected to survive the call into the deoptimized
  // compiled callee. That restoration would happen here.
  //
  // C2 has since been changed to use no callee save registers, so the
  // problem no longer exists and this routine remains only as a place
  // holder required by the shared deoptimization code.
  assert(f->is_interpreted_frame(), "must be interpreted");
}
|
42
hotspot/src/cpu/ppc/vm/interpreter_ppc.hpp
Normal file
42
hotspot/src/cpu/ppc/vm/interpreter_ppc.hpp
Normal file
@ -0,0 +1,42 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_INTERPRETER_PPC_HPP
|
||||
#define CPU_PPC_VM_INTERPRETER_PPC_HPP
|
||||
|
||||
public:
|
||||
|
||||
// Stack index relative to tos (which points at value)
|
||||
static int expr_index_at(int i) {
|
||||
return stackElementWords * i;
|
||||
}
|
||||
|
||||
// Already negated by c++ interpreter
|
||||
static int local_index_at(int i) {
|
||||
assert(i <= 0, "local direction already negated");
|
||||
return stackElementWords * i;
|
||||
}
|
||||
|
||||
#endif // CPU_PPC_VM_INTERPRETER_PPC_HPP
|
82
hotspot/src/cpu/ppc/vm/javaFrameAnchor_ppc.hpp
Normal file
82
hotspot/src/cpu/ppc/vm/javaFrameAnchor_ppc.hpp
Normal file
@ -0,0 +1,82 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
|
||||
#define CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
|
||||
|
||||
#ifndef CC_INTERP
|
||||
#error "CC_INTERP must be defined on PPC64"
|
||||
#endif
|
||||
|
||||
public:
|
||||
// Each arch must define reset, save, restore
|
||||
// These are used by objects that only care about:
|
||||
// 1 - initializing a new state (thread creation, javaCalls)
|
||||
// 2 - saving a current state (javaCalls)
|
||||
// 3 - restoring an old state (javaCalls)
|
||||
|
||||
  // Reset the anchor to "no last Java frame". _last_Java_sp is cleared
  // first (before the pc) so that a concurrent observer testing the sp
  // never sees a stale sp paired with a cleared pc.
  inline void clear(void) {
    // clearing _last_Java_sp must be first
    _last_Java_sp = NULL;
    // fence?
    OrderAccess::release();
    _last_Java_pc = NULL;
  }
|
||||
|
||||
  // Publish a new last Java frame. The pc is written first and the sp
  // last, separated by a release barrier, so an observer that sees a
  // non-NULL _last_Java_sp also sees the matching _last_Java_pc.
  inline void set(intptr_t* sp, address pc) {
    _last_Java_pc = pc;
    OrderAccess::release();
    _last_Java_sp = sp;
  }
|
||||
|
||||
  // Copy the anchor state from 'src', keeping "this" in a valid
  // transition state at every point for concurrent observers.
  void copy(JavaFrameAnchor* src) {
    // In order to make sure the transition state is valid for "this".
    // We must clear _last_Java_sp before copying the rest of the new data.
    //
    // Hack Alert: Temporary bugfix for 4717480/4721647
    // To act like previous version (pd_cache_state) don't NULL _last_Java_sp
    // unless the value is changing.
    if (_last_Java_sp != src->_last_Java_sp) {
      _last_Java_sp = NULL;
      OrderAccess::release();
    }
    _last_Java_pc = src->_last_Java_pc;
    // Must be last so profiler will always see valid frame if has_last_frame() is true.
    OrderAccess::release();
    _last_Java_sp = src->_last_Java_sp;
  }
|
||||
|
||||
  // Always walkable.
  bool walkable(void) { return true; }
  // Never any thing to do since we are always walkable and can find address of return addresses.
  void make_walkable(JavaThread* thread) { }

  // Read accessor for the last Java stack pointer.
  intptr_t* last_Java_sp(void) const { return _last_Java_sp; }

  // Read accessor for the last Java pc.
  address last_Java_pc(void) { return _last_Java_pc; }

  // Publish only the sp; the release barrier orders prior writes
  // (e.g. the pc) before the sp becomes visible.
  void set_last_Java_sp(intptr_t* sp) { OrderAccess::release(); _last_Java_sp = sp; }
|
||||
|
||||
#endif // CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
|
75
hotspot/src/cpu/ppc/vm/jniFastGetField_ppc.cpp
Normal file
75
hotspot/src/cpu/ppc/vm/jniFastGetField_ppc.cpp
Normal file
@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "assembler_ppc.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/jniFastGetField.hpp"
|
||||
#include "prims/jvm_misc.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
|
||||
|
||||
// Common generator for all integral-typed fast JNI field getters.
// Returning (address) -1 signals that this platform provides no fast
// JNI accessors, so the shared code falls back to the slow path.
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
  // we don't have fast jni accessors.
  return (address) -1;
}
|
||||
|
||||
// The per-type integral entries all delegate to
// generate_fast_get_int_field0, i.e. they all report that no fast
// accessor is available on PPC.
address JNI_FastGetField::generate_fast_get_boolean_field() {
  return generate_fast_get_int_field0(T_BOOLEAN);
}

address JNI_FastGetField::generate_fast_get_byte_field() {
  return generate_fast_get_int_field0(T_BYTE);
}

address JNI_FastGetField::generate_fast_get_char_field() {
  return generate_fast_get_int_field0(T_CHAR);
}

address JNI_FastGetField::generate_fast_get_short_field() {
  return generate_fast_get_int_field0(T_SHORT);
}

address JNI_FastGetField::generate_fast_get_int_field() {
  return generate_fast_get_int_field0(T_INT);
}
|
||||
|
||||
// Long getter: likewise, no fast accessor; (address) -1 selects the
// slow path.
address JNI_FastGetField::generate_fast_get_long_field() {
  // we don't have fast jni accessors.
  return (address) -1;
}
|
||||
|
||||
// Common generator for floating-point fast JNI field getters; returns
// (address) -1 because no fast accessor exists on this platform.
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
  // we don't have fast jni accessors.
  return (address) -1;
}
|
||||
|
||||
// Float/double entries delegate to generate_fast_get_float_field0,
// i.e. they also report that no fast accessor is available.
address JNI_FastGetField::generate_fast_get_float_field() {
  return generate_fast_get_float_field0(T_FLOAT);
}

address JNI_FastGetField::generate_fast_get_double_field() {
  return generate_fast_get_float_field0(T_DOUBLE);
}
|
110
hotspot/src/cpu/ppc/vm/jniTypes_ppc.hpp
Normal file
110
hotspot/src/cpu/ppc/vm/jniTypes_ppc.hpp
Normal file
@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_JNITYPES_PPC_HPP
|
||||
#define CPU_PPC_VM_JNITYPES_PPC_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "prims/jni.h"
|
||||
|
||||
// This file holds platform-dependent routines used to write primitive
|
||||
// jni types to the array of arguments passed into JavaCalls::call.
|
||||
|
||||
class JNITypes : AllStatic {
  // These functions write a java primitive type (in native format) to
  // a java stack slot array to be passed as an argument to
  // JavaCalls::call. I.e., they are functionally 'push' operations
  // if they have a 'pos' formal parameter. Note that jlong's and
  // jdouble's are written _in reverse_ of the order in which they
  // appear in the interpreter stack. This is because call stubs (see
  // the platform stubGenerator) reverse the argument list constructed
  // by JavaCallArguments (see javaCalls.hpp).

private:

#ifndef PPC64
#error "ppc32 support currently not implemented!!!"
#endif // PPC64

public:
  // Ints are stored in native format in one JavaCallArgument slot at *to.
  static inline void put_int(jint from, intptr_t *to) { *(jint *)(to + 0 ) = from; }
  static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; }
  static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }

  // Longs are stored in native format in one JavaCallArgument slot at
  // *(to+1).
  static inline void put_long(jlong from, intptr_t *to) {
    *(jlong*) (to + 1) = from;
  }

  // 'pos' variant: the long occupies two slots; advance pos by 2.
  static inline void put_long(jlong from, intptr_t *to, int& pos) {
    *(jlong*) (to + 1 + pos) = from;
    pos += 2;
  }

  static inline void put_long(jlong *from, intptr_t *to, int& pos) {
    *(jlong*) (to + 1 + pos) = *from;
    pos += 2;
  }

  // Oops are stored in native format in one JavaCallArgument slot at *to.
  static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; }
  static inline void put_obj(oop from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = from; }
  static inline void put_obj(oop *from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = *from; }

  // Floats are stored in native format in one JavaCallArgument slot at *to.
  static inline void put_float(jfloat from, intptr_t *to) { *(jfloat *)(to + 0 ) = from; }
  static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; }
  static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }

  // Doubles are stored in native word format in one JavaCallArgument
  // slot at *(to+1). Like longs, they occupy two slots.
  static inline void put_double(jdouble from, intptr_t *to) {
    *(jdouble*) (to + 1) = from;
  }

  static inline void put_double(jdouble from, intptr_t *to, int& pos) {
    *(jdouble*) (to + 1 + pos) = from;
    pos += 2;
  }

  static inline void put_double(jdouble *from, intptr_t *to, int& pos) {
    *(jdouble*) (to + 1 + pos) = *from;
    pos += 2;
  }

  // The get_xxx routines, on the other hand, actually _do_ fetch
  // java primitive types from the interpreter stack.
  // No need to worry about alignment: on PPC64 every JavaCallArgument
  // slot is a 64-bit word. Longs/doubles are read from *(from+1),
  // mirroring where put_long/put_double stored them.
  static inline jint get_int (intptr_t *from) { return *(jint *) from; }
  static inline jlong get_long (intptr_t *from) { return *(jlong *) (from + 1); }
  static inline oop get_obj (intptr_t *from) { return *(oop *) from; }
  static inline jfloat get_float (intptr_t *from) { return *(jfloat *) from; }
  static inline jdouble get_double(intptr_t *from) { return *(jdouble *)(from + 1); }
};
|
||||
|
||||
#endif // CPU_PPC_VM_JNITYPES_PPC_HPP
|
53
hotspot/src/cpu/ppc/vm/jni_ppc.h
Normal file
53
hotspot/src/cpu/ppc/vm/jni_ppc.h
Normal file
@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_JNI_PPC_H
#define CPU_PPC_VM_JNI_PPC_H

// Note: please do not change these without also changing jni_md.h in the JDK
// repository
#ifndef __has_attribute
  #define __has_attribute(x) 0
#endif
// Use ELF visibility attributes for JNI export/import when the compiler
// supports them (GCC > 4.2, or anything reporting the attribute).
#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
  #define JNIEXPORT     __attribute__((visibility("default")))
  #define JNIIMPORT     __attribute__((visibility("default")))
#else
  #define JNIEXPORT
  #define JNIIMPORT
#endif

// No special calling convention on PPC.
#define JNICALL
typedef int jint;
// jlong is always 64 bits: plain 'long' under LP64, 'long long' otherwise.
#if defined(_LP64)
  typedef long jlong;
#else
  typedef long long jlong;
#endif

typedef signed char jbyte;

#endif // CPU_PPC_VM_JNI_PPC_H
|
3017
hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp
Normal file
3017
hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp
Normal file
File diff suppressed because it is too large
Load Diff
658
hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp
Normal file
658
hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp
Normal file
@ -0,0 +1,658 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
|
||||
#define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
|
||||
|
||||
#include "asm/assembler.hpp"
|
||||
|
||||
// MacroAssembler extends Assembler by a few frequently used macros.
|
||||
|
||||
class ciTypeArray;
|
||||
|
||||
class MacroAssembler: public Assembler {
|
||||
public:
|
||||
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
|
||||
|
||||
//
|
||||
// Optimized instruction emitters
|
||||
//
|
||||
|
||||
inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
|
||||
inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
|
||||
|
||||
// load d = *[a+si31]
|
||||
// Emits several instructions if the offset is not encodable in one instruction.
|
||||
void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);
|
||||
void ld_largeoffset (Register d, int si31, Register a, int emit_filler_nop);
|
||||
inline static bool is_ld_largeoffset(address a);
|
||||
inline static int get_ld_largeoffset_offset(address a);
|
||||
|
||||
inline void round_to(Register r, int modulus);
|
||||
|
||||
// Load/store with type given by parameter.
|
||||
void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed);
|
||||
void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes);
|
||||
|
||||
// Move register if destination register and target register are different
|
||||
inline void mr_if_needed(Register rd, Register rs);
|
||||
|
||||
// nop padding
|
||||
void align(int modulus);
|
||||
|
||||
//
|
||||
// Constants, loading constants, TOC support
|
||||
//
|
||||
|
||||
// Address of the global TOC.
|
||||
inline static address global_toc();
|
||||
// Offset of given address to the global TOC.
|
||||
inline static int offset_to_global_toc(const address addr);
|
||||
|
||||
// Address of TOC of the current method.
|
||||
inline address method_toc();
|
||||
// Offset of given address to TOC of the current method.
|
||||
inline int offset_to_method_toc(const address addr);
|
||||
|
||||
// Global TOC.
|
||||
void calculate_address_from_global_toc(Register dst, address addr,
|
||||
bool hi16 = true, bool lo16 = true,
|
||||
bool add_relocation = true, bool emit_dummy_addr = false);
|
||||
inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) {
|
||||
calculate_address_from_global_toc(dst, addr, true, false);
|
||||
};
|
||||
inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) {
|
||||
calculate_address_from_global_toc(dst, addr, false, true);
|
||||
};
|
||||
|
||||
inline static bool is_calculate_address_from_global_toc_at(address a, address bound);
|
||||
static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound);
|
||||
static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);
|
||||
|
||||
#ifdef _LP64
|
||||
// Patch narrow oop constant.
|
||||
inline static bool is_set_narrow_oop(address a, address bound);
|
||||
static int patch_set_narrow_oop(address a, address bound, narrowOop data);
|
||||
static narrowOop get_narrow_oop(address a, address bound);
|
||||
#endif
|
||||
|
||||
inline static bool is_load_const_at(address a);
|
||||
|
||||
// Emits an oop const to the constant pool, loads the constant, and
|
||||
// sets a relocation info with address current_pc.
|
||||
void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc);
|
||||
void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) {
|
||||
assert(dst == R2_TOC, "base register must be TOC");
|
||||
load_const_from_method_toc(dst, a, toc);
|
||||
}
|
||||
|
||||
static bool is_load_const_from_method_toc_at(address a);
|
||||
static int get_offset_of_load_const_from_method_toc_at(address a);
|
||||
|
||||
// Get the 64 bit constant from a `load_const' sequence.
|
||||
static long get_const(address load_const);
|
||||
|
||||
// Patch the 64 bit constant of a `load_const' sequence. This is a
|
||||
// low level procedure. It neither flushes the instruction cache nor
|
||||
// is it atomic.
|
||||
static void patch_const(address load_const, long x);
|
||||
|
||||
// Metadata in code that we have to keep track of.
|
||||
AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
|
||||
AddressLiteral constant_metadata_address(Metadata* obj); // find_index
|
||||
// Oops used directly in compiled code are stored in the constant pool,
|
||||
// and loaded from there.
|
||||
// Allocate new entry for oop in constant pool. Generate relocation.
|
||||
AddressLiteral allocate_oop_address(jobject obj);
|
||||
// Find oop obj in constant pool. Return relocation with it's index.
|
||||
AddressLiteral constant_oop_address(jobject obj);
|
||||
|
||||
// Find oop in constant pool and emit instructions to load it.
|
||||
// Uses constant_oop_address.
|
||||
inline void set_oop_constant(jobject obj, Register d);
|
||||
// Same as load_address.
|
||||
inline void set_oop (AddressLiteral obj_addr, Register d);
|
||||
|
||||
// Read runtime constant: Issue load if constant not yet established,
|
||||
// else use real constant.
|
||||
virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
|
||||
Register tmp,
|
||||
int offset);
|
||||
|
||||
//
|
||||
// branch, jump
|
||||
//
|
||||
|
||||
inline void pd_patch_instruction(address branch, address target);
|
||||
NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)
|
||||
|
||||
// Conditional far branch for destinations encodable in 24+2 bits.
|
||||
// Same interface as bc, e.g. no inverse boint-field.
|
||||
enum {
|
||||
bc_far_optimize_not = 0,
|
||||
bc_far_optimize_on_relocate = 1
|
||||
};
|
||||
// optimize: flag for telling the conditional far branch to optimize
|
||||
// itself when relocated.
|
||||
void bc_far(int boint, int biint, Label& dest, int optimize);
|
||||
// Relocation of conditional far branches.
|
||||
static bool is_bc_far_at(address instruction_addr);
|
||||
static address get_dest_of_bc_far_at(address instruction_addr);
|
||||
static void set_dest_of_bc_far_at(address instruction_addr, address dest);
|
||||
private:
|
||||
static bool inline is_bc_far_variant1_at(address instruction_addr);
|
||||
static bool inline is_bc_far_variant2_at(address instruction_addr);
|
||||
static bool inline is_bc_far_variant3_at(address instruction_addr);
|
||||
public:
|
||||
|
||||
// Convenience bc_far versions.
|
||||
inline void blt_far(ConditionRegister crx, Label& L, int optimize);
|
||||
inline void bgt_far(ConditionRegister crx, Label& L, int optimize);
|
||||
inline void beq_far(ConditionRegister crx, Label& L, int optimize);
|
||||
inline void bso_far(ConditionRegister crx, Label& L, int optimize);
|
||||
inline void bge_far(ConditionRegister crx, Label& L, int optimize);
|
||||
inline void ble_far(ConditionRegister crx, Label& L, int optimize);
|
||||
inline void bne_far(ConditionRegister crx, Label& L, int optimize);
|
||||
inline void bns_far(ConditionRegister crx, Label& L, int optimize);
|
||||
|
||||
// Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump.
|
||||
private:
|
||||
enum {
|
||||
bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*5load_const*/ + 1/*mtctr*/ + 1/*bctrl*/),
|
||||
bxx64_patchable_size = bxx64_patchable_instruction_count * BytesPerInstWord,
|
||||
bxx64_patchable_ret_addr_offset = bxx64_patchable_size
|
||||
};
|
||||
void bxx64_patchable(address target, relocInfo::relocType rt, bool link);
|
||||
static bool is_bxx64_patchable_at( address instruction_addr, bool link);
|
||||
// Does the instruction use a pc-relative encoding of the destination?
|
||||
static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link);
|
||||
static bool is_bxx64_patchable_variant1_at( address instruction_addr, bool link);
|
||||
// Load destination relative to global toc.
|
||||
static bool is_bxx64_patchable_variant1b_at( address instruction_addr, bool link);
|
||||
static bool is_bxx64_patchable_variant2_at( address instruction_addr, bool link);
|
||||
static void set_dest_of_bxx64_patchable_at( address instruction_addr, address target, bool link);
|
||||
static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link);
|
||||
|
||||
public:
|
||||
// call
|
||||
enum {
|
||||
bl64_patchable_instruction_count = bxx64_patchable_instruction_count,
|
||||
bl64_patchable_size = bxx64_patchable_size,
|
||||
bl64_patchable_ret_addr_offset = bxx64_patchable_ret_addr_offset
|
||||
};
|
||||
inline void bl64_patchable(address target, relocInfo::relocType rt) {
|
||||
bxx64_patchable(target, rt, /*link=*/true);
|
||||
}
|
||||
inline static bool is_bl64_patchable_at(address instruction_addr) {
|
||||
return is_bxx64_patchable_at(instruction_addr, /*link=*/true);
|
||||
}
|
||||
inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) {
|
||||
return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true);
|
||||
}
|
||||
inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) {
|
||||
set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true);
|
||||
}
|
||||
inline static address get_dest_of_bl64_patchable_at(address instruction_addr) {
|
||||
return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true);
|
||||
}
|
||||
// jump
|
||||
enum {
|
||||
b64_patchable_instruction_count = bxx64_patchable_instruction_count,
|
||||
b64_patchable_size = bxx64_patchable_size,
|
||||
};
|
||||
inline void b64_patchable(address target, relocInfo::relocType rt) {
|
||||
bxx64_patchable(target, rt, /*link=*/false);
|
||||
}
|
||||
inline static bool is_b64_patchable_at(address instruction_addr) {
|
||||
return is_bxx64_patchable_at(instruction_addr, /*link=*/false);
|
||||
}
|
||||
inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) {
|
||||
return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false);
|
||||
}
|
||||
inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) {
|
||||
set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false);
|
||||
}
|
||||
inline static address get_dest_of_b64_patchable_at(address instruction_addr) {
|
||||
return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false);
|
||||
}
|
||||
|
||||
//
|
||||
// Support for frame handling
|
||||
//
|
||||
|
||||
// some ABI-related functions
|
||||
void save_nonvolatile_gprs( Register dst_base, int offset);
|
||||
void restore_nonvolatile_gprs(Register src_base, int offset);
|
||||
void save_volatile_gprs( Register dst_base, int offset);
|
||||
void restore_volatile_gprs(Register src_base, int offset);
|
||||
void save_LR_CR( Register tmp); // tmp contains LR on return.
|
||||
void restore_LR_CR(Register tmp);
|
||||
|
||||
// Get current PC using bl-next-instruction trick.
|
||||
address get_PC_trash_LR(Register result);
|
||||
|
||||
// Resize current frame either relatively wrt to current SP or absolute.
|
||||
void resize_frame(Register offset, Register tmp);
|
||||
void resize_frame(int offset, Register tmp);
|
||||
void resize_frame_absolute(Register addr, Register tmp1, Register tmp2);
|
||||
|
||||
// Push a frame of size bytes.
|
||||
void push_frame(Register bytes, Register tmp);
|
||||
|
||||
// Push a frame of size `bytes'. No abi space provided.
|
||||
void push_frame(unsigned int bytes, Register tmp);
|
||||
|
||||
// Push a frame of size `bytes' plus abi112 on top.
|
||||
void push_frame_abi112(unsigned int bytes, Register tmp);
|
||||
|
||||
// Setup up a new C frame with a spill area for non-volatile GPRs and additional
|
||||
// space for local variables
|
||||
void push_frame_abi112_nonvolatiles(unsigned int bytes, Register tmp);
|
||||
|
||||
// pop current C frame
|
||||
void pop_frame();
|
||||
|
||||
//
|
||||
// Calls
|
||||
//
|
||||
|
||||
private:
|
||||
address _last_calls_return_pc;
|
||||
|
||||
// Generic version of a call to C function via a function descriptor
|
||||
// with variable support for C calling conventions (TOC, ENV, etc.).
|
||||
// updates and returns _last_calls_return_pc.
|
||||
address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
|
||||
bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
|
||||
|
||||
public:
|
||||
|
||||
// Get the pc where the last call will return to. returns _last_calls_return_pc.
|
||||
inline address last_calls_return_pc();
|
||||
|
||||
// Call a C function via a function descriptor and use full C
|
||||
// calling conventions. Updates and returns _last_calls_return_pc.
|
||||
address call_c(Register function_descriptor);
|
||||
address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
|
||||
address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
|
||||
Register toc);
|
||||
|
||||
protected:
|
||||
|
||||
// It is imperative that all calls into the VM are handled via the
|
||||
// call_VM macros. They make sure that the stack linkage is setup
|
||||
// correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
|
||||
// while call_VM_leaf's correspond to LEAF entry points.
|
||||
//
|
||||
// This is the base routine called by the different versions of
|
||||
// call_VM. The interpreter may customize this version by overriding
|
||||
// it for its purposes (e.g., to save/restore additional registers
|
||||
// when doing a VM call).
|
||||
//
|
||||
// If no last_java_sp is specified (noreg) then SP will be used instead.
|
||||
virtual void call_VM_base(
|
||||
// where an oop-result ends up if any; use noreg otherwise
|
||||
Register oop_result,
|
||||
// to set up last_Java_frame in stubs; use noreg otherwise
|
||||
Register last_java_sp,
|
||||
// the entry point
|
||||
address entry_point,
|
||||
// flag which indicates if exception should be checked
|
||||
bool check_exception=true
|
||||
);
|
||||
|
||||
// Support for VM calls. This is the base routine called by the
|
||||
// different versions of call_VM_leaf. The interpreter may customize
|
||||
// this version by overriding it for its purposes (e.g., to
|
||||
// save/restore additional registers when doing a VM call).
|
||||
void call_VM_leaf_base(address entry_point);
|
||||
|
||||
public:
|
||||
// Call into the VM.
|
||||
// Passes the thread pointer (in R3_ARG1) as a prepended argument.
|
||||
// Makes sure oop return values are visible to the GC.
|
||||
void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
|
||||
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
|
||||
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
|
||||
void call_VM_leaf(address entry_point);
|
||||
void call_VM_leaf(address entry_point, Register arg_1);
|
||||
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
|
||||
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
|
||||
|
||||
// Call a stub function via a function descriptor, but don't save
|
||||
// TOC before call, don't setup TOC and ENV for call, and don't
|
||||
// restore TOC after call. Updates and returns _last_calls_return_pc.
|
||||
inline address call_stub(Register function_entry);
|
||||
inline void call_stub_and_return_to(Register function_entry, Register return_pc);
|
||||
|
||||
//
|
||||
// Java utilities
|
||||
//
|
||||
|
||||
// Read from the polling page, its address is already in a register.
|
||||
inline void load_from_polling_page(Register polling_page_address, int offset = 0);
|
||||
// Check whether instruction is a read access to the polling page
|
||||
// which was emitted by load_from_polling_page(..).
|
||||
static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
|
||||
address* polling_address_ptr = NULL);
|
||||
|
||||
// Check whether instruction is a write access to the memory
|
||||
// serialization page realized by one of the instructions stw, stwu,
|
||||
// stwx, or stwux.
|
||||
static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);
|
||||
|
||||
// Support for NULL-checks
|
||||
//
|
||||
// Generates code that causes a NULL OS exception if the content of reg is NULL.
|
||||
// If the accessed location is M[reg + offset] and the offset is known, provide the
|
||||
// offset. No explicit code generation is needed if the offset is within a certain
|
||||
// range (0 <= offset <= page_size).
|
||||
|
||||
// Stack overflow checking
|
||||
void bang_stack_with_offset(int offset);
|
||||
|
||||
// If instruction is a stack bang of the form ld, stdu, or
|
||||
// stdux, return the banged address. Otherwise, return 0.
|
||||
static address get_stack_bang_address(int instruction, void* ucontext);
|
||||
|
||||
// Atomics
|
||||
// CmpxchgX sets condition register to cmpX(current, compare).
|
||||
// (flag == ne) => (dest_current_value != compare_value), (!swapped)
|
||||
// (flag == eq) => (dest_current_value == compare_value), ( swapped)
|
||||
static inline bool cmpxchgx_hint_acquire_lock() { return true; }
|
||||
// The stxcx will probably not be succeeded by a releasing store.
|
||||
static inline bool cmpxchgx_hint_release_lock() { return false; }
|
||||
static inline bool cmpxchgx_hint_atomic_update() { return false; }
|
||||
|
||||
// Cmpxchg semantics
|
||||
enum {
|
||||
MemBarNone = 0,
|
||||
MemBarRel = 1,
|
||||
MemBarAcq = 2,
|
||||
MemBarFenceAfter = 4 // use powers of 2
|
||||
};
|
||||
void cmpxchgw(ConditionRegister flag,
|
||||
Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
|
||||
int semantics, bool cmpxchgx_hint = false,
|
||||
Register int_flag_success = noreg, bool contention_hint = false);
|
||||
void cmpxchgd(ConditionRegister flag,
|
||||
Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
|
||||
int semantics, bool cmpxchgx_hint = false,
|
||||
Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
|
||||
|
||||
// interface method calling
|
||||
void lookup_interface_method(Register recv_klass,
|
||||
Register intf_klass,
|
||||
RegisterOrConstant itable_index,
|
||||
Register method_result,
|
||||
Register temp_reg, Register temp2_reg,
|
||||
Label& no_such_interface);
|
||||
|
||||
// virtual method calling
|
||||
void lookup_virtual_method(Register recv_klass,
|
||||
RegisterOrConstant vtable_index,
|
||||
Register method_result);
|
||||
|
||||
// Test sub_klass against super_klass, with fast and slow paths.
|
||||
|
||||
// The fast path produces a tri-state answer: yes / no / maybe-slow.
|
||||
// One of the three labels can be NULL, meaning take the fall-through.
|
||||
// If super_check_offset is -1, the value is loaded up from super_klass.
|
||||
// No registers are killed, except temp_reg and temp2_reg.
|
||||
// If super_check_offset is not -1, temp2_reg is not used and can be noreg.
|
||||
void check_klass_subtype_fast_path(Register sub_klass,
|
||||
Register super_klass,
|
||||
Register temp1_reg,
|
||||
Register temp2_reg,
|
||||
Label& L_success,
|
||||
Label& L_failure);
|
||||
|
||||
// The rest of the type check; must be wired to a corresponding fast path.
|
||||
// It does not repeat the fast path logic, so don't use it standalone.
|
||||
// The temp_reg can be noreg, if no temps are available.
|
||||
// It can also be sub_klass or super_klass, meaning it's OK to kill that one.
|
||||
// Updates the sub's secondary super cache as necessary.
|
||||
void check_klass_subtype_slow_path(Register sub_klass,
|
||||
Register super_klass,
|
||||
Register temp1_reg,
|
||||
Register temp2_reg,
|
||||
Label* L_success = NULL,
|
||||
Register result_reg = noreg);
|
||||
|
||||
// Simplified, combined version, good for typical uses.
|
||||
// Falls through on failure.
|
||||
void check_klass_subtype(Register sub_klass,
|
||||
Register super_klass,
|
||||
Register temp1_reg,
|
||||
Register temp2_reg,
|
||||
Label& L_success);
|
||||
|
||||
// Method handle support (JSR 292).
|
||||
void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type);
|
||||
|
||||
RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);
|
||||
|
||||
// Biased locking support
|
||||
// Upon entry,obj_reg must contain the target object, and mark_reg
|
||||
// must contain the target object's header.
|
||||
// Destroys mark_reg if an attempt is made to bias an anonymously
|
||||
// biased lock. In this case a failure will go either to the slow
|
||||
// case or fall through with the notEqual condition code set with
|
||||
// the expectation that the slow case in the runtime will be called.
|
||||
// In the fall-through case where the CAS-based lock is done,
|
||||
// mark_reg is not destroyed.
|
||||
void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
|
||||
Register temp2_reg, Label& done, Label* slow_case = NULL);
|
||||
// Upon entry, the base register of mark_addr must contain the oop.
|
||||
// Destroys temp_reg.
|
||||
// If allow_delay_slot_filling is set to true, the next instruction
|
||||
// emitted after this one will go in an annulled delay slot if the
|
||||
// biased locking exit case failed.
|
||||
void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);
|
||||
|
||||
void compiler_fast_lock_object( ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
|
||||
void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
|
||||
|
||||
// Support for serializing memory accesses between threads
|
||||
void serialize_memory(Register thread, Register tmp1, Register tmp2);
|
||||
|
||||
// GC barrier support.
|
||||
void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
|
||||
void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
|
||||
|
||||
#ifndef SERIALGC
|
||||
// General G1 pre-barrier generator.
|
||||
void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
|
||||
Register Rtmp1, Register Rtmp2, bool needs_frame = false);
|
||||
// General G1 post-barrier generator
|
||||
void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
|
||||
Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
|
||||
#endif // SERIALGC
|
||||
|
||||
// Support for managing the JavaThread pointer (i.e.; the reference to
|
||||
// thread-local information).
|
||||
|
||||
// Support for last Java frame (but use call_VM instead where possible):
|
||||
// access R16_thread->last_Java_sp.
|
||||
void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
|
||||
void reset_last_Java_frame(void);
|
||||
void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
|
||||
|
||||
// Read vm result from thread: oop_result = R16_thread->result;
|
||||
void get_vm_result (Register oop_result);
|
||||
void get_vm_result_2(Register metadata_result);
|
||||
|
||||
static bool needs_explicit_null_check(intptr_t offset);
|
||||
|
||||
// Trap-instruction-based checks.
|
||||
// Range checks can be distinguished from zero checks as they check 32 bit,
|
||||
// zero checks all 64 bits (tw, td).
|
||||
inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual);
|
||||
static bool is_trap_null_check(int x) {
|
||||
return is_tdi(x, traptoEqual, -1/*any reg*/, 0) ||
|
||||
is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
|
||||
}
|
||||
|
||||
inline void trap_zombie_not_entrant();
|
||||
static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }
|
||||
|
||||
inline void trap_should_not_reach_here();
|
||||
static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }
|
||||
|
||||
inline void trap_ic_miss_check(Register a, Register b);
|
||||
static bool is_trap_ic_miss_check(int x) {
|
||||
return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
|
||||
}
|
||||
|
||||
// Implicit or explicit null check, jumps to static address exception_entry.
|
||||
inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);
|
||||
|
||||
// Check accessed object for null. Use SIGTRAP-based null checks on AIX.
|
||||
inline void ld_with_trap_null_check(Register d, int si16, Register s1);
|
||||
// Variant for heap OOPs including decompression of compressed OOPs.
|
||||
inline void load_heap_oop_with_trap_null_check(Register d, RegisterOrConstant offs, Register s1);
|
||||
|
||||
// Load heap oop and decompress. Loaded oop may not be null.
|
||||
inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
|
||||
|
||||
// Null allowed.
|
||||
inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);
|
||||
|
||||
// Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
|
||||
inline void encode_heap_oop_not_null(Register d);
|
||||
inline void decode_heap_oop_not_null(Register d);
|
||||
|
||||
// Null allowed.
|
||||
inline void decode_heap_oop(Register d);
|
||||
|
||||
// Load/Store klass oop from klass field. Compress.
|
||||
void load_klass(Register dst, Register src);
|
||||
void load_klass_with_trap_null_check(Register dst, Register src);
|
||||
void store_klass(Register dst_oop, Register klass, Register tmp = R0);
|
||||
void decode_klass_not_null(Register dst, Register src = noreg);
|
||||
void encode_klass_not_null(Register dst, Register src = noreg);
|
||||
|
||||
// Load common heap base into register.
|
||||
void reinit_heapbase(Register d, Register tmp = noreg);
|
||||
|
||||
// SIGTRAP-based range checks for arrays.
|
||||
inline void trap_range_check_l(Register a, Register b);
|
||||
inline void trap_range_check_l(Register a, int si16);
|
||||
static bool is_trap_range_check_l(int x) {
|
||||
return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
|
||||
is_twi(x, traptoLessThanUnsigned, -1/*any reg*/) );
|
||||
}
|
||||
inline void trap_range_check_le(Register a, int si16);
|
||||
static bool is_trap_range_check_le(int x) {
|
||||
return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
|
||||
}
|
||||
inline void trap_range_check_g(Register a, int si16);
|
||||
static bool is_trap_range_check_g(int x) {
|
||||
return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
|
||||
}
|
||||
inline void trap_range_check_ge(Register a, Register b);
|
||||
inline void trap_range_check_ge(Register a, int si16);
|
||||
static bool is_trap_range_check_ge(int x) {
|
||||
return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
|
||||
is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/) );
|
||||
}
|
||||
static bool is_trap_range_check(int x) {
|
||||
return is_trap_range_check_l(x) || is_trap_range_check_le(x) ||
|
||||
is_trap_range_check_g(x) || is_trap_range_check_ge(x);
|
||||
}
|
||||
|
||||
// Needle of length 1.
|
||||
void string_indexof_1(Register result, Register haystack, Register haycnt,
|
||||
Register needle, jchar needleChar,
|
||||
Register tmp1, Register tmp2);
|
||||
// General indexof, eventually with constant needle length.
|
||||
void string_indexof(Register result, Register haystack, Register haycnt,
|
||||
Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
|
||||
Register tmp1, Register tmp2, Register tmp3, Register tmp4);
|
||||
void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
|
||||
Register result_reg, Register tmp_reg);
|
||||
void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
|
||||
Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
|
||||
Register tmp5_reg);
|
||||
void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
|
||||
Register tmp1_reg, Register tmp2_reg);
|
||||
|
||||
//
|
||||
// Debugging
|
||||
//
|
||||
|
||||
// assert on cr0
|
||||
void asm_assert(bool check_equal, const char* msg, int id);
|
||||
void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
|
||||
void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }
|
||||
|
||||
private:
|
||||
void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
|
||||
const char* msg, int id);
|
||||
|
||||
public:
|
||||
|
||||
void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) {
|
||||
asm_assert_mems_zero(true, 8, mem_offset, mem_base, msg, id);
|
||||
}
|
||||
void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) {
|
||||
asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id);
|
||||
}
|
||||
|
||||
// Verify R16_thread contents.
|
||||
void verify_thread();
|
||||
|
||||
// Emit code to verify that reg contains a valid oop if +VerifyOops is set.
|
||||
void verify_oop(Register reg, const char* s = "broken oop");
|
||||
|
||||
// TODO: verify method and klass metadata (compare against vptr?)
|
||||
void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
|
||||
void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
|
||||
|
||||
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
|
||||
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
|
||||
|
||||
private:
|
||||
|
||||
enum {
|
||||
stop_stop = 0,
|
||||
stop_untested = 1,
|
||||
stop_unimplemented = 2,
|
||||
stop_shouldnotreachhere = 3,
|
||||
stop_end = 4
|
||||
};
|
||||
void stop(int type, const char* msg, int id);
|
||||
|
||||
public:
|
||||
  // Prints msg, dumps registers and stops execution.
  // Each wrapper tags the emitted stop with its reason code (see the stop_* enum above).
  void stop         (const char* msg = "", int id = 0) { stop(stop_stop,          msg, id); }
  void untested     (const char* msg = "", int id = 0) { stop(stop_untested,      msg, id); }
  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); }
  // Unconditional stop for paths that must never execute; uses id -1.
  void should_not_reach_here()                         { stop(stop_shouldnotreachhere, "", -1); }
|
||||
|
||||
void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
|
||||
};
|
||||
|
||||
#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
|
382
hotspot/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp
Normal file
382
hotspot/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp
Normal file
@ -0,0 +1,382 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
|
||||
#define CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
|
||||
|
||||
#include "asm/assembler.inline.hpp"
|
||||
#include "asm/macroAssembler.hpp"
|
||||
#include "asm/codeBuffer.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
|
||||
// Returns true if the instruction(s) at 'a' form an ld with a (possibly large)
// offset: either a single ld, or an addis/ld pair where the ld's base register
// is the addis' target register.
inline bool MacroAssembler::is_ld_largeoffset(address a) {
  const int inst1 = *(int *)a;
  const int inst2 = *(int *)(a+4);
  return (is_ld(inst1)) ||
         (is_addis(inst1) && is_ld(inst2) && inv_ra_field(inst2) == inv_rt_field(inst1));
}
|
||||
|
||||
// Extracts the effective offset from an ld-with-large-offset sequence
// (see is_ld_largeoffset). For the addis/ld pair the two 16-bit displacement
// fields are combined into the full offset.
inline int MacroAssembler::get_ld_largeoffset_offset(address a) {
  assert(MacroAssembler::is_ld_largeoffset(a), "must be ld with large offset");

  const int inst1 = *(int *)a;
  if (is_ld(inst1)) {
    // Plain ld: the offset is the instruction's D field.
    return inv_d1_field(inst1);
  } else {
    // addis/ld pair: high half from the addis, low half from the ld.
    const int inst2 = *(int *)(a+4);
    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
  }
}
|
||||
|
||||
// Emits code that rounds register r up to the next multiple of 'modulus'
// (modulus must be a power of 2): add modulus-1, then clear the low bits.
inline void MacroAssembler::round_to(Register r, int modulus) {
  assert(is_power_of_2_long((jlong)modulus), "must be power of 2");
  addi(r, r, modulus-1);
  clrrdi(r, r, log2_long((jlong)modulus));
}
|
||||
|
||||
// Move register if destination register and target register are different.
|
||||
inline void MacroAssembler::mr_if_needed(Register rd, Register rs) {
|
||||
if(rs !=rd) mr(rd, rs);
|
||||
}
|
||||
|
||||
// Address of the global TOC.
|
||||
inline address MacroAssembler::global_toc() {
|
||||
return CodeCache::low_bound();
|
||||
}
|
||||
|
||||
// Offset of given address to the global TOC.
|
||||
inline int MacroAssembler::offset_to_global_toc(const address addr) {
|
||||
intptr_t offset = (intptr_t)addr - (intptr_t)MacroAssembler::global_toc();
|
||||
assert(Assembler::is_simm((long)offset, 31) && offset >= 0, "must be in range");
|
||||
return (int)offset;
|
||||
}
|
||||
|
||||
// Address of current method's TOC.
|
||||
inline address MacroAssembler::method_toc() {
|
||||
return code()->consts()->start();
|
||||
}
|
||||
|
||||
// Offset of given address to current method's TOC.
|
||||
inline int MacroAssembler::offset_to_method_toc(address addr) {
|
||||
intptr_t offset = (intptr_t)addr - (intptr_t)method_toc();
|
||||
assert(is_simm((long)offset, 31) && offset >= 0, "must be in range");
|
||||
return (int)offset;
|
||||
}
|
||||
|
||||
// Recognizes an addis/addi sequence that materializes an address relative to
// the global TOC. 'a' points at the addi; the matching addis is searched
// backwards down to 'bound'.
inline bool MacroAssembler::is_calculate_address_from_global_toc_at(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *) a;

  // The relocation points to the second instruction, the addi.
  if (!is_addi(inst2)) return false;

  // The addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  if (inv_ra_field(inst2) != dst) return false;

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // stop, found the addis which writes dst
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  // The addis must add to R29 — presumably the register holding the global
  // TOC base; TODO confirm against the TOC register convention used elsewhere.
  if (!(inst1 == 0 || inv_ra_field(inst1) == 29 /* R29 */)) return false;
  return is_addis(inst1);
}
|
||||
|
||||
#ifdef _LP64
// Detect narrow oop constants: an addi at 'a' preceded (within [bound, a)) by
// a lis that loads the same destination register.
inline bool MacroAssembler::is_set_narrow_oop(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *)a;

  // The relocation points to the second instruction, the addi.
  if (!is_addi(inst2)) return false;

  // The addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  if (inv_ra_field(inst2) != dst) return false;

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    // Found the lis that loads dst: this is a narrow-oop constant sequence.
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) return true;
    inst1_addr -= BytesPerInstWord;
  }
  // No matching lis found before reaching 'bound'.
  return false;
}
#endif
|
||||
|
||||
|
||||
// Returns true if 'a' starts a load_const instruction sequence:
// either lis/ori/rldicr/oris/ori (full 64-bit constant) or
// lis/lis/ori/ori (two 32-bit halves).
inline bool MacroAssembler::is_load_const_at(address a) {
  const int* p_inst = (int *) a;
  bool b = is_lis(*p_inst++);
  if (is_ori(*p_inst)) {
    // 5-instruction form: lis, ori, rldicr (shift), oris, ori.
    p_inst++;
    b = b && is_rldicr(*p_inst++); // TODO: could be made more precise: `sldi'!
    b = b && is_oris(*p_inst++);
    b = b && is_ori(*p_inst);
  } else if (is_lis(*p_inst)) {
    // 4-instruction form: lis, lis, ori, ori.
    p_inst++;
    b = b && is_ori(*p_inst++);
    b = b && is_ori(*p_inst);
    // TODO: could enhance reliability by adding is_insrdi
  } else return false;
  return b;
}
|
||||
|
||||
// Load the address of oop constant 'obj' (via the constant pool) into register d.
inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}
|
||||
|
||||
// Load an oop AddressLiteral into register d; the literal must carry an
// oop-type relocation.
inline void MacroAssembler::set_oop(AddressLiteral obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  load_const(d, obj_addr);
}
|
||||
|
||||
// Patch the branch instruction at 'branch' in place so it targets 'target'.
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}
|
||||
|
||||
// Relocation of conditional far branches.
// Variant 1 is a bcxx that does NOT target the instruction right after the
// 2-instruction sequence, followed by an endgroup.
inline bool MacroAssembler::is_bc_far_variant1_at(address instruction_addr) {
  // Variant 1, the 1st instruction contains the destination address:
  //
  //    bcxx  DEST
  //    endgroup
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_bcxx(instruction_1) &&
         (inv_bd_field(instruction_1, (intptr_t)instruction_addr) != (intptr_t)(instruction_addr + 2*4)) &&
         is_endgroup(instruction_2);
}
|
||||
|
||||
// Relocation of conditional far branches.
// Variant 2 is a negated conditional branch over an unconditional branch to
// the far destination; the bcxx targets the instruction after the pair.
inline bool MacroAssembler::is_bc_far_variant2_at(address instruction_addr) {
  // Variant 2, the 2nd instruction contains the destination address:
  //
  //    b!cxx SKIP
  //    bxx   DEST
  //  SKIP:
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_bcxx(instruction_1) &&
         (inv_bd_field(instruction_1, (intptr_t)instruction_addr) == (intptr_t)(instruction_addr + 2*4)) &&
         is_bxx(instruction_2);
}
|
||||
|
||||
// Relocation for conditional branches.
// Variant 3 is a far branch to the immediately following instruction, already
// patched down to nop + endgroup.
inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) {
  // Variant 3, far cond branch to the next instruction, already patched to nops:
  //
  //    nop
  //    endgroup
  //  SKIP/DEST:
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_nop(instruction_1) &&
         is_endgroup(instruction_2);
}
|
||||
|
||||
|
||||
// Convenience bc_far versions.
// bcondCRbiIs1 branches when the named CR bit is set, bcondCRbiIs0 when it is
// clear — so e.g. bge_far is "not less" and bne_far is "not equal".
inline void MacroAssembler::blt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, less), L, optimize); }
inline void MacroAssembler::bgt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, greater), L, optimize); }
inline void MacroAssembler::beq_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, equal), L, optimize); }
inline void MacroAssembler::bso_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, summary_overflow), L, optimize); }
inline void MacroAssembler::bge_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, less), L, optimize); }
inline void MacroAssembler::ble_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, greater), L, optimize); }
inline void MacroAssembler::bne_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, equal), L, optimize); }
inline void MacroAssembler::bns_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, summary_overflow), L, optimize); }
|
||||
|
||||
// Emit an indirect call through CTR to 'function_entry'; returns the pc
// following the call (i.e. the call's return address).
inline address MacroAssembler::call_stub(Register function_entry) {
  mtctr(function_entry);
  bctrl();
  return pc();
}
|
||||
|
||||
// Emit a tail-style call: set LR to 'return_pc', then branch through CTR to
// 'function_entry' (bctr does not update LR, so the callee returns to return_pc).
inline void MacroAssembler::call_stub_and_return_to(Register function_entry, Register return_pc) {
  assert_different_registers(function_entry, return_pc);
  mtlr(return_pc);
  mtctr(function_entry);
  bctr();
}
|
||||
|
||||
// Get the pc where the last emitted call will return to.
inline address MacroAssembler::last_calls_return_pc() {
  return _last_calls_return_pc;
}
|
||||
|
||||
// Read from the polling page, its address is already in a register.
// The loaded value is discarded (R0); the load only exists to trigger a fault
// when the page is protected for a safepoint.
inline void MacroAssembler::load_from_polling_page(Register polling_page_address, int offset) {
  ld(R0, offset, polling_page_address);
}
|
||||
|
||||
// Trap-instruction-based checks. The tdi/td immediates (0, 1, 2) presumably
// encode the trap reason for the signal handler — TODO confirm against the
// SIGTRAP handler in os_linux_ppc.
inline void MacroAssembler::trap_null_check(Register a, trap_to_bits cmp) {
  assert(TrapBasedNullChecks, "sanity");
  tdi(cmp, a/*reg a*/, 0);
}
// Unconditional trap marking a zombie (not-entrant) method.
inline void MacroAssembler::trap_zombie_not_entrant() {
  tdi(traptoUnconditional, 0/*reg 0*/, 1);
}
// Unconditional trap for unreachable code paths.
inline void MacroAssembler::trap_should_not_reach_here() {
  tdi_unchecked(traptoUnconditional, 0/*reg 0*/, 2);
}

// Trap when a != b (unsigned compare): inline-cache miss check.
inline void MacroAssembler::trap_ic_miss_check(Register a, Register b) {
  td(traptoGreaterThanUnsigned | traptoLessThanUnsigned, a, b);
}
|
||||
|
||||
// Do an explicit null check if access to a+offset will not raise a SIGSEGV.
// Either issue a trap instruction that raises SIGTRAP, or do a compare that
// branches to exception_entry.
// No support for compressed oops (base page of heap). Does not distinguish
// loads and stores.
inline void MacroAssembler::null_check_throw(Register a, int offset, Register temp_reg, address exception_entry) {
  // On non-Linux platforms (NOT_LINUX(|| true)) the implicit-null-check page
  // trick is assumed unavailable, so an explicit check is always emitted.
  if (!ImplicitNullChecks || needs_explicit_null_check(offset) NOT_LINUX(|| true) /*!os::zero_page_read_protected()*/) {
    if (TrapBasedNullChecks) {
      // SIGTRAP-based check: one tdi instruction.
      assert(UseSIGTRAP, "sanity");
      trap_null_check(a);
    } else {
      // Compare-and-branch fallback: jump to the exception entry if a is NULL.
      Label ok;
      cmpdi(CCR0, a, 0);
      bne(CCR0, ok);
      load_const_optimized(temp_reg, exception_entry);
      mtctr(temp_reg);
      bctr();
      bind(ok);
    }
  }
}
|
||||
|
||||
// Load d = *(s1+si16), preceded by a SIGTRAP-based null check of s1 on
// platforms where the zero page is not read-protected (non-Linux here).
inline void MacroAssembler::ld_with_trap_null_check(Register d, int si16, Register s1) {
  if ( NOT_LINUX(true) LINUX_ONLY(false)/*!os::zero_page_read_protected()*/) {
    if (TrapBasedNullChecks) {
      trap_null_check(s1);
    }
  }
  ld(d, si16, s1);
}
|
||||
|
||||
// Attention: No null check for loaded uncompressed OOP. Can be used for loading klass field.
// Null-checks the BASE register s1 (trap-based, non-Linux only), not the
// loaded oop itself.
inline void MacroAssembler::load_heap_oop_with_trap_null_check(Register d, RegisterOrConstant si16,
                                                               Register s1) {
  if ( NOT_LINUX(true)LINUX_ONLY(false) /*!os::zero_page_read_protected()*/) {
    if (TrapBasedNullChecks) {
      trap_null_check(s1);
    }
  }
  load_heap_oop_not_null(d, si16, s1);
}
|
||||
|
||||
// Load a heap oop from s1+offs into d, decompressing when compressed oops are
// enabled. Caller guarantees the oop is non-null (uses the _not_null decode).
inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1) {
  if (UseCompressedOops) {
    lwz(d, offs, s1);
    // Attention: no null check here!
    decode_heap_oop_not_null(d);
  } else {
    ld(d, offs, s1);
  }
}
|
||||
|
||||
// Load a (possibly null) heap oop from s1+offs into d, decompressing when
// compressed oops are enabled; decode_heap_oop handles the null case.
inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1) {
  if (UseCompressedOops) {
    lwz(d, offs, s1);
    decode_heap_oop(d);
  } else {
    ld(d, offs, s1);
  }
}
|
||||
|
||||
// Compress a non-null heap oop in d: subtract the heap base (kept in R30)
// if any, then shift right by the object alignment.
inline void MacroAssembler::encode_heap_oop_not_null(Register d) {
  if (Universe::narrow_oop_base() != NULL) {
    sub(d, d, R30);
  }
  if (Universe::narrow_oop_shift() != 0) {
    srdi(d, d, LogMinObjAlignmentInBytes);
  }
}
|
||||
|
||||
// Decompress a non-null narrow oop in d: shift left by the object alignment,
// then add the heap base (kept in R30) if any. Inverse of
// encode_heap_oop_not_null.
inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    sldi(d, d, LogMinObjAlignmentInBytes);
  }
  if (Universe::narrow_oop_base() != NULL) {
    add(d, d, R30);
  }
}
|
||||
|
||||
// Decompress a possibly-null narrow oop in d. When a heap base is in use,
// a zero narrow oop must stay zero, so the decode is skipped for null.
inline void MacroAssembler::decode_heap_oop(Register d) {
  Label isNull;
  if (Universe::narrow_oop_base() != NULL) {
    // Null narrow oop: leave d as 0 rather than turning it into the heap base.
    cmpwi(CCR0, d, 0);
    beq(CCR0, isNull);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    sldi(d, d, LogMinObjAlignmentInBytes);
  }
  if (Universe::narrow_oop_base() != NULL) {
    add(d, d, R30);
  }
  bind(isNull);
}
|
||||
|
||||
// SIGTRAP-based range checks for arrays. Each emits a single tw/twi that
// traps when the (unsigned) comparison holds; the suffix names the trapping
// condition: _l = a < b, _le = a <= b, _g = a > b, _ge = a >= b.
inline void MacroAssembler::trap_range_check_l(Register a, Register b) {
  tw (traptoLessThanUnsigned,                a/*reg a*/, b/*reg b*/);
}
inline void MacroAssembler::trap_range_check_l(Register a, int si16) {
  twi(traptoLessThanUnsigned,                a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_le(Register a, int si16) {
  twi(traptoEqual | traptoLessThanUnsigned,  a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_g(Register a, int si16) {
  twi(traptoGreaterThanUnsigned,             a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_ge(Register a, Register b) {
  tw (traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, b/*reg b*/);
}
inline void MacroAssembler::trap_range_check_ge(Register a, int si16) {
  twi(traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, si16);
}
|
||||
|
||||
#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
|
61
hotspot/src/cpu/ppc/vm/metaspaceShared_ppc.cpp
Normal file
61
hotspot/src/cpu/ppc/vm/metaspaceShared_ppc.cpp
Normal file
@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "asm/codeBuffer.hpp"
|
||||
#include "memory/metaspaceShared.hpp"
|
||||
|
||||
// Generate the self-patching vtable method:
|
||||
//
|
||||
// This method will be called (as any other Klass virtual method) with
|
||||
// the Klass itself as the first argument. Example:
|
||||
//
|
||||
// oop obj;
|
||||
// int size = obj->klass()->klass_part()->oop_size(this);
|
||||
//
|
||||
// for which the virtual method call is Klass::oop_size();
|
||||
//
|
||||
// The dummy method is called with the Klass object as the first
|
||||
// operand, and an object as the second argument.
|
||||
//
|
||||
|
||||
//=====================================================================
|
||||
|
||||
// All of the dummy methods in the vtable are essentially identical,
|
||||
// differing only by an ordinal constant, and they bear no releationship
|
||||
// to the original method which the caller intended. Also, there needs
|
||||
// to be 'vtbl_list_size' instances of the vtable in order to
|
||||
// differentiate between the 'vtable_list_size' original Klass objects.
|
||||
|
||||
// Generate the self-patching vtable methods used by CDS (see the file header
// comment). Not yet implemented for PPC64 in this interpreter-only port;
// aborts the VM if reached.
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
                                              void** vtable,
                                              char** md_top,
                                              char* md_end,
                                              char** mc_top,
                                              char* mc_end) {
  Unimplemented();
}
|
||||
|
540
hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp
Normal file
540
hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp
Normal file
@ -0,0 +1,540 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
|
||||
#define __ _masm->
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) // nothing
|
||||
#define STOP(error) stop(error)
|
||||
#else
|
||||
#define BLOCK_COMMENT(str) __ block_comment(str)
|
||||
#define STOP(error) block_comment(error); __ stop(error)
|
||||
#endif
|
||||
|
||||
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
|
||||
|
||||
// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant:
// force integer literals through the constant constructor.
inline static RegisterOrConstant constant(int value) {
  return RegisterOrConstant(value);
}
|
||||
|
||||
// Replace a java.lang.Class oop in klass_reg with the Klass* it mirrors.
// Verifies the argument really is a Class when VerifyMethodHandles is on.
void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class), temp_reg, temp2_reg,
                 "MH argument is a Class");
  __ ld(klass_reg, java_lang_Class::klass_offset_in_bytes(), klass_reg);
}
|
||||
|
||||
#ifdef ASSERT
|
||||
// Debug helper for the NONZERO macro: asserts an offset constant is nonzero
// (i.e. the field offset has been initialized) and passes it through.
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, err_msg("%s should be nonzero", xname));
  return x;
}
|
||||
#define NONZERO(x) check_nonzero(#x, x)
|
||||
#else //ASSERT
|
||||
#define NONZERO(x) (x)
|
||||
#endif //ASSERT
|
||||
|
||||
#ifdef ASSERT
|
||||
// Emit a debug check that the oop in obj_reg is a non-null instance of the
// well-known class klass_id (or, via the super_check_offset probe, a subtype);
// stops with error_message otherwise. Clobbers temp_reg and temp2_reg.
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj_reg, SystemDictionary::WKID klass_id,
                                 Register temp_reg, Register temp2_reg,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj_reg);
  // Null object is always bad.
  __ cmpdi(CCR0, obj_reg, 0);
  __ beq(CCR0, L_bad);
  __ load_klass(temp_reg, obj_reg);
  // Compare against the well-known klass (loaded through its address cell,
  // since the Klass* may not be known at code-generation time).
  __ load_const_optimized(temp2_reg, (address) klass_addr);
  __ ld(temp2_reg, 0, temp2_reg);
  __ cmpd(CCR0, temp_reg, temp2_reg);
  __ beq(CCR0, L_ok);
  // Fall back to the superclass cached at the klass' super_check_offset.
  __ ld(temp_reg, klass->super_check_offset(), temp_reg);
  __ cmpd(CCR0, temp_reg, temp2_reg);
  __ beq(CCR0, L_ok);
  __ BIND(L_bad);
  __ stop(error_message);
  __ BIND(L_ok);
  BLOCK_COMMENT("} verify_klass");
}
|
||||
|
||||
// Emit a debug check that the MemberName in member_reg carries the expected
// JVM_REF_* reference kind in its flags field; stops otherwise. Clobbers temp.
void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
  __ load_sized_value(temp, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes()), member_reg,
                      sizeof(u4), /*is_signed*/ false);
  // assert(sizeof(u4) == sizeof(java.lang.invoke.MemberName.flags), "");
  // Extract the reference-kind bit field from the flags word.
  __ srwi( temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
  __ andi(temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
  __ cmpwi(CCR1, temp, ref_kind);
  __ beq(CCR1, L);
  // Mismatch: build a diagnostic message and stop.
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ stop(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ BIND(L);
}
|
||||
|
||||
#endif // ASSERT
|
||||
|
||||
// Tail-jump into 'method' (must be in R19_method): loads the appropriate
// entry point (compiled or interpreted) into 'target' and branches through
// CTR. For interpreter entries, falls back to the interpreter entry when
// JVMTI interp_only_mode is active on the current thread.
void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                            bool for_compiler_entry) {
  assert(method == R19_method, "interpreter calling convention");
  assert_different_registers(method, target, temp);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    __ verify_thread();
    __ lwz(temp, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
    __ cmplwi(CCR0, temp, 0);
    __ beq(CCR0, run_compiled_code);
    // interp_only_mode is set: always dispatch to the interpreter entry.
    __ ld(target, in_bytes(Method::interpreter_entry_offset()), R19_method);
    __ mtctr(target);
    __ bctr();
    __ BIND(run_compiled_code);
  }

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ ld(target, in_bytes(entry_offset), R19_method);
  __ mtctr(target);
  __ bctr();
}
|
||||
|
||||
|
||||
// Entry path for a lazy method handle: navigate MH.form -> LambdaForm.vmentry
// -> MemberName.vmtarget to obtain the invoker Method*, then tail-jump to it
// via jump_from_method_handle. method_temp must be R19_method.
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2, Register temp3,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);  // temp3 is only passed on
  assert(method_temp == R19_method, "required register for loading method");

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv);
  __ verify_oop(method_temp);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp);
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ ld(temp2, in_bytes(Method::const_offset()), method_temp);
    __ load_sized_value(temp2, in_bytes(ConstMethod::size_of_parameters_offset()), temp2,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
    Label L;
    // The first argument slot (by parameter count) must hold 'recv'.
    __ ld(temp2, __ argument_offset(temp2, temp2, 0), R17_tos);
    __ cmpd(CCR1, temp2, recv);
    __ beq(CCR1, L);
    __ stop("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}
|
||||
|
||||
|
||||
|
||||
// Code generation
// Generate the interpreter entry stub for a signature-polymorphic intrinsic
// (invokeBasic or one of the linkTo* intrinsics). Returns the entry point,
// or NULL for the intrinsics that are handled by Java-generated adapters.
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ stop("Should not reach here");           // empty stubs make SG sick
    return NULL;
  }

  Register argbase    = R17_tos; // parameter (preserved)
  Register argslot    = R3;
  Register temp1      = R6;
  Register param_size = R7;

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    // Debug check: the incoming Method* really is the intrinsic we expect.
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ load_sized_value(temp1, Method::intrinsic_id_offset_in_bytes(), R19_method,
                        sizeof(u1), /*is_signed*/ false);
    // assert(sizeof(u1) == sizeof(Method::_intrinsic_id), "");
    __ cmpwi(CCR1, temp1, (int) iid);
    __ beq(CCR1, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*:intrinsic_id");
    }
    __ stop("bad Method*::intrinsic_id");
    __ BIND(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task:  Find out how big the argument list is.
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    // Load the parameter count from the method's ConstMethod.
    __ ld(param_size, in_bytes(Method::const_offset()), R19_method);
    __ load_sized_value(param_size, in_bytes(ConstMethod::size_of_parameters_offset()), param_size,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
  } else {
    DEBUG_ONLY(param_size = noreg);
  }

  Register tmp_mh = noreg;
  if (!is_signature_polymorphic_static(iid)) {
    // Non-static intrinsic: the MethodHandle receiver is the first argument.
    __ ld(tmp_mh = temp1, __ argument_offset(param_size, param_size, 0), argbase);
    DEBUG_ONLY(param_size = noreg);
  }

  if (TraceMethodHandles) {
    if (tmp_mh != noreg)
      __ mr(R23_method_handle, tmp_mh);  // make stub happy
    trace_method_handle_interpreter_entry(_masm, iid);
  }

  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, tmp_mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register tmp_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ ld(tmp_recv = temp1, __ argument_offset(param_size, param_size, 0), argbase);
      DEBUG_ONLY(param_size = noreg);
    }
    Register R19_member = R19_method;  // MemberName ptr; incoming method ptr is dead now
    __ ld(R19_member, RegisterOrConstant((intptr_t)8), argbase);
    // Pop one interpreter stack slot (the MemberName).
    __ add(argbase, Interpreter::stackElementSize, argbase);
    generate_method_handle_dispatch(_masm, iid, tmp_recv, R19_member, not_for_compiler_entry);
  }

  return entry_point;
}
|
||||
|
||||
// Emit dispatch code for one of the signature-polymorphic invoke intrinsics
// (_invokeBasic, _linkToVirtual, _linkToStatic, _linkToSpecial,
// _linkToInterface). The generated code resolves the concrete target into
// R19_method and jumps into it.
//
//   receiver_reg       - stacked receiver, if the intrinsic has one (else noreg)
//   member_reg         - trailing MemberName argument (unused by _invokeBasic)
//   for_compiler_entry - selects temp registers safe for the compiled-code
//                        calling convention instead of the interpreter's
void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  // Temp register choice depends on the entry kind (see header of this method).
  Register temp1 = (for_compiler_entry ? R21_tmp1 : R7);
  Register temp2 = (for_compiler_entry ? R22_tmp2 : R8);
  Register temp3 = (for_compiler_entry ? R23_tmp3 : R9);
  Register temp4 = (for_compiler_entry ? R24_tmp4 : R10);
  if (receiver_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
  if (member_reg != noreg)   assert_different_registers(temp1, temp2, temp3, temp4, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, R19_method, temp1, temp2, for_compiler_entry);
  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(MemberName_klass),
                   temp1, temp2,
                   "MemberName required for invokeVirtual etc.");
    }

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      // All variants except _linkToStatic have a receiver; null-check it
      // (and usually load its klass for the dispatch below).
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check_throw(receiver_reg, 0, temp1, StubRoutines::throw_NullPointerException_at_call_entry());
      } else {
        // load receiver klass itself
        __ null_check_throw(receiver_reg, oopDesc::klass_offset_in_bytes(),
                            temp1, StubRoutines::throw_NullPointerException_at_call_entry());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop_not_null(temp2_defc, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg);
        load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
        // If we get here, the type check failed!
        __ stop("receiver class disagrees with MemberName.clazz");
        __ BIND(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  O5_savedSP - interpreter linkage (if interpreted)
    //  O0..O5 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
      }
      // Target Method* is stored directly in MemberName.vmtarget.
      __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
      }
      // Target Method* is stored directly in MemberName.vmtarget.
      __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp2);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ ld(temp2_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()), member_reg);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        // A negative vmindex would be an unresolved or bogus MemberName.
        __ cmpdi(CCR1, temp2_index, 0);
        __ bge(CCR1, L_index_ok);
        __ stop("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point. And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, R19_method);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp2);
      }

      // The interface klass comes from MemberName.clazz (a java.lang.Class oop).
      Register temp2_intf = temp2;
      __ load_heap_oop_not_null(temp2_intf, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg);
      load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
      __ verify_klass_ptr(temp2_intf);

      Register vtable_index = R19_method;
      __ ld(vtable_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()), member_reg);
      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpdi(CCR1, vtable_index, 0);
        __ bge(CCR1, L_index_ok);
        __ stop("invalid vtable index for MH.invokeInterface");
        __ BIND(L_index_ok);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp2_intf,
                                 // note: next two args must be the same:
                                 vtable_index, R19_method,
                                 temp3, temp4,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }

    // Live at this point:
    //   R19_method
    //   O5_savedSP (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that rcx_recv be shifted out.
    __ verify_method_ptr(R19_method);
    jump_from_method_handle(_masm, R19_method, temp1, temp2, for_compiler_entry);

    if (iid == vmIntrinsics::_linkToInterface) {
      // lookup_interface_method branches here when the receiver does not
      // implement the interface.
      __ BIND(L_incompatible_class_change_error);
      __ load_const_optimized(temp1, StubRoutines::throw_IncompatibleClassChangeError_entry());
      __ mtctr(temp1);
      __ bctr();
    }
  }
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// Runtime (C) leaf-call target of MethodHandles::trace_method_handle (below).
// Prints the adapter name, the method handle and SP, and - if Verbose - the
// saved volatile registers, a description of the last frame, and the MH oop.
//
//   adaptername - name of the adapter/intrinsic being traced
//   mh          - contents of R23_method_handle (meaningful only if has_mh)
//   entry_sp    - SP at the traced entry point
//   saved_regs  - register save area written by save_volatile_gprs
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh,
                              intptr_t* entry_sp,
                              intptr_t* saved_regs) {

  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "R23_method_handle" : "G23";
  tty->print_cr("MH %s %s="INTPTR_FORMAT " sp=" INTPTR_FORMAT,
                adaptername, mh_reg_name, (intptr_t) mh, entry_sp);

  if (Verbose) {
    tty->print_cr("Registers:");
    // Register values start right behind the ABI section of the frame.
    const int abi_offset = frame::abi_112_size / 8;
    for (int i = R3->encoding(); i <= R13->encoding(); i++) {
      Register r = as_Register(i);
      int count = i - R3->encoding();
      // The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_112_size)).
      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[abi_offset + count]);
      // Four registers per output line.
      if ((count + 1) % 4 == 0) {
        tty->cr();
      } else {
        tty->print(", ");
      }
    }
    tty->cr();

    {
      // dumping last frame with frame::describe

      JavaThread* p = JavaThread::active();

      ResourceMark rm;
      PRESERVE_EXCEPTION_MARK; // may not be needed, but safe and inexpensive here

      FrameValues values;

      // Note: We want to allow trace_method_handle from any call site.
      // While trace_method_handle creates a frame, it may be entered
      // without a PC on the stack top (e.g. not just after a call).
      // Walking that frame could lead to failures due to that invalid PC.
      // => carefully detect that frame when doing the stack walking

      // Current C frame
      frame cur_frame = os::current_frame();

      // Robust search of trace_calling_frame (independent of inlining).
      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
      assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
      // Walk up until we pass the frame that contains the register save area.
      while (trace_calling_frame.fp() < saved_regs) {
        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
      }

      // safely create a frame and call frame::describe
      intptr_t *dump_sp = trace_calling_frame.sender_sp();

      frame dump_frame = frame(dump_sp);
      dump_frame.describe(values, 1);

      values.describe(-1, saved_regs, "raw top of stack");

      tty->print_cr("Stack layout:");
      values.print(p);
    }

    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}
|
||||
|
||||
// Emit instrumentation that calls trace_method_handle_stub with the adapter
// name, the method handle register (R23), the caller's SP, and the register
// save area. LR, CR and the volatile GPRs are saved around the leaf call, so
// this can be inserted at any point in generated method handle code.
// No-op unless -XX:+TraceMethodHandles.
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles) return;

  BLOCK_COMMENT("trace_method_handle {");

  int nbytes_save = 10 * 8;             // 10 volatile gprs
  __ save_LR_CR(R0);
  __ mr(R0, R1_SP);                     // saved_sp
  assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0");
  // push_frame_abi112 only uses R0 if nbytes_save is wider than 16 bit
  __ push_frame_abi112(nbytes_save, R0);
  __ save_volatile_gprs(R1_SP, frame::abi_112_size); // Except R0.

  __ load_const(R3_ARG1, (address)adaptername);
  __ mr(R4_ARG2, R23_method_handle);
  __ mr(R5_ARG3, R0);                   // saved_sp
  __ mr(R6_ARG4, R1_SP);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  // Use the same named offset as the save above so save and restore stay in
  // sync (was a hard-coded literal 112). Except R0.
  __ restore_volatile_gprs(R1_SP, frame::abi_112_size);
  __ pop_frame();
  __ restore_LR_CR(R0);

  BLOCK_COMMENT("} trace_method_handle");
}
|
||||
#endif // PRODUCT
|
62
hotspot/src/cpu/ppc/vm/methodHandles_ppc.hpp
Normal file
62
hotspot/src/cpu/ppc/vm/methodHandles_ppc.hpp
Normal file
@ -0,0 +1,62 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// Platform-specific definitions for method handles.
|
||||
// These definitions are inlined into class MethodHandles.
|
||||
|
||||
// Adapters
|
||||
//static unsigned int adapter_code_size() {
|
||||
// return 32*K DEBUG_ONLY(+ 16*K) + (TraceMethodHandles ? 16*K : 0) + (VerifyMethodHandles ? 32*K : 0);
|
||||
//}
|
||||
enum /* platform_dependent_constants */ {
|
||||
adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000))
|
||||
};
|
||||
|
||||
// Additional helper methods for MethodHandles code generation:
|
||||
public:
|
||||
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg);
|
||||
|
||||
static void verify_klass(MacroAssembler* _masm,
|
||||
Register obj_reg, SystemDictionary::WKID klass_id,
|
||||
Register temp_reg, Register temp2_reg,
|
||||
const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
|
||||
|
||||
  // Verify that mh_reg holds a MethodHandle. Not implemented on PPC:
  // reaching this aborts the VM via Unimplemented().
  // NOTE(review): presumably no PPC code path calls this yet - confirm
  // before relying on it.
  static void verify_method_handle(MacroAssembler* _masm, Register mh_reg,
                                   Register temp_reg, Register temp2_reg) {
    Unimplemented();
  }
|
||||
|
||||
static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;
|
||||
|
||||
// Similar to InterpreterMacroAssembler::jump_from_interpreted.
|
||||
// Takes care of special dispatch from single stepping too.
|
||||
static void jump_from_method_handle(MacroAssembler* _masm, Register method,
|
||||
Register temp, Register temp2,
|
||||
bool for_compiler_entry);
|
||||
|
||||
static void jump_to_lambda_form(MacroAssembler* _masm,
|
||||
Register recv, Register method_temp,
|
||||
Register temp2, Register temp3,
|
||||
bool for_compiler_entry);
|
382
hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp
Normal file
382
hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp
Normal file
@ -0,0 +1,382 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "nativeInst_ppc.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/handles.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_Runtime1.hpp"
|
||||
#endif
|
||||
|
||||
// We use an illtrap for marking a method as not_entrant or zombie iff !UseSIGTRAP
// Work around a C++ compiler bug which changes 'this'
bool NativeInstruction::is_sigill_zombie_not_entrant_at(address addr) {
  assert(!UseSIGTRAP, "precondition");
  // The illtrap is encoded as an all-zero instruction word.
  if (*(int*)addr != 0 /*illtrap*/) return false;
  // Unsafe lookup: addr may lie in a zombie nmethod.
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
  if (cb == NULL || !cb->is_nmethod()) return false;
  nmethod *nm = (nmethod *)cb;
  // This method is not_entrant or zombie iff the illtrap instruction is
  // located at the verified entry point.
  return nm->verified_entry_point() == addr;
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void NativeInstruction::verify() {
|
||||
// Make sure code pattern is actually an instruction address.
|
||||
address addr = addr_at(0);
|
||||
if (addr == 0 || ((intptr_t)addr & 3) != 0) {
|
||||
fatal("not an instruction address");
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
// Extract call destination from a NativeCall. The call might use a trampoline stub.
address NativeCall::destination() const {
  address addr = (address)this;
  // Raw branch target of the bl instruction.
  address destination = Assembler::bxx_destination(addr);

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get assertion if nmethod is zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = NativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}
|
||||
|
||||
// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. Thus, the displacement field must be
// instruction-word-aligned.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// Add parameter assert_lock to switch off assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()),
         "concurrent code patching");

  ResourceMark rm;
  // We only ever rewrite the single bl instruction at the call site.
  int code_size = 1 * BytesPerInstWord;
  address addr_call = addr_at(0);
  assert(MacroAssembler::is_bl(*(int*)addr_call), "unexpected code at call-site");

  CodeBuffer cb(addr_call, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Patch the call.
  if (ReoptimizeCallSequences &&
      a->is_within_range_of_b(dest, addr_call)) {
    // New destination is reachable by a direct branch: retarget the bl itself.
    a->bl(dest);
  } else {
    address trampoline_stub_addr = get_trampoline();

    // We did not find a trampoline stub because the current codeblob
    // does not provide this information. The branch will be patched
    // later during a final fixup, when all necessary information is
    // available.
    if (trampoline_stub_addr == 0)
      return;

    // Patch the constant in the call's trampoline stub.
    NativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);

    // Route the bl through the trampoline, which loads and jumps to dest.
    a->bl(trampoline_stub_addr);
  }
  // Flush the patched instruction word from the instruction cache.
  ICache::invalidate_range(addr_call, code_size);
}
|
||||
|
||||
// Locate the trampoline stub belonging to this call site, or NULL if it
// cannot be determined (e.g. during CodeBuffer relocation).
address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  // There are no relocations available when the code gets relocated
  // because of CodeBuffer expansion.
  if (code->relocation_size() == 0)
    return NULL;

  // Fast path: the bl may already branch directly to the trampoline stub.
  address bl_destination = Assembler::bxx_destination(call_addr);
  if (code->content_contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  // If the codeBlob is not a nmethod, this is because we get here from the
  // CodeBlob constructor, which is called within the nmethod constructor.
  return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void NativeCall::verify() {
|
||||
address addr = addr_at(0);
|
||||
|
||||
if (!NativeCall::is_call_at(addr)) {
|
||||
tty->print_cr("not a NativeCall at " PTR_FORMAT, addr);
|
||||
// TODO: PPC port: Disassembler::decode(addr - 20, addr + 20, tty);
|
||||
fatal(err_msg("not a NativeCall at " PTR_FORMAT, addr));
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
#ifdef ASSERT
|
||||
void NativeFarCall::verify() {
|
||||
address addr = addr_at(0);
|
||||
|
||||
NativeInstruction::verify();
|
||||
if (!NativeFarCall::is_far_call_at(addr)) {
|
||||
tty->print_cr("not a NativeFarCall at " PTR_FORMAT, addr);
|
||||
// TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
|
||||
fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, addr));
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
address NativeMovConstReg::next_instruction_address() const {
  // Address right behind this load-constant sequence; the length depends on
  // whether the constant is loaded from the method's TOC or inline.
#ifdef ASSERT
  CodeBlob* nm = CodeCache::find_blob(instruction_address());
  assert(!MacroAssembler::is_set_narrow_oop(addr_at(0), nm->content_begin()), "Should not patch narrow oop here");
#endif
  const bool loads_from_toc = MacroAssembler::is_load_const_from_method_toc_at(addr_at(0));
  const int seq_size = loads_from_toc ? load_const_from_method_toc_instruction_size
                                      : load_const_instruction_size;
  return addr_at(seq_size);
}
|
||||
|
||||
// Return the constant loaded by this sequence. Handles the plain load_const
// pattern, the set_narrow_oop pattern (returning the decoded heap oop), and
// a load from the method's TOC / constant table.
intptr_t NativeMovConstReg::data() const {
  address addr = addr_at(0);
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);

  if (MacroAssembler::is_load_const_at(addr)) {
    // Inline 5-instruction constant: decode it from the instruction words.
    return MacroAssembler::get_const(addr);
  } else if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
    // Compressed oop: decode to the full heap address.
    narrowOop no = (narrowOop)MacroAssembler::get_narrow_oop(addr, cb->content_begin());
    return (intptr_t)oopDesc::decode_heap_oop(no);
  } else {
    assert(MacroAssembler::is_load_const_from_method_toc_at(addr), "must be load_const_from_pool");

    // Read the value out of the constant-table slot the load refers to.
    address ctable = cb->content_begin();
    int offset = MacroAssembler::get_offset_of_load_const_from_method_toc_at(addr);
    return *(intptr_t *)(ctable + offset);
  }
}
|
||||
|
||||
// Patch the load-constant sequence at this location to load 'data', without
// updating any relocation cells (see set_data for that). Returns the address
// right behind the patched sequence. The patching strategy depends on which
// code pattern is found at the site; unrecognized patterns are fatal.
address NativeMovConstReg::set_data_plain(intptr_t data, CodeBlob *cb) {
  address addr = instruction_address();
  address next_address = NULL;
  if (!cb) cb = CodeCache::find_blob(addr);

  if (cb != NULL && MacroAssembler::is_load_const_from_method_toc_at(addr)) {
    // A load from the method's TOC (ctable).
    assert(cb->is_nmethod(), "must be nmethod");
    const address ctable = cb->content_begin();
    const int toc_offset = MacroAssembler::get_offset_of_load_const_from_method_toc_at(addr);
    // Only the constant-table data slot changes; the instruction stream
    // itself is untouched, so no ICache flush is performed here.
    *(intptr_t *)(ctable + toc_offset) = data;
    next_address = addr + BytesPerInstWord;
  } else if (cb != NULL &&
             MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) {
    // A calculation relative to the global TOC.
    // A negative invalidated_range indicates that instructions in front of
    // addr were rewritten as well (matches the 'start' computation below).
    const int invalidated_range =
      MacroAssembler::patch_calculate_address_from_global_toc_at(addr, cb->content_begin(),
                                                                 (address)data);
    const address start = invalidated_range < 0 ? addr + invalidated_range : addr;
    // FIXME:
    const int range = invalidated_range < 0 ? 4 - invalidated_range : 8;
    ICache::invalidate_range(start, range);
    next_address = addr + 1 * BytesPerInstWord;
  } else if (MacroAssembler::is_load_const_at(addr)) {
    // A normal 5 instruction load_const code sequence.
    // This is not mt safe, ok in methods like CodeBuffer::copy_code().
    MacroAssembler::patch_const(addr, (long)data);
    ICache::invalidate_range(addr, load_const_instruction_size);
    next_address = addr + 5 * BytesPerInstWord;
  } else if (MacroAssembler::is_bl(* (int*) addr)) {
    // A single branch-and-link instruction.
    ResourceMark rm;
    const int code_size = 1 * BytesPerInstWord;
    // NOTE: this local CodeBuffer shadows the CodeBlob* parameter 'cb',
    // which is not used again below.
    CodeBuffer cb(addr, code_size + 1);
    MacroAssembler* a = new MacroAssembler(&cb);
    a->bl((address) data);
    ICache::invalidate_range(addr, code_size);
    next_address = addr + code_size;
  } else {
    ShouldNotReachHere();
  }

  return next_address;
}
|
||||
|
||||
// Patch the load-constant sequence to load 'data', and keep any oop or
// metadata relocation cells that cover the sequence consistent with the
// new value.
void NativeMovConstReg::set_data(intptr_t data) {
  // Store the value into the instruction stream.
  CodeBlob *cb = CodeCache::find_blob(instruction_address());
  address next_address = set_data_plain(data, cb);

  // Also store the value into an oop_Relocation cell, if any.
  if (cb && cb->is_nmethod()) {
    // Scan only the relocations covering this one instruction sequence.
    RelocIterator iter((nmethod *) cb, instruction_address(), next_address);
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          // First oop relocation seen: update its cell.
          oop_addr = r->oop_addr();
          *oop_addr = (oop)data;
        } else {
          // Any further oop relocations must refer to the same cell.
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)data;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
|
||||
|
||||
// Patch a set_narrow_oop sequence to encode 'data' and flush the ICache over
// the rewritten instruction range.
void NativeMovConstReg::set_narrow_oop(narrowOop data, CodeBlob *code /* = NULL */) {
  address addr = addr_at(0);
  CodeBlob* cb = (code) ? code : CodeCache::find_blob(instruction_address());
  // A negative invalidated_range indicates that instructions in front of
  // addr were rewritten as well (matches the 'start' computation below).
  const int invalidated_range =
    MacroAssembler::patch_set_narrow_oop(addr, cb->content_begin(), (long)data);
  const address start = invalidated_range < 0 ? addr + invalidated_range : addr;
  // FIXME:
  const int range = invalidated_range < 0 ? 4 - invalidated_range : 8;
  ICache::invalidate_range(start, range);
}
|
||||
|
||||
// Do not use an assertion here. Let clients decide whether they only
|
||||
// want this when assertions are enabled.
|
||||
#ifdef ASSERT
|
||||
void NativeMovConstReg::verify() {
|
||||
address addr = addr_at(0);
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(addr); // find_nmethod() asserts if nmethod is zombie.
|
||||
if (! MacroAssembler::is_load_const_at(addr) &&
|
||||
! MacroAssembler::is_load_const_from_method_toc_at(addr) &&
|
||||
! (cb != NULL && MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) &&
|
||||
! (cb != NULL && MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) &&
|
||||
! MacroAssembler::is_bl(*((int*) addr))) {
|
||||
tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, addr);
|
||||
// TODO: PPC port Disassembler::decode(addr, 20, 20, tty);
|
||||
fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, addr));
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
// Patch the nmethod's verified entry point so that a thread entering the
// (now not_entrant/zombie) method is rerouted to 'dest' - either by a direct
// branch or, if unreachable or in a debug build, by a trap/illtrap that the
// signal handler resolves.
// Note: 'entry' is not used on this platform; only verified_entry is patched.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  ResourceMark rm;
  // Exactly one instruction word is rewritten, so the patch is atomic.
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
#ifdef COMPILER2
  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
#endif
  // Patch this nmethod atomically. Always use illtrap/trap in debug build.
  if (DEBUG_ONLY(false &&) a->is_within_range_of_b(dest, a->pc())) {
    a->b(dest);
  } else {
    // The signal handler will continue at dest=OptoRuntime::handle_wrong_method_stub().
    if (TrapBasedNotEntrantChecks) {
      // We use a special trap for marking a method as not_entrant or zombie.
      a->trap_zombie_not_entrant();
    } else {
      // We use an illtrap for marking a method as not_entrant or zombie.
      a->illtrap();
    }
  }
  ICache::invalidate_range(verified_entry, code_size);
}
|
||||
|
||||
#ifdef ASSERT
|
||||
void NativeJump::verify() {
|
||||
address addr = addr_at(0);
|
||||
|
||||
NativeInstruction::verify();
|
||||
if (!NativeJump::is_jump_at(addr)) {
|
||||
tty->print_cr("not a NativeJump at " PTR_FORMAT, addr);
|
||||
// TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
|
||||
fatal(err_msg("not a NativeJump at " PTR_FORMAT, addr));
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
// Call trampoline stubs.
|
||||
//
|
||||
// Layout and instructions of a call trampoline stub:
|
||||
// 0: load the TOC (part 1)
|
||||
// 4: load the TOC (part 2)
|
||||
// 8: load the call target from the constant pool (part 1)
|
||||
// [12: load the call target from the constant pool (part 2, optional)]
|
||||
// ..: branch via CTR
|
||||
//
|
||||
|
||||
address NativeCallTrampolineStub::encoded_destination_addr() const {
  // The ld of the call target sits behind the two TOC-load instructions
  // (see the stub layout comment above).
  const address ld_addr = addr_at(2 * BytesPerInstWord);
  assert(MacroAssembler::is_ld_largeoffset(ld_addr),
         "must be a ld with large offset (from the constant pool)");
  return ld_addr;
}
|
||||
|
||||
address NativeCallTrampolineStub::destination() const {
  // Read the call target out of the constant-table slot of the blob that
  // contains this stub.
  CodeBlob* blob = CodeCache::find_blob(addr_at(0));
  const address ctable_base = blob->content_begin();
  return *(address*)(ctable_base + destination_toc_offset());
}
|
||||
|
||||
// Offset, within the constant table, of the slot holding this trampoline's
// call destination.
int NativeCallTrampolineStub::destination_toc_offset() const {
  return MacroAssembler::get_ld_largeoffset_offset(encoded_destination_addr());
}
|
||||
|
||||
void NativeCallTrampolineStub::set_destination(address new_destination) {
  // Patch the constant-table slot this stub loads its target from; the
  // instruction stream itself is left unchanged.
  CodeBlob* blob = CodeCache::find_blob(addr_at(0));
  const address ctable_base = blob->content_begin();
  *(address*)(ctable_base + destination_toc_offset()) = new_destination;
}
|
||||
|
397
hotspot/src/cpu/ppc/vm/nativeInst_ppc.hpp
Normal file
397
hotspot/src/cpu/ppc/vm/nativeInst_ppc.hpp
Normal file
@ -0,0 +1,397 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_NATIVEINST_PPC_HPP
|
||||
#define CPU_PPC_VM_NATIVEINST_PPC_HPP
|
||||
|
||||
#include "asm/assembler.hpp"
|
||||
#include "asm/macroAssembler.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/icache.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/top.hpp"
|
||||
|
||||
// We have interfaces for the following instructions:
|
||||
//
|
||||
// - NativeInstruction
|
||||
// - NativeCall
|
||||
// - NativeFarCall
|
||||
// - NativeMovConstReg
|
||||
// - NativeJump
|
||||
// - NativeIllegalInstruction
|
||||
// - NativeConditionalFarBranch
|
||||
// - NativeCallTrampolineStub
|
||||
|
||||
// The base class for different kinds of native instruction abstractions.
// It provides the primitive operations to manipulate code relative to this.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  // Is the instruction word at this address the trap used for an
  // inline-cache miss check? Only meaningful when UseSIGTRAP is on.
  bool is_sigtrap_ic_miss_check() {
    assert(UseSIGTRAP, "precondition");
    return MacroAssembler::is_trap_ic_miss_check(long_at(0));
  }

  // Is this the trap used for SIGTRAP-based implicit null checks?
  bool is_sigtrap_null_check() {
    assert(UseSIGTRAP && TrapBasedNullChecks, "precondition");
    return MacroAssembler::is_trap_null_check(long_at(0));
  }

  // We use a special trap for marking a method as not_entrant or zombie
  // iff UseSIGTRAP.
  bool is_sigtrap_zombie_not_entrant() {
    assert(UseSIGTRAP, "precondition");
    return MacroAssembler::is_trap_zombie_not_entrant(long_at(0));
  }

  // We use an illtrap for marking a method as not_entrant or zombie
  // iff !UseSIGTRAP.
  bool is_sigill_zombie_not_entrant() {
    assert(!UseSIGTRAP, "precondition");
    // Work around a C++ compiler bug which changes 'this'.
    return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0));
  }
  // Out-of-line variant of the above; defined in nativeInst_ppc.cpp.
  static bool is_sigill_zombie_not_entrant_at(address addr);

  // SIGTRAP-based implicit range checks
  bool is_sigtrap_range_check() {
    assert(UseSIGTRAP && TrapBasedRangeChecks, "precondition");
    return MacroAssembler::is_trap_range_check(long_at(0));
  }

  // 'should not reach here'.
  bool is_sigtrap_should_not_reach_here() {
    return MacroAssembler::is_trap_should_not_reach_here(long_at(0));
  }

  // Is the current instruction a POTENTIAL read access to the safepoint
  // polling page? The current arguments of the instruction are not checked!
  bool is_safepoint_poll() {
    return MacroAssembler::is_load_from_polling_page(long_at(0), NULL);
  }

  // Is the current instruction a write access of 'thread' to the
  // memory serialization page?
  bool is_memory_serialization(JavaThread *thread, void *ucontext) {
    return MacroAssembler::is_memory_serialization(long_at(0), thread, ucontext);
  }

  // If long_at(0) is not a stack bang, return 0. Otherwise, return
  // the banged address.
  address get_stack_bang_address(void *ucontext) {
    return MacroAssembler::get_stack_bang_address(long_at(0), ucontext);
  }

 protected:
  // Address of this instruction plus 'offset' bytes.
  address addr_at(int offset) const { return address(this) + offset; }
  // The 32-bit instruction word 'offset' bytes from this instruction.
  int     long_at(int offset) const { return *(int*)addr_at(offset); }

 public:
  void verify() NOT_DEBUG_RETURN;
};
|
||||
|
||||
// Views the code at the given position as a NativeInstruction,
// verifying it (debug builds only) before handing it back.
inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* const result = (NativeInstruction*)address;
  result->verify();
  return result;
}
|
||||
|
||||
// The NativeCall is an abstraction for accessing/manipulating call
// instructions. It is used to manipulate inline caches, primitive &
// dll calls, etc.
//
// Sparc distinguishes `NativeCall' and `NativeFarCall'. On PPC64,
// at present, we provide a single class `NativeCall' representing the
// sequence `load_const, mtctr, bctrl' or the sequence 'ld_from_toc,
// mtctr, bctrl'.
class NativeCall: public NativeInstruction {
 public:

  enum specific_constants {
    // Byte size of the `load_const, mtctr, bctrl' form (7 words).
    load_const_instruction_size                 = 28,
    // Byte size of the `ld_from_toc, mtctr, bctrl' form (4 words).
    load_const_from_method_toc_instruction_size = 16,
    instruction_size                            = 16 // Used in shared code for calls with reloc_info.
  };

  // Is the instruction at 'a' a branch-and-link (bl)?
  static bool is_call_at(address a) {
    return Assembler::is_bl(*(int*)(a));
  }

  // Is the word immediately before 'return_address' a call?
  static bool is_call_before(address return_address) {
    return NativeCall::is_call_at(return_address - 4);
  }

  address instruction_address() const {
    return addr_at(0);
  }

  address next_instruction_address() const {
    // We have only bl.
    assert(MacroAssembler::is_bl(*(int*)instruction_address()), "Should be bl instruction!");
    return addr_at(4);
  }

  // Address execution resumes at after the call, i.e. the word after the bl.
  address return_address() const {
    return next_instruction_address();
  }

  // Current call target (defined in nativeInst_ppc.cpp).
  address destination() const;

  // Patch the call target in an MT-safe manner.
  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  // Trampoline stub belonging to this call, if any (defined out of line).
  address get_trampoline();

  void verify_alignment() {} // do nothing on ppc
  void verify() NOT_DEBUG_RETURN;
};
|
||||
|
||||
// Views the code at 'instr' as a NativeCall, verifying it
// (debug builds only) before returning.
inline NativeCall* nativeCall_at(address instr) {
  NativeCall* const call_site = (NativeCall*)instr;
  call_site->verify();
  return call_site;
}
|
||||
|
||||
// Instantiates a NativeCall for the bl instruction immediately preceding
// return_address, or returns NULL if the preceding word is not a bl.
inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = NULL;
  if (MacroAssembler::is_bl(*(int*)(return_address - 4))) {
    call = (NativeCall*)(return_address - 4);
    // Only verify when a call was actually found. The original code called
    // verify() unconditionally, which invoked a member function through a
    // NULL pointer (undefined behavior) for a non-bl predecessor.
    call->verify();
  }
  return call;
}
|
||||
|
||||
// The NativeFarCall is an abstraction for accessing/manipulating native
// call-anywhere instructions.
// Used to call native methods which may be loaded anywhere in the address
// space, possibly out of reach of a call instruction.
class NativeFarCall: public NativeInstruction {
 public:
  // We use MacroAssembler::bl64_patchable() for implementing a
  // call-anywhere instruction.

  // Checks whether instr points at a NativeFarCall instruction.
  static bool is_far_call_at(address instr) {
    return MacroAssembler::is_bl64_patchable_at(instr);
  }

  // Does the NativeFarCall implementation use a pc-relative encoding
  // of the call destination?
  // Used when relocating code.
  bool is_pcrelative() {
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    return MacroAssembler::is_bl64_patchable_pcrelative_at((address)this);
  }

  // Returns the NativeFarCall's destination.
  address destination() const {
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    return MacroAssembler::get_dest_of_bl64_patchable_at((address)this);
  }

  // Sets the NativeCall's destination, not necessarily mt-safe.
  // Used when relocating code.
  void set_destination(address dest) {
    // Set new destination (implementation of call may change here).
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    MacroAssembler::set_dest_of_bl64_patchable_at((address)this, dest);
  }

  void verify() NOT_DEBUG_RETURN;
};
|
||||
|
||||
// Instantiates a NativeFarCall object starting at the given instruction
|
||||
// address and returns the NativeFarCall object.
|
||||
inline NativeFarCall* nativeFarCall_at(address instr) {
|
||||
NativeFarCall* call = (NativeFarCall*)instr;
|
||||
call->verify();
|
||||
return call;
|
||||
}
|
||||
|
||||
// An interface for accessing/manipulating native set_oop imm, reg instructions.
// (used to manipulate inlined data references, etc.)
class NativeMovConstReg: public NativeInstruction {
 public:

  enum specific_constants {
    // Byte size of the full load_const sequence (5 words).
    load_const_instruction_size                 = 20,
    // Byte size of the load-from-method-TOC variant (2 words).
    load_const_from_method_toc_instruction_size = 8,
    instruction_size                            = 8 // Used in shared code for calls with reloc_info.
  };

  address instruction_address() const {
    return addr_at(0);
  }

  // Address of the word following this constant-load sequence
  // (defined in nativeInst_ppc.cpp).
  address next_instruction_address() const;

  // The constant currently encoded in the instruction stream.
  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;

  // Patch the code stream.
  address set_data_plain(intptr_t x, CodeBlob *code);
  // Patch the code stream and oop pool.
  void set_data(intptr_t x);

  // Patch narrow oop constants. Use this also for narrow klass.
  void set_narrow_oop(narrowOop data, CodeBlob *code = NULL);

  void verify() NOT_DEBUG_RETURN;
};
|
||||
|
||||
// Views the code at 'address' as a NativeMovConstReg, verifying it
// (debug builds only) before returning.
inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* const result = (NativeMovConstReg*)address;
  result->verify();
  return result;
}
|
||||
|
||||
// The NativeJump is an abstraction for accessing/manipulating native
// jump-anywhere instructions.
class NativeJump: public NativeInstruction {
 public:
  // We use MacroAssembler::b64_patchable() for implementing a
  // jump-anywhere instruction.

  enum specific_constants {
    instruction_size = MacroAssembler::b64_patchable_size
  };

  // Checks whether instr points at a NativeJump instruction: either a
  // patchable 64-bit branch, or the `ld_from_toc, mtctr, bctr' sequence
  // (the load takes two words, so mtctr is word 3 and bctr word 4).
  static bool is_jump_at(address instr) {
    return MacroAssembler::is_b64_patchable_at(instr)
      || (   MacroAssembler::is_load_const_from_method_toc_at(instr)
          && Assembler::is_mtctr(*(int*)(instr + 2 * 4))
          && Assembler::is_bctr(*(int*)(instr + 3 * 4)));
  }

  // Does the NativeJump implementation use a pc-relative encoding
  // of the call destination?
  // Used when relocating code or patching jumps.
  bool is_pcrelative() {
    return MacroAssembler::is_b64_patchable_pcrelative_at((address)this);
  }

  // Returns the NativeJump's destination.
  address jump_destination() const {
    if (MacroAssembler::is_b64_patchable_at((address)this)) {
      return MacroAssembler::get_dest_of_b64_patchable_at((address)this);
    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
      // The TOC-based form begins with the same load sequence as
      // NativeMovConstReg, so reuse its accessor to read the target.
      return (address)((NativeMovConstReg *)this)->data();
    } else {
      ShouldNotReachHere();
      return NULL;
    }
  }

  // Sets the NativeJump's destination, not necessarily mt-safe.
  // Used when relocating code or patching jumps.
  void set_jump_destination(address dest) {
    // Set new destination (implementation of call may change here).
    if (MacroAssembler::is_b64_patchable_at((address)this)) {
      MacroAssembler::set_dest_of_b64_patchable_at((address)this, dest);
    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
      ((NativeMovConstReg *)this)->set_data((intptr_t)dest);
    } else {
      ShouldNotReachHere();
    }
  }

  // MT-safe insertion of native jump at verified method entry
  static void patch_verified_entry(address entry, address verified_entry, address dest);

  void verify() NOT_DEBUG_RETURN;

  static void check_verified_entry_alignment(address entry, address verified_entry) {
    // We just patch one instruction on ppc64, so the jump doesn't have to
    // be aligned. Nothing to do here.
  }
};
|
||||
|
||||
// Instantiates a NativeJump object starting at the given instruction
|
||||
// address and returns the NativeJump object.
|
||||
inline NativeJump* nativeJump_at(address instr) {
|
||||
NativeJump* call = (NativeJump*)instr;
|
||||
call->verify();
|
||||
return call;
|
||||
}
|
||||
|
||||
// NativeConditionalFarBranch is abstraction for accessing/manipulating
// conditional far branches.
class NativeConditionalFarBranch : public NativeInstruction {
 public:

  // Checks whether instr points at a bc_far sequence.
  static bool is_conditional_far_branch_at(address instr) {
    return MacroAssembler::is_bc_far_at(instr);
  }

  // Current target of this conditional far branch.
  address branch_destination() const {
    return MacroAssembler::get_dest_of_bc_far_at((address)this);
  }

  // Repoints this conditional far branch at dest.
  void set_branch_destination(address dest) {
    MacroAssembler::set_dest_of_bc_far_at((address)this, dest);
  }
};
|
||||
|
||||
// Views the code at 'address' as a NativeConditionalFarBranch; the
// instruction pattern is checked (debug builds only) before returning.
inline NativeConditionalFarBranch* NativeConditionalFarBranch_at(address address) {
  NativeConditionalFarBranch* const branch = (NativeConditionalFarBranch*)address;
  assert(NativeConditionalFarBranch::is_conditional_far_branch_at(address),
         "must be a conditional far branch");
  return branch;
}
|
||||
|
||||
// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
 private:

  // Location within the stub where the destination is encoded
  // (defined in nativeInst_ppc.cpp).
  address encoded_destination_addr() const;

 public:

  // Current destination of the stub.
  address destination() const;
  // Offset of the encoded destination; presumably relative to the TOC —
  // confirm against nativeInst_ppc.cpp.
  int destination_toc_offset() const;

  // Repoints the stub at new_destination.
  void set_destination(address new_destination);
};
|
||||
|
||||
|
||||
// Checks whether the code at 'address' looks like the start of a call
// trampoline stub: its first instruction must be an addis whose target
// register is R12_scratch2.
inline bool is_NativeCallTrampolineStub_at(address address) {
  int first_instr = *(int*)address;
  return Assembler::is_addis(first_instr) &&
    (Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2;
}
|
||||
|
||||
// Views the code at 'address' as a NativeCallTrampolineStub; the stub
// pattern is checked (debug builds only) before returning.
inline NativeCallTrampolineStub* NativeCallTrampolineStub_at(address address) {
  NativeCallTrampolineStub* const stub = (NativeCallTrampolineStub*)address;
  assert(is_NativeCallTrampolineStub_at(address), "no call trampoline found");
  return stub;
}
|
||||
|
||||
#endif // CPU_PPC_VM_NATIVEINST_PPC_HPP
|
45
hotspot/src/cpu/ppc/vm/registerMap_ppc.hpp
Normal file
45
hotspot/src/cpu/ppc/vm/registerMap_ppc.hpp
Normal file
@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_REGISTERMAP_PPC_HPP
|
||||
#define CPU_PPC_VM_REGISTERMAP_PPC_HPP
|
||||
|
||||
// machine-dependent implementation for register maps
  friend class frame;

 private:
  // This is the hook for finding a register in a "well-known" location,
  // such as a register block of a predetermined format.
  // Since there is none, we just return NULL.
  // See registerMap_sparc.hpp for an example of grabbing registers
  // from register save areas of a standard layout.
  address pd_location(VMReg reg) const { return NULL; }

  // no PD state to clear or copy:
  void pd_clear() {}
  void pd_initialize() {}
  void pd_initialize_from(const RegisterMap* map) {}
|
||||
|
||||
#endif // CPU_PPC_VM_REGISTERMAP_PPC_HPP
|
42
hotspot/src/cpu/ppc/vm/register_definitions_ppc.cpp
Normal file
42
hotspot/src/cpu/ppc/vm/register_definitions_ppc.cpp
Normal file
@ -0,0 +1,42 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// make sure the defines don't screw up the declarations later on in this file
|
||||
#define DONT_USE_REGISTER_DEFINES
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.hpp"
|
||||
#include "asm/register.hpp"
|
||||
#include "register_ppc.hpp"
|
||||
#ifdef TARGET_ARCH_MODEL_32
|
||||
# include "interp_masm_32.hpp"
|
||||
#endif
|
||||
#ifdef TARGET_ARCH_MODEL_ppc_64
|
||||
# include "interp_masm_ppc_64.hpp"
|
||||
#endif
|
||||
|
||||
// Out-of-line definitions for the 'noreg'/'fnoreg' register constants
// declared with CONSTANT_REGISTER_DECLARATION (DONT_USE_REGISTER_DEFINES
// is defined above, so these identifiers name the real constants, not
// the convenience macros).
REGISTER_DEFINITION(Register, noreg);

REGISTER_DEFINITION(FloatRegister, fnoreg);
|
77
hotspot/src/cpu/ppc/vm/register_ppc.cpp
Normal file
77
hotspot/src/cpu/ppc/vm/register_ppc.cpp
Normal file
@ -0,0 +1,77 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "register_ppc.hpp"
|
||||
|
||||
// Upper bounds of the GPR / FPR / condition-register sections of the
// concrete register name space. The factor 2 presumably reserves two
// VMReg slots per 64-bit GPR and FPR — confirm against vmreg_ppc.
const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers * 2;
const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::max_gpr +
                                          FloatRegisterImpl::number_of_registers * 2;
const int ConcreteRegisterImpl::max_cnd = ConcreteRegisterImpl::max_fpr +
                                          ConditionRegisterImpl::number_of_registers;
|
||||
|
||||
const char* RegisterImpl::name() const {
|
||||
const char* names[number_of_registers] = {
|
||||
"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
|
||||
"R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
|
||||
"R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23",
|
||||
"R24", "R25", "R26", "R27", "R28", "R29", "R30", "R31"
|
||||
};
|
||||
return is_valid() ? names[encoding()] : "noreg";
|
||||
}
|
||||
|
||||
const char* ConditionRegisterImpl::name() const {
|
||||
const char* names[number_of_registers] = {
|
||||
"CR0", "CR1", "CR2", "CR3", "CCR4", "CCR5", "CCR6", "CCR7"
|
||||
};
|
||||
return is_valid() ? names[encoding()] : "cnoreg";
|
||||
}
|
||||
|
||||
const char* FloatRegisterImpl::name() const {
|
||||
const char* names[number_of_registers] = {
|
||||
"F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
|
||||
"F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15",
|
||||
"F16", "F17", "F18", "F19", "F20", "F21", "F22", "F23",
|
||||
"F24", "F25", "F26", "F27", "F28", "F29", "F30", "F31"
|
||||
};
|
||||
return is_valid() ? names[encoding()] : "fnoreg";
|
||||
}
|
||||
|
||||
const char* SpecialRegisterImpl::name() const {
|
||||
const char* names[number_of_registers] = {
|
||||
"SR_XER", "SR_LR", "SR_CTR", "SR_VRSAVE", "R1_SPEFSCR", "SR_PPR"
|
||||
};
|
||||
return is_valid() ? names[encoding()] : "snoreg";
|
||||
}
|
||||
|
||||
const char* VectorRegisterImpl::name() const {
|
||||
const char* names[number_of_registers] = {
|
||||
"VR0", "VR1", "VR2", "VR3", "VR4", "VR5", "VR6", "VR7",
|
||||
"VR8", "VR9", "VR10", "VR11", "VR12", "VR13", "VR14", "VR15",
|
||||
"VR16", "VR17", "VR18", "VR19", "VR20", "VR21", "VR22", "VR23",
|
||||
"VR24", "VR25", "VR26", "VR27", "VR28", "VR29", "VR30", "VR31"
|
||||
};
|
||||
return is_valid() ? names[encoding()] : "vnoreg";
|
||||
}
|
635
hotspot/src/cpu/ppc/vm/register_ppc.hpp
Normal file
635
hotspot/src/cpu/ppc/vm/register_ppc.hpp
Normal file
@ -0,0 +1,635 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_REGISTER_PPC_HPP
|
||||
#define CPU_PPC_VM_REGISTER_PPC_HPP
|
||||
|
||||
#include "asm/register.hpp"
|
||||
#include "vm_version_ppc.hpp"
|
||||
|
||||
// forward declaration
|
||||
class Address;
|
||||
class VMRegImpl;
|
||||
typedef VMRegImpl* VMReg;
|
||||
|
||||
// PPC64 registers
|
||||
//
|
||||
// See "64-bit PowerPC ELF ABI Supplement 1.7", IBM Corp. (2003-10-29).
|
||||
// (http://math-atlas.sourceforge.net/devel/assembly/PPC-elf64abi-1.7.pdf)
|
||||
//
|
||||
// r0 Register used in function prologs (volatile)
|
||||
// r1 Stack pointer (nonvolatile)
|
||||
// r2 TOC pointer (volatile)
|
||||
// r3 Parameter and return value (volatile)
|
||||
// r4-r10 Function parameters (volatile)
|
||||
// r11 Register used in calls by pointer and as an environment pointer for languages which require one (volatile)
|
||||
// r12 Register used for exception handling and glink code (volatile)
|
||||
// r13 Reserved for use as system thread ID
|
||||
// r14-r31 Local variables (nonvolatile)
|
||||
//
|
||||
// f0 Scratch register (volatile)
|
||||
// f1-f4 Floating point parameters and return value (volatile)
|
||||
// f5-f13 Floating point parameters (volatile)
|
||||
// f14-f31 Floating point values (nonvolatile)
|
||||
//
|
||||
// LR Link register for return address (volatile)
|
||||
// CTR Loop counter (volatile)
|
||||
// XER Fixed point exception register (volatile)
|
||||
// FPSCR Floating point status and control register (volatile)
|
||||
//
|
||||
// CR0-CR1 Condition code fields (volatile)
|
||||
// CR2-CCR4 Condition code fields (nonvolatile)
|
||||
// CCR5-CCR7 Condition code fields (volatile)
|
||||
//
|
||||
// ----------------------------------------------
|
||||
// On processors with the VMX feature:
|
||||
// v0-v1 Volatile scratch registers
|
||||
// v2-v13 Volatile vector parameters registers
|
||||
// v14-v19 Volatile scratch registers
|
||||
// v20-v31 Non-volatile registers
|
||||
// vrsave Non-volatile 32-bit register
|
||||
|
||||
|
||||
// Use Register as shortcut
|
||||
class RegisterImpl;
|
||||
typedef RegisterImpl* Register;
|
||||
|
||||
// Maps an encoding 0..31 to the corresponding Register value
// (registers are represented as small-integer pointers).
inline Register as_Register(int encoding) {
  assert(encoding >= 0 && encoding < 32, "bad register encoding");
  return (Register)(intptr_t)encoding;
}
|
||||
|
||||
// The implementation of integer registers for the Power architecture
class RegisterImpl: public AbstractRegisterImpl {
 public:
  enum {
    number_of_registers = 32
  };

  // general construction
  inline friend Register as_Register(int encoding);

  // accessors
  int      encoding()  const { assert(is_valid(), "invalid register"); return value(); }
  VMReg    as_VMReg();
  // The next register in encoding order (R(n) -> R(n+1)).
  Register successor() const { return as_Register(encoding() + 1); }

  // testers
  // NOTE(review): the value()&0x7F masking appears intended to tolerate
  // encodings with extra high bits set — confirm against callers.
  // Volatility follows the 64-bit PowerPC ELF ABI: R0-R13 volatile,
  // R14-R31 nonvolatile (see the ABI summary at the top of this file).
  bool is_valid()       const { return ( 0 <= (value()&0x7F) && (value()&0x7F) <  number_of_registers); }
  bool is_volatile()    const { return ( 0 <= (value()&0x7F) && (value()&0x7F) <= 13 ); }
  bool is_nonvolatile() const { return (14 <= (value()&0x7F) && (value()&0x7F) <= 31 ); }

  const char* name() const;
};
|
||||
|
||||
// The integer registers of the PPC architecture
|
||||
CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
|
||||
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R0, (0));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R1, (1));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R2, (2));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R3, (3));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R4, (4));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R5, (5));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R6, (6));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R7, (7));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R8, (8));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R9, (9));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R10, (10));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R11, (11));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R12, (12));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R13, (13));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R14, (14));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R15, (15));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R16, (16));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R17, (17));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R18, (18));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R19, (19));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R20, (20));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R21, (21));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R22, (22));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R23, (23));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R24, (24));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R25, (25));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R26, (26));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R27, (27));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R28, (28));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R29, (29));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R30, (30));
|
||||
CONSTANT_REGISTER_DECLARATION(Register, R31, (31));
|
||||
|
||||
|
||||
//
|
||||
// Because Power has many registers, #define'ing values for them is
|
||||
// beneficial in code size and is worth the cost of some of the
|
||||
// dangers of defines. If a particular file has a problem with these
|
||||
// defines then it's possible to turn them off in that file by
|
||||
// defining DONT_USE_REGISTER_DEFINES. Register_definition_ppc.cpp
|
||||
// does that so that it's able to provide real definitions of these
|
||||
// registers for use in debuggers and such.
|
||||
//
|
||||
|
||||
#ifndef DONT_USE_REGISTER_DEFINES
|
||||
#define noreg ((Register)(noreg_RegisterEnumValue))
|
||||
|
||||
#define R0 ((Register)(R0_RegisterEnumValue))
|
||||
#define R1 ((Register)(R1_RegisterEnumValue))
|
||||
#define R2 ((Register)(R2_RegisterEnumValue))
|
||||
#define R3 ((Register)(R3_RegisterEnumValue))
|
||||
#define R4 ((Register)(R4_RegisterEnumValue))
|
||||
#define R5 ((Register)(R5_RegisterEnumValue))
|
||||
#define R6 ((Register)(R6_RegisterEnumValue))
|
||||
#define R7 ((Register)(R7_RegisterEnumValue))
|
||||
#define R8 ((Register)(R8_RegisterEnumValue))
|
||||
#define R9 ((Register)(R9_RegisterEnumValue))
|
||||
#define R10 ((Register)(R10_RegisterEnumValue))
|
||||
#define R11 ((Register)(R11_RegisterEnumValue))
|
||||
#define R12 ((Register)(R12_RegisterEnumValue))
|
||||
#define R13 ((Register)(R13_RegisterEnumValue))
|
||||
#define R14 ((Register)(R14_RegisterEnumValue))
|
||||
#define R15 ((Register)(R15_RegisterEnumValue))
|
||||
#define R16 ((Register)(R16_RegisterEnumValue))
|
||||
#define R17 ((Register)(R17_RegisterEnumValue))
|
||||
#define R18 ((Register)(R18_RegisterEnumValue))
|
||||
#define R19 ((Register)(R19_RegisterEnumValue))
|
||||
#define R20 ((Register)(R20_RegisterEnumValue))
|
||||
#define R21 ((Register)(R21_RegisterEnumValue))
|
||||
#define R22 ((Register)(R22_RegisterEnumValue))
|
||||
#define R23 ((Register)(R23_RegisterEnumValue))
|
||||
#define R24 ((Register)(R24_RegisterEnumValue))
|
||||
#define R25 ((Register)(R25_RegisterEnumValue))
|
||||
#define R26 ((Register)(R26_RegisterEnumValue))
|
||||
#define R27 ((Register)(R27_RegisterEnumValue))
|
||||
#define R28 ((Register)(R28_RegisterEnumValue))
|
||||
#define R29 ((Register)(R29_RegisterEnumValue))
|
||||
#define R30 ((Register)(R30_RegisterEnumValue))
|
||||
#define R31 ((Register)(R31_RegisterEnumValue))
|
||||
#endif
|
||||
|
||||
// Use ConditionRegister as shortcut
|
||||
class ConditionRegisterImpl;
|
||||
typedef ConditionRegisterImpl* ConditionRegister;
|
||||
|
||||
// Maps an encoding 0..7 to the corresponding ConditionRegister value.
inline ConditionRegister as_ConditionRegister(int encoding) {
  assert(encoding >= 0 && encoding < 8, "bad condition register encoding");
  return (ConditionRegister)(intptr_t)encoding;
}
|
||||
|
||||
// The implementation of condition register(s) for the PPC architecture
class ConditionRegisterImpl: public AbstractRegisterImpl {
 public:
  enum {
    number_of_registers = 8
  };

  // construction.
  inline friend ConditionRegister as_ConditionRegister(int encoding);

  // accessors
  int   encoding() const { assert(is_valid(), "invalid register"); return value(); }
  VMReg as_VMReg();

  // testers
  // CR2..CR4 are the nonvolatile condition fields per the ABI summary
  // at the top of this file.
  bool is_valid()       const { return (0 <= value() && value() < number_of_registers); }
  bool is_nonvolatile() const { return (2 <= (value()&0x7F) && (value()&0x7F) <= 4 ); }

  const char* name() const;
};
|
||||
|
||||
// The (parts of the) condition register(s) of the PPC architecture
|
||||
// sys/ioctl.h on AIX defines CR0-CR3, so I name these CCR.
|
||||
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR0, (0));
|
||||
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR1, (1));
|
||||
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR2, (2));
|
||||
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR3, (3));
|
||||
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR4, (4));
|
||||
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR5, (5));
|
||||
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR6, (6));
|
||||
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR7, (7));
|
||||
|
||||
#ifndef DONT_USE_REGISTER_DEFINES
|
||||
|
||||
#define CCR0 ((ConditionRegister)(CCR0_ConditionRegisterEnumValue))
|
||||
#define CCR1 ((ConditionRegister)(CCR1_ConditionRegisterEnumValue))
|
||||
#define CCR2 ((ConditionRegister)(CCR2_ConditionRegisterEnumValue))
|
||||
#define CCR3 ((ConditionRegister)(CCR3_ConditionRegisterEnumValue))
|
||||
#define CCR4 ((ConditionRegister)(CCR4_ConditionRegisterEnumValue))
|
||||
#define CCR5 ((ConditionRegister)(CCR5_ConditionRegisterEnumValue))
|
||||
#define CCR6 ((ConditionRegister)(CCR6_ConditionRegisterEnumValue))
|
||||
#define CCR7 ((ConditionRegister)(CCR7_ConditionRegisterEnumValue))
|
||||
|
||||
#endif // DONT_USE_REGISTER_DEFINES
|
||||
|
||||
|
||||
// Use FloatRegister as shortcut
|
||||
class FloatRegisterImpl;
|
||||
typedef FloatRegisterImpl* FloatRegister;
|
||||
|
||||
// Map an integer encoding (0..31) to its FloatRegister handle.
// Like the other as_*Register helpers, this is a pure cast of the
// encoding into an opaque pointer type.
inline FloatRegister as_FloatRegister(int encoding) {
  assert(0 <= encoding && encoding < 32, "bad float register encoding");
  return (FloatRegister)(intptr_t)encoding;
}
|
||||
|
||||
// The implementation of float registers for the PPC architecture.
// A FloatRegister handle denotes one of the 32 floating point registers
// F0..F31; the handle's value() is the register number.
class FloatRegisterImpl: public AbstractRegisterImpl {
 public:
  enum {
    number_of_registers = 32
  };

  // construction
  inline friend FloatRegister as_FloatRegister(int encoding);

  // accessors
  int encoding() const { assert(is_valid(), "invalid register"); return value(); }
  VMReg as_VMReg();
  // The register with the next encoding (F(n) -> F(n+1)); the debug-only
  // range check in as_FloatRegister rejects successor() of F31.
  FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }

  // testers
  bool is_valid() const { return (0 <= value() && value() < number_of_registers); }

  const char* name() const;
};
|
||||
|
||||
// The float registers of the PPC architecture
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg, (-1));
|
||||
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F0, ( 0));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F1, ( 1));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F2, ( 2));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F3, ( 3));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F4, ( 4));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F5, ( 5));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F6, ( 6));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F7, ( 7));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F8, ( 8));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F9, ( 9));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F10, (10));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F11, (11));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F12, (12));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F13, (13));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F14, (14));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F15, (15));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F16, (16));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F17, (17));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F18, (18));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F19, (19));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F20, (20));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F21, (21));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F22, (22));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F23, (23));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F24, (24));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F25, (25));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F26, (26));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F27, (27));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F28, (28));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F29, (29));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F30, (30));
|
||||
CONSTANT_REGISTER_DECLARATION(FloatRegister, F31, (31));
|
||||
|
||||
#ifndef DONT_USE_REGISTER_DEFINES
|
||||
#define fnoreg ((FloatRegister)(fnoreg_FloatRegisterEnumValue))
|
||||
#define F0 ((FloatRegister)( F0_FloatRegisterEnumValue))
|
||||
#define F1 ((FloatRegister)( F1_FloatRegisterEnumValue))
|
||||
#define F2 ((FloatRegister)( F2_FloatRegisterEnumValue))
|
||||
#define F3 ((FloatRegister)( F3_FloatRegisterEnumValue))
|
||||
#define F4 ((FloatRegister)( F4_FloatRegisterEnumValue))
|
||||
#define F5 ((FloatRegister)( F5_FloatRegisterEnumValue))
|
||||
#define F6 ((FloatRegister)( F6_FloatRegisterEnumValue))
|
||||
#define F7 ((FloatRegister)( F7_FloatRegisterEnumValue))
|
||||
#define F8 ((FloatRegister)( F8_FloatRegisterEnumValue))
|
||||
#define F9 ((FloatRegister)( F9_FloatRegisterEnumValue))
|
||||
#define F10 ((FloatRegister)( F10_FloatRegisterEnumValue))
|
||||
#define F11 ((FloatRegister)( F11_FloatRegisterEnumValue))
|
||||
#define F12 ((FloatRegister)( F12_FloatRegisterEnumValue))
|
||||
#define F13 ((FloatRegister)( F13_FloatRegisterEnumValue))
|
||||
#define F14 ((FloatRegister)( F14_FloatRegisterEnumValue))
|
||||
#define F15 ((FloatRegister)( F15_FloatRegisterEnumValue))
|
||||
#define F16 ((FloatRegister)( F16_FloatRegisterEnumValue))
|
||||
#define F17 ((FloatRegister)( F17_FloatRegisterEnumValue))
|
||||
#define F18 ((FloatRegister)( F18_FloatRegisterEnumValue))
|
||||
#define F19 ((FloatRegister)( F19_FloatRegisterEnumValue))
|
||||
#define F20 ((FloatRegister)( F20_FloatRegisterEnumValue))
|
||||
#define F21 ((FloatRegister)( F21_FloatRegisterEnumValue))
|
||||
#define F22 ((FloatRegister)( F22_FloatRegisterEnumValue))
|
||||
#define F23 ((FloatRegister)( F23_FloatRegisterEnumValue))
|
||||
#define F24 ((FloatRegister)( F24_FloatRegisterEnumValue))
|
||||
#define F25 ((FloatRegister)( F25_FloatRegisterEnumValue))
|
||||
#define F26 ((FloatRegister)( F26_FloatRegisterEnumValue))
|
||||
#define F27 ((FloatRegister)( F27_FloatRegisterEnumValue))
|
||||
#define F28 ((FloatRegister)( F28_FloatRegisterEnumValue))
|
||||
#define F29 ((FloatRegister)( F29_FloatRegisterEnumValue))
|
||||
#define F30 ((FloatRegister)( F30_FloatRegisterEnumValue))
|
||||
#define F31 ((FloatRegister)( F31_FloatRegisterEnumValue))
|
||||
#endif // DONT_USE_REGISTER_DEFINES
|
||||
|
||||
// Use SpecialRegister as shortcut
|
||||
class SpecialRegisterImpl;
|
||||
typedef SpecialRegisterImpl* SpecialRegister;
|
||||
|
||||
// Map an integer encoding (0..5) to its SpecialRegister handle.
inline SpecialRegister as_SpecialRegister(int encoding) {
  // Range-check the encoding for consistency with as_ConditionRegister
  // and as_FloatRegister (debug builds only; SpecialRegisterImpl declares
  // 6 registers and no "noreg" sentinel exists for this class).
  assert(0 <= encoding && encoding < 6, "bad special register encoding");
  return (SpecialRegister)(intptr_t)encoding;
}
|
||||
|
||||
// The implementation of special registers for the Power architecture
// (LR, CTR and friends). The six encodings correspond to the SR_*
// constants declared below: XER, LR, CTR, VRSAVE, SPEFSCR, PPR.
class SpecialRegisterImpl: public AbstractRegisterImpl {
 public:
  enum {
    number_of_registers = 6
  };

  // construction
  inline friend SpecialRegister as_SpecialRegister(int encoding);

  // accessors
  int encoding() const { assert(is_valid(), "invalid register"); return value(); }
  VMReg as_VMReg();

  // testers
  bool is_valid() const { return 0 <= value() && value() < number_of_registers; }

  const char* name() const;
};
|
||||
|
||||
// The special registers of the PPC architecture
|
||||
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_XER, (0));
|
||||
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_LR, (1));
|
||||
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_CTR, (2));
|
||||
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_VRSAVE, (3));
|
||||
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_SPEFSCR, (4));
|
||||
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_PPR, (5));
|
||||
|
||||
#ifndef DONT_USE_REGISTER_DEFINES
|
||||
#define SR_XER ((SpecialRegister)(SR_XER_SpecialRegisterEnumValue))
|
||||
#define SR_LR ((SpecialRegister)(SR_LR_SpecialRegisterEnumValue))
|
||||
#define SR_CTR ((SpecialRegister)(SR_CTR_SpecialRegisterEnumValue))
|
||||
#define SR_VRSAVE ((SpecialRegister)(SR_VRSAVE_SpecialRegisterEnumValue))
|
||||
#define SR_SPEFSCR ((SpecialRegister)(SR_SPEFSCR_SpecialRegisterEnumValue))
|
||||
#define SR_PPR ((SpecialRegister)(SR_PPR_SpecialRegisterEnumValue))
|
||||
#endif // DONT_USE_REGISTER_DEFINES
|
||||
|
||||
|
||||
// Use VectorRegister as shortcut
|
||||
class VectorRegisterImpl;
|
||||
typedef VectorRegisterImpl* VectorRegister;
|
||||
|
||||
// Map an integer encoding to its VectorRegister handle.
// Unlike the other as_*Register helpers there is deliberately no range
// assert here: vnoreg is declared below with encoding -1 and must remain
// representable.
inline VectorRegister as_VectorRegister(int encoding) {
  return (VectorRegister)(intptr_t)encoding;
}
|
||||
|
||||
// The implementation of vector registers for the Power architecture.
// A VectorRegister handle denotes one of the 32 VMX/Altivec registers
// VR0..VR31. Note: unlike the other register classes this one has no
// as_VMReg() accessor — vector registers are not exposed to the VM's
// register-allocation machinery here.
class VectorRegisterImpl: public AbstractRegisterImpl {
 public:
  enum {
    number_of_registers = 32
  };

  // construction
  inline friend VectorRegister as_VectorRegister(int encoding);

  // accessors
  int encoding() const { assert(is_valid(), "invalid register"); return value(); }

  // testers
  bool is_valid() const { return 0 <= value() && value() < number_of_registers; }

  const char* name() const;
};
|
||||
|
||||
// The Vector registers of the Power architecture
|
||||
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, vnoreg, (-1));
|
||||
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR0, ( 0));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR1, ( 1));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR2, ( 2));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR3, ( 3));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR4, ( 4));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR5, ( 5));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR6, ( 6));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR7, ( 7));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR8, ( 8));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR9, ( 9));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR10, (10));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR11, (11));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR12, (12));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR13, (13));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR14, (14));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR15, (15));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR16, (16));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR17, (17));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR18, (18));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR19, (19));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR20, (20));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR21, (21));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR22, (22));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR23, (23));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR24, (24));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR25, (25));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR26, (26));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR27, (27));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR28, (28));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR29, (29));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR30, (30));
|
||||
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR31, (31));
|
||||
|
||||
#ifndef DONT_USE_REGISTER_DEFINES
|
||||
#define vnoreg ((VectorRegister)(vnoreg_VectorRegisterEnumValue))
|
||||
#define VR0 ((VectorRegister)( VR0_VectorRegisterEnumValue))
|
||||
#define VR1 ((VectorRegister)( VR1_VectorRegisterEnumValue))
|
||||
#define VR2 ((VectorRegister)( VR2_VectorRegisterEnumValue))
|
||||
#define VR3 ((VectorRegister)( VR3_VectorRegisterEnumValue))
|
||||
#define VR4 ((VectorRegister)( VR4_VectorRegisterEnumValue))
|
||||
#define VR5 ((VectorRegister)( VR5_VectorRegisterEnumValue))
|
||||
#define VR6 ((VectorRegister)( VR6_VectorRegisterEnumValue))
|
||||
#define VR7 ((VectorRegister)( VR7_VectorRegisterEnumValue))
|
||||
#define VR8 ((VectorRegister)( VR8_VectorRegisterEnumValue))
|
||||
#define VR9 ((VectorRegister)( VR9_VectorRegisterEnumValue))
|
||||
#define VR10 ((VectorRegister)( VR10_VectorRegisterEnumValue))
|
||||
#define VR11 ((VectorRegister)( VR11_VectorRegisterEnumValue))
|
||||
#define VR12 ((VectorRegister)( VR12_VectorRegisterEnumValue))
|
||||
#define VR13 ((VectorRegister)( VR13_VectorRegisterEnumValue))
|
||||
#define VR14 ((VectorRegister)( VR14_VectorRegisterEnumValue))
|
||||
#define VR15 ((VectorRegister)( VR15_VectorRegisterEnumValue))
|
||||
#define VR16 ((VectorRegister)( VR16_VectorRegisterEnumValue))
|
||||
#define VR17 ((VectorRegister)( VR17_VectorRegisterEnumValue))
|
||||
#define VR18 ((VectorRegister)( VR18_VectorRegisterEnumValue))
|
||||
#define VR19 ((VectorRegister)( VR19_VectorRegisterEnumValue))
|
||||
#define VR20 ((VectorRegister)( VR20_VectorRegisterEnumValue))
|
||||
#define VR21 ((VectorRegister)( VR21_VectorRegisterEnumValue))
|
||||
#define VR22 ((VectorRegister)( VR22_VectorRegisterEnumValue))
|
||||
#define VR23 ((VectorRegister)( VR23_VectorRegisterEnumValue))
|
||||
#define VR24 ((VectorRegister)( VR24_VectorRegisterEnumValue))
|
||||
#define VR25 ((VectorRegister)( VR25_VectorRegisterEnumValue))
|
||||
#define VR26 ((VectorRegister)( VR26_VectorRegisterEnumValue))
|
||||
#define VR27 ((VectorRegister)( VR27_VectorRegisterEnumValue))
|
||||
#define VR28 ((VectorRegister)( VR28_VectorRegisterEnumValue))
|
||||
#define VR29 ((VectorRegister)( VR29_VectorRegisterEnumValue))
|
||||
#define VR30 ((VectorRegister)( VR30_VectorRegisterEnumValue))
|
||||
#define VR31 ((VectorRegister)( VR31_VectorRegisterEnumValue))
|
||||
#endif // DONT_USE_REGISTER_DEFINES
|
||||
|
||||
|
||||
// Maximum number of incoming arguments that can be passed in i registers.
|
||||
const int PPC_ARGS_IN_REGS_NUM = 8;
|
||||
|
||||
|
||||
// Need to know the total number of registers of all sorts for SharedInfo.
// Define a class that exports it.
class ConcreteRegisterImpl : public AbstractRegisterImpl {
 public:
  enum {
    // This number must be large enough to cover REG_COUNT (defined by c2)
    // registers. There is no requirement that any ordering here matches
    // any ordering c2 gives its optoregs.
    number_of_registers =
      ( RegisterImpl::number_of_registers +
        FloatRegisterImpl::number_of_registers )
      * 2                                          // register halves (GPRs/FPRs are 64-bit, counted as two 32-bit halves)
      + ConditionRegisterImpl::number_of_registers // condition code registers
      + SpecialRegisterImpl::number_of_registers   // special registers
      + VectorRegisterImpl::number_of_registers    // vector registers
  };

  // Highest concrete-register index (exclusive) per register class;
  // values are supplied out of line.
  static const int max_gpr;
  static const int max_fpr;
  static const int max_cnd;
};
|
||||
|
||||
// Common register declarations used in assembler code.
// Each name aliases a plain R/F register with its role in the PPC ABI
// calling convention; the per-line volatility notes whether the register
// survives a C call.
REGISTER_DECLARATION(Register, R0_SCRATCH, R0);  // volatile
REGISTER_DECLARATION(Register, R1_SP,      R1);  // non-volatile
REGISTER_DECLARATION(Register, R2_TOC,     R2);  // volatile
REGISTER_DECLARATION(Register, R3_RET,     R3);  // volatile
REGISTER_DECLARATION(Register, R3_ARG1,    R3);  // volatile
REGISTER_DECLARATION(Register, R4_ARG2,    R4);  // volatile
REGISTER_DECLARATION(Register, R5_ARG3,    R5);  // volatile
REGISTER_DECLARATION(Register, R6_ARG4,    R6);  // volatile
REGISTER_DECLARATION(Register, R7_ARG5,    R7);  // volatile
REGISTER_DECLARATION(Register, R8_ARG6,    R8);  // volatile
REGISTER_DECLARATION(Register, R9_ARG7,    R9);  // volatile
REGISTER_DECLARATION(Register, R10_ARG8,   R10); // volatile
// NOTE(review): "FO_SCRATCH" is spelled with the letter O, not the digit 0 —
// presumably a typo for F0_SCRATCH. Kept as-is because the name is part of
// the interface (and the matching #define below spells it the same way);
// confirm before renaming.
REGISTER_DECLARATION(FloatRegister, FO_SCRATCH, F0);  // volatile
REGISTER_DECLARATION(FloatRegister, F1_RET,     F1);  // volatile
REGISTER_DECLARATION(FloatRegister, F1_ARG1,    F1);  // volatile
REGISTER_DECLARATION(FloatRegister, F2_ARG2,    F2);  // volatile
REGISTER_DECLARATION(FloatRegister, F3_ARG3,    F3);  // volatile
REGISTER_DECLARATION(FloatRegister, F4_ARG4,    F4);  // volatile
REGISTER_DECLARATION(FloatRegister, F5_ARG5,    F5);  // volatile
REGISTER_DECLARATION(FloatRegister, F6_ARG6,    F6);  // volatile
REGISTER_DECLARATION(FloatRegister, F7_ARG7,    F7);  // volatile
REGISTER_DECLARATION(FloatRegister, F8_ARG8,    F8);  // volatile
REGISTER_DECLARATION(FloatRegister, F9_ARG9,    F9);  // volatile
REGISTER_DECLARATION(FloatRegister, F10_ARG10,  F10); // volatile
REGISTER_DECLARATION(FloatRegister, F11_ARG11,  F11); // volatile
REGISTER_DECLARATION(FloatRegister, F12_ARG12,  F12); // volatile
REGISTER_DECLARATION(FloatRegister, F13_ARG13,  F13); // volatile
|
||||
|
||||
#ifndef DONT_USE_REGISTER_DEFINES
|
||||
#define R0_SCRATCH AS_REGISTER(Register, R0)
|
||||
#define R1_SP AS_REGISTER(Register, R1)
|
||||
#define R2_TOC AS_REGISTER(Register, R2)
|
||||
#define R3_RET AS_REGISTER(Register, R3)
|
||||
#define R3_ARG1 AS_REGISTER(Register, R3)
|
||||
#define R4_ARG2 AS_REGISTER(Register, R4)
|
||||
#define R5_ARG3 AS_REGISTER(Register, R5)
|
||||
#define R6_ARG4 AS_REGISTER(Register, R6)
|
||||
#define R7_ARG5 AS_REGISTER(Register, R7)
|
||||
#define R8_ARG6 AS_REGISTER(Register, R8)
|
||||
#define R9_ARG7 AS_REGISTER(Register, R9)
|
||||
#define R10_ARG8 AS_REGISTER(Register, R10)
|
||||
#define FO_SCRATCH AS_REGISTER(FloatRegister, F0)
|
||||
#define F1_RET AS_REGISTER(FloatRegister, F1)
|
||||
#define F1_ARG1 AS_REGISTER(FloatRegister, F1)
|
||||
#define F2_ARG2 AS_REGISTER(FloatRegister, F2)
|
||||
#define F3_ARG3 AS_REGISTER(FloatRegister, F3)
|
||||
#define F4_ARG4 AS_REGISTER(FloatRegister, F4)
|
||||
#define F5_ARG5 AS_REGISTER(FloatRegister, F5)
|
||||
#define F6_ARG6 AS_REGISTER(FloatRegister, F6)
|
||||
#define F7_ARG7 AS_REGISTER(FloatRegister, F7)
|
||||
#define F8_ARG8 AS_REGISTER(FloatRegister, F8)
|
||||
#define F9_ARG9 AS_REGISTER(FloatRegister, F9)
|
||||
#define F10_ARG10 AS_REGISTER(FloatRegister, F10)
|
||||
#define F11_ARG11 AS_REGISTER(FloatRegister, F11)
|
||||
#define F12_ARG12 AS_REGISTER(FloatRegister, F12)
|
||||
#define F13_ARG13 AS_REGISTER(FloatRegister, F13)
|
||||
#endif
|
||||
|
||||
// Register declarations to be used in frame manager assembly code.
// Use only non-volatile registers in order to keep values across C-calls.
REGISTER_DECLARATION(Register, R14_state,      R14); // address of new cInterpreter.
REGISTER_DECLARATION(Register, R15_prev_state, R15); // address of old cInterpreter
REGISTER_DECLARATION(Register, R16_thread,     R16); // address of current thread
REGISTER_DECLARATION(Register, R17_tos,        R17); // address of Java tos (prepushed).
REGISTER_DECLARATION(Register, R18_locals,     R18); // address of first param slot (receiver).
REGISTER_DECLARATION(Register, R19_method,     R19); // address of current method
#ifndef DONT_USE_REGISTER_DEFINES
#define R14_state         AS_REGISTER(Register, R14)
#define R15_prev_state    AS_REGISTER(Register, R15)
#define R16_thread        AS_REGISTER(Register, R16)
#define R17_tos           AS_REGISTER(Register, R17)
#define R18_locals        AS_REGISTER(Register, R18)
#define R19_method        AS_REGISTER(Register, R19)
// NOTE(review): R21_sender_SP and R23_method_handle are #defined here
// without matching REGISTER_DECLARATIONs above (R21/R23 are only declared
// as tmp registers below) — confirm this asymmetry is intended.
#define R21_sender_SP     AS_REGISTER(Register, R21)
#define R23_method_handle AS_REGISTER(Register, R23)
#endif
|
||||
|
||||
// Temporary registers to be used within frame manager. We can use
// the non-volatiles because the call stub has saved them.
// Use only non-volatile registers in order to keep values across C-calls.
REGISTER_DECLARATION(Register, R21_tmp1, R21);
REGISTER_DECLARATION(Register, R22_tmp2, R22);
REGISTER_DECLARATION(Register, R23_tmp3, R23);
REGISTER_DECLARATION(Register, R24_tmp4, R24);
REGISTER_DECLARATION(Register, R25_tmp5, R25);
REGISTER_DECLARATION(Register, R26_tmp6, R26);
REGISTER_DECLARATION(Register, R27_tmp7, R27);
REGISTER_DECLARATION(Register, R28_tmp8, R28);
REGISTER_DECLARATION(Register, R29_tmp9, R29);
REGISTER_DECLARATION(Register, R30_polling_page, R30);
#ifndef DONT_USE_REGISTER_DEFINES
#define R21_tmp1         AS_REGISTER(Register, R21)
#define R22_tmp2         AS_REGISTER(Register, R22)
#define R23_tmp3         AS_REGISTER(Register, R23)
#define R24_tmp4         AS_REGISTER(Register, R24)
#define R25_tmp5         AS_REGISTER(Register, R25)
#define R26_tmp6         AS_REGISTER(Register, R26)
#define R27_tmp7         AS_REGISTER(Register, R27)
#define R28_tmp8         AS_REGISTER(Register, R28)
#define R29_tmp9         AS_REGISTER(Register, R29)
#define R30_polling_page AS_REGISTER(Register, R30)

// NOTE(review): CCR4_is_synced is #defined without a matching
// REGISTER_DECLARATION/CONSTANT_REGISTER_DECLARATION — confirm that the
// define-only alias is intended.
#define CCR4_is_synced AS_REGISTER(ConditionRegister, CCR4)
#endif
|
||||
|
||||
// Scratch registers are volatile.
// R11/R12 carry no live Java state across calls and may be clobbered
// freely by stubs and generated glue code.
REGISTER_DECLARATION(Register, R11_scratch1, R11);
REGISTER_DECLARATION(Register, R12_scratch2, R12);
#ifndef DONT_USE_REGISTER_DEFINES
#define R11_scratch1 AS_REGISTER(Register, R11)
#define R12_scratch2 AS_REGISTER(Register, R12)
#endif
|
||||
|
||||
#endif // CPU_PPC_VM_REGISTER_PPC_HPP
|
133
hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp
Normal file
133
hotspot/src/cpu/ppc/vm/relocInfo_ppc.cpp
Normal file
@ -0,0 +1,133 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/assembler.inline.hpp"
|
||||
#include "assembler_ppc.inline.hpp"
|
||||
#include "code/relocInfo.hpp"
|
||||
#include "nativeInst_ppc.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
|
||||
// Patch (or, with verify_only, check) the data constant referenced by this
// relocation. "x" is the new value; a non-zero displacement "o" would mean
// a split relocation (SPARC-style set-hi/set-lo sharing), which this port
// does not support.
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
  bool copy_back_to_oop_pool = true;  // TODO: PPC port
  // Splitting of relocations is not supported, so the displacement must
  // be zero (see the DataRelocation contract in code/relocInfo.hpp).
  assert(o == 0, "tried to split relocations");

  if (verify_only) {
    // Verification pass: the constant already materialized by the
    // load-constant sequence must match the requested value.
    assert((address) (nativeMovConstReg_at(addr())->data()) == x, "data must match");
    return;
  }

  if (format() != 1) {
    // Full-width constant: patch the load-constant instruction sequence
    // in place.
    nativeMovConstReg_at(addr())->set_data_plain(((intptr_t)x), code());
  } else {
    // Format 1 marks a narrow (compressed) oop or klass constant.
    assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type,
           "how to encode else?");
    narrowOop no = (type() == relocInfo::oop_type) ?
      oopDesc::encode_heap_oop((oop)x) : oopDesc::encode_klass((Klass*)x);
    nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
  }
}
|
||||
|
||||
// Return the destination of the call/jump/branch at this relocation.
// "orig_addr", when non-NULL, is where the instruction lived before it was
// copied to addr(); pc-relative targets must then be read as if the
// instruction were still at its original location.
address Relocation::pd_call_destination(address orig_addr) {
  address inst_loc = addr();

  // A pc-relative target of a moved instruction appears shifted by
  // (addr() - orig_addr); "adj" undoes that shift.
  intptr_t adj = (orig_addr != NULL) ? -(inst_loc - orig_addr) : 0;

  if (NativeFarCall::is_far_call_at(inst_loc)) {
    NativeFarCall* call = nativeFarCall_at(inst_loc);
    return call->destination() + (intptr_t)(call->is_pcrelative() ? adj : 0);
  }
  if (NativeJump::is_jump_at(inst_loc)) {
    NativeJump* jump = nativeJump_at(inst_loc);
    return jump->jump_destination() + (intptr_t)(jump->is_pcrelative() ? adj : 0);
  }
  if (NativeConditionalFarBranch::is_conditional_far_branch_at(inst_loc)) {
    return NativeConditionalFarBranch_at(inst_loc)->branch_destination();
  }

  // Plain call through a trampoline stub: the target is materialized by a
  // load-constant that starts 8 bytes into the stub (there are two
  // instructions at the beginning of a stub).
  address stub = nativeCall_at(inst_loc)->get_trampoline();
  if (stub == NULL) {
    return (address) -1;
  }
  return (address) nativeMovConstReg_at(stub + 8)->data();
}
|
||||
|
||||
// Redirect the call/jump/branch at this relocation to target "x",
// dispatching on the kind of native instruction found at addr().
void Relocation::pd_set_call_destination(address x) {
  address inst_loc = addr();

  if (NativeFarCall::is_far_call_at(inst_loc)) {
    nativeFarCall_at(inst_loc)->set_destination(x);
  } else if (NativeJump::is_jump_at(inst_loc)) {
    nativeJump_at(inst_loc)->set_jump_destination(x);
  } else if (NativeConditionalFarBranch::is_conditional_far_branch_at(inst_loc)) {
    NativeConditionalFarBranch_at(inst_loc)->set_branch_destination(x);
  } else {
    // Plain call: patch without assuming other threads are stopped.
    nativeCall_at(inst_loc)->set_destination_mt_safe(x, false);
  }
}
|
||||
|
||||
// Never called on this port (guarded by ShouldNotReachHere): PPC does not
// keep a plain address word inside the instruction stream that could be
// returned here.
address* Relocation::pd_address_in_code() {
  ShouldNotReachHere();
  return 0;
}
|
||||
|
||||
// Read back the constant currently materialized by the load-constant
// instruction sequence at this relocation's address.
address Relocation::pd_get_address_from_code() {
  return (address)(nativeMovConstReg_at(addr())->data());
}
|
||||
|
||||
// Nothing to do when code containing a safepoint poll is moved —
// presumably the polling-page address is not encoded pc-relatively on
// PPC; confirm against the poll-emitting code.
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}
|
||||
|
||||
// Nothing to do when code containing a return-path safepoint poll is
// moved (same reasoning as poll_Relocation above — confirm).
void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}
|
||||
|
||||
// Intentionally empty: metadata constants need no port-specific fixup
// here — presumably they are fully handled via pd_set_data_value; confirm.
void metadata_Relocation::pd_fix_value(address x) {
}
|
46
hotspot/src/cpu/ppc/vm/relocInfo_ppc.hpp
Normal file
46
hotspot/src/cpu/ppc/vm/relocInfo_ppc.hpp
Normal file
@ -0,0 +1,46 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_RELOCINFO_PPC_HPP
|
||||
#define CPU_PPC_VM_RELOCINFO_PPC_HPP
|
||||
|
||||
// machine-dependent parts of class relocInfo
 private:
  enum {
    // Since Power instructions are whole words,
    // the two low-order offset bits can always be discarded.
    offset_unit = 4,

    // There is no need for format bits; the instructions are
    // sufficiently self-identifying.
#ifndef _LP64
    format_width = 0
#else
    // Except narrow oops in the 64-bit VM: format 1 marks a compressed
    // oop/klass constant (see pd_set_data_value in relocInfo_ppc.cpp).
    format_width = 1
#endif
  };
|
||||
|
||||
#endif // CPU_PPC_VM_RELOCINFO_PPC_HPP
|
3209
hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
Normal file
3209
hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
Normal file
File diff suppressed because it is too large
Load Diff
2057
hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp
Normal file
2057
hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp
Normal file
File diff suppressed because it is too large
Load Diff
40
hotspot/src/cpu/ppc/vm/stubRoutines_ppc_64.cpp
Normal file
40
hotspot/src/cpu/ppc/vm/stubRoutines_ppc_64.cpp
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "runtime/deoptimization.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#ifdef TARGET_OS_FAMILY_aix
|
||||
# include "thread_aix.inline.hpp"
|
||||
#endif
|
||||
#ifdef TARGET_OS_FAMILY_linux
|
||||
# include "thread_linux.inline.hpp"
|
||||
#endif
|
||||
|
||||
// Implementation of the platform-specific part of StubRoutines - for
|
||||
// a description of how to extend it, see the stubRoutines.hpp file.
|
||||
|
||||
|
40
hotspot/src/cpu/ppc/vm/stubRoutines_ppc_64.hpp
Normal file
40
hotspot/src/cpu/ppc/vm/stubRoutines_ppc_64.hpp
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
|
||||
#define CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
|
||||
|
||||
// This file holds the platform specific parts of the StubRoutines
|
||||
// definition. See stubRoutines.hpp for a description on how to
|
||||
// extend it.
|
||||
|
||||
// A return PC belongs to the call stub exactly when it equals the
// recorded call-stub return address.
static bool returns_to_call_stub(address return_pc) {
  return return_pc == _call_stub_return_address;
}
|
||||
|
||||
// Sizes (in bytes) of the two StubRoutines code buffers; see
// stubRoutines.hpp for how these buffers are used.
enum platform_dependent_constants {
  code_size1 = 20000, // simply increase if too small (assembler will crash if too small)
  code_size2 = 20000  // simply increase if too small (assembler will crash if too small)
};
|
||||
|
||||
#endif // CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
|
41
hotspot/src/cpu/ppc/vm/vmStructs_ppc.hpp
Normal file
41
hotspot/src/cpu/ppc/vm/vmStructs_ppc.hpp
Normal file
@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_VMSTRUCTS_PPC_HPP
|
||||
#define CPU_PPC_VM_VMSTRUCTS_PPC_HPP
|
||||
|
||||
// These are the CPU-specific fields, types and integer
|
||||
// constants required by the Serviceability Agent. This file is
|
||||
// referenced by vmStructs.cpp.
|
||||
|
||||
#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field)
|
||||
|
||||
#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)
|
||||
|
||||
#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
|
||||
|
||||
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
|
||||
|
||||
#endif // CPU_PPC_VM_VMSTRUCTS_PPC_HPP
|
472
hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
Normal file
472
hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp
Normal file
@ -0,0 +1,472 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "assembler_ppc.inline.hpp"
|
||||
#include "compiler/disassembler.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/stubCodeGenerator.hpp"
|
||||
#include "utilities/defaultStream.hpp"
|
||||
#include "vm_version_ppc.hpp"
|
||||
#ifdef TARGET_OS_FAMILY_aix
|
||||
# include "os_aix.inline.hpp"
|
||||
#endif
|
||||
#ifdef TARGET_OS_FAMILY_linux
|
||||
# include "os_linux.inline.hpp"
|
||||
#endif
|
||||
|
||||
# include <sys/sysinfo.h>
|
||||
|
||||
int VM_Version::_features = VM_Version::unknown_m;
|
||||
int VM_Version::_measured_cache_line_size = 128; // default value
|
||||
const char* VM_Version::_features_str = "";
|
||||
bool VM_Version::_is_determine_features_test_running = false;
|
||||
|
||||
|
||||
#define MSG(flag) \
|
||||
if (flag && !FLAG_IS_DEFAULT(flag)) \
|
||||
jio_fprintf(defaultStream::error_stream(), \
|
||||
"warning: -XX:+" #flag " requires -XX:+UseSIGTRAP\n" \
|
||||
" -XX:+" #flag " will be disabled!\n");
|
||||
|
||||
// Platform-specific VM initialization for PPC64: probes the CPU for
// supported instructions, derives ergonomic defaults for
// PowerArchitecturePPC64 and the trap-based check flags, builds the
// printable feature string, and tunes allocation-prefetch settings
// from the measured cache line size.
void VM_Version::initialize() {

  // Test which instructions are supported and measure cache line size.
  determine_features();

  // If PowerArchitecturePPC64 hasn't been specified explicitly determine from features.
  // popcntw implies Power7, cmpb Power6, popcntb Power5; otherwise fall back to 0.
  if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
    if (VM_Version::has_popcntw()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
    } else if (VM_Version::has_cmpb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
    } else if (VM_Version::has_popcntb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
    } else {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
    }
  }
  guarantee(PowerArchitecturePPC64 == 0 || PowerArchitecturePPC64 == 5 ||
            PowerArchitecturePPC64 == 6 || PowerArchitecturePPC64 == 7,
            "PowerArchitecturePPC64 should be 0, 5, 6 or 7");

  // All trap-based checks require SIGTRAP support; warn (via MSG) about any
  // explicitly requested flag, then force all of them off.
  if (!UseSIGTRAP) {
    MSG(TrapBasedICMissChecks);
    MSG(TrapBasedNotEntrantChecks);
    MSG(TrapBasedNullChecks);
    MSG(TrapBasedRangeChecks);
    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedNullChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedICMissChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedRangeChecks, false);
  }

#ifdef COMPILER2
  // On Power6 test for section size.
  if (PowerArchitecturePPC64 == 6)
    determine_section_size();
  // TODO: PPC port else
  // TODO: PPC port PdScheduling::power6SectorSize = 0x20;

  MaxVectorSize = 8;
#endif

  // Create and print feature-string.
  char buf[(num_features+1) * 16]; // max 16 chars per feature
  // NOTE(review): there are 8 '%s' conversions below, but num_features is 9
  // (dcba has no entry) -- confirm this is intended.
  jio_snprintf(buf, sizeof(buf),
               "ppc64%s%s%s%s%s%s%s%s",
               (has_fsqrt()   ? " fsqrt"   : ""),
               (has_isel()    ? " isel"    : ""),
               (has_lxarxeh() ? " lxarxeh" : ""),
               (has_cmpb()    ? " cmpb"    : ""),
               //(has_mftgpr()? " mftgpr"  : ""),
               (has_popcntb() ? " popcntb" : ""),
               (has_popcntw() ? " popcntw" : ""),
               (has_fcfids()  ? " fcfids"  : ""),
               (has_vand()    ? " vand"    : "")
               // Make sure number of %s matches num_features!
              );
  _features_str = strdup(buf);
  NOT_PRODUCT(if (Verbose) print_features(););

  // PPC64 supports 8-byte compare-exchange operations (see
  // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
  // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
  _supports_cx8 = true;

  UseSSE = 0; // Only on x86 and x64

  // Allocation-prefetch tuning based on the cache line size measured
  // by determine_features() above.
  intx cache_line_size = _measured_cache_line_size;

  if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1;

  if (AllocatePrefetchStyle == 4) {
    AllocatePrefetchStepSize = cache_line_size; // need exact value
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 12; // use larger blocks by default
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 2*cache_line_size; // default is not defined ?
  } else {
    if (cache_line_size > AllocatePrefetchStepSize) AllocatePrefetchStepSize = cache_line_size;
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 3; // Optimistic value
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 3*cache_line_size; // default is not defined ?
  }

  assert(AllocatePrefetchLines > 0, "invalid value");
  if (AllocatePrefetchLines < 1) // Set valid value in product VM.
    AllocatePrefetchLines = 1; // Conservative value

  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size)
    AllocatePrefetchStyle = 1; // fall back if inappropriate

  assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
}
|
||||
|
||||
void VM_Version::print_features() {
|
||||
tty->print_cr("Version: %s cache_line_size = %d", cpu_features(), get_cache_line_size());
|
||||
}
|
||||
|
||||
#ifdef COMPILER2
|
||||
// Determine section size on power6: If section size is 8 instructions,
|
||||
// there should be a difference between the two testloops of ~15 %. If
|
||||
// no difference is detected the section is assumed to be 32 instructions.
|
||||
// Determine section size on power6: If section size is 8 instructions,
// there should be a difference between the two testloops of ~15 %. If
// no difference is detected the section is assumed to be 32 instructions.
// Two loops with identical instruction mixes but different dispatch-group
// layouts are assembled and timed; the relative time difference reveals
// the dispatch sector size. The exact instruction order below is what is
// being measured -- do not reorder.
void VM_Version::determine_section_size() {

  int unroll = 80;

  const int code_size = (2* unroll * 32 + 100)*BytesPerInstWord;

  // Allocate space for the code
  ResourceMark rm;
  CodeBuffer cb("detect_section_size", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  uint32_t *code = (uint32_t *)a->pc();
  // emit code.
  void (*test1)() = (void(*)())(void *)a->emit_fd();

  Label l1;

  // R4 = 2^28, used as countdown for both test loops.
  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l1);
  a->align(CodeEntryAlignment);

  a->bind(l1);

  for (int i = 0; i < unroll; i++) {
    // Loop 1 ("Schleife 1")
    // ------- sector 0 ------------
    // ;; 0
    a->nop();                   // 1
    a->fpnop0();                // 2
    a->fpnop1();                // 3
    a->addi(R4,R4, -1); // 4

    // ;; 1
    a->nop();                   // 5
    a->fmr(F6, F6);             // 6
    a->fmr(F7, F7);             // 7
    a->endgroup();              // 8
    // ------- sector 8 ------------

    // ;; 2
    a->nop();                   // 9
    a->nop();                   // 10
    a->fmr(F8, F8);             // 11
    a->fmr(F9, F9);             // 12

    // ;; 3
    a->nop();                   // 13
    a->fmr(F10, F10);           // 14
    a->fmr(F11, F11);           // 15
    a->endgroup();              // 16
    // -------- sector 16 -------------

    // ;; 4
    a->nop();                   // 17
    a->nop();                   // 18
    a->fmr(F15, F15);           // 19
    a->fmr(F16, F16);           // 20

    // ;; 5
    a->nop();                   // 21
    a->fmr(F17, F17);           // 22
    a->fmr(F18, F18);           // 23
    a->endgroup();              // 24
    // ------- sector 24 ------------

    // ;; 6
    a->nop();                   // 25
    a->nop();                   // 26
    a->fmr(F19, F19);           // 27
    a->fmr(F20, F20);           // 28

    // ;; 7
    a->nop();                   // 29
    a->fmr(F21, F21);           // 30
    a->fmr(F22, F22);           // 31
    a->brnop0();                // 32

    // ------- sector 32 ------------
  }

  // ;; 8
  a->cmpdi(CCR0, R4, unroll);// 33
  a->bge(CCR0, l1);   // 34
  a->blr();

  // emit code.
  void (*test2)() = (void(*)())(void *)a->emit_fd();
  // uint32_t *code = (uint32_t *)a->pc();

  Label l2;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l2);
  a->align(CodeEntryAlignment);

  a->bind(l2);

  for (int i = 0; i < unroll; i++) {
    // Loop 2 ("Schleife 2") -- same instruction mix as loop 1, but the
    // endgroup placement crosses the assumed 8-instruction sectors.
    // ------- sector 0 ------------
    // ;; 0
    a->brnop0();                // 1
    a->nop();                   // 2
    //a->cmpdi(CCR0, R4, unroll);
    a->fpnop0();                // 3
    a->fpnop1();                // 4
    a->addi(R4,R4, -1); // 5

    // ;; 1

    a->nop();                   // 6
    a->fmr(F6, F6);             // 7
    a->fmr(F7, F7);             // 8
    // ------- sector 8 ---------------

    // ;; 2
    a->endgroup();              // 9

    // ;; 3
    a->nop();                   // 10
    a->nop();                   // 11
    a->fmr(F8, F8);             // 12

    // ;; 4
    a->fmr(F9, F9);             // 13
    a->nop();                   // 14
    a->fmr(F10, F10);           // 15

    // ;; 5
    a->fmr(F11, F11);           // 16
    // -------- sector 16 -------------

    // ;; 6
    a->endgroup();              // 17

    // ;; 7
    a->nop();                   // 18
    a->nop();                   // 19
    a->fmr(F15, F15);           // 20

    // ;; 8
    a->fmr(F16, F16);           // 21
    a->nop();                   // 22
    a->fmr(F17, F17);           // 23

    // ;; 9
    a->fmr(F18, F18);           // 24
    // -------- sector 24 -------------

    // ;; 10
    a->endgroup();              // 25

    // ;; 11
    a->nop();                   // 26
    a->nop();                   // 27
    a->fmr(F19, F19);           // 28

    // ;; 12
    a->fmr(F20, F20);           // 29
    a->nop();                   // 30
    a->fmr(F21, F21);           // 31

    // ;; 13
    a->fmr(F22, F22);           // 32
  }

  // -------- sector 32 -------------
  // ;; 14
  a->cmpdi(CCR0, R4, unroll); // 33
  a->bge(CCR0, l2); // 34

  a->blr();
  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // Time both loops using thread CPU time (nanoseconds -> seconds).
  double loop1_seconds,loop2_seconds, rel_diff;
  uint64_t start1, stop1;

  start1 = os::current_thread_cpu_time(false);
  (*test1)();
  stop1 = os::current_thread_cpu_time(false);
  loop1_seconds = (stop1- start1) / (1000 *1000 *1000.0);


  start1 = os::current_thread_cpu_time(false);
  (*test2)();
  stop1 = os::current_thread_cpu_time(false);

  loop2_seconds = (stop1 - start1) / (1000 *1000 *1000.0);

  // Relative slowdown of loop 2 over loop 1, in percent.
  rel_diff = (loop2_seconds - loop1_seconds) / loop1_seconds *100;

  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", code);
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
    tty->print_cr("Time loop1 :%f", loop1_seconds);
    tty->print_cr("Time loop2 :%f", loop2_seconds);
    tty->print_cr("(time2 - time1) / time1 = %f %%", rel_diff);

    if (rel_diff > 12.0) {
      tty->print_cr("Section Size 8 Instructions");
    } else{
      tty->print_cr("Section Size 32 Instructions or Power5");
    }
  }

#if 0 // TODO: PPC port
  // Set sector size (if not set explicitly).
  if (FLAG_IS_DEFAULT(Power6SectorSize128PPC64)) {
    if (rel_diff > 12.0) {
      PdScheduling::power6SectorSize = 0x20;
    } else {
      PdScheduling::power6SectorSize = 0x80;
    }
  } else if (Power6SectorSize128PPC64) {
    PdScheduling::power6SectorSize = 0x80;
  } else {
    PdScheduling::power6SectorSize = 0x20;
  }
#endif
  if (UsePower6SchedulerPPC64) Unimplemented();
}
|
||||
#endif // COMPILER2
|
||||
|
||||
// Detect which optional PPC64 instructions the CPU supports and measure
// the data cache line size. A small stub containing one candidate
// instruction per feature is assembled and executed; illegal
// instructions are replaced by 0 in the signal handler (see
// _is_determine_features_test_running), so afterwards a zero word in
// the stub means "not supported". The cache line size is measured by
// executing dcbz into a 0xFF-filled buffer and counting zeroed bytes.
void VM_Version::determine_features() {
  const int code_size = (num_features+1+2*7)*BytesPerInstWord; // 7 InstWords for each call (function descriptor + blr instruction)
  int features = 0;

  // create test area
  enum { BUFFER_SIZE = 2*4*K }; // needs to be >=2* max cache line size (cache line size can't exceed min page size)
  char test_area[BUFFER_SIZE];
  char *mid_of_test_area = &test_area[BUFFER_SIZE>>1];

  // Allocate space for the code
  ResourceMark rm;
  CodeBuffer cb("detect_cpu_features", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // emit code.
  void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->emit_fd();
  uint32_t *code = (uint32_t *)a->pc();
  // One probe instruction per feature; the stub's words are inspected
  // below at matching indices, so keep the order in sync.
  // Don't use R0 in ldarx.
  // keep R3_ARG1 = R3 unmodified, it contains &field (see below)
  // keep R4_ARG2 = R4 unmodified, it contains offset = 0 (see below)
  a->fsqrt(F3, F4);                         // code[0] -> fsqrt_m
  a->isel(R7, R5, R6, 0);                   // code[1] -> isel_m
  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1);// code[2] -> lxarx_m
  a->cmpb(R7, R5, R6);                      // code[3] -> bcmp
  //a->mftgpr(R7, F3);                      // code[4] -> mftgpr
  a->popcntb(R7, R5);                       // code[5] -> popcntb
  a->popcntw(R7, R5);                       // code[6] -> popcntw
  a->fcfids(F3, F4);                        // code[7] -> fcfids
  a->vand(VR0, VR0, VR0);                   // code[8] -> vand
  a->blr();

  // Emit function to set one cache line to zero
  void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->emit_fd(); // emit function descriptor and get pointer to it
  a->dcbz(R3_ARG1); // R3_ARG1 = R3 = addr
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", code);
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  // Measure cache line size.
  memset(test_area, 0xFF, BUFFER_SIZE); // fill test area with 0xFF
  (*zero_cacheline_func_ptr)(mid_of_test_area); // call function which executes dcbz to the middle
  int count = 0; // count zeroed bytes
  for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++;
  guarantee(is_power_of_2(count), "cache line size needs to be a power of 2");
  _measured_cache_line_size = count;

  // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
  VM_Version::_is_determine_features_test_running = true;
  (*test)((address)mid_of_test_area, (uint64_t)0);
  VM_Version::_is_determine_features_test_running = false;

  // determine which instructions are legal: a non-zero word means the
  // probe instruction survived (was not patched to 0 by the handler).
  int feature_cntr = 0;
  if (code[feature_cntr++]) features |= fsqrt_m;
  if (code[feature_cntr++]) features |= isel_m;
  if (code[feature_cntr++]) features |= lxarxeh_m;
  if (code[feature_cntr++]) features |= cmpb_m;
  //if(code[feature_cntr++])features |= mftgpr_m;
  if (code[feature_cntr++]) features |= popcntb_m;
  if (code[feature_cntr++]) features |= popcntw_m;
  if (code[feature_cntr++]) features |= fcfids_m;
  if (code[feature_cntr++]) features |= vand_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", code);
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  _features = features;
}
|
||||
|
||||
|
||||
static int saved_features = 0;
|
||||
|
||||
// Pretend every feature is available (used for assembler testing).
// The detected mask is stashed in saved_features so revert() can
// restore it afterwards.
void VM_Version::allow_all() {
  saved_features = _features;     // remember the real, detected mask
  _features      = all_features_m;
}
|
||||
|
||||
// Undo allow_all(): restore the feature mask saved before the override.
void VM_Version::revert() {
  _features = saved_features;
}
|
93
hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp
Normal file
93
hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp
Normal file
@ -0,0 +1,93 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_VM_VERSION_PPC_HPP
|
||||
#define CPU_PPC_VM_VM_VERSION_PPC_HPP
|
||||
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/vm_version.hpp"
|
||||
|
||||
// PPC64-specific VM_Version: CPU feature detection state and queries.
class VM_Version: public Abstract_VM_Version {
protected:
  // One bit position per detectable CPU instruction/feature.
  enum Feature_Flag {
    fsqrt,
    isel,
    lxarxeh,
    cmpb,
    popcntb,
    popcntw,
    fcfids,
    vand,
    dcba,        // NOTE(review): no probe for dcba is visible in determine_features() -- confirm
    num_features // last entry to count features
  };
  // Bit masks corresponding to the Feature_Flag positions above.
  enum Feature_Flag_Set {
    unknown_m             = 0,
    fsqrt_m               = (1 << fsqrt  ),
    isel_m                = (1 << isel   ),
    lxarxeh_m             = (1 << lxarxeh),
    cmpb_m                = (1 << cmpb   ),
    popcntb_m             = (1 << popcntb),
    popcntw_m             = (1 << popcntw),
    fcfids_m              = (1 << fcfids ),
    vand_m                = (1 << vand   ),
    dcba_m                = (1 << dcba   ),
    all_features_m        = -1
  };
  static int _features;                    // mask of detected Feature_Flag_Set bits
  static int _measured_cache_line_size;    // measured via dcbz in determine_features()
  static const char* _features_str;        // printable feature list (strdup'ed in initialize())
  static bool _is_determine_features_test_running; // true while the instruction-probing stub runs

  static void print_features();
  static void determine_features(); // also measures cache line size
  static void determine_section_size();
  static void power6_micro_bench(); // NOTE(review): declaration only; no definition visible here
public:
  // Initialization
  static void initialize();

  // Consulted by the signal handler that patches illegal instructions
  // during feature probing (see determine_features()).
  static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
  // CPU instruction support
  static bool has_fsqrt()   { return (_features & fsqrt_m) != 0; }
  static bool has_isel()    { return (_features & isel_m) != 0; }
  static bool has_lxarxeh() { return (_features & lxarxeh_m) !=0; }
  static bool has_cmpb()    { return (_features & cmpb_m) != 0; }
  static bool has_popcntb() { return (_features & popcntb_m) != 0; }
  static bool has_popcntw() { return (_features & popcntw_m) != 0; }
  static bool has_fcfids()  { return (_features & fcfids_m) != 0; }
  static bool has_vand()    { return (_features & vand_m) != 0; }
  static bool has_dcba()    { return (_features & dcba_m) != 0; }

  static const char* cpu_features() { return _features_str; }

  static int get_cache_line_size()  { return _measured_cache_line_size; }

  // Assembler testing
  static void allow_all();
  static void revert();
};
|
||||
|
||||
#endif // CPU_PPC_VM_VM_VERSION_PPC_HPP
|
51
hotspot/src/cpu/ppc/vm/vmreg_ppc.cpp
Normal file
51
hotspot/src/cpu/ppc/vm/vmreg_ppc.cpp
Normal file
@ -0,0 +1,51 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/assembler.hpp"
|
||||
#include "code/vmreg.hpp"
|
||||
|
||||
void VMRegImpl::set_regName() {
|
||||
Register reg = ::as_Register(0);
|
||||
int i;
|
||||
for (i = 0; i < ConcreteRegisterImpl::max_gpr; ) {
|
||||
regName[i++] = reg->name();
|
||||
regName[i++] = reg->name();
|
||||
if (reg->encoding() < RegisterImpl::number_of_registers-1)
|
||||
reg = reg->successor();
|
||||
}
|
||||
|
||||
FloatRegister freg = ::as_FloatRegister(0);
|
||||
for ( ; i < ConcreteRegisterImpl::max_fpr; ) {
|
||||
regName[i++] = freg->name();
|
||||
regName[i++] = freg->name();
|
||||
if (reg->encoding() < FloatRegisterImpl::number_of_registers-1)
|
||||
freg = freg->successor();
|
||||
}
|
||||
for ( ; i < ConcreteRegisterImpl::number_of_registers; i++) {
|
||||
regName[i] = "NON-GPR-FPR";
|
||||
}
|
||||
}
|
||||
|
35
hotspot/src/cpu/ppc/vm/vmreg_ppc.hpp
Normal file
35
hotspot/src/cpu/ppc/vm/vmreg_ppc.hpp
Normal file
@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_VMREG_PPC_HPP
|
||||
#define CPU_PPC_VM_VMREG_PPC_HPP
|
||||
|
||||
bool is_Register();
|
||||
Register as_Register();
|
||||
|
||||
bool is_FloatRegister();
|
||||
FloatRegister as_FloatRegister();
|
||||
|
||||
#endif // CPU_PPC_VM_VMREG_PPC_HPP
|
71
hotspot/src/cpu/ppc/vm/vmreg_ppc.inline.hpp
Normal file
71
hotspot/src/cpu/ppc/vm/vmreg_ppc.inline.hpp
Normal file
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_VM_VMREG_PPC_INLINE_HPP
|
||||
#define CPU_PPC_VM_VMREG_PPC_INLINE_HPP
|
||||
|
||||
// Map a GPR to its VMReg name. Each GPR covers two VMReg slots, hence
// the encoding is doubled; noreg maps to the bad VMReg.
inline VMReg RegisterImpl::as_VMReg() {
  return (this == noreg) ? VMRegImpl::Bad()
                         : VMRegImpl::as_VMReg(encoding() << 1);
}
|
||||
|
||||
// Since we don't have two halfs here, don't multiply by 2.
|
||||
inline VMReg ConditionRegisterImpl::as_VMReg() {
|
||||
return VMRegImpl::as_VMReg((encoding()) + ConcreteRegisterImpl::max_fpr);
|
||||
}
|
||||
|
||||
// Map an FPR to its VMReg name: two slots per FPR, offset past the GPR slots.
inline VMReg FloatRegisterImpl::as_VMReg() {
  const int slot = (encoding() << 1) + ConcreteRegisterImpl::max_gpr;
  return VMRegImpl::as_VMReg(slot);
}
|
||||
|
||||
// Special registers take a single VMReg slot, placed after the
// condition-register slots.
inline VMReg SpecialRegisterImpl::as_VMReg() {
  const int slot = encoding() + ConcreteRegisterImpl::max_cnd;
  return VMRegImpl::as_VMReg(slot);
}
|
||||
|
||||
inline bool VMRegImpl::is_Register() {
|
||||
return (unsigned int)value() < (unsigned int)ConcreteRegisterImpl::max_gpr;
|
||||
}
|
||||
|
||||
inline bool VMRegImpl::is_FloatRegister() {
|
||||
return value() >= ConcreteRegisterImpl::max_gpr &&
|
||||
value() < ConcreteRegisterImpl::max_fpr;
|
||||
}
|
||||
|
||||
// Convert back to a Register: each GPR spans two slots, so halve the value.
inline Register VMRegImpl::as_Register() {
  assert(is_Register() && is_even(value()), "even-aligned GPR name");
  const int encoding = value() >> 1;
  return ::as_Register(encoding);
}
|
||||
|
||||
// Convert back to a FloatRegister: subtract the GPR slot range, then
// halve (two slots per FPR).
inline FloatRegister VMRegImpl::as_FloatRegister() {
  assert(is_FloatRegister() && is_even(value()), "must be");
  const int encoding = (value() - ConcreteRegisterImpl::max_gpr) >> 1;
  return ::as_FloatRegister(encoding);
}
|
||||
|
||||
inline bool VMRegImpl::is_concrete() {
|
||||
assert(is_reg(), "must be");
|
||||
return is_even(value());
|
||||
}
|
||||
|
||||
#endif // CPU_PPC_VM_VMREG_PPC_INLINE_HPP
|
269
hotspot/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp
Normal file
269
hotspot/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp
Normal file
@ -0,0 +1,269 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/assembler.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "code/vtableStubs.hpp"
|
||||
#include "interp_masm_ppc_64.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/instanceKlass.hpp"
|
||||
#include "oops/klassVtable.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "vmreg_ppc.inline.hpp"
|
||||
#ifdef COMPILER2
|
||||
#include "opto/runtime.hpp"
|
||||
#endif
|
||||
|
||||
#define __ masm->
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) // nothing
|
||||
#else
|
||||
#define BLOCK_COMMENT(str) __ block_comment(str)
|
||||
#endif
|
||||
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
|
||||
|
||||
#ifndef PRODUCT
|
||||
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
|
||||
#endif
|
||||
|
||||
// Used by compiler only; may use only caller saved, non-argument
|
||||
// registers.
|
||||
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
|
||||
// PPC port: use fixed size.
|
||||
const int code_length = VtableStub::pd_code_size_limit(true);
|
||||
VtableStub* s = new (code_length) VtableStub(true, vtable_index);
|
||||
ResourceMark rm;
|
||||
CodeBuffer cb(s->entry_point(), code_length);
|
||||
MacroAssembler* masm = new MacroAssembler(&cb);
|
||||
address start_pc;
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (CountCompiledCalls) {
|
||||
__ load_const(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr());
|
||||
__ lwz(R12_scratch2, 0, R11_scratch1);
|
||||
__ addi(R12_scratch2, R12_scratch2, 1);
|
||||
__ stw(R12_scratch2, 0, R11_scratch1);
|
||||
}
|
||||
#endif
|
||||
|
||||
assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");
|
||||
|
||||
// Get receiver klass.
|
||||
const Register rcvr_klass = R11_scratch1;
|
||||
|
||||
// We might implicit NULL fault here.
|
||||
address npe_addr = __ pc(); // npe = null pointer exception
|
||||
__ load_klass_with_trap_null_check(rcvr_klass, R3);
|
||||
|
||||
// Set methodOop (in case of interpreted method), and destination address.
|
||||
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (DebugVtables) {
|
||||
Label L;
|
||||
// Check offset vs vtable length.
|
||||
const Register vtable_len = R12_scratch2;
|
||||
__ lwz(vtable_len, InstanceKlass::vtable_length_offset()*wordSize, rcvr_klass);
|
||||
__ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size());
|
||||
__ bge(CCR0, L);
|
||||
__ li(R12_scratch2, vtable_index);
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false);
|
||||
__ bind(L);
|
||||
}
|
||||
#endif
|
||||
|
||||
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
|
||||
|
||||
__ ld(R19_method, v_off, rcvr_klass);
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (DebugVtables) {
|
||||
Label L;
|
||||
__ cmpdi(CCR0, R19_method, 0);
|
||||
__ bne(CCR0, L);
|
||||
__ stop("Vtable entry is ZERO", 102);
|
||||
__ bind(L);
|
||||
}
|
||||
#endif
|
||||
|
||||
// If the vtable entry is null, the method is abstract.
|
||||
address ame_addr = __ pc(); // ame = abstract method error
|
||||
|
||||
__ ld_with_trap_null_check(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
|
||||
__ mtctr(R12_scratch2);
|
||||
__ bctr();
|
||||
masm->flush();
|
||||
|
||||
guarantee(__ pc() <= s->code_end(), "overflowed buffer");
|
||||
|
||||
s->set_exception_points(npe_addr, ame_addr);
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
// Generates the out-of-line stub for a megamorphic itable dispatch:
// load the receiver's klass (trapping on NULL), linearly search the
// itable for the interface in R19_method, fetch the target Method*
// from the interface's vtable section, and tail-jump to its
// from_compiled entry point. Throws IncompatibleClassChangeError if
// the interface is not found.
VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
  // PPC port: use fixed size.
  const int code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new (code_length) VtableStub(false, vtable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    // Bump the global megamorphic-call counter.
    __ load_const(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr());
    __ lwz(R12_scratch2, 0, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ stw(R12_scratch2, 0, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  // Entry arguments:
  //  R19_method: Interface
  //  R3_ARG1:    Receiver
  //

  const Register rcvr_klass        = R11_scratch1;
  const Register vtable_len        = R12_scratch2;
  const Register itable_entry_addr = R21_tmp1;
  const Register itable_interface  = R22_tmp2;

  // Get receiver klass.

  // We might implicit NULL fault here.
  address npe_addr = __ pc(); // npe = null pointer exception
  __ load_klass_with_trap_null_check(rcvr_klass, R3_ARG1);

  //__ ld(rcvr_klass, oopDesc::klass_offset_in_bytes(), R3_ARG1);

  // The itable starts right after the embedded vtable; skip over
  // vtable_len entries to reach it.
  BLOCK_COMMENT("Load start of itable entries into itable_entry.");
  __ lwz(vtable_len, InstanceKlass::vtable_length_offset() * wordSize, rcvr_klass);
  __ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size() * wordSize));
  __ add(itable_entry_addr, vtable_len, rcvr_klass);

  // Loop over all itable entries until desired interfaceOop(Rinterface) found.
  BLOCK_COMMENT("Increment itable_entry_addr in loop.");
  const int vtable_base_offset = InstanceKlass::vtable_start_offset() * wordSize;
  __ addi(itable_entry_addr, itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes());

  const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
  Label search;
  __ bind(search);
  __ ld(itable_interface, 0, itable_entry_addr);

  // Handle IncompatibleClassChangeError in itable stubs.
  // If the entry is NULL then we've reached the end of the table
  // without finding the expected interface, so throw an exception.
  BLOCK_COMMENT("Handle IncompatibleClassChangeError in itable stubs.");
  Label throw_icce;
  __ cmpdi(CCR1, itable_interface, 0);
  __ cmpd(CCR0, itable_interface, R19_method);
  __ addi(itable_entry_addr, itable_entry_addr, itable_offset_search_inc);
  __ beq(CCR1, throw_icce);
  __ bne(CCR0, search);

  // Entry found and itable_entry_addr points to it, get offset of vtable for interface.

  const Register vtable_offset = R12_scratch2;
  const Register itable_method = R11_scratch1;

  // itable_entry_addr was already advanced past this entry in the
  // search loop, so compensate by -itable_offset_search_inc.
  const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
                                    itableOffsetEntry::interface_offset_in_bytes()) -
                                   itable_offset_search_inc;
  __ lwz(vtable_offset, vtable_offset_offset, itable_entry_addr);

  // Compute itableMethodEntry and get methodOop and entry point for compiler.
  const int method_offset = (itableMethodEntry::size() * wordSize * vtable_index) +
                            itableMethodEntry::method_offset_in_bytes();

  __ add(itable_method, rcvr_klass, vtable_offset);
  __ ld(R19_method, method_offset, itable_method);

#ifndef PRODUCT
  if (DebugVtables) {
    Label ok;
    // Compare against immediate zero: must be cmpdi, not cmpd (cmpd is
    // a register-register compare, so a literal 0 would be taken as a
    // register operand and the check would test against R0's contents).
    __ cmpdi(CCR0, R19_method, 0);
    __ bne(CCR0, ok);
    __ stop("methodOop is null", 103);
    __ bind(ok);
  }
#endif

  // If the vtable entry is null, the method is abstract.
  address ame_addr = __ pc(); // ame = abstract method error

  // Must do an explicit check if implicit checks are disabled.
  assert(!MacroAssembler::needs_explicit_null_check(in_bytes(Method::from_compiled_offset())), "sanity");
  if (!ImplicitNullChecks NOT_LINUX(|| true) /*!os::zero_page_read_protected()*/) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R19_method);
    } else {
      __ cmpdi(CCR0, R19_method, 0);
      __ beq(CCR0, throw_icce);
    }
  }
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  // Handle IncompatibleClassChangeError in itable stubs.
  // More detailed error message.
  // We force resolving of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  __ bind(throw_icce);
  __ load_const(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub());
  __ mtctr(R11_scratch1);
  __ bctr();

  masm->flush();

  guarantee(__ pc() <= s->code_end(), "overflowed buffer");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
|
||||
|
||||
int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
|
||||
if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) {
|
||||
return 1000;
|
||||
} else {
|
||||
if (is_vtable_stub) {
|
||||
return 20 + 16 + 8; // Plain + (cOops & Traps) + safety
|
||||
} else {
|
||||
return 16 + 96;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Align stub entry points to instruction-cache line boundaries.
int VtableStub::pd_code_alignment() {
  // 32-byte icache lines are assumed here.
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}
|
401
hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp
Normal file
401
hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp
Normal file
@ -0,0 +1,401 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
|
||||
#define OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
|
||||
|
||||
#include "orderAccess_linux_ppc.inline.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "vm_version_ppc.hpp"
|
||||
|
||||
#ifndef PPC64
|
||||
#error "Atomic currently only implemented for PPC64"
|
||||
#endif
|
||||
|
||||
// Implementation of class atomic

// Plain stores: aligned 1/2/4/8-byte stores are single instructions
// on PPC64 and hence atomic; no memory ordering is implied here.
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }

// Same as above for volatile destinations.
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }

// Plain 64-bit load; a single instruction on PPC64, no ordering implied.
inline jlong Atomic::load(volatile jlong* src) { return *src; }
|
||||
|
||||
/*
|
||||
machine barrier instructions:
|
||||
|
||||
- sync two-way memory barrier, aka fence
|
||||
- lwsync orders Store|Store,
|
||||
Load|Store,
|
||||
Load|Load,
|
||||
but not Store|Load
|
||||
- eieio orders memory accesses for device memory (only)
|
||||
- isync invalidates speculatively executed instructions
|
||||
From the POWER ISA 2.06 documentation:
|
||||
"[...] an isync instruction prevents the execution of
|
||||
instructions following the isync until instructions
|
||||
preceding the isync have completed, [...]"
|
||||
From IBM's AIX assembler reference:
|
||||
"The isync [...] instructions causes the processor to
|
||||
refetch any instructions that might have been fetched
|
||||
prior to the isync instruction. The instruction isync
|
||||
causes the processor to wait for all previous instructions
|
||||
to complete. Then any instructions already fetched are
|
||||
discarded and instruction processing continues in the
|
||||
environment established by the previous instructions."
|
||||
|
||||
semantic barrier instructions:
|
||||
(as defined in orderAccess.hpp)
|
||||
|
||||
- release orders Store|Store, (maps to lwsync)
|
||||
Load|Store
|
||||
- acquire orders Load|Store, (maps to lwsync)
|
||||
Load|Load
|
||||
- fence orders Store|Store, (maps to sync)
|
||||
Load|Store,
|
||||
Load|Load,
|
||||
Store|Load
|
||||
*/
|
||||
|
||||
#define strasm_sync "\n sync \n"
|
||||
#define strasm_lwsync "\n lwsync \n"
|
||||
#define strasm_isync "\n isync \n"
|
||||
#define strasm_release strasm_lwsync
|
||||
#define strasm_acquire strasm_lwsync
|
||||
#define strasm_fence strasm_sync
|
||||
#define strasm_nobarrier ""
|
||||
#define strasm_nobarrier_clobber_memory ""
|
||||
|
||||
// Atomically add add_value to *dest and return the NEW value.
// lwsync before and isync after the lwarx/stwcx. loop give the
// operation release and acquire semantics (see the barrier
// discussion above).
inline jint Atomic::add (jint add_value, volatile jint* dest) {

  unsigned int result;

  __asm__ __volatile__ (
    strasm_lwsync
    "1: lwarx %0, 0, %2 \n"
    " add %0, %0, %1 \n"
    " stwcx. %0, 0, %2 \n"
    " bne- 1b \n"
    strasm_isync
    : /*%0*/"=&r" (result)
    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
    : "cc", "memory" );

  return (jint) result;
}


// 64-bit variant of add() using ldarx/stdcx.; same barrier scheme.
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {

  long result;

  __asm__ __volatile__ (
    strasm_lwsync
    "1: ldarx %0, 0, %2 \n"
    " add %0, %0, %1 \n"
    " stdcx. %0, 0, %2 \n"
    " bne- 1b \n"
    strasm_isync
    : /*%0*/"=&r" (result)
    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
    : "cc", "memory" );

  return (intptr_t) result;
}

// Pointer-typed convenience wrapper around add_ptr().
inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
}
|
||||
|
||||
|
||||
// Atomically increment *dest. Emits no barriers (strasm_nobarrier):
// inc() provides atomicity only, not ordering.
inline void Atomic::inc (volatile jint* dest) {

  unsigned int temp;

  __asm__ __volatile__ (
    strasm_nobarrier
    "1: lwarx %0, 0, %2 \n"
    " addic %0, %0, 1 \n"
    " stwcx. %0, 0, %2 \n"
    " bne- 1b \n"
    strasm_nobarrier
    : /*%0*/"=&r" (temp), "=m" (*dest)
    : /*%2*/"r" (dest), "m" (*dest)
    : "cc" strasm_nobarrier_clobber_memory);

}

// 64-bit variant of inc() using ldarx/stdcx.; no barriers either.
inline void Atomic::inc_ptr(volatile intptr_t* dest) {

  long temp;

  __asm__ __volatile__ (
    strasm_nobarrier
    "1: ldarx %0, 0, %2 \n"
    " addic %0, %0, 1 \n"
    " stdcx. %0, 0, %2 \n"
    " bne- 1b \n"
    strasm_nobarrier
    : /*%0*/"=&r" (temp), "=m" (*dest)
    : /*%2*/"r" (dest), "m" (*dest)
    : "cc" strasm_nobarrier_clobber_memory);

}

// Pointer-typed convenience wrapper around inc_ptr().
inline void Atomic::inc_ptr(volatile void* dest) {
  inc_ptr((volatile intptr_t*)dest);
}
|
||||
|
||||
|
||||
// Atomically decrement *dest (addic with -1). Like inc(), emits no
// barriers: atomicity only, no ordering.
inline void Atomic::dec (volatile jint* dest) {

  unsigned int temp;

  __asm__ __volatile__ (
    strasm_nobarrier
    "1: lwarx %0, 0, %2 \n"
    " addic %0, %0, -1 \n"
    " stwcx. %0, 0, %2 \n"
    " bne- 1b \n"
    strasm_nobarrier
    : /*%0*/"=&r" (temp), "=m" (*dest)
    : /*%2*/"r" (dest), "m" (*dest)
    : "cc" strasm_nobarrier_clobber_memory);

}

// 64-bit variant of dec() using ldarx/stdcx.; no barriers either.
inline void Atomic::dec_ptr(volatile intptr_t* dest) {

  long temp;

  __asm__ __volatile__ (
    strasm_nobarrier
    "1: ldarx %0, 0, %2 \n"
    " addic %0, %0, -1 \n"
    " stdcx. %0, 0, %2 \n"
    " bne- 1b \n"
    strasm_nobarrier
    : /*%0*/"=&r" (temp), "=m" (*dest)
    : /*%2*/"r" (dest), "m" (*dest)
    : "cc" strasm_nobarrier_clobber_memory);

}

// Pointer-typed convenience wrapper around dec_ptr().
inline void Atomic::dec_ptr(volatile void* dest) {
  dec_ptr((volatile intptr_t*)dest);
}
|
||||
|
||||
// Atomically swap *dest with exchange_value and return the OLD value.
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {

  // Note that xchg_ptr doesn't necessarily do an acquire
  // (see synchronizer.cpp).

  unsigned int old_value;
  const uint64_t zero = 0;

  __asm__ __volatile__ (
    /* lwsync */
    strasm_lwsync
    /* atomic loop */
    "1: \n"
    " lwarx %[old_value], %[dest], %[zero] \n"
    " stwcx. %[exchange_value], %[dest], %[zero] \n"
    " bne- 1b \n"
    /* isync */
    // NOTE(review): the comment above says isync, but strasm_sync (a
    // full sync, which is stronger) is emitted — confirm intended.
    strasm_sync
    /* exit */
    "2: \n"
    /* out */
    : [old_value] "=&r" (old_value),
      "=m" (*dest)
    /* in */
    : [dest] "b" (dest),
      [zero] "r" (zero),
      [exchange_value] "r" (exchange_value),
      "m" (*dest)
    /* clobber */
    : "cc",
      "memory"
    );

  return (jint) old_value;
}

// 64-bit variant of xchg() using ldarx/stdcx.; same barrier scheme.
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {

  // Note that xchg_ptr doesn't necessarily do an acquire
  // (see synchronizer.cpp).

  long old_value;
  const uint64_t zero = 0;

  __asm__ __volatile__ (
    /* lwsync */
    strasm_lwsync
    /* atomic loop */
    "1: \n"
    " ldarx %[old_value], %[dest], %[zero] \n"
    " stdcx. %[exchange_value], %[dest], %[zero] \n"
    " bne- 1b \n"
    /* isync */
    // NOTE(review): comment/instruction mismatch as in xchg() above.
    strasm_sync
    /* exit */
    "2: \n"
    /* out */
    : [old_value] "=&r" (old_value),
      "=m" (*dest)
    /* in */
    : [dest] "b" (dest),
      [zero] "r" (zero),
      [exchange_value] "r" (exchange_value),
      "m" (*dest)
    /* clobber */
    : "cc",
      "memory"
    );

  return (intptr_t) old_value;
}

// Pointer-typed convenience wrapper around xchg_ptr().
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
|
||||
|
||||
// Atomically compare *dest with compare_value and, if equal, store
// exchange_value; returns the OLD value either way.
inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {

  // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire'
  // (see atomic.hpp).

  unsigned int old_value;
  const uint64_t zero = 0;

  __asm__ __volatile__ (
    /* fence */
    strasm_sync
    /* simple guard */
    // A plain load-and-compare first: if the compare already fails we
    // skip the reservation (lwarx/stwcx.) entirely.
    " lwz %[old_value], 0(%[dest]) \n"
    " cmpw %[compare_value], %[old_value] \n"
    " bne- 2f \n"
    /* atomic loop */
    "1: \n"
    " lwarx %[old_value], %[dest], %[zero] \n"
    " cmpw %[compare_value], %[old_value] \n"
    " bne- 2f \n"
    " stwcx. %[exchange_value], %[dest], %[zero] \n"
    " bne- 1b \n"
    /* acquire */
    strasm_sync
    /* exit */
    "2: \n"
    /* out */
    : [old_value] "=&r" (old_value),
      "=m" (*dest)
    /* in */
    : [dest] "b" (dest),
      [zero] "r" (zero),
      [compare_value] "r" (compare_value),
      [exchange_value] "r" (exchange_value),
      "m" (*dest)
    /* clobber */
    : "cc",
      "memory"
    );

  return (jint) old_value;
}

// 64-bit variant of cmpxchg() using ld/ldarx/stdcx.; same structure.
inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {

  // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire'
  // (see atomic.hpp).

  long old_value;
  const uint64_t zero = 0;

  __asm__ __volatile__ (
    /* fence */
    strasm_sync
    /* simple guard */
    " ld %[old_value], 0(%[dest]) \n"
    " cmpd %[compare_value], %[old_value] \n"
    " bne- 2f \n"
    /* atomic loop */
    "1: \n"
    " ldarx %[old_value], %[dest], %[zero] \n"
    " cmpd %[compare_value], %[old_value] \n"
    " bne- 2f \n"
    " stdcx. %[exchange_value], %[dest], %[zero] \n"
    " bne- 1b \n"
    /* acquire */
    strasm_sync
    /* exit */
    "2: \n"
    /* out */
    : [old_value] "=&r" (old_value),
      "=m" (*dest)
    /* in */
    : [dest] "b" (dest),
      [zero] "r" (zero),
      [compare_value] "r" (compare_value),
      [exchange_value] "r" (exchange_value),
      "m" (*dest)
    /* clobber */
    : "cc",
      "memory"
    );

  return (jlong) old_value;
}

// Pointer-width wrappers; intptr_t is 64-bit here (PPC64 only, see
// the #error guard at the top of this file).
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}
|
||||
|
||||
#undef strasm_sync
|
||||
#undef strasm_lwsync
|
||||
#undef strasm_isync
|
||||
#undef strasm_release
|
||||
#undef strasm_acquire
|
||||
#undef strasm_fence
|
||||
#undef strasm_nobarrier
|
||||
#undef strasm_nobarrier_clobber_memory
|
||||
|
||||
#endif // OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
|
54
hotspot/src/os_cpu/linux_ppc/vm/globals_linux_ppc.hpp
Normal file
54
hotspot/src/os_cpu/linux_ppc/vm/globals_linux_ppc.hpp
Normal file
@ -0,0 +1,54 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_CPU_LINUX_PPC_VM_GLOBALS_LINUX_PPC_HPP
|
||||
#define OS_CPU_LINUX_PPC_VM_GLOBALS_LINUX_PPC_HPP
|
||||
|
||||
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)

define_pd_global(bool, DontYieldALot,            false);
define_pd_global(intx, ThreadStackSize,          2048); // 0 => use system default
define_pd_global(intx, VMThreadStackSize,        2048);

// if we set CompilerThreadStackSize to a value different than 0, it will
// be used in os::create_thread(). Otherwise, due the strange logic in os::create_thread(),
// the stack size for compiler threads will default to VMThreadStackSize, although it
// is defined to 4M in os::Linux::default_stack_size()!
define_pd_global(intx, CompilerThreadStackSize,  4096);

// Allow extra space in DEBUG builds for asserts.
define_pd_global(uintx,JVMInvokeMethodSlack,     8192);

// Stack guard zone sizing (in pages).
define_pd_global(intx, StackYellowPages,         6);
define_pd_global(intx, StackRedPages,            1);
define_pd_global(intx, StackShadowPages,         6 DEBUG_ONLY(+2));

// Only used on 64 bit platforms
define_pd_global(uintx,HeapBaseMinAddress,       2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions,    false);
|
||||
|
||||
#endif // OS_CPU_LINUX_PPC_VM_GLOBALS_LINUX_PPC_HPP
|
149
hotspot/src/os_cpu/linux_ppc/vm/orderAccess_linux_ppc.inline.hpp
Normal file
149
hotspot/src/os_cpu/linux_ppc/vm/orderAccess_linux_ppc.inline.hpp
Normal file
@ -0,0 +1,149 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
#define OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP

#include "runtime/orderAccess.hpp"
#include "vm_version_ppc.hpp"

#ifndef PPC64
#error "OrderAccess currently only implemented for PPC64"
#endif

// Implementation of class OrderAccess for Linux/PPC64.
//
// Machine barrier instructions:
//
// - sync            Two-way memory barrier, aka fence.
// - lwsync          orders  Store|Store,
//                            Load|Store,
//                            Load|Load,
//                           but not Store|Load
// - eieio           orders  Store|Store
// - isync           Invalidates speculatively executed instructions,
//                   but isync may complete before storage accesses
//                   associated with instructions preceding isync have
//                   been performed.
//
// Semantic barrier instructions:
// (as defined in orderAccess.hpp)
//
// - release         orders Store|Store,       (maps to lwsync)
//                           Load|Store
// - acquire         orders  Load|Store,       (maps to lwsync)
//                           Load|Load
// - fence           orders Store|Store,       (maps to sync)
//                           Load|Store,
//                           Load|Load,
//                          Store|Load
//

// Note: each macro body already carries its trailing semicolon, so the
// macros are used below without one.  The "memory" clobber prevents the
// compiler from reordering memory accesses across the barrier.
#define inlasm_sync()     __asm__ __volatile__ ("sync"   : : : "memory");
#define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
#define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
#define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
#define inlasm_release()  inlasm_lwsync();
#define inlasm_acquire()  inlasm_lwsync();
// Use twi-isync for load_acquire (faster than lwsync).
// twi 0,%0,0 is a never-taken trap on the loaded value; together with the
// following isync it forms a cheap load-acquire sequence that depends on
// the loaded register.
#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
#define inlasm_fence()    inlasm_sync();

// Standalone barriers (see the mapping table above).
inline void   OrderAccess::loadload()   { inlasm_lwsync(); }
inline void   OrderAccess::storestore() { inlasm_lwsync(); }
inline void   OrderAccess::loadstore()  { inlasm_lwsync(); }
inline void   OrderAccess::storeload()  { inlasm_fence();  }

inline void   OrderAccess::acquire()    { inlasm_acquire(); }
inline void   OrderAccess::release()    { inlasm_release(); }
inline void   OrderAccess::fence()      { inlasm_fence();   }

// Load-acquire: plain load followed by the twi-isync acquire sequence on the
// loaded value.  The float/double variants fall back to a full lwsync-based
// acquire since the register trick needs an integer register.
inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { register jbyte t = *p;   inlasm_acquire_reg(t); return t; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { register jshort t = *p;  inlasm_acquire_reg(t); return t; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { register jint t = *p;    inlasm_acquire_reg(t); return t; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { register jlong t = *p;   inlasm_acquire_reg(t); return t; }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { register jubyte t = *p;  inlasm_acquire_reg(t); return t; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { register jushort t = *p; inlasm_acquire_reg(t); return t; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { register juint t = *p;   inlasm_acquire_reg(t); return t; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return (julong)load_acquire((volatile jlong*)p); }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { register jfloat t = *p;  inlasm_acquire(); return t; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { register jdouble t = *p; inlasm_acquire(); return t; }

// Pointer-sized load-acquire; PPC64 is 64-bit only here, so jlong width fits.
inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return (intptr_t)load_acquire((volatile jlong*)p); }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return (void*)   load_acquire((volatile jlong*)p); }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*)   load_acquire((volatile jlong*)p); }

// Release-store: lwsync before the store orders all prior accesses ahead of it.
inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { inlasm_release(); *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { inlasm_release(); *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { inlasm_release(); *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { inlasm_release(); *p = v; }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; }

// Store-fence: store, then a full sync so the store is globally visible
// before any subsequent access.
inline void     OrderAccess::store_fence(jbyte*   p, jbyte   v) { *p = v; inlasm_fence(); }
inline void     OrderAccess::store_fence(jshort*  p, jshort  v) { *p = v; inlasm_fence(); }
inline void     OrderAccess::store_fence(jint*    p, jint    v) { *p = v; inlasm_fence(); }
inline void     OrderAccess::store_fence(jlong*   p, jlong   v) { *p = v; inlasm_fence(); }
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { *p = v; inlasm_fence(); }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { *p = v; inlasm_fence(); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { *p = v; inlasm_fence(); }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { *p = v; inlasm_fence(); }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; inlasm_fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; inlasm_fence(); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; inlasm_fence(); }
inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) { *p = v; inlasm_fence(); }

// Release-store-fence: lwsync / store / sync — both ordered against
// preceding accesses and made globally visible before returning.
inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; inlasm_fence(); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; inlasm_fence(); }

// Keep the helper macros file-local.
#undef inlasm_sync
#undef inlasm_lwsync
#undef inlasm_eieio
#undef inlasm_isync
#undef inlasm_release
#undef inlasm_acquire
#undef inlasm_fence

#endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
|
607
hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp
Normal file
607
hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp
Normal file
@ -0,0 +1,607 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
 * version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// no precompiled headers
|
||||
#include "assembler_ppc.inline.hpp"
|
||||
#include "classfile/classLoader.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "code/icBuffer.hpp"
|
||||
#include "code/vtableStubs.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "jvm_linux.h"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "mutex_linux.inline.hpp"
|
||||
#include "nativeInst_ppc.hpp"
|
||||
#include "os_share_linux.hpp"
|
||||
#include "prims/jniFastGetField.hpp"
|
||||
#include "prims/jvm.h"
|
||||
#include "prims/jvm_misc.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/extendedPC.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/interfaceSupport.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/javaCalls.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/osThread.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "runtime/thread.inline.hpp"
|
||||
#include "runtime/timer.hpp"
|
||||
#include "utilities/events.hpp"
|
||||
#include "utilities/vmError.hpp"
|
||||
|
||||
// put OS-includes here
|
||||
# include <sys/types.h>
|
||||
# include <sys/mman.h>
|
||||
# include <pthread.h>
|
||||
# include <signal.h>
|
||||
# include <errno.h>
|
||||
# include <dlfcn.h>
|
||||
# include <stdlib.h>
|
||||
# include <stdio.h>
|
||||
# include <unistd.h>
|
||||
# include <sys/resource.h>
|
||||
# include <pthread.h>
|
||||
# include <sys/stat.h>
|
||||
# include <sys/time.h>
|
||||
# include <sys/utsname.h>
|
||||
# include <sys/socket.h>
|
||||
# include <sys/wait.h>
|
||||
# include <pwd.h>
|
||||
# include <poll.h>
|
||||
# include <ucontext.h>
|
||||
|
||||
|
||||
// Return the current C stack pointer of the calling thread.
address os::current_stack_pointer() {
  intptr_t* csp;

  // inline assembly `mr regno(csp), R1_SP':
  // The PPC64 ABI keeps the stack pointer in GPR1; copy it into csp.
  __asm__ __volatile__ ("mr %0, 1":"=r"(csp):);

  return (address) csp;
}
||||
|
||||
// An address that can never alias memory returned by reserve_memory.
char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).

  return (char*) -1;
}

// Nothing to do on Linux/PPC64: no per-thread CPU state is set up here.
void os::initialize_thread(Thread *thread) { }
|
||||
|
||||
// Frame information (pc, sp, fp) retrieved via ucontext
// always looks like a C-frame according to the frame
// conventions in frame_ppc64.hpp (named frame_ppc.hpp in this port).
address os::Linux::ucontext_get_pc(ucontext_t * uc) {
  // On powerpc64, ucontext_t is not selfcontained but contains
  // a pointer to an optional substructure (mcontext_t.regs) containing the volatile
  // registers - NIP, among others.
  // This substructure may or may not be there depending where uc came from:
  // - if uc was handed over as the argument to a sigaction handler, a pointer to the
  //   substructure was provided by the kernel when calling the signal handler, and
  //   regs->nip can be accessed.
  // - if uc was filled by getcontext(), it is undefined - getcontext() does not fill
  //   it because the volatile registers are not needed to make setcontext() work.
  //   Hopefully it was zero'd out beforehand.
  guarantee(uc->uc_mcontext.regs != NULL, "only use ucontext_get_pc in sigaction context");
  return (address)uc->uc_mcontext.regs->nip;
}

// GPR1 holds the stack pointer under the PPC64 ABI.
intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.regs->gpr[1/*REG_SP*/];
}

// No dedicated frame pointer register in the PPC64 frame conventions used
// here; callers must cope with NULL.
intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
  return NULL;
}
|
||||
|
||||
ExtendedPC os::fetch_frame_from_context(void* ucVoid,
|
||||
intptr_t** ret_sp, intptr_t** ret_fp) {
|
||||
|
||||
ExtendedPC epc;
|
||||
ucontext_t* uc = (ucontext_t*)ucVoid;
|
||||
|
||||
if (uc != NULL) {
|
||||
epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
|
||||
if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
|
||||
if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
|
||||
} else {
|
||||
// construct empty ExtendedPC for return value checking
|
||||
epc = ExtendedPC(NULL);
|
||||
if (ret_sp) *ret_sp = (intptr_t *)NULL;
|
||||
if (ret_fp) *ret_fp = (intptr_t *)NULL;
|
||||
}
|
||||
|
||||
return epc;
|
||||
}
|
||||
|
||||
frame os::fetch_frame_from_context(void* ucVoid) {
|
||||
intptr_t* sp;
|
||||
intptr_t* fp;
|
||||
ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
|
||||
return frame(sp, epc.pc());
|
||||
}
|
||||
|
||||
// Walk one step up the C stack.  A zero back-chain word at *sp marks the
// outermost C frame, in which case a null frame is returned.
frame os::get_sender_for_C_frame(frame* fr) {
  if (*fr->sp() == 0) {
    // fr is the last C frame
    return frame(NULL, NULL);
  }
  return frame(fr->sender_sp(), fr->sender_pc());
}


// Return the caller's frame.  The current frame is built with a dummy pc
// (0x8) and immediately unwound once, so the result carries the real
// sender sp/pc from the back chain.
frame os::current_frame() {
  // Dereference the stack pointer to follow the back chain one level.
  intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer());
  // hack.
  frame topframe(csp, (address)0x8);
  // return sender of current topframe which hopefully has pc != NULL.
  return os::get_sender_for_C_frame(&topframe);
}
|
||||
|
||||
// Utility functions
|
||||
|
||||
// Central signal handler for the Linux/PPC64 port.  Classifies the incoming
// signal and either (a) redirects execution to a runtime stub by rewriting
// the saved NIP, (b) consumes the signal (returns true), (c) defers to a
// chained handler, or (d) falls through to VMError::report_and_die().
// Returns nonzero when the signal was handled.
extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  SignalHandlerMark shm(t);

  // Note: it's not uncommon that JNI code uses signal/sigset to install
  // then restore certain signal handler (e.g. to temporarily block SIGPIPE,
  // or have a SIGILL handler when detecting CPU type). When that happens,
  // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To
  // avoid unnecessary crash when libjsig is not preloaded, try handle signals
  // that do not require siginfo/ucontext first.

  if (sig == SIGPIPE) {
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        warning("Ignoring SIGPIPE - see bug 4229104");
      }
      return true;
    }
  }

  // NOTE(review): vmthread is assigned below but not otherwise used in
  // this handler.
  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL) {
      if(t->is_Java_thread()) {
        thread = (JavaThread*)t;
      } else if(t->is_VM_thread()) {
        vmthread = (VMThread *)t;
      }
    }
  }

  // Moved SafeFetch32 handling outside thread!=NULL conditional block to make
  // it work if no associated JavaThread object exists.
  if (uc) {
    address const pc = os::Linux::ucontext_get_pc(uc);
    if (pc && StubRoutines::is_safefetch_fault(pc)) {
      // Resume in the SafeFetch continuation stub by patching the saved pc.
      uc->uc_mcontext.regs->nip = (unsigned long)StubRoutines::continuation_for_safefetch_fault(pc);
      return true;
    }
  }

  // decide if this trap can be handled by a stub
  address stub = NULL;
  address pc   = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Linux::ucontext_get_pc(uc);

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      // Si_addr may not be valid due to a bug in the linux-ppc64 kernel (see
      // comment below). Use get_stack_bang_address instead of si_addr.
      address addr = ((NativeInstruction*)pc)->get_stack_bang_address(uc);

      // Check if fault address is within thread stack.
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception.
            // Guard pages will be reenabled while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the vm or native code. Return and try to finish.
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {
          // Fatal red zone violation. Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");

          // This is a likely cause, but hard to verify. Let's just print
          // it as a hint.
          tty->print_raw_cr("Please check if any of your loaded .so files has "
                            "enabled executable stack (see man page execstack(8))");
        } else {
          // Accessing stack address below sp may cause SEGV if current
          // thread has MAP_GROWSDOWN stack. This should only happen when
          // current thread was created by user code with MAP_GROWSDOWN flag
          // and then attached to VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Linux::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return 1;
            }
            thread->osthread()->clear_expanding_stack();
          } else {
            fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // A VM-related SIGILL may only occur if we are not in the zero page.
      // On AIX, we get a SIGILL if we jump to 0x0 or to somewhere else
      // in the zero page, because it is filled with 0x0. We ignore
      // explicit SIGILLs in the zero page.
      if (sig == SIGILL && (pc < (address) 0x200)) {
        if (TraceTraps)
          tty->print_raw_cr("SIGILL happened inside zero page.");
        goto report_and_die;
      }

      // Handle signal from NativeJump::patch_verified_entry().
      if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
          (!TrapBasedNotEntrantChecks && sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
        if (TraceTraps)
          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        stub = SharedRuntime::get_handle_wrong_method_stub();
      }

      else if (sig == SIGSEGV &&
               // A linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults
               // in 64bit mode (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6),
               // especially when we try to read from the safepoint polling page. So the check
               //   (address)info->si_addr == os::get_standard_polling_page()
               // doesn't work for us. We use:
               ((NativeInstruction*)pc)->is_safepoint_poll()) {
        if (TraceTraps)
          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc);
        stub = SharedRuntime::get_poll_stub(pc);
      }

      // SIGTRAP-based ic miss check in compiled code.
      else if (sig == SIGTRAP && TrapBasedICMissChecks &&
               nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) {
        if (TraceTraps)
          tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
        stub = SharedRuntime::get_ic_miss_stub();
      }

      // SIGTRAP-based implicit null check in compiled code.
      else if (sig == SIGTRAP && TrapBasedNullChecks &&
               nativeInstruction_at(pc)->is_sigtrap_null_check()) {
        if (TraceTraps)
          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }

      // SIGSEGV-based implicit null check in compiled code.
      else if (sig == SIGSEGV && ImplicitNullChecks &&
               CodeCache::contains((void*) pc) &&
               !MacroAssembler::needs_explicit_null_check((intptr_t) info->si_addr)) {
        if (TraceTraps)
          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc);
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }

#ifdef COMPILER2
      // SIGTRAP-based implicit range check in compiled code.
      else if (sig == SIGTRAP && TrapBasedRangeChecks &&
               nativeInstruction_at(pc)->is_sigtrap_range_check()) {
        if (TraceTraps)
          tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
#endif
      else if (sig == SIGBUS) {
        // BugId 4454115: A read from a MappedByteBuffer can fault here if the
        // underlying file has been truncated. Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          // We don't really need a stub here! Just set the pending exception and
          // continue at the next instruction after the faulting read. Returning
          // garbage from this read is ok.
          thread->set_pending_unsafe_access_error();
          // Skip the faulting 4-byte instruction (all PPC instructions are 4 bytes).
          uc->uc_mcontext.regs->nip = ((unsigned long)pc) + 4;
          return true;
        }
      }
    }

    else { // thread->thread_state() != _thread_in_Java
      if (sig == SIGILL && VM_Version::is_determine_features_test_running()) {
        // SIGILL must be caused by VM_Version::determine_features().
        *(int *)pc = 0; // patch instruction to 0 to indicate that it causes a SIGILL,
                        // flushing of icache is not necessary.
        stub = pc + 4;  // continue with next instruction.
      }
      else if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && thread->doing_unsafe_access()) {
        // We don't really need a stub here! Just set the pending exception and
        // continue at the next instruction after the faulting read. Returning
        // garbage from this read is ok.
        thread->set_pending_unsafe_access_error();
        uc->uc_mcontext.regs->nip = ((unsigned long)pc) + 4;
        return true;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if ((sig == SIGSEGV) &&
        // Si_addr may not be valid due to a bug in the linux-ppc64 kernel (see comment above).
        // Use is_memory_serialization instead of si_addr.
        ((NativeInstruction*)pc)->is_memory_serialization(thread, ucVoid)) {
      // Synchronization problem in the pseudo memory barrier code (bug id 6546278)
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  if (stub != NULL) {
    // Save all thread context in case we need to restore it.
    if (thread != NULL) thread->set_saved_exception_pc(pc);
    // Redirect execution to the chosen stub by patching the saved pc.
    uc->uc_mcontext.regs->nip = (unsigned long)stub;
    return true;
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
    return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (pc == NULL && uc != NULL) {
    pc = os::Linux::ucontext_get_pc(uc);
  }

report_and_die:
  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError err(t, sig, pc, info, ucVoid);
  err.report_and_die();

  ShouldNotReachHere();
  return false;
}
|
||||
|
||||
// Per-thread FPU setup: clear the FP exception enable bits so FP traps are
// not delivered (mtfsfi writes FPSCR field 6 with 0).
void os::Linux::init_thread_fpu_state(void) {
  // Disable FP exceptions.
  __asm__ __volatile__ ("mtfsfi 6,0");
}

// No FPU control word to save on this platform.
int os::Linux::get_fpu_control_word(void) {
  // x86 has problems with FPU precision after pthread_cond_timedwait().
  // nothing to do on ppc64.
  return 0;
}

// No FPU control word to restore on this platform.
void os::Linux::set_fpu_control_word(int fpu_control) {
  // x86 has problems with FPU precision after pthread_cond_timedwait().
  // nothing to do on ppc64.
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
// thread stack

// Minimum usable stack size the VM will accept for a thread on this platform.
size_t os::Linux::min_stack_allowed = 768*K;

bool os::Linux::supports_variable_stack_size() { return true; }

// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
  // Notice that the setting for compiler threads here have no impact
  // because of the strange 'fallback logic' in os::create_thread().
  // Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
  // specify a different stack size for compiler threads!
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
  return s;
}

// Guard area reserved below each thread stack; two pages regardless of
// thread type.
size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  return 2 * page_size();
}
|
||||
|
||||
// Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\  JavaThread created by VM does not have glibc
//    |    glibc guard page    | - guard, attached Java thread usually has
//    |                        |/  1 page glibc guard.
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |  HotSpot Guard Pages   | - red and yellow pages
//    |                        |/
//    +------------------------+ JavaThread::stack_yellow_zone_base()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// Non-Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\
//    |  glibc guard page      | - usually 1 page
//    |                        |/
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// ** P1 (aka bottom) and size ( P2 = P1 - size) are the address and stack size returned from
//    pthread_attr_getstack()

// Determine the bottom address and size of the calling thread's stack.
// Aborts the VM if the information cannot be obtained.
static void current_stack_region(address * bottom, size_t * size) {
  if (os::Linux::is_initial_thread()) {
    // initial thread needs special handling because pthread_getattr_np()
    // may return bogus value.
    *bottom = os::Linux::initial_thread_stack_bottom();
    *size = os::Linux::initial_thread_stack_size();
  } else {
    pthread_attr_t attr;

    int rslt = pthread_getattr_np(pthread_self(), &attr);

    // JVM needs to know exact stack location, abort if it fails
    if (rslt != 0) {
      if (rslt == ENOMEM) {
        // Distinguish address-space exhaustion from other failures.
        vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
      } else {
        fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
      }
    }

    if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
      fatal("Can not locate current stack attributes!");
    }

    // Release resources attached to the attr object.
    pthread_attr_destroy(&attr);

  }
  // Sanity: the caller's own sp must lie inside the reported region.
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}
|
||||
|
||||
address os::current_stack_base() {
|
||||
address bottom;
|
||||
size_t size;
|
||||
current_stack_region(&bottom, &size);
|
||||
return (bottom + size);
|
||||
}
|
||||
|
||||
size_t os::current_stack_size() {
|
||||
// stack size includes normal stack and HotSpot guard pages
|
||||
address bottom;
|
||||
size_t size;
|
||||
current_stack_region(&bottom, &size);
|
||||
return size;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

// Dump pc/lr/ctr, all 32 GPRs, a hex dump of the top of stack, and the
// instructions around pc for the hs_err report.
void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t* uc = (ucontext_t*)context;

  st->print_cr("Registers:");
  st->print("pc =" INTPTR_FORMAT " ", uc->uc_mcontext.regs->nip);
  st->print("lr =" INTPTR_FORMAT " ", uc->uc_mcontext.regs->link);
  st->print("ctr=" INTPTR_FORMAT " ", uc->uc_mcontext.regs->ctr);
  st->cr();
  // Three registers per output row.
  for (int i = 0; i < 32; i++) {
    st->print("r%-2d=" INTPTR_FORMAT " ", i, uc->uc_mcontext.regs->gpr[i]);
    if (i % 3 == 2) st->cr();
  }
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 128), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Linux::ucontext_get_pc(uc);
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 64, pc + 64, /*instrsize=*/4);
  st->cr();
}

// Print each GPR together with a description of the memory it points at
// (via print_location) for the hs_err report.
void os::print_register_info(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is only for the "general purpose" registers
  for (int i = 0; i < 32; i++) {
    st->print("r%-2d=", i);
    print_location(st, uc->uc_mcontext.regs->gpr[i]);
  }
  st->cr();
}
|
||||
|
||||
extern "C" {
  // Spin-loop hint used by the VM's spin locks.  No cheap pause primitive is
  // wired up for this platform, so this is a no-op; returning 0 tells the
  // caller that no pause was performed.
  int SpinPause() {
    return 0;
  }
}
|
||||
|
||||
#ifndef PRODUCT
// Debug-only check that the current C stack pointer satisfies the
// platform's StackAlignmentInBytes requirement.
void os::verify_stack_alignment() {
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif
|
35
hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.hpp
Normal file
35
hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.hpp
Normal file
@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_CPU_LINUX_PPC_VM_OS_LINUX_PPC_HPP
#define OS_CPU_LINUX_PPC_VM_OS_LINUX_PPC_HPP

  // No process-wide FPU setup needed on Linux/PPC64 (per-thread FPU state is
  // handled by os::Linux::init_thread_fpu_state()).
  static void setup_fpu() {}

  // Used to register dynamic code cache area with the OS
  // Note: Currently only used in 64 bit Windows implementations
  static bool register_code_area(char *low, char *high) { return true; }

#endif // OS_CPU_LINUX_PPC_VM_OS_LINUX_PPC_HPP
|
@ -0,0 +1,50 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
|
||||
#define OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
|
||||
|
||||
#include "runtime/prefetch.hpp"
|
||||
|
||||
|
||||
inline void Prefetch::read(void *loc, intx interval) {
|
||||
__asm__ __volatile__ (
|
||||
" dcbt 0, %0 \n"
|
||||
:
|
||||
: /*%0*/"r" ( ((address)loc) +((long)interval) )
|
||||
//:
|
||||
);
|
||||
}
|
||||
|
||||
inline void Prefetch::write(void *loc, intx interval) {
|
||||
__asm__ __volatile__ (
|
||||
" dcbtst 0, %0 \n"
|
||||
:
|
||||
: /*%0*/"r" ( ((address)loc) +((long)interval) )
|
||||
//:
|
||||
);
|
||||
}
|
||||
|
||||
#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_OJDKPPC_HPP
|
39
hotspot/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.cpp
Normal file
39
hotspot/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.cpp
Normal file
@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "runtime/threadLocalStorage.hpp"
|
||||
|
||||
void ThreadLocalStorage::generate_code_for_get_thread() {
|
||||
// nothing we can do here for user-level thread
|
||||
}
|
||||
|
||||
void ThreadLocalStorage::pd_init() {
|
||||
// Nothing to do
|
||||
}
|
||||
|
||||
void ThreadLocalStorage::pd_set_thread(Thread* thread) {
|
||||
os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
|
||||
}
|
36
hotspot/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.hpp
Normal file
36
hotspot/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.hpp
Normal file
@ -0,0 +1,36 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
|
||||
#define OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
|
||||
|
||||
// Processor dependent parts of ThreadLocalStorage
|
||||
|
||||
public:
|
||||
static Thread* thread() {
|
||||
return (Thread *) os::thread_local_storage_at(thread_index());
|
||||
}
|
||||
|
||||
#endif // OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
|
36
hotspot/src/os_cpu/linux_ppc/vm/thread_linux_ppc.cpp
Normal file
36
hotspot/src/os_cpu/linux_ppc/vm/thread_linux_ppc.cpp
Normal file
@ -0,0 +1,36 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "thread_linux.inline.hpp"
|
||||
|
||||
// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/PPC.
|
||||
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
|
||||
Unimplemented();
|
||||
return false;
|
||||
}
|
||||
|
||||
void JavaThread::cache_global_variables() { }
|
83
hotspot/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp
Normal file
83
hotspot/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp
Normal file
@ -0,0 +1,83 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP
|
||||
#define OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP
|
||||
|
||||
private:
|
||||
|
||||
void pd_initialize() {
|
||||
_anchor.clear();
|
||||
_last_interpreter_fp = NULL;
|
||||
}
|
||||
|
||||
// The `last' frame is the youngest Java frame on the thread's stack.
|
||||
frame pd_last_frame() {
|
||||
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
|
||||
|
||||
intptr_t* sp = last_Java_sp();
|
||||
address pc = _anchor.last_Java_pc();
|
||||
|
||||
// Last_Java_pc ist not set, if we come here from compiled code.
|
||||
if (pc == NULL) {
|
||||
pc = (address) *(sp + 2);
|
||||
}
|
||||
|
||||
return frame(sp, pc);
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
void set_base_of_stack_pointer(intptr_t* base_sp) {}
|
||||
intptr_t* base_of_stack_pointer() { return NULL; }
|
||||
void record_base_of_stack_pointer() {}
|
||||
|
||||
// These routines are only used on cpu architectures that
|
||||
// have separate register stacks (Itanium).
|
||||
static bool register_stack_overflow() { return false; }
|
||||
static void enable_register_stack_guard() {}
|
||||
static void disable_register_stack_guard() {}
|
||||
|
||||
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava);
|
||||
|
||||
protected:
|
||||
|
||||
// -Xprof support
|
||||
//
|
||||
// In order to find the last Java fp from an async profile
|
||||
// tick, we store the current interpreter fp in the thread.
|
||||
// This value is only valid while we are in the C++ interpreter
|
||||
// and profiling.
|
||||
intptr_t *_last_interpreter_fp;
|
||||
|
||||
public:
|
||||
|
||||
static ByteSize last_interpreter_fp_offset() {
|
||||
return byte_offset_of(JavaThread, _last_interpreter_fp);
|
||||
}
|
||||
|
||||
intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
|
||||
|
||||
#endif // OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP
|
55
hotspot/src/os_cpu/linux_ppc/vm/vmStructs_linux_ppc.hpp
Normal file
55
hotspot/src/os_cpu/linux_ppc/vm/vmStructs_linux_ppc.hpp
Normal file
@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2012, 2013 SAP AG. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_CPU_LINUX_PPC_VM_VMSTRUCTS_LINUX_PPC_HPP
|
||||
#define OS_CPU_LINUX_PPC_VM_VMSTRUCTS_LINUX_PPC_HPP
|
||||
|
||||
// These are the OS and CPU-specific fields, types and integer
|
||||
// constants required by the Serviceability Agent. This file is
|
||||
// referenced by vmStructs.cpp.
|
||||
|
||||
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
|
||||
\
|
||||
/******************************/ \
|
||||
/* Threads (NOTE: incomplete) */ \
|
||||
/******************************/ \
|
||||
nonstatic_field(OSThread, _thread_id, pid_t) \
|
||||
nonstatic_field(OSThread, _pthread_id, pthread_t)
|
||||
|
||||
|
||||
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
|
||||
\
|
||||
/**********************/ \
|
||||
/* Posix Thread IDs */ \
|
||||
/**********************/ \
|
||||
\
|
||||
declare_integer_type(pid_t) \
|
||||
declare_unsigned_integer_type(pthread_t)
|
||||
|
||||
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
|
||||
|
||||
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
|
||||
|
||||
#endif // OS_CPU_LINUX_PPC_VM_VMSTRUCTS_LINUX_PPC_HPP
|
@ -3614,7 +3614,7 @@ class CommandLineFlags {
|
||||
NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)), \
|
||||
"Address to allocate shared memory region for class data") \
|
||||
\
|
||||
diagnostic(bool, EnableInvokeDynamic, true PPC64_ONLY(&& false), \
|
||||
diagnostic(bool, EnableInvokeDynamic, true, \
|
||||
"support JSR 292 (method handles, invokedynamic, " \
|
||||
"anonymous classes") \
|
||||
\
|
||||
|
Loading…
x
Reference in New Issue
Block a user