8274851: [PPC64] Port zgc to linux on ppc64le
Reviewed-by: ihse, pliden, mdoerr, eosterlund
parent cf828673a9, commit 337b73a459
@@ -357,6 +357,13 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_ZGC],
       AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
       AVAILABLE=false
     fi
+  elif test "x$OPENJDK_TARGET_CPU" = "xppc64le"; then
+    if test "x$OPENJDK_TARGET_OS" = "xlinux"; then
+      AC_MSG_RESULT([yes])
+    else
+      AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
+      AVAILABLE=false
+    fi
   else
     AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
     AVAILABLE=false
@@ -155,6 +155,7 @@ ifeq ($(call check-jvm-feature, compiler2), true)
   ifeq ($(call check-jvm-feature, zgc), true)
     AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
       $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU).ad \
+      $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU_ARCH).ad \
       )))
   endif
@@ -47,6 +47,9 @@ class Address {
   Address(Register b, address d = 0)
     : _base(b), _index(noreg), _disp((intptr_t)d) {}

+  Address(Register b, ByteSize d)
+    : _base(b), _index(noreg), _disp((intptr_t)d) {}
+
   Address(Register b, intptr_t d)
     : _base(b), _index(noreg), _disp(d) {}
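// Illustrative usage (not part of the patch): the new ByteSize overload lets
// callers pass typed offsets, such as ZGC's thread-local bad-mask slot,
// without a manual cast:
//
//   Address bad_mask_slot(R16_thread, ZThreadLocalData::address_bad_mask_offset());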
src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp (new file, 567 lines)
@@ -0,0 +1,567 @@
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/register.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "memory/resourceArea.hpp"
#include "register_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif // COMPILER2

#undef __
#define __ masm->

void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Register base, RegisterOrConstant ind_or_offs, Register dst,
                                   Register tmp1, Register tmp2,
                                   MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) {
  __ block_comment("load_at (zgc) {");

  // Check whether a special gc barrier is required for this particular load
  // (e.g. whether it's a reference load or not)
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
                                 tmp1, tmp2, preservation_level, L_handle_null);
    return;
  }

  if (ind_or_offs.is_register()) {
    assert_different_registers(base, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg);
    assert_different_registers(dst, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg);
  } else {
    assert_different_registers(base, tmp1, tmp2, R0, noreg);
    assert_different_registers(dst, tmp1, tmp2, R0, noreg);
  }

  /* ==== Load the pointer using the standard implementation for the actual heap access
          and the decompression of compressed pointers ==== */
  // Result of 'load_at' (standard implementation) will be written back to 'dst'.
  // As 'base' is required for the C-call, it must be reserved in case of a register clash.
  Register saved_base = base;
  if (base == dst) {
    __ mr(tmp2, base);
    saved_base = tmp2;
  }

  BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
                               tmp1, noreg, preservation_level, L_handle_null);

  /* ==== Check whether pointer is dirty ==== */
  Label skip_barrier;

  // Load bad mask into scratch register.
  __ ld(tmp1, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);

  // The color bits of the to-be-tested pointer do not have to be equivalent to the 'bad_mask' testing bits.
  // A pointer is classified as dirty if any of the color bits that also match the bad mask is set.
  // Conversely, it follows that the logical AND of the bad mask and the pointer must be zero
  // if the pointer is not dirty.
  // Only dirty pointers must be processed by this barrier, so we can skip it in case the latter condition holds true.
  __ and_(tmp1, tmp1, dst);
  __ beq(CCR0, skip_barrier);

  /* ==== Invoke barrier ==== */
  int nbytes_save = 0;

  const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
  const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
  const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;

  const bool preserve_R3 = dst != R3_ARG1;

  if (needs_frame) {
    if (preserve_gp_registers) {
      nbytes_save = (preserve_fp_registers
                     ? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs
                     : MacroAssembler::num_volatile_gp_regs) * BytesPerWord;
      nbytes_save -= preserve_R3 ? 0 : BytesPerWord;
      __ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
    }

    __ save_LR_CR(tmp1);
    __ push_frame_reg_args(nbytes_save, tmp1);
  }

  // Setup arguments
  if (saved_base != R3_ARG1) {
    __ mr_if_needed(R3_ARG1, dst);
    __ add(R4_ARG2, ind_or_offs, saved_base);
  } else if (dst != R4_ARG2) {
    __ add(R4_ARG2, ind_or_offs, saved_base);
    __ mr(R3_ARG1, dst);
  } else {
    __ add(R0, ind_or_offs, saved_base);
    __ mr(R3_ARG1, dst);
    __ mr(R4_ARG2, R0);
  }

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));

  Register result = R3_RET;
  if (needs_frame) {
    __ pop_frame();
    __ restore_LR_CR(tmp1);

    if (preserve_R3) {
      __ mr(R0, R3_RET);
      result = R0;
    }

    if (preserve_gp_registers) {
      __ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
    }
  }
  __ mr_if_needed(dst, result);

  __ bind(skip_barrier);
  __ block_comment("} load_at (zgc)");
}
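
// For orientation (illustrative sketch, not from the patch): the fast path
// emitted above reduces to a single AND plus a conditional branch. In plain
// C++ the test is roughly
//
//   bool needs_barrier(uintptr_t ptr, uintptr_t bad_mask) {
//     return (ptr & bad_mask) != 0;   // any bad color bit set -> call runtime
//   }
//
// where 'bad_mask' is the current thread's bad-mask word loaded from
// ZThreadLocalData above.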

#ifdef ASSERT
// The Z store barrier only verifies the pointers it is operating on and is thus purely a debugging measure.
void ZBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register base, RegisterOrConstant ind_or_offs, Register val,
                                    Register tmp1, Register tmp2, Register tmp3,
                                    MacroAssembler::PreservationLevel preservation_level) {
  __ block_comment("store_at (zgc) {");

  // If the 'val' register is 'noreg', the to-be-stored value is a null pointer.
  if (is_reference_type(type) && val != noreg) {
    __ ld(tmp1, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
    __ and_(tmp1, tmp1, val);
    __ asm_assert_eq("Detected dirty pointer on the heap in Z store barrier");
  }

  // Store value
  BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level);

  __ block_comment("} store_at (zgc)");
}
#endif // ASSERT

void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType component_type,
                                              Register src, Register dst, Register count,
                                              Register preserve1, Register preserve2) {
  __ block_comment("arraycopy_prologue (zgc) {");

  /* ==== Check whether a special gc barrier is required for this particular load ==== */
  if (!is_reference_type(component_type)) {
    return;
  }

  Label skip_barrier;

  // Fast path: Array is of length zero
  __ cmpdi(CCR0, count, 0);
  __ beq(CCR0, skip_barrier);

  /* ==== Ensure register sanity ==== */
  Register tmp_R11 = R11_scratch1;

  assert_different_registers(src, dst, count, tmp_R11, noreg);
  if (preserve1 != noreg) {
    // Not technically required, but unlikely to be intended.
    assert_different_registers(preserve1, preserve2);
  }

  /* ==== Invoke barrier (slowpath) ==== */
  int nbytes_save = 0;

  {
    assert(!noreg->is_volatile(), "sanity");

    if (preserve1->is_volatile()) {
      __ std(preserve1, -BytesPerWord * ++nbytes_save, R1_SP);
    }

    if (preserve2->is_volatile() && preserve1 != preserve2) {
      __ std(preserve2, -BytesPerWord * ++nbytes_save, R1_SP);
    }

    __ std(src, -BytesPerWord * ++nbytes_save, R1_SP);
    __ std(dst, -BytesPerWord * ++nbytes_save, R1_SP);
    __ std(count, -BytesPerWord * ++nbytes_save, R1_SP);

    __ save_LR_CR(tmp_R11);
    __ push_frame_reg_args(nbytes_save, tmp_R11);
  }

  // ZBarrierSetRuntime::load_barrier_on_oop_array_addr(src, count)
  if (count == R3_ARG1) {
    if (src == R4_ARG2) {
      // Arguments are provided in reverse order
      __ mr(tmp_R11, count);
      __ mr(R3_ARG1, src);
      __ mr(R4_ARG2, tmp_R11);
    } else {
      __ mr(R4_ARG2, count);
      __ mr(R3_ARG1, src);
    }
  } else {
    __ mr_if_needed(R3_ARG1, src);
    __ mr_if_needed(R4_ARG2, count);
  }

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr());

  __ pop_frame();
  __ restore_LR_CR(tmp_R11);

  {
    __ ld(count, -BytesPerWord * nbytes_save--, R1_SP);
    __ ld(dst, -BytesPerWord * nbytes_save--, R1_SP);
    __ ld(src, -BytesPerWord * nbytes_save--, R1_SP);

    if (preserve2->is_volatile() && preserve1 != preserve2) {
      __ ld(preserve2, -BytesPerWord * nbytes_save--, R1_SP);
    }

    if (preserve1->is_volatile()) {
      __ ld(preserve1, -BytesPerWord * nbytes_save--, R1_SP);
    }
  }

  __ bind(skip_barrier);

  __ block_comment("} arraycopy_prologue (zgc)");
}
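
// Illustrative note (not from the patch): the spill/reload pairs above are
// deliberately mirrored. Saves pre-increment 'nbytes_save', restores run in
// the exact reverse order while post-decrementing, so every register returns
// to the slot it was spilled to:
//
//   save:    __ std(reg, -BytesPerWord * ++nbytes_save, R1_SP);  // slots 1, 2, 3, ...
//   restore: __ ld (reg, -BytesPerWord * nbytes_save--, R1_SP);  // slots ..., 3, 2, 1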

void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
                                                         Register obj, Register tmp, Label& slowpath) {
  __ block_comment("try_resolve_jobject_in_native (zgc) {");

  assert_different_registers(jni_env, obj, tmp);

  // Resolve the pointer using the standard implementation for weak tag handling and pointer verification.
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath);

  // Check whether pointer is dirty.
  __ ld(tmp,
        in_bytes(ZThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()),
        jni_env);

  __ and_(tmp, obj, tmp);
  __ bne(CCR0, slowpath);

  __ block_comment("} try_resolve_jobject_in_native (zgc)");
}

#undef __

#ifdef COMPILER1
#define __ ce->masm()->

// Code emitted by LIR node "LIR_OpZLoadBarrierTest" which in turn is emitted by ZBarrierSetC1::load_barrier.
// The actual compare and branch instructions are represented as stand-alone LIR nodes.
void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
                                                         LIR_Opr ref) const {
  __ block_comment("load_barrier_test (zgc) {");

  __ ld(R0, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
  __ andr(R0, R0, ref->as_pointer_register());
  __ cmpdi(CCR5 /* as mandated by LIR node */, R0, 0);

  __ block_comment("} load_barrier_test (zgc)");
}

// Code emitted by code stub "ZLoadBarrierStubC1" which in turn is emitted by ZBarrierSetC1::load_barrier.
// Invokes the runtime stub which is defined just below.
void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
                                                         ZLoadBarrierStubC1* stub) const {
  __ block_comment("c1_load_barrier_stub (zgc) {");

  __ bind(*stub->entry());

  /* ==== Determine relevant data registers and ensure register sanity ==== */
  Register ref = stub->ref()->as_register();
  Register ref_addr = noreg;

  // Determine reference address
  if (stub->tmp()->is_valid()) {
    // 'tmp' register is given, so address might have an index or a displacement.
    ce->leal(stub->ref_addr(), stub->tmp());
    ref_addr = stub->tmp()->as_pointer_register();
  } else {
    // 'tmp' register is not given, so address must have neither an index nor a displacement.
    // The address' base register is thus usable as-is.
    assert(stub->ref_addr()->as_address_ptr()->disp() == 0, "illegal displacement");
    assert(!stub->ref_addr()->as_address_ptr()->index()->is_valid(), "illegal index");

    ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
  }

  assert_different_registers(ref, ref_addr, R0, noreg);

  /* ==== Invoke stub ==== */
  // Pass arguments via stack. The stack pointer will be bumped by the stub.
  __ std(ref, (intptr_t) -1 * BytesPerWord, R1_SP);
  __ std(ref_addr, (intptr_t) -2 * BytesPerWord, R1_SP);

  __ load_const_optimized(R0, stub->runtime_stub());
  __ call_stub(R0);

  // The runtime stub passes the result via the R0 register, overriding the previously-loaded stub address.
  __ mr_if_needed(ref, R0);
  __ b(*stub->continuation());

  __ block_comment("} c1_load_barrier_stub (zgc)");
}

#undef __
#define __ sasm->

// Code emitted by runtime code stub which in turn is emitted by ZBarrierSetC1::generate_c1_runtime_stubs.
void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
                                                                 DecoratorSet decorators) const {
  __ block_comment("c1_load_barrier_runtime_stub (zgc) {");

  const int stack_parameters = 2;
  const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_parameters) * BytesPerWord;

  __ save_volatile_gprs(R1_SP, -nbytes_save);
  __ save_LR_CR(R0);

  // Load arguments back again from the stack.
  __ ld(R3_ARG1, (intptr_t) -1 * BytesPerWord, R1_SP); // ref
  __ ld(R4_ARG2, (intptr_t) -2 * BytesPerWord, R1_SP); // ref_addr

  __ push_frame_reg_args(nbytes_save, R0);

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));

  __ verify_oop(R3_RET, "Bad pointer after barrier invocation");
  __ mr(R0, R3_RET);

  __ pop_frame();
  __ restore_LR_CR(R3_RET);
  __ restore_volatile_gprs(R1_SP, -nbytes_save);

  __ blr();

  __ block_comment("} c1_load_barrier_runtime_stub (zgc)");
}

#undef __
#endif // COMPILER1
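
// Illustrative note (not from the patch): the C1 stub and the runtime stub
// agree on an ad-hoc calling convention; both arguments travel in the two
// words directly below the caller's SP:
//
//   stub:          __ std(ref,      -1 * BytesPerWord, R1_SP);
//                  __ std(ref_addr, -2 * BytesPerWord, R1_SP);
//   runtime stub:  __ ld(R3_ARG1,   -1 * BytesPerWord, R1_SP);  // ref
//                  __ ld(R4_ARG2,   -2 * BytesPerWord, R1_SP);  // ref_addr
//
// The two extra words counted via 'stack_parameters' keep the frame pushed by
// the runtime stub from overwriting these slots.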

#ifdef COMPILER2

OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) const {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if ((vm_reg->is_Register() || vm_reg->is_FloatRegister()) && (opto_reg & 1) != 0) {
    return OptoReg::Bad;
  }

  return opto_reg;
}
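
// Background (illustrative, not from the patch): HotSpot describes each
// 64-bit PPC register to C2 as two consecutive 32-bit VMReg slots, so only
// the even-numbered half names the physical register (compare the
// is_even(value()) assertion in as_Register() later in this patch):
//
//   VMReg lo = R3->as_VMReg();   // even slot; maps back via value() >> 1
//   VMReg hi = lo->next();       // odd slot; rejected by the (opto_reg & 1) filter
//
// Filtering the odd halves keeps the live-register iteration below from
// visiting the same register twice.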

#define __ _masm->

class ZSaveLiveRegisters {

  private:
    MacroAssembler* _masm;
    RegMask _reg_mask;
    Register _result_reg;

  public:
    ZSaveLiveRegisters(MacroAssembler *masm, ZLoadBarrierStubC2 *stub)
      : _masm(masm), _reg_mask(stub->live()), _result_reg(stub->ref()) {

      const int total_regs_amount = iterate_over_register_mask(ACTION_SAVE);

      __ save_LR_CR(R0);
      __ push_frame_reg_args(total_regs_amount * BytesPerWord, R0);
    }

    ~ZSaveLiveRegisters() {
      __ pop_frame();
      __ restore_LR_CR(R0);

      iterate_over_register_mask(ACTION_RESTORE);
    }

  private:
    enum IterationAction : int {
      ACTION_SAVE = 0,
      ACTION_RESTORE = 1
    };

    int iterate_over_register_mask(IterationAction action) {
      int reg_save_index = 0;
      RegMaskIterator live_regs_iterator(_reg_mask);

      while (live_regs_iterator.has_next()) {
        const OptoReg::Name opto_reg = live_regs_iterator.next();

        // Filter out stack slots (spilled registers, i.e., stack-allocated registers).
        if (!OptoReg::is_reg(opto_reg)) {
          continue;
        }

        const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
        if (vm_reg->is_Register()) {
          Register std_reg = vm_reg->as_Register();

          // '_result_reg' will hold the end result of the operation. Its content must thus not be preserved.
          if (std_reg == _result_reg) {
            continue;
          }

          if (std_reg->encoding() >= R2->encoding() && std_reg->encoding() <= R12->encoding()) {
            reg_save_index++;

            if (action == ACTION_SAVE) {
              _masm->std(std_reg, (intptr_t) -reg_save_index * BytesPerWord, R1_SP);
            } else if (action == ACTION_RESTORE) {
              _masm->ld(std_reg, (intptr_t) -reg_save_index * BytesPerWord, R1_SP);
            } else {
              fatal("Sanity");
            }
          }
        } else if (vm_reg->is_FloatRegister()) {
          FloatRegister fp_reg = vm_reg->as_FloatRegister();
          if (fp_reg->encoding() >= F0->encoding() && fp_reg->encoding() <= F13->encoding()) {
            reg_save_index++;

            if (action == ACTION_SAVE) {
              _masm->stfd(fp_reg, (intptr_t) -reg_save_index * BytesPerWord, R1_SP);
            } else if (action == ACTION_RESTORE) {
              _masm->lfd(fp_reg, (intptr_t) -reg_save_index * BytesPerWord, R1_SP);
            } else {
              fatal("Sanity");
            }
          }
        } else if (vm_reg->is_ConditionRegister()) {
          // NOP. Condition registers are covered by save_LR_CR.
        } else {
          if (vm_reg->is_VectorRegister()) {
            fatal("Vector registers are unsupported. Found register %s", vm_reg->name());
          } else if (vm_reg->is_SpecialRegister()) {
            fatal("Special registers are unsupported. Found register %s", vm_reg->name());
          } else {
            fatal("Register type is not known");
          }
        }
      }

      return reg_save_index;
    }
};

#undef __
#define __ _masm->

class ZSetupArguments {
  private:
    MacroAssembler* const _masm;
    const Register _ref;
    const Address _ref_addr;

  public:
    ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _ref(stub->ref()),
      _ref_addr(stub->ref_addr()) {

      // Desired register/argument configuration:
      // _ref: R3_ARG1
      // _ref_addr: R4_ARG2

      // '_ref_addr' can be unspecified. In that case, the barrier will not heal the reference.
      if (_ref_addr.base() == noreg) {
        assert_different_registers(_ref, R0, noreg);

        __ mr_if_needed(R3_ARG1, _ref);
        __ li(R4_ARG2, 0);
      } else {
        assert_different_registers(_ref, _ref_addr.base(), R0, noreg);
        assert(!_ref_addr.index()->is_valid(), "reference addresses must not contain an index component");

        if (_ref != R4_ARG2) {
          // Calculate address first as the address' base register might clash with R4_ARG2
          __ add(R4_ARG2, (intptr_t) _ref_addr.disp(), _ref_addr.base());
          __ mr_if_needed(R3_ARG1, _ref);
        } else if (_ref_addr.base() != R3_ARG1) {
          __ mr(R3_ARG1, _ref);
          __ add(R4_ARG2, (intptr_t) _ref_addr.disp(), _ref_addr.base()); // Clobbering _ref
        } else {
          // Arguments are provided in inverse order (i.e. _ref == R4_ARG2, _ref_addr == R3_ARG1)
          __ mr(R0, _ref);
          __ add(R4_ARG2, (intptr_t) _ref_addr.disp(), _ref_addr.base());
          __ mr(R3_ARG1, R0);
        }
      }
    }
};

#undef __
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
  __ block_comment("generate_c2_load_barrier_stub (zgc) {");

  __ bind(*stub->entry());

  Register ref = stub->ref();
  Address ref_addr = stub->ref_addr();

  assert_different_registers(ref, ref_addr.base());

  {
    ZSaveLiveRegisters save_live_registers(masm, stub);
    ZSetupArguments setup_arguments(masm, stub);

    __ call_VM_leaf(stub->slow_path());
    __ mr_if_needed(ref, R3_RET);
  }

  __ b(*stub->continuation());

  __ block_comment("} generate_c2_load_barrier_stub (zgc)");
}

#undef __
#endif // COMPILER2
src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp (new file, 86 lines)
@@ -0,0 +1,86 @@
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef CPU_PPC_GC_Z_ZBARRIERSETASSEMBLER_PPC_HPP
#define CPU_PPC_GC_Z_ZBARRIERSETASSEMBLER_PPC_HPP

#include "code/vmreg.hpp"
#include "oops/accessDecorators.hpp"
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif // COMPILER2

#ifdef COMPILER1
class LIR_Assembler;
class LIR_OprDesc;
typedef LIR_OprDesc* LIR_Opr;
class StubAssembler;
class ZLoadBarrierStubC1;
#endif // COMPILER1

#ifdef COMPILER2
class Node;
class ZLoadBarrierStubC2;
#endif // COMPILER2

class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
public:
  virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                       Register base, RegisterOrConstant ind_or_offs, Register dst,
                       Register tmp1, Register tmp2,
                       MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = NULL);

#ifdef ASSERT
  virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Register base, RegisterOrConstant ind_or_offs, Register val,
                        Register tmp1, Register tmp2, Register tmp3,
                        MacroAssembler::PreservationLevel preservation_level);
#endif // ASSERT

  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register src, Register dst, Register count,
                                  Register preserve1, Register preserve2);

  virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
                                             Register obj, Register tmp, Label& slowpath);

#ifdef COMPILER1
  void generate_c1_load_barrier_test(LIR_Assembler* ce,
                                     LIR_Opr ref) const;

  void generate_c1_load_barrier_stub(LIR_Assembler* ce,
                                     ZLoadBarrierStubC1* stub) const;

  void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
                                             DecoratorSet decorators) const;
#endif // COMPILER1

#ifdef COMPILER2
  OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const;

  void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const;
#endif // COMPILER2
};

#endif // CPU_PPC_GC_Z_ZBARRIERSETASSEMBLER_PPC_HPP
src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.cpp (new file, 203 lines)
@@ -0,0 +1,203 @@
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/z/zGlobals.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#include <cstddef>

#ifdef LINUX
#include <sys/mman.h>
#endif // LINUX

//
// The overall memory layouts across different Power platforms are similar and only differ with regards to
// the position of the highest addressable bit; the position of the metadata bits and the size of the actual
// addressable heap address space are adjusted accordingly.
//
// The following memory schema shows an exemplary layout in which bit '45' is the highest addressable bit.
// It is assumed that this virtual memory address space layout is predominant on the Power platform.
//
// Standard Address Space & Pointer Layout
// ---------------------------------------
//
//  +--------------------------------+ 0x00007FFFFFFFFFFF (128 TiB - 1)
//  .                                .
//  .                                .
//  .                                .
//  +--------------------------------+ 0x0000140000000000 (20 TiB)
//  |          Remapped View         |
//  +--------------------------------+ 0x0000100000000000 (16 TiB)
//  .                                .
//  +--------------------------------+ 0x00000c0000000000 (12 TiB)
//  |          Marked1 View          |
//  +--------------------------------+ 0x0000080000000000 (8 TiB)
//  |          Marked0 View          |
//  +--------------------------------+ 0x0000040000000000 (4 TiB)
//  .                                .
//  +--------------------------------+ 0x0000000000000000
//
//   6                  4 4  4 4
//   3                  6 5  2 1                                             0
//  +--------------------+----+-----------------------------------------------+
//  |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
//  +--------------------+----+-----------------------------------------------+
//  |                    |    |
//  |                    |    * 41-0 Object Offset (42-bits, 4TB address space)
//  |                    |
//  |                    * 45-42 Metadata Bits (4-bits)  0001 = Marked0      (Address view 4-8TB)
//  |                                                    0010 = Marked1      (Address view 8-12TB)
//  |                                                    0100 = Remapped     (Address view 16-20TB)
//  |                                                    1000 = Finalizable  (Address view N/A)
//  |
//  * 63-46 Fixed (18-bits, always zero)
//

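// Worked example (illustrative, not from the patch): with 42 object-offset
// bits as in the diagram above, the view base addresses follow directly from
// the metadata bit positions:
//
//   const uintptr_t marked0_base  = (uintptr_t)1 << 42;  // 0x0000040000000000 (4 TiB)
//   const uintptr_t marked1_base  = (uintptr_t)1 << 43;  // 0x0000080000000000 (8 TiB)
//   const uintptr_t remapped_base = (uintptr_t)1 << 44;  // 0x0000100000000000 (16 TiB)
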
// Maximum value as per spec (Power ISA v2.07): 2 ^ 60 bytes, i.e. 1 EiB (exbibyte)
static const unsigned int MAXIMUM_MAX_ADDRESS_BIT = 60;

// Most modern Power processors provide an address space with no more than 45 addressable bits,
// that is, an address space of 32 TiB in size.
static const unsigned int DEFAULT_MAX_ADDRESS_BIT = 45;

// Minimum value returned, if probing fails: 64 GiB
static const unsigned int MINIMUM_MAX_ADDRESS_BIT = 36;

// Determines the highest addressable bit of the virtual address space (depends on platform)
// by trying to interact with memory in that address range,
// i.e. by syncing existing mappings (msync) or by temporarily mapping the memory area (mmap).
// If one of those operations succeeds, it is proven that the targeted memory area is within the virtual address space.
//
// To reduce the number of required system calls to a bare minimum, the DEFAULT_MAX_ADDRESS_BIT is intentionally set
// lower than what the ABI would theoretically permit.
// Such an avoidance strategy, however, might impose unnecessary limits on processors that exceed this limit.
// If DEFAULT_MAX_ADDRESS_BIT is addressable, the next higher bit will be tested as well to ensure that
// the assumption made does not artificially restrict the memory availability.
static unsigned int probe_valid_max_address_bit(size_t init_bit, size_t min_bit) {
  assert(init_bit >= min_bit, "Sanity");
  assert(init_bit <= MAXIMUM_MAX_ADDRESS_BIT, "Test bit is outside the assumed address space range");

#ifdef LINUX
  unsigned int max_valid_address_bit = 0;
  void* last_allocatable_address = nullptr;

  const unsigned int page_size = os::vm_page_size();

  for (size_t i = init_bit; i >= min_bit; --i) {
    void* base_addr = (void*) (((unsigned long) 1U) << i);

    /* ==== Try msync-ing already mapped memory page ==== */
    if (msync(base_addr, page_size, MS_ASYNC) == 0) {
      // The page of the given address was synced by the Linux kernel and must thus be both mapped and valid.
      max_valid_address_bit = i;
      break;
    }
    if (errno != ENOMEM) {
      // An unexpected error occurred, i.e. an error not indicating that the targeted memory page is unmapped,
      // but pointing out another type of issue.
      // Even though this should never happen, those issues may come up due to undefined behavior.
#ifdef ASSERT
      fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
#else // ASSERT
      log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
#endif // ASSERT
      continue;
    }

    /* ==== Try mapping memory page on our own ==== */
    last_allocatable_address = mmap(base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
    if (last_allocatable_address != MAP_FAILED) {
      munmap(last_allocatable_address, page_size);
    }

    if (last_allocatable_address == base_addr) {
      // As the Linux kernel mapped exactly the page we have requested, the address must be valid.
      max_valid_address_bit = i;
      break;
    }

    log_info_p(gc, init)("Probe failed for bit '%zu'", i);
  }

  if (max_valid_address_bit == 0) {
    // Probing did not bring up any usable address bit.
    // As an alternative, the VM evaluates the address returned by mmap as it is expected that the reserved page
    // will be close to the probed address that was out-of-range.
    // As per mmap(2), "the kernel [will take] [the address] as a hint about where to
    // place the mapping; on Linux, the mapping will be created at a nearby page boundary".
    // It should thus be a "close enough" approximation to the real virtual memory address space limit.
    //
    // This recovery strategy is only applied in production builds.
    // In debug builds, an assertion in 'ZPlatformAddressOffsetBits' will bail out the VM to indicate that
    // the assumed address space is no longer up-to-date.
    if (last_allocatable_address != MAP_FAILED) {
      const unsigned int bitpos = BitsPerSize_t - count_leading_zeros((size_t) last_allocatable_address) - 1;
      log_info_p(gc, init)("Did not find any valid addresses within the range, using address '%u' instead", bitpos);
      return bitpos;
    }

#ifdef ASSERT
    fatal("Available address space can not be determined");
#else // ASSERT
    log_warning_p(gc)("Cannot determine available address space. Falling back to default value.");
    return DEFAULT_MAX_ADDRESS_BIT;
#endif // ASSERT
  } else {
    if (max_valid_address_bit == init_bit) {
      // A usable address bit has been found immediately.
      // To ensure that the entire virtual address space is exploited, the next highest bit will be tested as well.
      log_info_p(gc, init)("Hit valid address '%u' on first try, retrying with next higher bit", max_valid_address_bit);
      return MAX2(max_valid_address_bit, probe_valid_max_address_bit(init_bit + 1, init_bit + 1));
    }
  }

  log_info_p(gc, init)("Found valid address '%u'", max_valid_address_bit);
  return max_valid_address_bit;
#else // LINUX
  return DEFAULT_MAX_ADDRESS_BIT;
#endif // LINUX
}

size_t ZPlatformAddressOffsetBits() {
  const static unsigned int valid_max_address_offset_bits =
    probe_valid_max_address_bit(DEFAULT_MAX_ADDRESS_BIT, MINIMUM_MAX_ADDRESS_BIT) + 1;
  assert(valid_max_address_offset_bits >= MINIMUM_MAX_ADDRESS_BIT,
         "Highest addressable bit is outside the assumed address space range");

  const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
  const size_t min_address_offset_bits = max_address_offset_bits - 2;
  const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
  const size_t address_offset_bits = log2i_exact(address_offset);

  return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}

size_t ZPlatformAddressMetadataShift() {
  return ZPlatformAddressOffsetBits();
}
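
// Worked example (illustrative, not from the patch), assuming probing found
// bit 45 usable and the VM runs with -Xmx32g and ZVirtualToPhysicalRatio == 16:
//
//   valid_max_address_offset_bits = 45 + 1 = 46
//   max_address_offset_bits       = 46 - 3 = 43
//   min_address_offset_bits       = 43 - 2 = 41
//   address_offset      = round_up_power_of_2(32 GiB * 16) = 2^39
//   address_offset_bits = log2i_exact(2^39) = 39, clamped to [41, 43] -> 41
//
// Both the heap offset width and the metadata shift would thus be 41 bits.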
src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp (new file, 36 lines)
@@ -0,0 +1,36 @@
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef CPU_PPC_GC_Z_ZGLOBALS_PPC_HPP
#define CPU_PPC_GC_Z_ZGLOBALS_PPC_HPP

#include "globalDefinitions_ppc.hpp"
const size_t ZPlatformGranuleSizeShift = 21; // 2MB
const size_t ZPlatformHeapViews = 3;
const size_t ZPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE;

size_t ZPlatformAddressOffsetBits();
size_t ZPlatformAddressMetadataShift();

#endif // CPU_PPC_GC_Z_ZGLOBALS_PPC_HPP
src/hotspot/cpu/ppc/gc/z/z_ppc.ad (new file, 298 lines)
@@ -0,0 +1,298 @@
//
// Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2021 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

source_hpp %{

#include "gc/shared/gc_globals.hpp"
#include "gc/z/c2/zBarrierSetC2.hpp"
#include "gc/z/zThreadLocalData.hpp"

%}

source %{

static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref,
                           Register tmp, uint8_t barrier_data) {
  if (barrier_data == ZLoadBarrierElided) {
    return;
  }

  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
  __ ld(tmp, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
  __ and_(tmp, tmp, ref);
  __ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
  __ bind(*stub->continuation());
}

static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref,
                                     Register tmp) {
  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong);
  __ b(*stub->entry());
  __ bind(*stub->continuation());
}

static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
                               Register res, Register mem, Register oldval, Register newval,
                               Register tmp_xchg, Register tmp_mask,
                               bool weak, bool acquire) {
  // z-specific load barrier requires strong CAS operations.
  // Weak CAS operations are thus only emitted if the barrier is elided.
  __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
              MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true,
              weak && node->barrier_data() == ZLoadBarrierElided);

  if (node->barrier_data() != ZLoadBarrierElided) {
    Label skip_barrier;

    __ ld(tmp_mask, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
    __ and_(tmp_mask, tmp_mask, tmp_xchg);
    __ beq(CCR0, skip_barrier);

    // CAS must have failed because pointer in memory is bad.
    z_load_barrier_slow_path(_masm, node, Address(mem), tmp_xchg, res /* used as tmp */);

    __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true, weak);

    __ bind(skip_barrier);
  }

  if (acquire) {
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      // Uses the isync instruction as an acquire barrier.
      // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
      __ isync();
    } else {
      __ sync();
    }
  }
}

static void z_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
                                   Register res, Register mem, Register oldval, Register newval, Register tmp,
                                   bool weak, bool acquire) {
  // z-specific load barrier requires strong CAS operations.
  // Weak CAS operations are thus only emitted if the barrier is elided.
  __ cmpxchgd(CCR0, res, oldval, newval, mem,
              MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true,
              weak && node->barrier_data() == ZLoadBarrierElided);

  if (node->barrier_data() != ZLoadBarrierElided) {
    Label skip_barrier;
    __ ld(tmp, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
    __ and_(tmp, tmp, res);
    __ beq(CCR0, skip_barrier);

    z_load_barrier_slow_path(_masm, node, Address(mem), res, tmp);

    __ cmpxchgd(CCR0, res, oldval, newval, mem,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true, weak);

    __ bind(skip_barrier);
  }

  if (acquire) {
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      // Uses the isync instruction as an acquire barrier.
      // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
      __ isync();
    } else {
      __ sync();
    }
  }
}

%}
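
// Summary (illustrative sketch, not from the patch) of the retry protocol the
// two helpers above implement: with ZGC, a strong CAS may fail merely because
// the value in memory is a stale, bad-colored pointer, so the slow path heals
// the field and the CAS is replayed once:
//
//   bool z_cas(volatile uintptr_t* addr, uintptr_t expected, uintptr_t desired,
//              uintptr_t bad_mask) {
//     uintptr_t witness = cas(addr, expected, desired);  // hypothetical primitive
//     if (witness != expected && (witness & bad_mask) != 0) {
//       heal(addr, witness);                             // load barrier slow path
//       witness = cas(addr, expected, desired);          // second attempt
//     }
//     return witness == expected;
//   }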

instruct zLoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
%{
  match(Set dst (LoadP mem));
  effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
  ins_cost(MEMORY_REF_COST);

  predicate((UseZGC && n->as_Load()->barrier_data() != 0)
            && (n->as_Load()->is_unordered() || followed_by_acquire(n)));

  format %{ "LD $dst, $mem" %}
  ins_encode %{
    assert($mem$$index == 0, "sanity");
    __ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
    z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
  %}
  ins_pipe(pipe_class_default);
%}

// Load Pointer Volatile
instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
%{
  match(Set dst (LoadP mem));
  effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
  ins_cost(3 * MEMORY_REF_COST);

  // Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation
  predicate(UseZGC && n->as_Load()->barrier_data() != 0);

  format %{ "LD acq $dst, $mem" %}
  ins_encode %{
    __ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
    z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());

    // Uses the isync instruction as an acquire barrier.
    // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
    __ isync();
  %}
  ins_pipe(pipe_class_default);
%}

instruct zCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
                          iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);

  predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
            && (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst));

  format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
  ins_encode %{
    z_compare_and_swap(_masm, this,
                       $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
                       $tmp_xchg$$Register, $tmp_mask$$Register,
                       false /* weak */, false /* acquire */);
  %}
  ins_pipe(pipe_class_default);
%}

instruct zCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
                              iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);

  predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
            && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));

  format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
  ins_encode %{
    z_compare_and_swap(_masm, this,
                       $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
                       $tmp_xchg$$Register, $tmp_mask$$Register,
                       false /* weak */, true /* acquire */);
  %}
  ins_pipe(pipe_class_default);
%}

instruct zCompareAndSwapPWeak(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
                              iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);

  predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
            && ((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);

  format %{ "weak CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
  ins_encode %{
    z_compare_and_swap(_masm, this,
                       $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
                       $tmp_xchg$$Register, $tmp_mask$$Register,
                       true /* weak */, false /* acquire */);
  %}
  ins_pipe(pipe_class_default);
%}

instruct zCompareAndSwapPWeak_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
                                  iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);

  predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
            && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));

  format %{ "weak CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
  ins_encode %{
    z_compare_and_swap(_masm, this,
                       $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
                       $tmp_xchg$$Register, $tmp_mask$$Register,
                       true /* weak */, true /* acquire */);
  %}
  ins_pipe(pipe_class_default);
%}

instruct zCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
                              iRegPdst tmp, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP_DEF res, TEMP tmp, KILL cr0);

  predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
            && (((CompareAndSwapNode*)n)->order() != MemNode::acquire
                && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst));

  format %{ "CMPXCHG $res, $mem, $oldval, $newval; as ptr; ptr" %}
  ins_encode %{
    z_compare_and_exchange(_masm, this,
                           $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
                           false /* weak */, false /* acquire */);
  %}
  ins_pipe(pipe_class_default);
%}

instruct zCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
                                  iRegPdst tmp, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP_DEF res, TEMP tmp, KILL cr0);

  predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
            && (((CompareAndSwapNode*)n)->order() == MemNode::acquire
                || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));

  format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
  ins_encode %{
    z_compare_and_exchange(_masm, this,
                           $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
                           false /* weak */, true /* acquire */);
  %}
  ins_pipe(pipe_class_default);
%}

instruct zGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp, flagsRegCR0 cr0) %{
  match(Set res (GetAndSetP mem newval));
  effect(TEMP_DEF res, TEMP tmp, KILL cr0);

  predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);

  format %{ "GetAndSetP $res, $mem, $newval" %}
  ins_encode %{
    __ getandsetd($res$$Register, $newval$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
    z_load_barrier(_masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data());

    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}
@@ -5531,7 +5531,7 @@ instruct loadN2P_klass_unscaled(iRegPdst dst, memory mem) %{
 // Load Pointer
 instruct loadP(iRegPdst dst, memoryAlg4 mem) %{
   match(Set dst (LoadP mem));
-  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  predicate((n->as_Load()->is_unordered() || followed_by_acquire(n)) && n->as_Load()->barrier_data() == 0);
   ins_cost(MEMORY_REF_COST);

   format %{ "LD $dst, $mem \t// ptr" %}
@@ -5545,6 +5545,8 @@ instruct loadP_ac(iRegPdst dst, memoryAlg4 mem) %{
   match(Set dst (LoadP mem));
   ins_cost(3*MEMORY_REF_COST);

+  predicate(n->as_Load()->barrier_data() == 0);
+
   format %{ "LD $dst, $mem \t// ptr acquire\n\t"
             "TWI $dst\n\t"
             "ISYNC" %}
@@ -5556,7 +5558,7 @@ instruct loadP_ac(iRegPdst dst, memoryAlg4 mem) %{
 // LoadP + CastP2L
 instruct loadP2X(iRegLdst dst, memoryAlg4 mem) %{
   match(Set dst (CastP2X (LoadP mem)));
-  predicate(_kids[0]->_leaf->as_Load()->is_unordered());
+  predicate(_kids[0]->_leaf->as_Load()->is_unordered() && _kids[0]->_leaf->as_Load()->barrier_data() == 0);
   ins_cost(MEMORY_REF_COST);

   format %{ "LD $dst, $mem \t// ptr + p2x" %}
@@ -7478,6 +7480,7 @@ instruct storeLConditional_regP_regL_regL(flagsReg crx, indirect mem_ptr, iRegLs
 instruct storePConditional_regP_regP_regP(flagsRegCR0 cr0, indirect mem_ptr, iRegPsrc oldVal, iRegPsrc newVal) %{
   match(Set cr0 (StorePConditional mem_ptr (Binary oldVal newVal)));
   ins_cost(2*MEMORY_REF_COST);
+  predicate(n->as_LoadStore()->barrier_data() == 0);

   format %{ "STDCX_ if ($cr0 = ($oldVal == *$mem_ptr)) *mem_ptr = $newVal; as bool" %}
   ins_encode %{
@@ -7642,6 +7645,7 @@ instruct compareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc
 instruct compareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
   match(Set res (CompareAndSwapP mem_ptr (Binary src1 src2)));
   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
+  predicate(n->as_LoadStore()->barrier_data() == 0);
   format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
   ins_encode %{
     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
@@ -7864,7 +7868,7 @@ instruct weakCompareAndSwapL_acq_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr,

 instruct weakCompareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
   match(Set res (WeakCompareAndSwapP mem_ptr (Binary src1 src2)));
-  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
+  predicate((((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
   format %{ "weak CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
   ins_encode %{
@@ -7878,7 +7882,7 @@ instruct weakCompareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iReg

 instruct weakCompareAndSwapP_acq_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
   match(Set res (WeakCompareAndSwapP mem_ptr (Binary src1 src2)));
-  predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
+  predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
   effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
   format %{ "weak CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
   ins_encode %{
@@ -8134,7 +8138,8 @@ instruct compareAndExchangeL_acq_regP_regL_regL(iRegLdst res, iRegPdst mem_ptr,

 instruct compareAndExchangeP_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
   match(Set res (CompareAndExchangeP mem_ptr (Binary src1 src2)));
-  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
+  predicate((((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst)
+            && n->as_LoadStore()->barrier_data() == 0);
   effect(TEMP_DEF res, TEMP cr0);
   format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as ptr; ptr" %}
   ins_encode %{
@@ -8148,7 +8153,8 @@ instruct compareAndExchangeP_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr, iReg

 instruct compareAndExchangeP_acq_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
   match(Set res (CompareAndExchangeP mem_ptr (Binary src1 src2)));
-  predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
+  predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst)
+            && n->as_LoadStore()->barrier_data() == 0);
   effect(TEMP_DEF res, TEMP cr0);
   format %{ "CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as ptr; ptr" %}
   ins_encode %{
@@ -8370,6 +8376,7 @@ instruct getAndSetL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src, flagsRegCR0 cr

 instruct getAndSetP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src, flagsRegCR0 cr0) %{
   match(Set res (GetAndSetP mem_ptr src));
+  predicate(n->as_LoadStore()->barrier_data() == 0);
   effect(TEMP_DEF res, TEMP cr0);
   format %{ "GetAndSetP $res, $mem_ptr, $src" %}
   ins_encode %{
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,21 @@ inline bool is_FloatRegister() {
          value() < ConcreteRegisterImpl::max_fpr;
 }

+inline bool is_VectorRegister() {
+  return value() >= ConcreteRegisterImpl::max_fpr &&
+         value() < ConcreteRegisterImpl::max_vsr;
+}
+
+inline bool is_ConditionRegister() {
+  return value() >= ConcreteRegisterImpl::max_vsr &&
+         value() < ConcreteRegisterImpl::max_cnd;
+}
+
+inline bool is_SpecialRegister() {
+  return value() >= ConcreteRegisterImpl::max_cnd &&
+         value() < ConcreteRegisterImpl::max_spr;
+}
+
 inline Register as_Register() {
   assert(is_Register() && is_even(value()), "even-aligned GPR name");
   return ::as_Register(value()>>1);
src/hotspot/os_cpu/linux_ppc/gc/z/zSyscall_linux_ppc.hpp (new file, 42 lines)
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef OS_CPU_LINUX_PPC_GC_Z_ZSYSCALL_LINUX_PPC_HPP
#define OS_CPU_LINUX_PPC_GC_Z_ZSYSCALL_LINUX_PPC_HPP

#include <sys/syscall.h>

//
// Support for building on older Linux systems
//

#ifndef SYS_memfd_create
#define SYS_memfd_create 360
#endif
#ifndef SYS_fallocate
#define SYS_fallocate 309
#endif

#endif // OS_CPU_LINUX_PPC_GC_Z_ZSYSCALL_LINUX_PPC_HPP
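
// Illustrative usage (not from the patch): with these fallback numbers in
// place, ZGC's Linux backing-storage code can issue the raw system call even
// when the libc headers predate memfd_create, e.g.:
//
//   #include <unistd.h>
//   const int fd = (int)syscall(SYS_memfd_create, "java_heap", 0 /* flags */);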