8307058: Implementation of Generational ZGC

Co-authored-by: Stefan Karlsson <stefank@openjdk.org>
Co-authored-by: Erik Österlund <eosterlund@openjdk.org>
Co-authored-by: Axel Boldt-Christmas <aboldtch@openjdk.org>
Co-authored-by: Per Liden <pliden@openjdk.org>
Co-authored-by: Stefan Johansson <sjohanss@openjdk.org>
Co-authored-by: Albert Mingkun Yang <ayang@openjdk.org>
Co-authored-by: Erik Helin <ehelin@openjdk.org>
Co-authored-by: Roberto Castañeda Lozano <rcastanedalo@openjdk.org>
Co-authored-by: Nils Eliasson <neliasso@openjdk.org>
Co-authored-by: Martin Doerr <mdoerr@openjdk.org>
Co-authored-by: Leslie Zhai <lzhai@openjdk.org>
Co-authored-by: Fei Yang <fyang@openjdk.org>
Co-authored-by: Yadong Wang <yadongwang@openjdk.org>
Reviewed-by: eosterlund, aboldtch, rcastanedalo

Commit d20034b09c (parent 0cbfbc400a)
@@ -172,6 +172,8 @@ ifeq ($(call check-jvm-feature, compiler2), true)

  ifeq ($(call check-jvm-feature, zgc), true)
    AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
        $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/x/x_$(HOTSPOT_TARGET_CPU).ad \
        $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/x/x_$(HOTSPOT_TARGET_CPU_ARCH).ad \
        $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU).ad \
        $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU_ARCH).ad \
      )))

@@ -149,6 +149,7 @@ endif

ifneq ($(call check-jvm-feature, zgc), true)
  JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0
  JVM_EXCLUDE_PATTERNS += gc/z
  JVM_EXCLUDE_PATTERNS += gc/x
endif

ifneq ($(call check-jvm-feature, shenandoahgc), true)
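A hedged usage example (not part of the patch): disabling the zgc JVM feature at
configure time is what makes the ifneq branch above take effect, defining
INCLUDE_ZGC=0 and excluding both the gc/z and gc/x source trees from the build.

    # Illustrative only; standard OpenJDK configure syntax for dropping a feature.
    bash configure --with-jvm-features=-zgc
    make hotspot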
@@ -1010,7 +1010,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
      __ decode_heap_oop(dest->as_register());
    }

    if (!UseZGC) {
    if (!(UseZGC && !ZGenerational)) {
      // Load barrier has not yet been applied, so ZGC can't verify the oop here
      __ verify_oop(dest->as_register());
    }
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -129,20 +129,14 @@ public:
  }
};

// Store the instruction bitmask, bits and name for checking the barrier.
struct CheckInsn {
  uint32_t mask;
  uint32_t bits;
  const char *name;
};

// The first instruction of the nmethod entry barrier is an ldr (literal)
// instruction. Verify that it's really there, so the offsets are not skewed.
bool NativeNMethodBarrier::check_barrier(err_msg& msg) const {
  uint32_t* addr = (uint32_t*) instruction_address();
  uint32_t inst = *addr;
  if ((inst & 0xff000000) != 0x18000000) {
    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x not an ldr", p2i(addr), inst);
    msg.print("Nmethod entry barrier did not start with ldr (literal) as expected. "
              "Addr: " PTR_FORMAT " Code: " UINT32_FORMAT, p2i(addr), inst);
    return false;
  }
  return true;
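For reference (an assumption about the instruction encoding, not stated in the
patch): a 32-bit AArch64 LDR (literal) encodes 0b00011000 in bits 31..24, i.e. a
0x18 top byte, which is what the 0xff000000/0x18000000 mask/bits pair above
tests. Expressed with the CheckInsn struct from this hunk, purely as an
illustration:

    static const CheckInsn ldr_literal = { 0xff000000, 0x18000000, "ldr (literal)" };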
src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.cpp (new file, 462 lines)
@@ -0,0 +1,462 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "code/codeBlob.hpp"
|
||||
#include "code/vmreg.inline.hpp"
|
||||
#include "gc/x/xBarrier.inline.hpp"
|
||||
#include "gc/x/xBarrierSet.hpp"
|
||||
#include "gc/x/xBarrierSetAssembler.hpp"
|
||||
#include "gc/x/xBarrierSetRuntime.hpp"
|
||||
#include "gc/x/xThreadLocalData.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_LIRAssembler.hpp"
|
||||
#include "c1/c1_MacroAssembler.hpp"
|
||||
#include "gc/x/c1/xBarrierSetC1.hpp"
|
||||
#endif // COMPILER1
|
||||
#ifdef COMPILER2
|
||||
#include "gc/x/c2/xBarrierSetC2.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) /* nothing */
|
||||
#else
|
||||
#define BLOCK_COMMENT(str) __ block_comment(str)
|
||||
#endif
|
||||
|
||||
#undef __
#define __ masm->

void XBarrierSetAssembler::load_at(MacroAssembler* masm,
                                   DecoratorSet decorators,
                                   BasicType type,
                                   Register dst,
                                   Address src,
                                   Register tmp1,
                                   Register tmp2) {
  if (!XBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
    return;
  }

  assert_different_registers(rscratch1, rscratch2, src.base());
  assert_different_registers(rscratch1, rscratch2, dst);

  Label done;

  // Load bad mask into scratch register.
  __ ldr(rscratch1, address_bad_mask_from_thread(rthread));
  __ lea(rscratch2, src);
  __ ldr(dst, src);

  // Test reference against bad mask. If mask bad, then we need to fix it up.
  __ tst(dst, rscratch1);
  __ br(Assembler::EQ, done);

  __ enter(/*strip_ret_addr*/true);

  __ push_call_clobbered_registers_except(RegSet::of(dst));

  if (c_rarg0 != dst) {
    __ mov(c_rarg0, dst);
  }
  __ mov(c_rarg1, rscratch2);

  __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);

  // Make sure dst has the return value.
  if (dst != r0) {
    __ mov(dst, r0);
  }

  __ pop_call_clobbered_registers_except(RegSet::of(dst));
  __ leave();

  __ bind(done);
}
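A hedged sketch (not part of the patch) of what the assembly emitted by load_at
amounts to for single-generation ZGC: load the reference, test it against the
per-thread bad mask, and only take the runtime slow path when a bad metadata bit
is set, in which case the healed oop returned by the runtime replaces the value.

    // Pseudocode sketch; names are illustrative, not HotSpot APIs.
    oop zgc_load_barrier_on_oop_field(volatile oop* field, uintptr_t bad_mask) {
      oop ref = *field;                                // plain load (ldr)
      if (((uintptr_t)ref & bad_mask) != 0) {          // tst + br(EQ, done)
        ref = load_barrier_slow_path(ref, field);      // call_VM_leaf(...)
      }
      return ref;                                      // result left in dst
    }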
|
||||
#ifdef ASSERT
|
||||
|
||||
void XBarrierSetAssembler::store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Address dst,
|
||||
Register val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3) {
|
||||
// Verify value
|
||||
if (is_reference_type(type)) {
|
||||
// Note that src could be noreg, which means we
|
||||
// are storing null and can skip verification.
|
||||
if (val != noreg) {
|
||||
Label done;
|
||||
|
||||
// tmp1, tmp2 and tmp3 are often set to noreg.
|
||||
RegSet savedRegs = RegSet::of(rscratch1);
|
||||
__ push(savedRegs, sp);
|
||||
|
||||
__ ldr(rscratch1, address_bad_mask_from_thread(rthread));
|
||||
__ tst(val, rscratch1);
|
||||
__ br(Assembler::EQ, done);
|
||||
__ stop("Verify oop store failed");
|
||||
__ should_not_reach_here();
|
||||
__ bind(done);
|
||||
__ pop(savedRegs, sp);
|
||||
}
|
||||
}
|
||||
|
||||
// Store value
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg);
|
||||
}
|
||||
|
||||
#endif // ASSERT
|
||||
|
||||
void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
bool is_oop,
|
||||
Register src,
|
||||
Register dst,
|
||||
Register count,
|
||||
RegSet saved_regs) {
|
||||
if (!is_oop) {
|
||||
// Barrier not needed
|
||||
return;
|
||||
}
|
||||
|
||||
BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {");
|
||||
|
||||
assert_different_registers(src, count, rscratch1);
|
||||
|
||||
__ push(saved_regs, sp);
|
||||
|
||||
if (count == c_rarg0) {
|
||||
if (src == c_rarg1) {
|
||||
// exactly backwards!!
|
||||
__ mov(rscratch1, c_rarg0);
|
||||
__ mov(c_rarg0, c_rarg1);
|
||||
__ mov(c_rarg1, rscratch1);
|
||||
} else {
|
||||
__ mov(c_rarg1, count);
|
||||
__ mov(c_rarg0, src);
|
||||
}
|
||||
} else {
|
||||
__ mov(c_rarg0, src);
|
||||
__ mov(c_rarg1, count);
|
||||
}
|
||||
|
||||
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);
|
||||
|
||||
__ pop(saved_regs, sp);
|
||||
|
||||
BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue");
|
||||
}
|
||||
|
||||
void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
|
||||
Register jni_env,
|
||||
Register robj,
|
||||
Register tmp,
|
||||
Label& slowpath) {
|
||||
BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {");
|
||||
|
||||
assert_different_registers(jni_env, robj, tmp);
|
||||
|
||||
// Resolve jobject
|
||||
BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);
|
||||
|
||||
// The Address offset is too large for a direct load: -784. Our range is +127 to -128.
|
||||
__ mov(tmp, (int64_t)(in_bytes(XThreadLocalData::address_bad_mask_offset()) -
|
||||
in_bytes(JavaThread::jni_environment_offset())));
|
||||
|
||||
// Load address bad mask
|
||||
__ add(tmp, jni_env, tmp);
|
||||
__ ldr(tmp, Address(tmp));
|
||||
|
||||
// Check address bad mask
|
||||
__ tst(robj, tmp);
|
||||
__ br(Assembler::NE, slowpath);
|
||||
|
||||
BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native");
|
||||
}
|
||||
|
||||
#ifdef COMPILER1
|
||||
|
||||
#undef __
|
||||
#define __ ce->masm()->
|
||||
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const {
|
||||
assert_different_registers(rscratch1, rthread, ref->as_register());
|
||||
|
||||
__ ldr(rscratch1, address_bad_mask_from_thread(rthread));
|
||||
__ tst(ref->as_register(), rscratch1);
|
||||
}
|
||||
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
XLoadBarrierStubC1* stub) const {
|
||||
// Stub entry
|
||||
__ bind(*stub->entry());
|
||||
|
||||
Register ref = stub->ref()->as_register();
|
||||
Register ref_addr = noreg;
|
||||
Register tmp = noreg;
|
||||
|
||||
if (stub->tmp()->is_valid()) {
|
||||
// Load address into tmp register
|
||||
ce->leal(stub->ref_addr(), stub->tmp());
|
||||
ref_addr = tmp = stub->tmp()->as_pointer_register();
|
||||
} else {
|
||||
// Address already in register
|
||||
ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
|
||||
}
|
||||
|
||||
assert_different_registers(ref, ref_addr, noreg);
|
||||
|
||||
// Save r0 unless it is the result or tmp register
|
||||
// Set up SP to accommodate parameters and maybe r0..
|
||||
if (ref != r0 && tmp != r0) {
|
||||
__ sub(sp, sp, 32);
|
||||
__ str(r0, Address(sp, 16));
|
||||
} else {
|
||||
__ sub(sp, sp, 16);
|
||||
}
|
||||
|
||||
// Setup arguments and call runtime stub
|
||||
ce->store_parameter(ref_addr, 1);
|
||||
ce->store_parameter(ref, 0);
|
||||
|
||||
__ far_call(stub->runtime_stub());
|
||||
|
||||
// Verify result
|
||||
__ verify_oop(r0);
|
||||
|
||||
// Move result into place
|
||||
if (ref != r0) {
|
||||
__ mov(ref, r0);
|
||||
}
|
||||
|
||||
// Restore r0 unless it is the result or tmp register
|
||||
if (ref != r0 && tmp != r0) {
|
||||
__ ldr(r0, Address(sp, 16));
|
||||
__ add(sp, sp, 32);
|
||||
} else {
|
||||
__ add(sp, sp, 16);
|
||||
}
|
||||
|
||||
// Stub exit
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
#undef __
|
||||
#define __ sasm->
|
||||
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const {
|
||||
__ prologue("zgc_load_barrier stub", false);
|
||||
|
||||
__ push_call_clobbered_registers_except(RegSet::of(r0));
|
||||
|
||||
// Setup arguments
|
||||
__ load_parameter(0, c_rarg0);
|
||||
__ load_parameter(1, c_rarg1);
|
||||
|
||||
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
|
||||
|
||||
__ pop_call_clobbered_registers_except(RegSet::of(r0));
|
||||
|
||||
__ epilogue();
|
||||
}
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
|
||||
OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
|
||||
if (!OptoReg::is_reg(opto_reg)) {
|
||||
return OptoReg::Bad;
|
||||
}
|
||||
|
||||
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
|
||||
if (vm_reg->is_FloatRegister()) {
|
||||
return opto_reg & ~1;
|
||||
}
|
||||
|
||||
return opto_reg;
|
||||
}
|
||||
|
||||
#undef __
|
||||
#define __ _masm->
|
||||
|
||||
class XSaveLiveRegisters {
|
||||
private:
|
||||
MacroAssembler* const _masm;
|
||||
RegSet _gp_regs;
|
||||
FloatRegSet _fp_regs;
|
||||
PRegSet _p_regs;
|
||||
|
||||
public:
|
||||
void initialize(XLoadBarrierStubC2* stub) {
|
||||
// Record registers that need to be saved/restored
|
||||
RegMaskIterator rmi(stub->live());
|
||||
while (rmi.has_next()) {
|
||||
const OptoReg::Name opto_reg = rmi.next();
|
||||
if (OptoReg::is_reg(opto_reg)) {
|
||||
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
|
||||
if (vm_reg->is_Register()) {
|
||||
_gp_regs += RegSet::of(vm_reg->as_Register());
|
||||
} else if (vm_reg->is_FloatRegister()) {
|
||||
_fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
|
||||
} else if (vm_reg->is_PRegister()) {
|
||||
_p_regs += PRegSet::of(vm_reg->as_PRegister());
|
||||
} else {
|
||||
fatal("Unknown register type");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove C-ABI SOE registers, scratch regs and _ref register that will be updated
|
||||
_gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->ref());
|
||||
}
|
||||
|
||||
XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
|
||||
_masm(masm),
|
||||
_gp_regs(),
|
||||
_fp_regs(),
|
||||
_p_regs() {
|
||||
|
||||
// Figure out what registers to save/restore
|
||||
initialize(stub);
|
||||
|
||||
// Save registers
|
||||
__ push(_gp_regs, sp);
|
||||
__ push_fp(_fp_regs, sp);
|
||||
__ push_p(_p_regs, sp);
|
||||
}
|
||||
|
||||
~XSaveLiveRegisters() {
|
||||
// Restore registers
|
||||
__ pop_p(_p_regs, sp);
|
||||
__ pop_fp(_fp_regs, sp);
|
||||
|
||||
// External runtime call may clobber ptrue reg
|
||||
__ reinitialize_ptrue();
|
||||
|
||||
__ pop(_gp_regs, sp);
|
||||
}
|
||||
};
|
||||
|
||||
#undef __
|
||||
#define __ _masm->
|
||||
|
||||
class XSetupArguments {
|
||||
private:
|
||||
MacroAssembler* const _masm;
|
||||
const Register _ref;
|
||||
const Address _ref_addr;
|
||||
|
||||
public:
|
||||
XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
|
||||
_masm(masm),
|
||||
_ref(stub->ref()),
|
||||
_ref_addr(stub->ref_addr()) {
|
||||
|
||||
// Setup arguments
|
||||
if (_ref_addr.base() == noreg) {
|
||||
// No self healing
|
||||
if (_ref != c_rarg0) {
|
||||
__ mov(c_rarg0, _ref);
|
||||
}
|
||||
__ mov(c_rarg1, 0);
|
||||
} else {
|
||||
// Self healing
|
||||
if (_ref == c_rarg0) {
|
||||
// _ref is already at correct place
|
||||
__ lea(c_rarg1, _ref_addr);
|
||||
} else if (_ref != c_rarg1) {
|
||||
// _ref is in wrong place, but not in c_rarg1, so fix it first
|
||||
__ lea(c_rarg1, _ref_addr);
|
||||
__ mov(c_rarg0, _ref);
|
||||
} else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
|
||||
assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
|
||||
__ mov(c_rarg0, _ref);
|
||||
__ lea(c_rarg1, _ref_addr);
|
||||
} else {
|
||||
assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
|
||||
if (_ref_addr.base() == c_rarg0 || _ref_addr.index() == c_rarg0) {
|
||||
__ mov(rscratch2, c_rarg1);
|
||||
__ lea(c_rarg1, _ref_addr);
|
||||
__ mov(c_rarg0, rscratch2);
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
~XSetupArguments() {
|
||||
// Transfer result
|
||||
if (_ref != r0) {
|
||||
__ mov(_ref, r0);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
|
||||
BLOCK_COMMENT("XLoadBarrierStubC2");
|
||||
|
||||
// Stub entry
|
||||
__ bind(*stub->entry());
|
||||
|
||||
{
|
||||
XSaveLiveRegisters save_live_registers(masm, stub);
|
||||
XSetupArguments setup_arguments(masm, stub);
|
||||
__ mov(rscratch1, stub->slow_path());
|
||||
__ blr(rscratch1);
|
||||
}
|
||||
// Stub exit
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
#endif // COMPILER2
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
|
||||
// Check if mask is good.
|
||||
// verifies that XAddressBadMask & r0 == 0
|
||||
__ ldr(tmp2, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(tmp1, obj, tmp2);
|
||||
__ cbnz(tmp1, error);
|
||||
|
||||
BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
|
||||
}
|
||||
|
||||
#undef __
|
src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.hpp (new file, 110 lines)
@@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP
|
||||
#define CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP
|
||||
|
||||
#include "code/vmreg.hpp"
|
||||
#include "oops/accessDecorators.hpp"
|
||||
#ifdef COMPILER2
|
||||
#include "opto/optoreg.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
class LIR_Assembler;
|
||||
class LIR_Opr;
|
||||
class StubAssembler;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class Node;
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
class XLoadBarrierStubC1;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class XLoadBarrierStubC2;
|
||||
#endif // COMPILER2
|
||||
|
||||
class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
|
||||
public:
|
||||
virtual void load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Register dst,
|
||||
Address src,
|
||||
Register tmp1,
|
||||
Register tmp2);
|
||||
|
||||
#ifdef ASSERT
|
||||
virtual void store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Address dst,
|
||||
Register val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3);
|
||||
#endif // ASSERT
|
||||
|
||||
virtual void arraycopy_prologue(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
bool is_oop,
|
||||
Register src,
|
||||
Register dst,
|
||||
Register count,
|
||||
RegSet saved_regs);
|
||||
|
||||
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
|
||||
Register jni_env,
|
||||
Register robj,
|
||||
Register tmp,
|
||||
Label& slowpath);
|
||||
|
||||
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
|
||||
|
||||
#ifdef COMPILER1
|
||||
void generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const;
|
||||
|
||||
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
XLoadBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
OptoReg::Name refine_register(const Node* node,
|
||||
OptoReg::Name opto_reg);
|
||||
|
||||
void generate_c2_load_barrier_stub(MacroAssembler* masm,
|
||||
XLoadBarrierStubC2* stub) const;
|
||||
#endif // COMPILER2
|
||||
|
||||
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
|
||||
};
|
||||
|
||||
#endif // CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP
|
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/x/xGlobals.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"

@@ -196,15 +196,15 @@ static size_t probe_valid_max_address_bit() {
#endif // LINUX
}

size_t ZPlatformAddressOffsetBits() {
size_t XPlatformAddressOffsetBits() {
  const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
  const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
  const size_t min_address_offset_bits = max_address_offset_bits - 2;
  const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
  const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
  const size_t address_offset_bits = log2i_exact(address_offset);
  return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}

size_t ZPlatformAddressMetadataShift() {
  return ZPlatformAddressOffsetBits();
size_t XPlatformAddressMetadataShift() {
  return XPlatformAddressOffsetBits();
}
src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.hpp (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP
|
||||
#define CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP
|
||||
|
||||
const size_t XPlatformHeapViews = 3;
|
||||
const size_t XPlatformCacheLineSize = 64;
|
||||
|
||||
size_t XPlatformAddressOffsetBits();
|
||||
size_t XPlatformAddressMetadataShift();
|
||||
|
||||
#endif // CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP
|
src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad (new file, 243 lines)
@@ -0,0 +1,243 @@
|
||||
//
|
||||
// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License version 2 only, as
|
||||
// published by the Free Software Foundation.
|
||||
//
|
||||
// This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
// version 2 for more details (a copy is included in the LICENSE file that
|
||||
// accompanied this code).
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License version
|
||||
// 2 along with this work; if not, write to the Free Software Foundation,
|
||||
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
//
|
||||
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
// or visit www.oracle.com if you need additional information or have any
|
||||
// questions.
|
||||
//
|
||||
|
||||
source_hpp %{
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/x/c2/xBarrierSetC2.hpp"
|
||||
#include "gc/x/xThreadLocalData.hpp"
|
||||
|
||||
%}
|
||||
|
||||
source %{
|
||||
|
||||
static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
|
||||
if (barrier_data == XLoadBarrierElided) {
|
||||
return;
|
||||
}
|
||||
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
|
||||
__ ldr(tmp, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(tmp, tmp, ref);
|
||||
__ cbnz(tmp, *stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
|
||||
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
|
||||
__ b(*stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
%}
|
||||
|
||||
// Load Pointer
|
||||
instruct xLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
predicate(UseZGC && !ZGenerational && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0));
|
||||
effect(TEMP dst, KILL cr);
|
||||
|
||||
ins_cost(4 * INSN_COST);
|
||||
|
||||
format %{ "ldr $dst, $mem" %}
|
||||
|
||||
ins_encode %{
|
||||
const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
|
||||
__ ldr($dst$$Register, ref_addr);
|
||||
x_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data());
|
||||
%}
|
||||
|
||||
ins_pipe(iload_reg_mem);
|
||||
%}
|
||||
|
||||
// Load Pointer Volatile
|
||||
instruct xLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
predicate(UseZGC && !ZGenerational && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
|
||||
effect(TEMP dst, KILL cr);
|
||||
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
|
||||
format %{ "ldar $dst, $mem\t" %}
|
||||
|
||||
ins_encode %{
|
||||
__ ldar($dst$$Register, $mem$$Register);
|
||||
x_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data());
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_serial);
|
||||
%}
|
||||
|
||||
instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
|
||||
effect(KILL cr, TEMP_DEF res);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "cmpxchg $mem, $oldval, $newval\n\t"
|
||||
"cset $res, EQ" %}
|
||||
|
||||
ins_encode %{
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
|
||||
false /* acquire */, true /* release */, false /* weak */, rscratch2);
|
||||
__ cset($res$$Register, Assembler::EQ);
|
||||
if (barrier_data() != XLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(rscratch1, rscratch1, rscratch2);
|
||||
__ cbz(rscratch1, good);
|
||||
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
|
||||
false /* acquire */, true /* release */, false /* weak */, rscratch2);
|
||||
__ cset($res$$Register, Assembler::EQ);
|
||||
__ bind(good);
|
||||
}
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong));
|
||||
effect(KILL cr, TEMP_DEF res);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "cmpxchg $mem, $oldval, $newval\n\t"
|
||||
"cset $res, EQ" %}
|
||||
|
||||
ins_encode %{
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
|
||||
true /* acquire */, true /* release */, false /* weak */, rscratch2);
|
||||
__ cset($res$$Register, Assembler::EQ);
|
||||
if (barrier_data() != XLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(rscratch1, rscratch1, rscratch2);
|
||||
__ cbz(rscratch1, good);
|
||||
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
|
||||
true /* acquire */, true /* release */, false /* weak */, rscratch2);
|
||||
__ cset($res$$Register, Assembler::EQ);
|
||||
__ bind(good);
|
||||
}
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
|
||||
effect(TEMP_DEF res, KILL cr);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
|
||||
|
||||
ins_encode %{
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
|
||||
false /* acquire */, true /* release */, false /* weak */, $res$$Register);
|
||||
if (barrier_data() != XLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(rscratch1, rscratch1, $res$$Register);
|
||||
__ cbz(rscratch1, good);
|
||||
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
|
||||
false /* acquire */, true /* release */, false /* weak */, $res$$Register);
|
||||
__ bind(good);
|
||||
}
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
|
||||
effect(TEMP_DEF res, KILL cr);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
|
||||
|
||||
ins_encode %{
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
|
||||
true /* acquire */, true /* release */, false /* weak */, $res$$Register);
|
||||
if (barrier_data() != XLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(rscratch1, rscratch1, $res$$Register);
|
||||
__ cbz(rscratch1, good);
|
||||
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
|
||||
true /* acquire */, true /* release */, false /* weak */, $res$$Register);
|
||||
__ bind(good);
|
||||
}
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
|
||||
match(Set prev (GetAndSetP mem newv));
|
||||
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
|
||||
effect(TEMP_DEF prev, KILL cr);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
|
||||
|
||||
ins_encode %{
|
||||
__ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
|
||||
x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_serial);
|
||||
%}
|
||||
|
||||
instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
|
||||
match(Set prev (GetAndSetP mem newv));
|
||||
predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() != 0));
|
||||
effect(TEMP_DEF prev, KILL cr);
|
||||
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
|
||||
format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
|
||||
|
||||
ins_encode %{
|
||||
__ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
|
||||
x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
|
||||
%}
|
||||
ins_pipe(pipe_serial);
|
||||
%}
|
src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zBarrierSetAssembler.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
#ifdef LINUX
|
||||
#include <sys/mman.h>
|
||||
#endif // LINUX
|
||||
|
||||
// Default value if probing is not implemented for a certain platform: 128TB
|
||||
static const size_t DEFAULT_MAX_ADDRESS_BIT = 47;
|
||||
// Minimum value returned, if probing fails: 64GB
|
||||
static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;
|
||||
|
||||
static size_t probe_valid_max_address_bit() {
|
||||
#ifdef LINUX
|
||||
size_t max_address_bit = 0;
|
||||
const size_t page_size = os::vm_page_size();
|
||||
for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) {
|
||||
const uintptr_t base_addr = ((uintptr_t) 1U) << i;
|
||||
if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) {
|
||||
// msync succeeded, the address is valid, and maybe even already mapped.
|
||||
max_address_bit = i;
|
||||
break;
|
||||
}
|
||||
if (errno != ENOMEM) {
|
||||
// Some error occurred. This should never happen, but msync
|
||||
// has some undefined behavior, hence ignore this bit.
|
||||
#ifdef ASSERT
|
||||
fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
|
||||
#else // ASSERT
|
||||
log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
|
||||
#endif // ASSERT
|
||||
continue;
|
||||
}
|
||||
// Since msync failed with ENOMEM, the page might not be mapped.
|
||||
// Try to map it, to see if the address is valid.
|
||||
void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
|
||||
if (result_addr != MAP_FAILED) {
|
||||
munmap(result_addr, page_size);
|
||||
}
|
||||
if ((uintptr_t) result_addr == base_addr) {
|
||||
// address is valid
|
||||
max_address_bit = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (max_address_bit == 0) {
|
||||
// probing failed, allocate a very high page and take that bit as the maximum
|
||||
const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT;
|
||||
void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
|
||||
if (result_addr != MAP_FAILED) {
|
||||
max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1;
|
||||
munmap(result_addr, page_size);
|
||||
}
|
||||
}
|
||||
log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit);
|
||||
return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT);
|
||||
#else // LINUX
|
||||
return DEFAULT_MAX_ADDRESS_BIT;
|
||||
#endif // LINUX
|
||||
}
|
||||
|
||||
size_t ZPlatformAddressOffsetBits() {
|
||||
const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
|
||||
const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
|
||||
const size_t min_address_offset_bits = max_address_offset_bits - 2;
|
||||
const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
|
||||
const size_t address_offset_bits = log2i_exact(address_offset);
|
||||
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
|
||||
}
|
||||
|
||||
size_t ZPlatformAddressHeapBaseShift() {
|
||||
return ZPlatformAddressOffsetBits();
|
||||
}
|
||||
|
||||
void ZGlobalsPointers::pd_set_good_masks() {
|
||||
BarrierSetAssembler::clear_patching_epoch();
|
||||
}
|
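A hedged worked example (not from the patch, and assuming ZVirtualToPhysicalRatio
is 16) of how ZPlatformAddressOffsetBits above picks the heap offset width:

    probe -> 47 usable bits      => max = (47 + 1) - 3 = 45, min = 45 - 2 = 43
    32 GB heap * 16 (ratio)      => round_up_power_of_2 = 512 GB = 2^39 => 39 bits
    clamp(39, 43, 45)            => 43 address offset bits

ZPlatformAddressHeapBaseShift then returns the same value, 43 in this example.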
src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.hpp (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_HPP
|
||||
#define CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_HPP
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
const size_t ZPointerLoadShift = 16;
|
||||
|
||||
size_t ZPlatformAddressOffsetBits();
|
||||
size_t ZPlatformAddressHeapBaseShift();
|
||||
|
||||
#endif // CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_HPP
|
src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.inline.hpp (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_INLINE_HPP
|
||||
#define CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_INLINE_HPP
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
inline uintptr_t ZPointer::remap_bits(uintptr_t colored) {
|
||||
return (colored ^ ZPointerRemappedMask) & ZPointerRemappedMask;
|
||||
}
|
||||
|
||||
inline constexpr int ZPointer::load_shift_lookup(uintptr_t value) {
|
||||
return ZPointerLoadShift;
|
||||
}
|
||||
|
||||
#endif // CPU_AARCH64_GC_Z_ZADDRESS_AARCH64_INLINE_HPP
|
(File diff suppressed because it is too large.)
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,21 +27,32 @@
|
||||
#include "code/vmreg.hpp"
|
||||
#include "oops/accessDecorators.hpp"
|
||||
#ifdef COMPILER2
|
||||
#include "gc/z/c2/zBarrierSetC2.hpp"
|
||||
#include "opto/optoreg.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
class LIR_Address;
|
||||
class LIR_Assembler;
|
||||
class LIR_Opr;
|
||||
class StubAssembler;
|
||||
class ZLoadBarrierStubC1;
|
||||
class ZStoreBarrierStubC1;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class MachNode;
|
||||
class Node;
|
||||
class ZLoadBarrierStubC2;
|
||||
#endif // COMPILER2
|
||||
|
||||
// ZBarrierRelocationFormatLoadGoodBeforeTbX is used for both tbnz and tbz
|
||||
// They are patched in the same way, their immediate value has the same
|
||||
// structure
|
||||
const int ZBarrierRelocationFormatLoadGoodBeforeTbX = 0;
|
||||
const int ZBarrierRelocationFormatMarkBadBeforeMov = 1;
|
||||
const int ZBarrierRelocationFormatStoreGoodBeforeMov = 2;
|
||||
const int ZBarrierRelocationFormatStoreBadBeforeMov = 3;
|
||||
|
||||
class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
|
||||
public:
|
||||
virtual void load_at(MacroAssembler* masm,
|
||||
@ -52,7 +63,27 @@ public:
|
||||
Register tmp1,
|
||||
Register tmp2);
|
||||
|
||||
#ifdef ASSERT
|
||||
void store_barrier_fast(MacroAssembler* masm,
|
||||
Address ref_addr,
|
||||
Register rnew_zaddress,
|
||||
Register rnew_zpointer,
|
||||
Register rtmp,
|
||||
bool in_nmethod,
|
||||
bool is_atomic,
|
||||
Label& medium_path,
|
||||
Label& medium_path_continuation) const;
|
||||
|
||||
void store_barrier_medium(MacroAssembler* masm,
|
||||
Address ref_addr,
|
||||
Register rtmp1,
|
||||
Register rtmp2,
|
||||
Register rtmp3,
|
||||
bool is_native,
|
||||
bool is_atomic,
|
||||
Label& medium_path_continuation,
|
||||
Label& slow_path,
|
||||
Label& slow_path_continuation) const;
|
||||
|
||||
virtual void store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
@ -61,7 +92,6 @@ public:
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3);
|
||||
#endif // ASSERT
|
||||
|
||||
virtual void arraycopy_prologue(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
@ -71,23 +101,89 @@ public:
|
||||
Register count,
|
||||
RegSet saved_regs);
|
||||
|
||||
virtual void copy_load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
size_t bytes,
|
||||
Register dst1,
|
||||
Register dst2,
|
||||
Address src,
|
||||
Register tmp);
|
||||
|
||||
virtual void copy_store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
size_t bytes,
|
||||
Address dst,
|
||||
Register src1,
|
||||
Register src2,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3);
|
||||
|
||||
virtual void copy_load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
size_t bytes,
|
||||
FloatRegister dst1,
|
||||
FloatRegister dst2,
|
||||
Address src,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
FloatRegister vec_tmp);
|
||||
|
||||
virtual void copy_store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
size_t bytes,
|
||||
Address dst,
|
||||
FloatRegister src1,
|
||||
FloatRegister src2,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3,
|
||||
FloatRegister vec_tmp1,
|
||||
FloatRegister vec_tmp2,
|
||||
FloatRegister vec_tmp3);
|
||||
|
||||
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
|
||||
Register jni_env,
|
||||
Register robj,
|
||||
Register tmp,
|
||||
Label& slowpath);
|
||||
|
||||
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
|
||||
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_instruction_and_data_patch; }
|
||||
|
||||
void patch_barrier_relocation(address addr, int format);
|
||||
|
||||
void patch_barriers() {}
|
||||
|
||||
#ifdef COMPILER1
|
||||
void generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const;
|
||||
void generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const;
|
||||
void generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const;
|
||||
|
||||
void generate_c1_load_barrier(LIR_Assembler* ce,
|
||||
LIR_Opr ref,
|
||||
ZLoadBarrierStubC1* stub,
|
||||
bool on_non_strong) const;
|
||||
|
||||
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
ZLoadBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const;
|
||||
|
||||
void generate_c1_store_barrier(LIR_Assembler* ce,
|
||||
LIR_Address* addr,
|
||||
LIR_Opr new_zaddress,
|
||||
LIR_Opr new_zpointer,
|
||||
ZStoreBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_store_barrier_stub(LIR_Assembler* ce,
|
||||
ZStoreBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_store_barrier_runtime_stub(StubAssembler* sasm,
|
||||
bool self_healing) const;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
@ -96,9 +192,103 @@ public:
|
||||
|
||||
void generate_c2_load_barrier_stub(MacroAssembler* masm,
|
||||
ZLoadBarrierStubC2* stub) const;
|
||||
void generate_c2_store_barrier_stub(MacroAssembler* masm,
|
||||
ZStoreBarrierStubC2* stub) const;
|
||||
#endif // COMPILER2
|
||||
|
||||
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
|
||||
};
|
||||
|
||||
#ifdef COMPILER2

// Load barriers on aarch64 are implemented with a test-and-branch immediate instruction.
// This immediate has a max delta of 32K. Because of this the branch is implemented with
// a small jump, as follows:
//     __ tbz(ref, barrier_Relocation::unpatched, good);
//     __ b(*stub->entry());
//     __ bind(good);
//
// If we can guarantee that the *stub->entry() label is within 32K we can replace the above
// code with:
//     __ tbnz(ref, barrier_Relocation::unpatched, *stub->entry());
//
// From the branch shortening part of PhaseOutput we get a pessimistic code size that the code
// will not grow beyond.
//
// The stub objects are created and registered when the load barriers are emitted. The decision
// between emitting the long branch or the test and branch is done at this point and uses the
// pessimistic code size from branch shortening.
//
// After the code has been emitted the barrier set will emit all the stubs. When the stubs are
// emitted we know the real code size. Because of this the trampoline jump can be skipped in
// favour of emitting the stub directly if it does not interfere with the next trampoline stub
// (with respect to the test-and-branch distance).
//
// The algorithm for emitting the load barrier branches and stubs now has three versions,
// depending on the distance between the barrier and the stub.
// Version 1: Not reachable with a test-and-branch immediate
// Version 2: Reachable with a test-and-branch immediate via trampoline
// Version 3: Reachable with a test-and-branch immediate without trampoline
//
// +--------------------- Code ----------------------+
// |                       ***                        |
// | b(stub1)                                         | (Version 1)
// |                       ***                        |
// | tbnz(ref, barrier_Relocation::unpatched, tramp)  | (Version 2)
// |                       ***                        |
// | tbnz(ref, barrier_Relocation::unpatched, stub3)  | (Version 3)
// |                       ***                        |
// +--------------------- Stub ----------------------+
// | tramp: b(stub2)                                  | (Trampoline slot)
// | stub3:                                           |
// |   * Stub Code *                                  |
// | stub1:                                           |
// |   * Stub Code *                                  |
// | stub2:                                           |
// |   * Stub Code *                                  |
// +-------------------------------------------------+
//
// Version 1: Is emitted if the pessimistic distance between the branch instruction and the current
//            trampoline slot cannot fit in a test-and-branch immediate.
//
// Version 2: Is emitted if the distance between the branch instruction and the current trampoline
//            slot can fit in a test-and-branch immediate, but emitting the stub directly would
//            interfere with the next trampoline.
//
// Version 3: Same as version two, but emitting the stub directly (skipping the trampoline) does not
//            interfere with the next trampoline.
//
class ZLoadBarrierStubC2Aarch64 : public ZLoadBarrierStubC2 {
|
||||
private:
|
||||
Label _test_and_branch_reachable_entry;
|
||||
const int _offset;
|
||||
bool _deferred_emit;
|
||||
bool _test_and_branch_reachable;
|
||||
|
||||
ZLoadBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register ref, int offset);
|
||||
|
||||
int get_stub_size();
|
||||
public:
|
||||
static ZLoadBarrierStubC2Aarch64* create(const MachNode* node, Address ref_addr, Register ref, int offset);
|
||||
|
||||
virtual void emit_code(MacroAssembler& masm);
|
||||
bool is_test_and_branch_reachable();
|
||||
Label* entry();
|
||||
};
|
||||
|
||||
|
||||
class ZStoreBarrierStubC2Aarch64 : public ZStoreBarrierStubC2 {
|
||||
private:
|
||||
bool _deferred_emit;
|
||||
|
||||
ZStoreBarrierStubC2Aarch64(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic);
|
||||
|
||||
public:
|
||||
static ZStoreBarrierStubC2Aarch64* create(const MachNode* node, Address ref_addr, Register new_zaddress, Register new_zpointer, bool is_native, bool is_atomic);
|
||||
|
||||
virtual void emit_code(MacroAssembler& masm);
|
||||
};
|
||||
|
||||
#endif // COMPILER2
|
||||
|
||||
#endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,10 +24,8 @@
|
||||
#ifndef CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
|
||||
#define CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
|
||||
|
||||
const size_t ZPlatformHeapViews = 3;
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
const size_t ZPlatformCacheLineSize = 64;
|
||||
|
||||
size_t ZPlatformAddressOffsetBits();
|
||||
size_t ZPlatformAddressMetadataShift();
|
||||
|
||||
#endif // CPU_AARCH64_GC_Z_ZGLOBALS_AARCH64_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
//
|
||||
// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
@ -31,30 +31,79 @@ source_hpp %{

source %{

static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
  if (barrier_data == ZLoadBarrierElided) {
    return;
  }
  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
  __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
  __ andr(tmp, tmp, ref);
  __ cbnz(tmp, *stub->entry());
#include "gc/z/zBarrierSetAssembler.hpp"

static void z_color(MacroAssembler& _masm, const MachNode* node, Register dst, Register src) {
  assert_different_registers(src, dst);
  __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBeforeMov);
  __ movzw(dst, barrier_Relocation::unpatched);
  __ orr(dst, dst, src, Assembler::LSL, ZPointerLoadShift);
}

static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) {
  __ lsr(ref, ref, ZPointerLoadShift);
}

static void z_keep_alive_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
  __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadBeforeMov);
  __ movzw(tmp, barrier_Relocation::unpatched);
  __ tst(ref, tmp);
  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
  __ br(Assembler::NE, *stub->entry());
  z_uncolor(_masm, node, ref);
  __ bind(*stub->continuation());
}

static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong);
  __ b(*stub->entry());
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
  Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
  const bool on_non_strong =
      ((node->barrier_data() & ZBarrierWeak) != 0) ||
      ((node->barrier_data() & ZBarrierPhantom) != 0);

  if (on_non_strong) {
    z_keep_alive_load_barrier(_masm, node, ref_addr, ref, tmp);
    return;
  }

  if (node->barrier_data() == ZBarrierElided) {
    z_uncolor(_masm, node, ref);
    return;
  }

  ZLoadBarrierStubC2Aarch64* const stub = ZLoadBarrierStubC2Aarch64::create(node, ref_addr, ref, __ offset());
  if (stub->is_test_and_branch_reachable()) {
    __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeTbX);
    __ tbnz(ref, barrier_Relocation::unpatched, *stub->entry());
  } else {
    Label good;
    __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeTbX);
    __ tbz(ref, barrier_Relocation::unpatched, good);
    __ b(*stub->entry());
    __ bind(good);
  }
  z_uncolor(_masm, node, ref);
  __ bind(*stub->continuation());
}

static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
  Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
  if (node->barrier_data() == ZBarrierElided) {
    z_color(_masm, node, rnew_zpointer, rnew_zaddress);
  } else {
    bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
    ZStoreBarrierStubC2Aarch64* const stub = ZStoreBarrierStubC2Aarch64::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
    ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
    bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
  }
}

%}
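For readers following the new generational barrier helpers above: z_color builds a colored pointer by shifting the raw address up and OR-ing metadata bits into the low end, and z_uncolor recovers the address with a single logical shift right. A minimal standalone sketch of that idea follows; the shift amount and bit layout are illustrative assumptions, not the real ZGC constants:

#include <cstdint>

constexpr unsigned kLoadShift = 16;                 // stand-in for ZPointerLoadShift

static uint64_t color(uint64_t address, uint64_t color_bits) {
  return (address << kLoadShift) | color_bits;      // mirrors the orr with LSL in z_color
}

static uint64_t uncolor(uint64_t colored) {
  return colored >> kLoadShift;                     // mirrors the lsr in z_uncolor
}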

// Load Pointer
instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
%{
  match(Set dst (LoadP mem));
  predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0));
  predicate(UseZGC && ZGenerational && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  effect(TEMP dst, KILL cr);

  ins_cost(4 * INSN_COST);
@ -64,7 +113,7 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
  ins_encode %{
    const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    __ ldr($dst$$Register, ref_addr);
    z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data());
    z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch1);
  %}

  ins_pipe(iload_reg_mem);
@ -74,7 +123,7 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
%{
  match(Set dst (LoadP mem));
  predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  predicate(UseZGC && ZGenerational && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  effect(TEMP dst, KILL cr);

  ins_cost(VOLATILE_REF_COST);
@ -82,18 +131,53 @@ instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg
  format %{ "ldar $dst, $mem\t" %}

  ins_encode %{
    const Address ref_addr = Address($mem$$Register);
    __ ldar($dst$$Register, $mem$$Register);
    z_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data());
    z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch1);
  %}

  ins_pipe(pipe_serial);
%}

instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
// Store Pointer
instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
%{
  predicate(UseZGC && ZGenerational && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp, KILL cr);

  ins_cost(125); // XXX
  format %{ "movq $mem, $src\t# ptr" %}
  ins_encode %{
    const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
    z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */);
    __ str($tmp$$Register, ref_addr);
  %}
  ins_pipe(pipe_serial);
%}

// Store Pointer Volatile
instruct zStorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
%{
  predicate(UseZGC && ZGenerational && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp, KILL cr);

  ins_cost(125); // XXX
  format %{ "movq $mem, $src\t# ptr" %}
  ins_encode %{
    const Address ref_addr = Address($mem$$Register);
    z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */);
    __ stlr($tmp$$Register, $mem$$Register);
  %}
  ins_pipe(pipe_serial);
%}

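In both store rules above, z_store_barrier leaves the already-colored new value in $tmp and the plain str/stlr then writes that register, so all barrier work happens before the store instruction. Below is a hedged, conceptual C++ sketch of that fast path; the helper names and the mask are assumptions for illustration, not the HotSpot implementation:

#include <cstdint>

extern void store_barrier_slow_path(uint64_t* field);    // hypothetical runtime entry
extern uint64_t color(uint64_t address, uint64_t bits);  // see the coloring sketch above

static void barriered_store(uint64_t* field, uint64_t new_address, uint64_t store_good_mask) {
  if ((*field & store_good_mask) == 0) {          // current contents are not store-good
    store_barrier_slow_path(field);               // heal the field (mark/remember the old value)
  }
  *field = color(new_address, store_good_mask);   // publish the colored pointer
}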
instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(KILL cr, TEMP_DEF res);
  predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);

  ins_cost(2 * VOLATILE_REF_COST);

@ -102,108 +186,83 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva

  ins_encode %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, rscratch2);
    Address ref_addr($mem$$Register);
    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
    __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
      __ andr(rscratch1, rscratch1, rscratch2);
      __ cbz(rscratch1, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
                 false /* acquire */, true /* release */, false /* weak */, rscratch2);
      __ cset($res$$Register, Assembler::EQ);
      __ bind(good);
    }
  %}

  ins_pipe(pipe_slow);
%}

instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
  effect(KILL cr, TEMP_DEF res);
  predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $mem, $oldval, $newval\n\t"
            "cset $res, EQ" %}
  format %{ "cmpxchg $mem, $oldval, $newval\n\t"
            "cset $res, EQ" %}

  ins_encode %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, rscratch2);
    Address ref_addr($mem$$Register);
    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
    __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
      __ andr(rscratch1, rscratch1, rscratch2);
      __ cbz(rscratch1, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
                 true /* acquire */, true /* release */, false /* weak */, rscratch2);
      __ cset($res$$Register, Assembler::EQ);
      __ bind(good);
    }
  %}

  ins_pipe(pipe_slow);
%}

instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(TEMP_DEF res, KILL cr);
  predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
  format %{ "cmpxchg $mem, $oldval, $newval\n\t"
            "cset $res, EQ" %}

  ins_encode %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
    Address ref_addr($mem$$Register);
    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
    __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
      __ andr(rscratch1, rscratch1, $res$$Register);
      __ cbz(rscratch1, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
                 false /* acquire */, true /* release */, false /* weak */, $res$$Register);
      __ bind(good);
    }
    z_uncolor(_masm, this, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
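The compare-and-exchange rules color $oldval before the cmpxchg because the word in memory is a colored pointer: unless the expected value carries the same color bits, the comparison would fail even when the addresses match, and the raw result has to be uncolored before it is handed back. A simplified C++ illustration of that shape (it omits the healing done by the store barrier, and the helpers are the assumed ones from the earlier sketches):

#include <atomic>
#include <cstdint>

extern uint64_t color(uint64_t address, uint64_t bits);
extern uint64_t uncolor(uint64_t colored);

static uint64_t cas_reference(std::atomic<uint64_t>& field, uint64_t expected_addr,
                              uint64_t new_addr, uint64_t good_bits) {
  uint64_t expected = color(expected_addr, good_bits);              // mirrors z_color into oldval_tmp
  field.compare_exchange_strong(expected, color(new_addr, good_bits));
  return uncolor(expected);                                         // mirrors the final z_uncolor of $res
}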

instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(TEMP_DEF res, KILL cr);
  predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
  format %{ "cmpxchg $mem, $oldval, $newval\n\t"
            "cset $res, EQ" %}

  ins_encode %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
    Address ref_addr($mem$$Register);
    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
    __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
    if (barrier_data() != ZLoadBarrierElided) {
      Label good;
      __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
      __ andr(rscratch1, rscratch1, $res$$Register);
      __ cbz(rscratch1, good);
      z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
      __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
                 true /* acquire */, true /* release */, false /* weak */, $res$$Register);
      __ bind(good);
    }
    z_uncolor(_masm, this, $res$$Register);
  %}

  ins_pipe(pipe_slow);
@ -211,16 +270,17 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg

instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
  match(Set prev (GetAndSetP mem newv));
  predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP_DEF prev, KILL cr);
  predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP prev, KILL cr);

  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "atomic_xchg $prev, $newv, [$mem]" %}

  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
    z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
    z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */);
    __ atomic_xchg($prev$$Register, $prev$$Register, $mem$$Register);
    z_uncolor(_masm, this, $prev$$Register);
  %}

  ins_pipe(pipe_serial);
@ -228,16 +288,18 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{

instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
  match(Set prev (GetAndSetP mem newv));
  predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() != 0));
  effect(TEMP_DEF prev, KILL cr);
  predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP prev, KILL cr);

  ins_cost(VOLATILE_REF_COST);
  ins_cost(2 * VOLATILE_REF_COST);

  format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
  format %{ "atomic_xchg $prev, $newv, [$mem]" %}

  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
    z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
    z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */);
    __ atomic_xchgal($prev$$Register, $prev$$Register, $mem$$Register);
    z_uncolor(_masm, this, $prev$$Register);
  %}

  ins_pipe(pipe_serial);
%}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -33,7 +33,8 @@
    // the two lowest offset bits can always be discarded.
    offset_unit = 4,
    // Must be at least 1 for RelocInfo::narrow_oop_in_const.
    format_width = 1
    // Must be at least 2 for ZGC GC barrier patching.
    format_width = 2
  };

 public:

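The bump from format_width = 1 to 2 reflects that the relocation format field now has to distinguish several ZGC barrier patch sites (the load-good test-and-branch, the mark-bad mov and the store-good mov seen earlier in this patch), which no longer fit in a single bit. Purely as an illustration of the encoding-space argument; the names below are made up for the example, not the HotSpot constants:

// Four distinct patchable instruction shapes need two format bits (values 0..3).
enum ExampleBarrierRelocationFormat {
  kLoadGoodBeforeTbX  = 0,
  kMarkBadBeforeMov   = 1,
  kStoreGoodBeforeMov = 2,
  kStoreBadBeforeMov  = 3   // hypothetical fourth format, for illustration only
};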
@ -38,8 +38,8 @@ enum platform_dependent_constants {
  // simply increase sizes if too small (assembler will crash if too small)
  _initial_stubs_code_size = 10000,
  _continuation_stubs_code_size = 2000,
  _compiler_stubs_code_size = 30000,
  _final_stubs_code_size = 20000
  _compiler_stubs_code_size = 30000 ZGC_ONLY(+10000),
  _final_stubs_code_size = 20000 ZGC_ONLY(+60000)
};

class aarch64 {

@ -237,10 +237,12 @@ class Assembler : public AbstractAssembler {

  enum opcdxos_masks {
    XL_FORM_OPCODE_MASK = (63u << OPCODE_SHIFT) | (1023u << 1),
    ANDI_OPCODE_MASK = (63u << OPCODE_SHIFT),
    ADDI_OPCODE_MASK = (63u << OPCODE_SHIFT),
    ADDIS_OPCODE_MASK = (63u << OPCODE_SHIFT),
    BXX_OPCODE_MASK = (63u << OPCODE_SHIFT),
    BCXX_OPCODE_MASK = (63u << OPCODE_SHIFT),
    CMPLI_OPCODE_MASK = (63u << OPCODE_SHIFT),
    // trap instructions
    TDI_OPCODE_MASK = (63u << OPCODE_SHIFT),
    TWI_OPCODE_MASK = (63u << OPCODE_SHIFT),
@ -1478,6 +1480,9 @@ class Assembler : public AbstractAssembler {
  static bool is_addis(int x) {
    return ADDIS_OPCODE == (x & ADDIS_OPCODE_MASK);
  }
  static bool is_andi(int x) {
    return ANDI_OPCODE == (x & ANDI_OPCODE_MASK);
  }
  static bool is_bxx(int x) {
    return BXX_OPCODE == (x & BXX_OPCODE_MASK);
  }
@ -1502,6 +1507,9 @@ class Assembler : public AbstractAssembler {
  static bool is_bclr(int x) {
    return BCLR_OPCODE == (x & XL_FORM_OPCODE_MASK);
  }
  static bool is_cmpli(int x) {
    return CMPLI_OPCODE == (x & CMPLI_OPCODE_MASK);
  }
  static bool is_li(int x) {
    return is_addi(x) && inv_ra_field(x)==0;
  }

585
src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.cpp
Normal file
@ -0,0 +1,585 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2022 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "asm/register.hpp"
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "code/codeBlob.hpp"
|
||||
#include "code/vmreg.inline.hpp"
|
||||
#include "gc/x/xBarrier.inline.hpp"
|
||||
#include "gc/x/xBarrierSet.hpp"
|
||||
#include "gc/x/xBarrierSetAssembler.hpp"
|
||||
#include "gc/x/xBarrierSetRuntime.hpp"
|
||||
#include "gc/x/xThreadLocalData.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "register_ppc.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_LIRAssembler.hpp"
|
||||
#include "c1/c1_MacroAssembler.hpp"
|
||||
#include "gc/x/c1/xBarrierSetC1.hpp"
|
||||
#endif // COMPILER1
|
||||
#ifdef COMPILER2
|
||||
#include "gc/x/c2/xBarrierSetC2.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
void XBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register base, RegisterOrConstant ind_or_offs, Register dst,
|
||||
Register tmp1, Register tmp2,
|
||||
MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) {
|
||||
__ block_comment("load_at (zgc) {");
|
||||
|
||||
// Check whether a special gc barrier is required for this particular load
|
||||
// (e.g. whether it's a reference load or not)
|
||||
if (!XBarrierSet::barrier_needed(decorators, type)) {
|
||||
BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
|
||||
tmp1, tmp2, preservation_level, L_handle_null);
|
||||
return;
|
||||
}
|
||||
|
||||
if (ind_or_offs.is_register()) {
|
||||
assert_different_registers(base, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg);
|
||||
assert_different_registers(dst, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg);
|
||||
} else {
|
||||
assert_different_registers(base, tmp1, tmp2, R0, noreg);
|
||||
assert_different_registers(dst, tmp1, tmp2, R0, noreg);
|
||||
}
|
||||
|
||||
/* ==== Load the pointer using the standard implementation for the actual heap access
|
||||
and the decompression of compressed pointers ==== */
|
||||
// Result of 'load_at' (standard implementation) will be written back to 'dst'.
|
||||
// As 'base' is required for the C-call, it must be reserved in case of a register clash.
|
||||
Register saved_base = base;
|
||||
if (base == dst) {
|
||||
__ mr(tmp2, base);
|
||||
saved_base = tmp2;
|
||||
}
|
||||
|
||||
BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
|
||||
tmp1, noreg, preservation_level, L_handle_null);
|
||||
|
||||
/* ==== Check whether pointer is dirty ==== */
|
||||
Label skip_barrier;
|
||||
|
||||
// Load bad mask into scratch register.
|
||||
__ ld(tmp1, (intptr_t) XThreadLocalData::address_bad_mask_offset(), R16_thread);
|
||||
|
||||
// The color bits of the to-be-tested pointer do not have to be equivalent to the 'bad_mask' testing bits.
|
||||
// A pointer is classified as dirty if any of the color bits that also match the bad mask is set.
|
||||
// Conversely, it follows that the logical AND of the bad mask and the pointer must be zero
|
||||
// if the pointer is not dirty.
|
||||
// Only dirty pointers must be processed by this barrier, so we can skip it in case the latter condition holds true.
|
||||
__ and_(tmp1, tmp1, dst);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
|
||||
/* ==== Invoke barrier ==== */
|
||||
int nbytes_save = 0;
|
||||
|
||||
const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
|
||||
const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
|
||||
const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;
|
||||
|
||||
const bool preserve_R3 = dst != R3_ARG1;
|
||||
|
||||
if (needs_frame) {
|
||||
if (preserve_gp_registers) {
|
||||
nbytes_save = (preserve_fp_registers
|
||||
? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs
|
||||
: MacroAssembler::num_volatile_gp_regs) * BytesPerWord;
|
||||
nbytes_save -= preserve_R3 ? 0 : BytesPerWord;
|
||||
__ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
|
||||
}
|
||||
|
||||
__ save_LR_CR(tmp1);
|
||||
__ push_frame_reg_args(nbytes_save, tmp1);
|
||||
}
|
||||
|
||||
// Setup arguments
|
||||
if (saved_base != R3_ARG1) {
|
||||
__ mr_if_needed(R3_ARG1, dst);
|
||||
__ add(R4_ARG2, ind_or_offs, saved_base);
|
||||
} else if (dst != R4_ARG2) {
|
||||
__ add(R4_ARG2, ind_or_offs, saved_base);
|
||||
__ mr(R3_ARG1, dst);
|
||||
} else {
|
||||
__ add(R0, ind_or_offs, saved_base);
|
||||
__ mr(R3_ARG1, dst);
|
||||
__ mr(R4_ARG2, R0);
|
||||
}
|
||||
|
||||
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));
|
||||
|
||||
Register result = R3_RET;
|
||||
if (needs_frame) {
|
||||
__ pop_frame();
|
||||
__ restore_LR_CR(tmp1);
|
||||
|
||||
if (preserve_R3) {
|
||||
__ mr(R0, R3_RET);
|
||||
result = R0;
|
||||
}
|
||||
|
||||
if (preserve_gp_registers) {
|
||||
__ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
|
||||
}
|
||||
}
|
||||
__ mr_if_needed(dst, result);
|
||||
|
||||
__ bind(skip_barrier);
|
||||
__ block_comment("} load_at (zgc)");
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
// The Z store barrier only verifies the pointers it is operating on and is thus a sole debugging measure.
|
||||
void XBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register base, RegisterOrConstant ind_or_offs, Register val,
|
||||
Register tmp1, Register tmp2, Register tmp3,
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
__ block_comment("store_at (zgc) {");
|
||||
|
||||
// If the 'val' register is 'noreg', the to-be-stored value is a null pointer.
|
||||
if (is_reference_type(type) && val != noreg) {
|
||||
__ ld(tmp1, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
|
||||
__ and_(tmp1, tmp1, val);
|
||||
__ asm_assert_eq("Detected dirty pointer on the heap in Z store barrier");
|
||||
}
|
||||
|
||||
// Store value
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level);
|
||||
|
||||
__ block_comment("} store_at (zgc)");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType component_type,
|
||||
Register src, Register dst, Register count,
|
||||
Register preserve1, Register preserve2) {
|
||||
__ block_comment("arraycopy_prologue (zgc) {");
|
||||
|
||||
/* ==== Check whether a special gc barrier is required for this particular load ==== */
|
||||
if (!is_reference_type(component_type)) {
|
||||
return;
|
||||
}
|
||||
|
||||
Label skip_barrier;
|
||||
|
||||
// Fast path: Array is of length zero
|
||||
__ cmpdi(CCR0, count, 0);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
|
||||
/* ==== Ensure register sanity ==== */
|
||||
Register tmp_R11 = R11_scratch1;
|
||||
|
||||
assert_different_registers(src, dst, count, tmp_R11, noreg);
|
||||
if (preserve1 != noreg) {
|
||||
// Not technically required, but unlikely being intended.
|
||||
assert_different_registers(preserve1, preserve2);
|
||||
}
|
||||
|
||||
/* ==== Invoke barrier (slowpath) ==== */
|
||||
int nbytes_save = 0;
|
||||
|
||||
{
|
||||
assert(!noreg->is_volatile(), "sanity");
|
||||
|
||||
if (preserve1->is_volatile()) {
|
||||
__ std(preserve1, -BytesPerWord * ++nbytes_save, R1_SP);
|
||||
}
|
||||
|
||||
if (preserve2->is_volatile() && preserve1 != preserve2) {
|
||||
__ std(preserve2, -BytesPerWord * ++nbytes_save, R1_SP);
|
||||
}
|
||||
|
||||
__ std(src, -BytesPerWord * ++nbytes_save, R1_SP);
|
||||
__ std(dst, -BytesPerWord * ++nbytes_save, R1_SP);
|
||||
__ std(count, -BytesPerWord * ++nbytes_save, R1_SP);
|
||||
|
||||
__ save_LR_CR(tmp_R11);
|
||||
__ push_frame_reg_args(nbytes_save, tmp_R11);
|
||||
}
|
||||
|
||||
// XBarrierSetRuntime::load_barrier_on_oop_array_addr(src, count)
|
||||
if (count == R3_ARG1) {
|
||||
if (src == R4_ARG2) {
|
||||
// Arguments are provided in reverse order
|
||||
__ mr(tmp_R11, count);
|
||||
__ mr(R3_ARG1, src);
|
||||
__ mr(R4_ARG2, tmp_R11);
|
||||
} else {
|
||||
__ mr(R4_ARG2, count);
|
||||
__ mr(R3_ARG1, src);
|
||||
}
|
||||
} else {
|
||||
__ mr_if_needed(R3_ARG1, src);
|
||||
__ mr_if_needed(R4_ARG2, count);
|
||||
}
|
||||
|
||||
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr());
|
||||
|
||||
__ pop_frame();
|
||||
__ restore_LR_CR(tmp_R11);
|
||||
|
||||
{
|
||||
__ ld(count, -BytesPerWord * nbytes_save--, R1_SP);
|
||||
__ ld(dst, -BytesPerWord * nbytes_save--, R1_SP);
|
||||
__ ld(src, -BytesPerWord * nbytes_save--, R1_SP);
|
||||
|
||||
if (preserve2->is_volatile() && preserve1 != preserve2) {
|
||||
__ ld(preserve2, -BytesPerWord * nbytes_save--, R1_SP);
|
||||
}
|
||||
|
||||
if (preserve1->is_volatile()) {
|
||||
__ ld(preserve1, -BytesPerWord * nbytes_save--, R1_SP);
|
||||
}
|
||||
}
|
||||
|
||||
__ bind(skip_barrier);
|
||||
|
||||
__ block_comment("} arraycopy_prologue (zgc)");
|
||||
}
|
||||
|
||||
void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
|
||||
Register obj, Register tmp, Label& slowpath) {
|
||||
__ block_comment("try_resolve_jobject_in_native (zgc) {");
|
||||
|
||||
assert_different_registers(jni_env, obj, tmp);
|
||||
|
||||
// Resolve the pointer using the standard implementation for weak tag handling and pointer verification.
|
||||
BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath);
|
||||
|
||||
// Check whether pointer is dirty.
|
||||
__ ld(tmp,
|
||||
in_bytes(XThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()),
|
||||
jni_env);
|
||||
|
||||
__ and_(tmp, obj, tmp);
|
||||
__ bne(CCR0, slowpath);
|
||||
|
||||
__ block_comment("} try_resolve_jobject_in_native (zgc)");
|
||||
}
|
||||
|
||||
#undef __
|
||||
|
||||
#ifdef COMPILER1
|
||||
#define __ ce->masm()->
|
||||
|
||||
// Code emitted by LIR node "LIR_OpXLoadBarrierTest" which in turn is emitted by XBarrierSetC1::load_barrier.
|
||||
// The actual compare and branch instructions are represented as stand-alone LIR nodes.
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const {
|
||||
__ block_comment("load_barrier_test (zgc) {");
|
||||
|
||||
__ ld(R0, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
|
||||
__ andr(R0, R0, ref->as_pointer_register());
|
||||
__ cmpdi(CCR5 /* as mandated by LIR node */, R0, 0);
|
||||
|
||||
__ block_comment("} load_barrier_test (zgc)");
|
||||
}
|
||||
|
||||
// Code emitted by code stub "XLoadBarrierStubC1" which in turn is emitted by XBarrierSetC1::load_barrier.
|
||||
// Invokes the runtime stub which is defined just below.
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
XLoadBarrierStubC1* stub) const {
|
||||
__ block_comment("c1_load_barrier_stub (zgc) {");
|
||||
|
||||
__ bind(*stub->entry());
|
||||
|
||||
/* ==== Determine relevant data registers and ensure register sanity ==== */
|
||||
Register ref = stub->ref()->as_register();
|
||||
Register ref_addr = noreg;
|
||||
|
||||
// Determine reference address
|
||||
if (stub->tmp()->is_valid()) {
|
||||
// 'tmp' register is given, so address might have an index or a displacement.
|
||||
ce->leal(stub->ref_addr(), stub->tmp());
|
||||
ref_addr = stub->tmp()->as_pointer_register();
|
||||
} else {
|
||||
// 'tmp' register is not given, so address must have neither an index nor a displacement.
|
||||
// The address' base register is thus usable as-is.
|
||||
assert(stub->ref_addr()->as_address_ptr()->disp() == 0, "illegal displacement");
|
||||
assert(!stub->ref_addr()->as_address_ptr()->index()->is_valid(), "illegal index");
|
||||
|
||||
ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
|
||||
}
|
||||
|
||||
assert_different_registers(ref, ref_addr, R0, noreg);
|
||||
|
||||
/* ==== Invoke stub ==== */
|
||||
// Pass arguments via stack. The stack pointer will be bumped by the stub.
|
||||
__ std(ref, (intptr_t) -1 * BytesPerWord, R1_SP);
|
||||
__ std(ref_addr, (intptr_t) -2 * BytesPerWord, R1_SP);
|
||||
|
||||
__ load_const_optimized(R0, stub->runtime_stub());
|
||||
__ call_stub(R0);
|
||||
|
||||
// The runtime stub passes the result via the R0 register, overriding the previously-loaded stub address.
|
||||
__ mr_if_needed(ref, R0);
|
||||
__ b(*stub->continuation());
|
||||
|
||||
__ block_comment("} c1_load_barrier_stub (zgc)");
|
||||
}
|
||||
|
||||
#undef __
|
||||
#define __ sasm->
|
||||
|
||||
// Code emitted by runtime code stub which in turn is emitted by XBarrierSetC1::generate_c1_runtime_stubs.
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const {
|
||||
__ block_comment("c1_load_barrier_runtime_stub (zgc) {");
|
||||
|
||||
const int stack_parameters = 2;
|
||||
const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_parameters) * BytesPerWord;
|
||||
|
||||
__ save_volatile_gprs(R1_SP, -nbytes_save);
|
||||
__ save_LR_CR(R0);
|
||||
|
||||
// Load arguments back again from the stack.
|
||||
__ ld(R3_ARG1, (intptr_t) -1 * BytesPerWord, R1_SP); // ref
|
||||
__ ld(R4_ARG2, (intptr_t) -2 * BytesPerWord, R1_SP); // ref_addr
|
||||
|
||||
__ push_frame_reg_args(nbytes_save, R0);
|
||||
|
||||
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));
|
||||
|
||||
__ verify_oop(R3_RET, "Bad pointer after barrier invocation");
|
||||
__ mr(R0, R3_RET);
|
||||
|
||||
__ pop_frame();
|
||||
__ restore_LR_CR(R3_RET);
|
||||
__ restore_volatile_gprs(R1_SP, -nbytes_save);
|
||||
|
||||
__ blr();
|
||||
|
||||
__ block_comment("} c1_load_barrier_runtime_stub (zgc)");
|
||||
}
|
||||
|
||||
#undef __
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
|
||||
OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) const {
|
||||
if (!OptoReg::is_reg(opto_reg)) {
|
||||
return OptoReg::Bad;
|
||||
}
|
||||
|
||||
VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
|
||||
if ((vm_reg->is_Register() || vm_reg ->is_FloatRegister()) && (opto_reg & 1) != 0) {
|
||||
return OptoReg::Bad;
|
||||
}
|
||||
|
||||
return opto_reg;
|
||||
}
|
||||
|
||||
#define __ _masm->
|
||||
|
||||
class XSaveLiveRegisters {
|
||||
MacroAssembler* _masm;
|
||||
RegMask _reg_mask;
|
||||
Register _result_reg;
|
||||
int _frame_size;
|
||||
|
||||
public:
|
||||
XSaveLiveRegisters(MacroAssembler *masm, XLoadBarrierStubC2 *stub)
|
||||
: _masm(masm), _reg_mask(stub->live()), _result_reg(stub->ref()) {
|
||||
|
||||
const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord;
|
||||
_frame_size = align_up(register_save_size, frame::alignment_in_bytes)
|
||||
+ frame::native_abi_reg_args_size;
|
||||
|
||||
__ save_LR_CR(R0);
|
||||
__ push_frame(_frame_size, R0);
|
||||
|
||||
iterate_over_register_mask(ACTION_SAVE, _frame_size);
|
||||
}
|
||||
|
||||
~XSaveLiveRegisters() {
|
||||
iterate_over_register_mask(ACTION_RESTORE, _frame_size);
|
||||
|
||||
__ addi(R1_SP, R1_SP, _frame_size);
|
||||
__ restore_LR_CR(R0);
|
||||
}
|
||||
|
||||
private:
|
||||
enum IterationAction : int {
|
||||
ACTION_SAVE,
|
||||
ACTION_RESTORE,
|
||||
ACTION_COUNT_ONLY
|
||||
};
|
||||
|
||||
int iterate_over_register_mask(IterationAction action, int offset = 0) {
|
||||
int reg_save_index = 0;
|
||||
RegMaskIterator live_regs_iterator(_reg_mask);
|
||||
|
||||
while(live_regs_iterator.has_next()) {
|
||||
const OptoReg::Name opto_reg = live_regs_iterator.next();
|
||||
|
||||
// Filter out stack slots (spilled registers, i.e., stack-allocated registers).
|
||||
if (!OptoReg::is_reg(opto_reg)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
|
||||
if (vm_reg->is_Register()) {
|
||||
Register std_reg = vm_reg->as_Register();
|
||||
|
||||
// '_result_reg' will hold the end result of the operation. Its content must thus not be preserved.
|
||||
if (std_reg == _result_reg) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (std_reg->encoding() >= R2->encoding() && std_reg->encoding() <= R12->encoding()) {
|
||||
reg_save_index++;
|
||||
|
||||
if (action == ACTION_SAVE) {
|
||||
_masm->std(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
|
||||
} else if (action == ACTION_RESTORE) {
|
||||
_masm->ld(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
|
||||
} else {
|
||||
assert(action == ACTION_COUNT_ONLY, "Sanity");
|
||||
}
|
||||
}
|
||||
} else if (vm_reg->is_FloatRegister()) {
|
||||
FloatRegister fp_reg = vm_reg->as_FloatRegister();
|
||||
if (fp_reg->encoding() >= F0->encoding() && fp_reg->encoding() <= F13->encoding()) {
|
||||
reg_save_index++;
|
||||
|
||||
if (action == ACTION_SAVE) {
|
||||
_masm->stfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
|
||||
} else if (action == ACTION_RESTORE) {
|
||||
_masm->lfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
|
||||
} else {
|
||||
assert(action == ACTION_COUNT_ONLY, "Sanity");
|
||||
}
|
||||
}
|
||||
} else if (vm_reg->is_ConditionRegister()) {
|
||||
// NOP. Conditions registers are covered by save_LR_CR
|
||||
} else if (vm_reg->is_VectorSRegister()) {
|
||||
assert(SuperwordUseVSX, "or should not reach here");
|
||||
VectorSRegister vs_reg = vm_reg->as_VectorSRegister();
|
||||
if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) {
|
||||
reg_save_index += 2;
|
||||
|
||||
Register spill_addr = R0;
|
||||
if (action == ACTION_SAVE) {
|
||||
_masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
|
||||
_masm->stxvd2x(vs_reg, spill_addr);
|
||||
} else if (action == ACTION_RESTORE) {
|
||||
_masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
|
||||
_masm->lxvd2x(vs_reg, spill_addr);
|
||||
} else {
|
||||
assert(action == ACTION_COUNT_ONLY, "Sanity");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (vm_reg->is_SpecialRegister()) {
|
||||
fatal("Special registers are unsupported. Found register %s", vm_reg->name());
|
||||
} else {
|
||||
fatal("Register type is not known");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return reg_save_index;
|
||||
}
|
||||
};
|
||||
|
||||
#undef __
|
||||
#define __ _masm->
|
||||
|
||||
class XSetupArguments {
|
||||
MacroAssembler* const _masm;
|
||||
const Register _ref;
|
||||
const Address _ref_addr;
|
||||
|
||||
public:
|
||||
XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
|
||||
_masm(masm),
|
||||
_ref(stub->ref()),
|
||||
_ref_addr(stub->ref_addr()) {
|
||||
|
||||
// Desired register/argument configuration:
|
||||
// _ref: R3_ARG1
|
||||
// _ref_addr: R4_ARG2
|
||||
|
||||
// '_ref_addr' can be unspecified. In that case, the barrier will not heal the reference.
|
||||
if (_ref_addr.base() == noreg) {
|
||||
assert_different_registers(_ref, R0, noreg);
|
||||
|
||||
__ mr_if_needed(R3_ARG1, _ref);
|
||||
__ li(R4_ARG2, 0);
|
||||
} else {
|
||||
assert_different_registers(_ref, _ref_addr.base(), R0, noreg);
|
||||
assert(!_ref_addr.index()->is_valid(), "reference addresses must not contain an index component");
|
||||
|
||||
if (_ref != R4_ARG2) {
|
||||
// Calculate address first as the address' base register might clash with R4_ARG2
|
||||
__ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp());
|
||||
__ mr_if_needed(R3_ARG1, _ref);
|
||||
} else if (_ref_addr.base() != R3_ARG1) {
|
||||
__ mr(R3_ARG1, _ref);
|
||||
__ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp()); // Clobbering _ref
|
||||
} else {
|
||||
// Arguments are provided in inverse order (i.e. _ref == R4_ARG2, _ref_addr == R3_ARG1)
|
||||
__ mr(R0, _ref);
|
||||
__ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp());
|
||||
__ mr(R3_ARG1, R0);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
|
||||
__ block_comment("generate_c2_load_barrier_stub (zgc) {");
|
||||
|
||||
__ bind(*stub->entry());
|
||||
|
||||
Register ref = stub->ref();
|
||||
Address ref_addr = stub->ref_addr();
|
||||
|
||||
assert_different_registers(ref, ref_addr.base());
|
||||
|
||||
{
|
||||
XSaveLiveRegisters save_live_registers(masm, stub);
|
||||
XSetupArguments setup_arguments(masm, stub);
|
||||
|
||||
__ call_VM_leaf(stub->slow_path());
|
||||
__ mr_if_needed(ref, R3_RET);
|
||||
}
|
||||
|
||||
__ b(*stub->continuation());
|
||||
|
||||
__ block_comment("} generate_c2_load_barrier_stub (zgc)");
|
||||
}
|
||||
|
||||
#undef __
|
||||
#endif // COMPILER2
|
93
src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp
Normal file
93
src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp
Normal file
@ -0,0 +1,93 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2022 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP
|
||||
#define CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP
|
||||
|
||||
#include "code/vmreg.hpp"
|
||||
#include "oops/accessDecorators.hpp"
|
||||
#ifdef COMPILER2
|
||||
#include "opto/optoreg.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
class LIR_Assembler;
|
||||
class LIR_Opr;
|
||||
class StubAssembler;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class Node;
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
class XLoadBarrierStubC1;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class XLoadBarrierStubC2;
|
||||
#endif // COMPILER2
|
||||
|
||||
class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
|
||||
public:
|
||||
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register base, RegisterOrConstant ind_or_offs, Register dst,
|
||||
Register tmp1, Register tmp2,
|
||||
MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = NULL);
|
||||
|
||||
#ifdef ASSERT
|
||||
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register base, RegisterOrConstant ind_or_offs, Register val,
|
||||
Register tmp1, Register tmp2, Register tmp3,
|
||||
MacroAssembler::PreservationLevel preservation_level);
|
||||
#endif // ASSERT
|
||||
|
||||
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register src, Register dst, Register count,
|
||||
Register preserve1, Register preserve2);
|
||||
|
||||
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
|
||||
Register obj, Register tmp, Label& slowpath);
|
||||
|
||||
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
|
||||
|
||||
#ifdef COMPILER1
|
||||
void generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const;
|
||||
|
||||
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
XLoadBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const;
|
||||
|
||||
void generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const;
|
||||
#endif // COMPILER2
|
||||
};
|
||||
|
||||
#endif // CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP
|
@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/x/xGlobals.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
@ -154,7 +154,7 @@ static unsigned int probe_valid_max_address_bit(size_t init_bit, size_t min_bit)
  // It should thus be a "close enough" approximation to the real virtual memory address space limit.
  //
  // This recovery strategy is only applied in production builds.
  // In debug builds, an assertion in 'ZPlatformAddressOffsetBits' will bail out the VM to indicate that
  // In debug builds, an assertion in 'XPlatformAddressOffsetBits' will bail out the VM to indicate that
  // the assumed address space is no longer up-to-date.
  if (last_allocatable_address != MAP_FAILED) {
    const unsigned int bitpos = BitsPerSize_t - count_leading_zeros((size_t) last_allocatable_address) - 1;
@ -184,7 +184,7 @@ static unsigned int probe_valid_max_address_bit(size_t init_bit, size_t min_bit)
#endif // LINUX
}

size_t ZPlatformAddressOffsetBits() {
size_t XPlatformAddressOffsetBits() {
  const static unsigned int valid_max_address_offset_bits =
      probe_valid_max_address_bit(DEFAULT_MAX_ADDRESS_BIT, MINIMUM_MAX_ADDRESS_BIT) + 1;
  assert(valid_max_address_offset_bits >= MINIMUM_MAX_ADDRESS_BIT,
@ -192,12 +192,12 @@ size_t ZPlatformAddressOffsetBits() {

  const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
  const size_t min_address_offset_bits = max_address_offset_bits - 2;
  const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
  const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
  const size_t address_offset_bits = log2i_exact(address_offset);

  return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}

size_t ZPlatformAddressMetadataShift() {
  return ZPlatformAddressOffsetBits();
size_t XPlatformAddressMetadataShift() {
  return XPlatformAddressOffsetBits();
}
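As a worked example of the sizing logic above (all inputs are assumptions for illustration, not values taken from this patch): with a 16 GiB MaxHeapSize and a virtual-to-physical ratio of 16, the reserved offset space rounds up to 256 GiB = 2^38, so the raw offset width is 38 bits, which the clamp then pulls into the probed [min, max] window.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t max_heap_size   = 16ull << 30;  // assumed 16 GiB heap
  const uint64_t virt_phys_ratio = 16;           // assumed XVirtualToPhysicalRatio
  uint64_t address_offset = 1;
  while (address_offset < max_heap_size * virt_phys_ratio) address_offset <<= 1;  // round_up_power_of_2
  unsigned bits = 0;
  for (uint64_t v = address_offset; v > 1; v >>= 1) bits++;                       // log2i_exact
  const unsigned max_bits = 46 - 3;              // pretend probing found 46 usable address bits
  const unsigned min_bits = max_bits - 2;
  const unsigned clamped = bits < min_bits ? min_bits : (bits > max_bits ? max_bits : bits);
  printf("address offset bits: %u (unclamped %u)\n", clamped, bits);              // 41 (unclamped 38)
  return 0;
}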
36
src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.hpp
Normal file
@ -0,0 +1,36 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_GC_X_XGLOBALS_PPC_HPP
|
||||
#define CPU_PPC_GC_X_XGLOBALS_PPC_HPP
|
||||
|
||||
#include "globalDefinitions_ppc.hpp"
|
||||
|
||||
const size_t XPlatformHeapViews = 3;
|
||||
const size_t XPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE;
|
||||
|
||||
size_t XPlatformAddressOffsetBits();
|
||||
size_t XPlatformAddressMetadataShift();
|
||||
|
||||
#endif // CPU_PPC_GC_X_XGLOBALS_PPC_HPP
|
298
src/hotspot/cpu/ppc/gc/x/x_ppc.ad
Normal file
@ -0,0 +1,298 @@
|
||||
//
|
||||
// Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2021 SAP SE. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License version 2 only, as
|
||||
// published by the Free Software Foundation.
|
||||
//
|
||||
// This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
// version 2 for more details (a copy is included in the LICENSE file that
|
||||
// accompanied this code).
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License version
|
||||
// 2 along with this work; if not, write to the Free Software Foundation,
|
||||
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
//
|
||||
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
// or visit www.oracle.com if you need additional information or have any
|
||||
// questions.
|
||||
//
|
||||
|
||||
source_hpp %{
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/x/c2/xBarrierSetC2.hpp"
|
||||
#include "gc/x/xThreadLocalData.hpp"
|
||||
|
||||
%}
|
||||
|
||||
source %{
|
||||
|
||||
static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref,
|
||||
Register tmp, uint8_t barrier_data) {
|
||||
if (barrier_data == XLoadBarrierElided) {
|
||||
return;
|
||||
}
|
||||
|
||||
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
|
||||
__ ld(tmp, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
|
||||
__ and_(tmp, tmp, ref);
|
||||
__ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref,
|
||||
Register tmp) {
|
||||
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
|
||||
__ b(*stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void x_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
|
||||
Register res, Register mem, Register oldval, Register newval,
|
||||
Register tmp_xchg, Register tmp_mask,
|
||||
bool weak, bool acquire) {
|
||||
// z-specific load barrier requires strong CAS operations.
|
||||
// Weak CAS operations are thus only emitted if the barrier is elided.
|
||||
__ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true,
|
||||
weak && node->barrier_data() == XLoadBarrierElided);
|
||||
|
||||
if (node->barrier_data() != XLoadBarrierElided) {
|
||||
Label skip_barrier;
|
||||
|
||||
__ ld(tmp_mask, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
|
||||
__ and_(tmp_mask, tmp_mask, tmp_xchg);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
|
||||
// CAS must have failed because pointer in memory is bad.
|
||||
x_load_barrier_slow_path(_masm, node, Address(mem), tmp_xchg, res /* used as tmp */);
|
||||
|
||||
__ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true, weak);
|
||||
|
||||
__ bind(skip_barrier);
|
||||
}
|
||||
|
||||
if (acquire) {
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
// Uses the isync instruction as an acquire barrier.
|
||||
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
|
||||
__ isync();
|
||||
} else {
|
||||
__ sync();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void x_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
|
||||
Register res, Register mem, Register oldval, Register newval, Register tmp,
|
||||
bool weak, bool acquire) {
|
||||
// z-specific load barrier requires strong CAS operations.
|
||||
// Weak CAS operations are thus only emitted if the barrier is elided.
|
||||
__ cmpxchgd(CCR0, res, oldval, newval, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true,
|
||||
weak && node->barrier_data() == XLoadBarrierElided);
|
||||
|
||||
if (node->barrier_data() != XLoadBarrierElided) {
|
||||
Label skip_barrier;
|
||||
__ ld(tmp, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread);
|
||||
__ and_(tmp, tmp, res);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
|
||||
x_load_barrier_slow_path(_masm, node, Address(mem), res, tmp);
|
||||
|
||||
__ cmpxchgd(CCR0, res, oldval, newval, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true, weak);
|
||||
|
||||
__ bind(skip_barrier);
|
||||
}
|
||||
|
||||
if (acquire) {
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
// Uses the isync instruction as an acquire barrier.
|
||||
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
|
||||
__ isync();
|
||||
} else {
|
||||
__ sync();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
%}
|
||||
|
||||
instruct xLoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
predicate((UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0)
|
||||
&& (n->as_Load()->is_unordered() || followed_by_acquire(n)));
|
||||
|
||||
format %{ "LD $dst, $mem" %}
|
||||
ins_encode %{
|
||||
assert($mem$$index == 0, "sanity");
|
||||
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
|
||||
x_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
// Load Pointer Volatile
|
||||
instruct xLoadP_acq(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
|
||||
ins_cost(3 * MEMORY_REF_COST);
|
||||
|
||||
// Predicate on instruction order is implicitly present due to the predicate of the cheaper xLoadP operation
|
||||
predicate(UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0);
|
||||
|
||||
format %{ "LD acq $dst, $mem" %}
|
||||
ins_encode %{
|
||||
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
|
||||
x_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
|
||||
|
||||
// Uses the isync instruction as an acquire barrier.
|
||||
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
|
||||
__ isync();
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct xCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
|
||||
|
||||
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
|
||||
&& (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst));
|
||||
|
||||
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
x_compare_and_swap(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
|
||||
$tmp_xchg$$Register, $tmp_mask$$Register,
|
||||
false /* weak */, false /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct xCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
|
||||
|
||||
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
|
||||
&& (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst));
|
||||
|
||||
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
x_compare_and_swap(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
|
||||
$tmp_xchg$$Register, $tmp_mask$$Register,
|
||||
false /* weak */, true /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct xCompareAndSwapPWeak(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
|
||||
|
||||
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
|
||||
&& ((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst);
|
||||
|
||||
format %{ "weak CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
x_compare_and_swap(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
|
||||
$tmp_xchg$$Register, $tmp_mask$$Register,
|
||||
true /* weak */, false /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct xCompareAndSwapPWeak_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
|
||||
|
||||
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
|
||||
&& (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst));
|
||||
|
||||
format %{ "weak CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
x_compare_and_swap(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
|
||||
$tmp_xchg$$Register, $tmp_mask$$Register,
|
||||
true /* weak */, true /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct xCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp, flagsRegCR0 cr0) %{
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
|
||||
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
|
||||
&& (
|
||||
((CompareAndSwapNode*)n)->order() != MemNode::acquire
|
||||
&& ((CompareAndSwapNode*)n)->order() != MemNode::seqcst
|
||||
));
|
||||
|
||||
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as ptr; ptr" %}
|
||||
ins_encode %{
|
||||
x_compare_and_exchange(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
|
||||
false /* weak */, false /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct xCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp, flagsRegCR0 cr0) %{
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
|
||||
predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)
|
||||
&& (
|
||||
((CompareAndSwapNode*)n)->order() == MemNode::acquire
|
||||
|| ((CompareAndSwapNode*)n)->order() == MemNode::seqcst
|
||||
));
|
||||
|
||||
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
|
||||
ins_encode %{
|
||||
x_compare_and_exchange(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
|
||||
false /* weak */, true /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct xGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp, flagsRegCR0 cr0) %{
|
||||
match(Set res (GetAndSetP mem newval));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
|
||||
predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() != 0);
|
||||
|
||||
format %{ "GetAndSetP $res, $mem, $newval" %}
|
||||
ins_encode %{
|
||||
__ getandsetd($res$$Register, $newval$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
|
||||
x_load_barrier(_masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data());
|
||||
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ isync();
|
||||
} else {
|
||||
__ sync();
|
||||
}
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
106
src/hotspot/cpu/ppc/gc/z/zAddress_ppc.cpp
Normal file
@ -0,0 +1,106 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
#ifdef LINUX
|
||||
#include <sys/mman.h>
|
||||
#endif // LINUX
|
||||
|
||||
// Default value if probing is not implemented for a certain platform: 128TB
|
||||
static const size_t DEFAULT_MAX_ADDRESS_BIT = 47;
|
||||
// Minimum value returned, if probing fails: 64GB
|
||||
static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;
|
||||
|
||||
static size_t probe_valid_max_address_bit() {
|
||||
#ifdef LINUX
|
||||
size_t max_address_bit = 0;
|
||||
const size_t page_size = os::vm_page_size();
|
||||
for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) {
|
||||
const uintptr_t base_addr = ((uintptr_t) 1U) << i;
|
||||
if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) {
|
||||
// msync succeeded, the address is valid, and maybe even already mapped.
|
||||
max_address_bit = i;
|
||||
break;
|
||||
}
|
||||
if (errno != ENOMEM) {
|
||||
// Some error occurred. This should never happen, but msync
|
||||
// has some undefined behavior, hence ignore this bit.
|
||||
#ifdef ASSERT
|
||||
fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
|
||||
#else // ASSERT
|
||||
log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
|
||||
#endif // ASSERT
|
||||
continue;
|
||||
}
|
||||
// Since msync failed with ENOMEM, the page might not be mapped.
|
||||
// Try to map it, to see if the address is valid.
|
||||
void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
|
||||
if (result_addr != MAP_FAILED) {
|
||||
munmap(result_addr, page_size);
|
||||
}
|
||||
if ((uintptr_t) result_addr == base_addr) {
|
||||
// address is valid
|
||||
max_address_bit = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (max_address_bit == 0) {
|
||||
// probing failed, allocate a very high page and take that bit as the maximum
|
||||
const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT;
|
||||
void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
|
||||
if (result_addr != MAP_FAILED) {
|
||||
max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1;
|
||||
munmap(result_addr, page_size);
|
||||
}
|
||||
}
|
||||
log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit);
|
||||
return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT);
|
||||
#else // LINUX
|
||||
return DEFAULT_MAX_ADDRESS_BIT;
|
||||
#endif // LINUX
|
||||
}
|
||||
|
||||
size_t ZPlatformAddressOffsetBits() {
|
||||
const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
|
||||
const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
|
||||
const size_t min_address_offset_bits = max_address_offset_bits - 2;
|
||||
const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
|
||||
const size_t address_offset_bits = log2i_exact(address_offset);
|
||||
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
|
||||
}
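As a worked example of the clamping above, with assumed inputs (a probed 47-bit address space, a 32 GB MaxHeapSize, and a virtual-to-physical ratio of 16): the allowed range is 43 to 45 offset bits, the heap only asks for 39 bits, so the result is clamped up to 43. A minimal standalone sketch of that arithmetic, with stand-ins for the HotSpot utility functions:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Stand-ins for round_up_power_of_2 and log2i_exact, just for this sketch.
static uint64_t round_up_pow2(uint64_t v) {
  uint64_t p = 1;
  while (p < v) p <<= 1;
  return p;
}
static unsigned log2_exact(uint64_t pow2) {
  unsigned n = 0;
  while ((pow2 >>= 1) != 0) n++;
  return n;
}

int main() {
  const unsigned probed_bit      = 47;                    // as if probing returned 47
  const unsigned max_offset_bits = (probed_bit + 1) - 3;  // 45
  const unsigned min_offset_bits = max_offset_bits - 2;   // 43
  const uint64_t max_heap        = 32ull << 30;           // 32 GB, assumed
  const uint64_t virt_phys_ratio = 16;                    // assumed ratio
  const unsigned requested_bits  = log2_exact(round_up_pow2(max_heap * virt_phys_ratio)); // 39
  const unsigned result = std::clamp(requested_bits, min_offset_bits, max_offset_bits);   // 43
  std::printf("address offset bits = %u\n", result);
  return 0;
}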
|
||||
|
||||
size_t ZPlatformAddressHeapBaseShift() {
|
||||
return ZPlatformAddressOffsetBits();
|
||||
}
|
||||
|
||||
void ZGlobalsPointers::pd_set_good_masks() {
|
||||
}
|
34
src/hotspot/cpu/ppc/gc/z/zAddress_ppc.hpp
Normal file
@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_GC_Z_ZADDRESS_PPC_HPP
|
||||
#define CPU_PPC_GC_Z_ZADDRESS_PPC_HPP
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
const size_t ZPointerLoadShift = 16;
|
||||
|
||||
size_t ZPlatformAddressOffsetBits();
|
||||
size_t ZPlatformAddressHeapBaseShift();
|
||||
|
||||
#endif // CPU_PPC_GC_Z_ZADDRESS_PPC_HPP
|
37
src/hotspot/cpu/ppc/gc/z/zAddress_ppc.inline.hpp
Normal file
@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_PPC_GC_Z_ZADDRESS_PPC_INLINE_HPP
|
||||
#define CPU_PPC_GC_Z_ZADDRESS_PPC_INLINE_HPP
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
inline uintptr_t ZPointer::remap_bits(uintptr_t colored) {
|
||||
return colored & ZPointerRemappedMask;
|
||||
}
|
||||
|
||||
inline constexpr int ZPointer::load_shift_lookup(uintptr_t value) {
|
||||
return ZPointerLoadShift;
|
||||
}
|
||||
|
||||
#endif // CPU_PPC_GC_Z_ZADDRESS_PPC_INLINE_HPP
|
@ -22,11 +22,12 @@
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "asm/register.hpp"
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "asm/register.hpp"
|
||||
#include "code/codeBlob.hpp"
|
||||
#include "code/vmreg.inline.hpp"
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zBarrier.inline.hpp"
|
||||
#include "gc/z/zBarrierSet.hpp"
|
||||
#include "gc/z/zBarrierSetAssembler.hpp"
|
||||
@ -34,6 +35,7 @@
|
||||
#include "gc/z/zThreadLocalData.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "register_ppc.hpp"
|
||||
#include "runtime/jniHandles.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
@ -44,11 +46,76 @@
|
||||
#endif // COMPILER1
|
||||
#ifdef COMPILER2
|
||||
#include "gc/z/c2/zBarrierSetC2.hpp"
|
||||
#include "opto/output.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
// Helper for saving and restoring registers across a runtime call that does
|
||||
// not have any live vector registers.
|
||||
class ZRuntimeCallSpill {
|
||||
MacroAssembler* _masm;
|
||||
Register _result;
|
||||
bool _needs_frame, _preserve_gp_registers, _preserve_fp_registers;
|
||||
int _nbytes_save;
|
||||
|
||||
void save() {
|
||||
MacroAssembler* masm = _masm;
|
||||
|
||||
if (_needs_frame) {
|
||||
if (_preserve_gp_registers) {
|
||||
bool preserve_R3 = _result != R3_ARG1;
|
||||
_nbytes_save = (MacroAssembler::num_volatile_gp_regs
|
||||
+ (_preserve_fp_registers ? MacroAssembler::num_volatile_fp_regs : 0)
|
||||
- (preserve_R3 ? 0 : 1)
|
||||
) * BytesPerWord;
|
||||
__ save_volatile_gprs(R1_SP, -_nbytes_save, _preserve_fp_registers, preserve_R3);
|
||||
}
|
||||
|
||||
__ save_LR_CR(R0);
|
||||
__ push_frame_reg_args(_nbytes_save, R0);
|
||||
}
|
||||
}
|
||||
|
||||
void restore() {
|
||||
MacroAssembler* masm = _masm;
|
||||
|
||||
Register result = R3_RET;
|
||||
if (_needs_frame) {
|
||||
__ pop_frame();
|
||||
__ restore_LR_CR(R0);
|
||||
|
||||
if (_preserve_gp_registers) {
|
||||
bool restore_R3 = _result != R3_ARG1;
|
||||
if (restore_R3 && _result != noreg) {
|
||||
__ mr(R0, R3_RET);
|
||||
result = R0;
|
||||
}
|
||||
__ restore_volatile_gprs(R1_SP, -_nbytes_save, _preserve_fp_registers, restore_R3);
|
||||
}
|
||||
}
|
||||
if (_result != noreg) {
|
||||
__ mr_if_needed(_result, result);
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
ZRuntimeCallSpill(MacroAssembler* masm, Register result, MacroAssembler::PreservationLevel preservation_level)
|
||||
: _masm(masm),
|
||||
_result(result),
|
||||
_needs_frame(preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR),
|
||||
_preserve_gp_registers(preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS),
|
||||
_preserve_fp_registers(preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS),
|
||||
_nbytes_save(0) {
|
||||
save();
|
||||
}
|
||||
~ZRuntimeCallSpill() {
|
||||
restore();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register base, RegisterOrConstant ind_or_offs, Register dst,
|
||||
Register tmp1, Register tmp2,
|
||||
@ -81,14 +148,21 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators
|
||||
saved_base = tmp2;
|
||||
}
|
||||
|
||||
BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
|
||||
tmp1, noreg, preservation_level, L_handle_null);
|
||||
__ ld(dst, ind_or_offs, base);
|
||||
|
||||
/* ==== Check whether pointer is dirty ==== */
|
||||
Label skip_barrier;
|
||||
Label done, uncolor;
|
||||
|
||||
const bool on_non_strong =
|
||||
(decorators & ON_WEAK_OOP_REF) != 0 ||
|
||||
(decorators & ON_PHANTOM_OOP_REF) != 0;
|
||||
|
||||
// Load bad mask into scratch register.
|
||||
__ ld(tmp1, (intptr_t) ZThreadLocalData::address_bad_mask_offset(), R16_thread);
|
||||
if (on_non_strong) {
|
||||
__ ld(tmp1, in_bytes(ZThreadLocalData::mark_bad_mask_offset()), R16_thread);
|
||||
} else {
|
||||
__ ld(tmp1, in_bytes(ZThreadLocalData::load_bad_mask_offset()), R16_thread);
|
||||
}
|
||||
|
||||
// The color bits of the to-be-tested pointer do not have to be equivalent to the 'bad_mask' testing bits.
|
||||
// A pointer is classified as dirty if any of the color bits that also match the bad mask is set.
|
||||
@ -96,66 +170,195 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators
|
||||
// if the pointer is not dirty.
|
||||
// Only dirty pointers must be processed by this barrier, so we can skip it in case the latter condition holds true.
|
||||
__ and_(tmp1, tmp1, dst);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
__ beq(CCR0, uncolor);
|
||||
|
||||
/* ==== Invoke barrier ==== */
|
||||
int nbytes_save = 0;
|
||||
{
|
||||
ZRuntimeCallSpill rcs(masm, dst, preservation_level);
|
||||
|
||||
const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
|
||||
const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
|
||||
const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;
|
||||
|
||||
const bool preserve_R3 = dst != R3_ARG1;
|
||||
|
||||
if (needs_frame) {
|
||||
if (preserve_gp_registers) {
|
||||
nbytes_save = (preserve_fp_registers
|
||||
? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs
|
||||
: MacroAssembler::num_volatile_gp_regs) * BytesPerWord;
|
||||
nbytes_save -= preserve_R3 ? 0 : BytesPerWord;
|
||||
__ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
|
||||
// Setup arguments
|
||||
if (saved_base != R3_ARG1 && ind_or_offs.register_or_noreg() != R3_ARG1) {
|
||||
__ mr_if_needed(R3_ARG1, dst);
|
||||
__ add(R4_ARG2, ind_or_offs, saved_base);
|
||||
} else if (dst != R4_ARG2) {
|
||||
__ add(R4_ARG2, ind_or_offs, saved_base);
|
||||
__ mr(R3_ARG1, dst);
|
||||
} else {
|
||||
__ add(R0, ind_or_offs, saved_base);
|
||||
__ mr(R3_ARG1, dst);
|
||||
__ mr(R4_ARG2, R0);
|
||||
}
|
||||
|
||||
__ save_LR_CR(tmp1);
|
||||
__ push_frame_reg_args(nbytes_save, tmp1);
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));
|
||||
}
|
||||
|
||||
// Setup arguments
|
||||
if (saved_base != R3_ARG1) {
|
||||
__ mr_if_needed(R3_ARG1, dst);
|
||||
__ add(R4_ARG2, ind_or_offs, saved_base);
|
||||
} else if (dst != R4_ARG2) {
|
||||
__ add(R4_ARG2, ind_or_offs, saved_base);
|
||||
__ mr(R3_ARG1, dst);
|
||||
// Slow-path has already uncolored
|
||||
if (L_handle_null != nullptr) {
|
||||
__ cmpdi(CCR0, dst, 0);
|
||||
__ beq(CCR0, *L_handle_null);
|
||||
}
|
||||
__ b(done);
|
||||
|
||||
__ bind(uncolor);
|
||||
if (L_handle_null == nullptr) {
|
||||
__ srdi(dst, dst, ZPointerLoadShift);
|
||||
} else {
|
||||
__ add(R0, ind_or_offs, saved_base);
|
||||
__ mr(R3_ARG1, dst);
|
||||
__ mr(R4_ARG2, R0);
|
||||
__ srdi_(dst, dst, ZPointerLoadShift);
|
||||
__ beq(CCR0, *L_handle_null);
|
||||
}
|
||||
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));
|
||||
|
||||
Register result = R3_RET;
|
||||
if (needs_frame) {
|
||||
__ pop_frame();
|
||||
__ restore_LR_CR(tmp1);
|
||||
|
||||
if (preserve_R3) {
|
||||
__ mr(R0, R3_RET);
|
||||
result = R0;
|
||||
}
|
||||
|
||||
if (preserve_gp_registers) {
|
||||
__ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
|
||||
}
|
||||
}
|
||||
__ mr_if_needed(dst, result);
|
||||
|
||||
__ bind(skip_barrier);
|
||||
__ bind(done);
|
||||
__ block_comment("} load_at (zgc)");
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
static void load_least_significant_16_oop_bits(MacroAssembler* masm, Register dst, RegisterOrConstant ind_or_offs, Register base) {
|
||||
assert_different_registers(dst, base);
|
||||
#ifndef VM_LITTLE_ENDIAN
|
||||
const int BE_offset = 6;
|
||||
if (ind_or_offs.is_register()) {
|
||||
__ addi(dst, ind_or_offs.as_register(), BE_offset);
|
||||
__ lhzx(dst, base, dst);
|
||||
} else {
|
||||
__ lhz(dst, ind_or_offs.as_constant() + BE_offset, base);
|
||||
}
|
||||
#else
|
||||
__ lhz(dst, ind_or_offs, base);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void emit_store_fast_path_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, bool is_atomic, Label& medium_path) {
|
||||
if (is_atomic) {
|
||||
assert(ZPointerLoadShift + LogMinObjAlignmentInBytes >= 16, "or replace following code");
|
||||
load_least_significant_16_oop_bits(masm, R0, ind_or_offs, base);
|
||||
// Atomic operations must ensure that the contents of memory are store-good before
|
||||
// an atomic operation can execute.
|
||||
// A not relocatable object could have spurious raw null pointers in its fields after
|
||||
// getting promoted to the old generation.
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits);
|
||||
__ cmplwi(CCR0, R0, barrier_Relocation::unpatched);
|
||||
} else {
|
||||
__ ld(R0, ind_or_offs, base);
|
||||
// Stores on relocatable objects never need to deal with raw null pointers in fields.
|
||||
// Raw null pointers may only exist in the young generation, as they get pruned when
|
||||
// the object is relocated to old. And no pre-write barrier needs to perform any action
|
||||
// in the young generation.
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreBadMask);
|
||||
__ andi_(R0, R0, barrier_Relocation::unpatched);
|
||||
}
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), medium_path);
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::store_barrier_fast(MacroAssembler* masm,
|
||||
Register ref_base,
|
||||
RegisterOrConstant ind_or_offset,
|
||||
Register rnew_zaddress,
|
||||
Register rnew_zpointer,
|
||||
bool in_nmethod,
|
||||
bool is_atomic,
|
||||
Label& medium_path,
|
||||
Label& medium_path_continuation) const {
|
||||
assert_different_registers(ref_base, rnew_zpointer);
|
||||
assert_different_registers(ind_or_offset.register_or_noreg(), rnew_zpointer);
|
||||
assert_different_registers(rnew_zaddress, rnew_zpointer);
|
||||
|
||||
if (in_nmethod) {
|
||||
emit_store_fast_path_check(masm, ref_base, ind_or_offset, is_atomic, medium_path);
|
||||
__ bind(medium_path_continuation);
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits);
|
||||
__ li(rnew_zpointer, barrier_Relocation::unpatched); // Load color bits.
|
||||
if (rnew_zaddress == noreg) { // noreg encodes null.
|
||||
if (ZPointerLoadShift >= 16) {
|
||||
__ rldicl(rnew_zpointer, rnew_zpointer, 0, 64 - ZPointerLoadShift); // Clear sign extension from li.
|
||||
}
|
||||
}
|
||||
} else {
|
||||
__ ld(R0, ind_or_offset, ref_base);
|
||||
__ ld(rnew_zpointer, in_bytes(ZThreadLocalData::store_bad_mask_offset()), R16_thread);
|
||||
__ and_(R0, R0, rnew_zpointer);
|
||||
__ bne(CCR0, medium_path);
|
||||
__ bind(medium_path_continuation);
|
||||
__ ld(rnew_zpointer, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread);
|
||||
}
|
||||
if (rnew_zaddress != noreg) { // noreg encodes null.
|
||||
__ rldimi(rnew_zpointer, rnew_zaddress, ZPointerLoadShift, 0); // Insert shifted pointer.
|
||||
}
|
||||
}
|
||||
|
||||
static void store_barrier_buffer_add(MacroAssembler* masm,
|
||||
Register ref_base,
|
||||
RegisterOrConstant ind_or_offs,
|
||||
Register tmp1,
|
||||
Label& slow_path) {
|
||||
__ ld(tmp1, in_bytes(ZThreadLocalData::store_barrier_buffer_offset()), R16_thread);
|
||||
|
||||
// Combined pointer bump and check if the buffer is disabled or full
|
||||
__ ld(R0, in_bytes(ZStoreBarrierBuffer::current_offset()), tmp1);
|
||||
__ addic_(R0, R0, -(int)sizeof(ZStoreBarrierEntry));
|
||||
__ blt(CCR0, slow_path);
|
||||
__ std(R0, in_bytes(ZStoreBarrierBuffer::current_offset()), tmp1);
|
||||
|
||||
// Entry is at ZStoreBarrierBuffer (tmp1) + buffer_offset + scaled index (R0)
|
||||
__ add(tmp1, tmp1, R0);
|
||||
|
||||
// Compute and log the store address
|
||||
Register store_addr = ref_base;
|
||||
if (!ind_or_offs.is_constant() || ind_or_offs.as_constant() != 0) {
|
||||
__ add(R0, ind_or_offs, ref_base);
|
||||
store_addr = R0;
|
||||
}
|
||||
__ std(store_addr, in_bytes(ZStoreBarrierBuffer::buffer_offset()) + in_bytes(ZStoreBarrierEntry::p_offset()), tmp1);
|
||||
|
||||
// Load and log the prev value
|
||||
__ ld(R0, ind_or_offs, ref_base);
|
||||
__ std(R0, in_bytes(ZStoreBarrierBuffer::buffer_offset()) + in_bytes(ZStoreBarrierEntry::prev_offset()), tmp1);
|
||||
}
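Read as plain C++, the buffer add above behaves roughly like the sketch below. The layout shown (a byte offset counting down over an array of {address, previous value} entries, with a negative bump meaning full or disabled) is an interpretation of the assembly for illustration, not the actual ZStoreBarrierBuffer declaration.

#include <cstddef>
#include <cstdint>

struct StoreBarrierEntry {
  void*     p;     // address of the oop field being stored to
  uintptr_t prev;  // value found in the field before the store
};

struct StoreBarrierBuffer {
  ptrdiff_t         current;      // byte offset of the next free entry; <= 0 means full/disabled
  StoreBarrierEntry entries[32];  // illustrative capacity
};

// Mirrors the combined pointer bump and full/disabled check (the addic_/blt pair above):
// returns false when the caller must fall back to the slow-path runtime call.
static bool buffer_add(StoreBarrierBuffer* buf, void* field_addr, uintptr_t prev_value) {
  const ptrdiff_t bumped = buf->current - (ptrdiff_t)sizeof(StoreBarrierEntry);
  if (bumped < 0) {
    return false;                 // buffer disabled or no room left
  }
  buf->current = bumped;
  StoreBarrierEntry* e = (StoreBarrierEntry*)((char*)buf->entries + bumped);
  e->p    = field_addr;           // log the store address
  e->prev = prev_value;           // log the previous value
  return true;
}

int main() {
  StoreBarrierBuffer buf;
  buf.current = sizeof(buf.entries);
  uintptr_t old_value = 0;
  return buffer_add(&buf, &old_value, old_value) ? 0 : 1;
}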
|
||||
|
||||
void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm,
|
||||
Register ref_base,
|
||||
RegisterOrConstant ind_or_offs,
|
||||
Register tmp,
|
||||
bool is_atomic,
|
||||
Label& medium_path_continuation,
|
||||
Label& slow_path) const {
|
||||
assert_different_registers(ref_base, tmp, R0);
|
||||
|
||||
// The reason to end up in the medium path is that the pre-value was not 'good'.
|
||||
|
||||
if (is_atomic) {
|
||||
// Atomic accesses can get to the medium fast path because the value was a
|
||||
// raw null value. If it was not null, then there is no doubt we need to take a slow path.
|
||||
__ ld(tmp, ind_or_offs, ref_base);
|
||||
__ cmpdi(CCR0, tmp, 0);
|
||||
__ bne(CCR0, slow_path);
|
||||
|
||||
// If we get this far, we know there is a young raw null value in the field.
|
||||
// Try to self-heal null values for atomic accesses
|
||||
bool need_restore = false;
|
||||
if (!ind_or_offs.is_constant() || ind_or_offs.as_constant() != 0) {
|
||||
__ add(ref_base, ind_or_offs, ref_base);
|
||||
need_restore = true;
|
||||
}
|
||||
__ ld(R0, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread);
|
||||
__ cmpxchgd(CCR0, tmp, (intptr_t)0, R0, ref_base,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update());
|
||||
if (need_restore) {
|
||||
__ subf(ref_base, ind_or_offs, ref_base);
|
||||
}
|
||||
__ bne(CCR0, slow_path);
|
||||
} else {
|
||||
// A non-atomic relocatable object won't get to the medium fast path due to a
|
||||
// raw null in the young generation. We only get here because the field is bad.
|
||||
// In this path we don't need any self healing, so we can avoid a runtime call
|
||||
// most of the time by buffering the store barrier to be applied lazily.
|
||||
store_barrier_buffer_add(masm,
|
||||
ref_base,
|
||||
ind_or_offs,
|
||||
tmp,
|
||||
slow_path);
|
||||
}
|
||||
__ b(medium_path_continuation);
|
||||
}
|
||||
|
||||
// The Z store barrier only verifies the pointers it is operating on and is thus a sole debugging measure.
|
||||
void ZBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register base, RegisterOrConstant ind_or_offs, Register val,
|
||||
@ -163,124 +366,267 @@ void ZBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorator
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
__ block_comment("store_at (zgc) {");
|
||||
|
||||
// If the 'val' register is 'noreg', the to-be-stored value is a null pointer.
|
||||
if (is_reference_type(type) && val != noreg) {
|
||||
__ ld(tmp1, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
|
||||
__ and_(tmp1, tmp1, val);
|
||||
__ asm_assert_eq("Detected dirty pointer on the heap in Z store barrier");
|
||||
}
|
||||
bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
|
||||
|
||||
// Store value
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level);
|
||||
if (is_reference_type(type)) {
|
||||
assert_different_registers(base, val, tmp1, tmp2, tmp3);
|
||||
|
||||
if (dest_uninitialized) {
|
||||
// tmp1 = (val << ZPointerLoadShift) | store_good_mask
|
||||
__ ld(tmp1, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread);
|
||||
if (val != noreg) { // noreg encodes null.
|
||||
__ rldimi(tmp1, val, ZPointerLoadShift, 0);
|
||||
}
|
||||
} else {
|
||||
Label done;
|
||||
Label medium;
|
||||
Label medium_continuation; // bound in store_barrier_fast
|
||||
Label slow;
|
||||
|
||||
store_barrier_fast(masm, base, ind_or_offs, val, tmp1, false, false, medium, medium_continuation);
|
||||
__ b(done);
|
||||
__ bind(medium);
|
||||
store_barrier_medium(masm, base, ind_or_offs, tmp1, false, medium_continuation, slow);
|
||||
__ bind(slow);
|
||||
{
|
||||
ZRuntimeCallSpill rcs(masm, noreg, preservation_level);
|
||||
__ add(R3_ARG1, ind_or_offs, base);
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), R3_ARG1);
|
||||
}
|
||||
__ b(medium_continuation);
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, tmp1, tmp2, tmp3, noreg, preservation_level);
|
||||
} else {
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level);
|
||||
}
|
||||
|
||||
__ block_comment("} store_at (zgc)");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType component_type,
|
||||
/* arraycopy */
|
||||
const Register _load_bad_mask = R6, _store_bad_mask = R7, _store_good_mask = R8;
|
||||
|
||||
void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType type,
|
||||
Register src, Register dst, Register count,
|
||||
Register preserve1, Register preserve2) {
|
||||
__ block_comment("arraycopy_prologue (zgc) {");
|
||||
bool is_checkcast_copy = (decorators & ARRAYCOPY_CHECKCAST) != 0,
|
||||
dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
|
||||
|
||||
/* ==== Check whether a special gc barrier is required for this particular load ==== */
|
||||
if (!is_reference_type(component_type)) {
|
||||
if (!ZBarrierSet::barrier_needed(decorators, type) || is_checkcast_copy) {
|
||||
// Barrier not needed
|
||||
return;
|
||||
}
|
||||
|
||||
Label skip_barrier;
|
||||
__ block_comment("arraycopy_prologue (zgc) {");
|
||||
|
||||
// Fast path: Array is of length zero
|
||||
__ cmpdi(CCR0, count, 0);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
|
||||
/* ==== Ensure register sanity ==== */
|
||||
Register tmp_R11 = R11_scratch1;
|
||||
|
||||
assert_different_registers(src, dst, count, tmp_R11, noreg);
|
||||
if (preserve1 != noreg) {
|
||||
// Not technically required, but unlikely to be intended.
|
||||
assert_different_registers(preserve1, preserve2);
|
||||
}
|
||||
|
||||
/* ==== Invoke barrier (slowpath) ==== */
|
||||
int nbytes_save = 0;
|
||||
|
||||
{
|
||||
assert(!noreg->is_volatile(), "sanity");
|
||||
|
||||
if (preserve1->is_volatile()) {
|
||||
__ std(preserve1, -BytesPerWord * ++nbytes_save, R1_SP);
|
||||
}
|
||||
|
||||
if (preserve2->is_volatile() && preserve1 != preserve2) {
|
||||
__ std(preserve2, -BytesPerWord * ++nbytes_save, R1_SP);
|
||||
}
|
||||
|
||||
__ std(src, -BytesPerWord * ++nbytes_save, R1_SP);
|
||||
__ std(dst, -BytesPerWord * ++nbytes_save, R1_SP);
|
||||
__ std(count, -BytesPerWord * ++nbytes_save, R1_SP);
|
||||
|
||||
__ save_LR_CR(tmp_R11);
|
||||
__ push_frame_reg_args(nbytes_save, tmp_R11);
|
||||
}
|
||||
|
||||
// ZBarrierSetRuntime::load_barrier_on_oop_array_addr(src, count)
|
||||
if (count == R3_ARG1) {
|
||||
if (src == R4_ARG2) {
|
||||
// Arguments are provided in reverse order
|
||||
__ mr(tmp_R11, count);
|
||||
__ mr(R3_ARG1, src);
|
||||
__ mr(R4_ARG2, tmp_R11);
|
||||
} else {
|
||||
__ mr(R4_ARG2, count);
|
||||
__ mr(R3_ARG1, src);
|
||||
}
|
||||
} else {
|
||||
__ mr_if_needed(R3_ARG1, src);
|
||||
__ mr_if_needed(R4_ARG2, count);
|
||||
}
|
||||
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr());
|
||||
|
||||
__ pop_frame();
|
||||
__ restore_LR_CR(tmp_R11);
|
||||
|
||||
{
|
||||
__ ld(count, -BytesPerWord * nbytes_save--, R1_SP);
|
||||
__ ld(dst, -BytesPerWord * nbytes_save--, R1_SP);
|
||||
__ ld(src, -BytesPerWord * nbytes_save--, R1_SP);
|
||||
|
||||
if (preserve2->is_volatile() && preserve1 != preserve2) {
|
||||
__ ld(preserve2, -BytesPerWord * nbytes_save--, R1_SP);
|
||||
}
|
||||
|
||||
if (preserve1->is_volatile()) {
|
||||
__ ld(preserve1, -BytesPerWord * nbytes_save--, R1_SP);
|
||||
}
|
||||
}
|
||||
|
||||
__ bind(skip_barrier);
|
||||
load_copy_masks(masm, _load_bad_mask, _store_bad_mask, _store_good_mask, dest_uninitialized);
|
||||
|
||||
__ block_comment("} arraycopy_prologue (zgc)");
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::load_copy_masks(MacroAssembler* masm,
|
||||
Register load_bad_mask,
|
||||
Register store_bad_mask,
|
||||
Register store_good_mask,
|
||||
bool dest_uninitialized) const {
|
||||
__ ld(load_bad_mask, in_bytes(ZThreadLocalData::load_bad_mask_offset()), R16_thread);
|
||||
__ ld(store_good_mask, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread);
|
||||
if (dest_uninitialized) {
|
||||
DEBUG_ONLY( __ li(store_bad_mask, -1); )
|
||||
} else {
|
||||
__ ld(store_bad_mask, in_bytes(ZThreadLocalData::store_bad_mask_offset()), R16_thread);
|
||||
}
|
||||
}
|
||||
void ZBarrierSetAssembler::copy_load_at_fast(MacroAssembler* masm,
|
||||
Register zpointer,
|
||||
Register addr,
|
||||
Register load_bad_mask,
|
||||
Label& slow_path,
|
||||
Label& continuation) const {
|
||||
__ ldx(zpointer, addr);
|
||||
__ and_(R0, zpointer, load_bad_mask);
|
||||
__ bne(CCR0, slow_path);
|
||||
__ bind(continuation);
|
||||
}
|
||||
void ZBarrierSetAssembler::copy_load_at_slow(MacroAssembler* masm,
|
||||
Register zpointer,
|
||||
Register addr,
|
||||
Register tmp,
|
||||
Label& slow_path,
|
||||
Label& continuation) const {
|
||||
__ align(32);
|
||||
__ bind(slow_path);
|
||||
__ mfctr(tmp); // preserve loop counter
|
||||
{
|
||||
ZRuntimeCallSpill rcs(masm, R0, MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS);
|
||||
assert(zpointer != R4_ARG2, "or change argument setup");
|
||||
__ mr_if_needed(R4_ARG2, addr);
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(), zpointer, R4_ARG2);
|
||||
}
|
||||
__ sldi(zpointer, R0, ZPointerLoadShift); // Slow-path has uncolored; revert
|
||||
__ mtctr(tmp); // restore loop counter
|
||||
__ b(continuation);
|
||||
}
|
||||
void ZBarrierSetAssembler::copy_store_at_fast(MacroAssembler* masm,
|
||||
Register zpointer,
|
||||
Register addr,
|
||||
Register store_bad_mask,
|
||||
Register store_good_mask,
|
||||
Label& medium_path,
|
||||
Label& continuation,
|
||||
bool dest_uninitialized) const {
|
||||
if (!dest_uninitialized) {
|
||||
__ ldx(R0, addr);
|
||||
__ and_(R0, R0, store_bad_mask);
|
||||
__ bne(CCR0, medium_path);
|
||||
__ bind(continuation);
|
||||
}
|
||||
__ rldimi(zpointer, store_good_mask, 0, 64 - ZPointerLoadShift); // Replace color bits.
|
||||
__ stdx(zpointer, addr);
|
||||
}
|
||||
void ZBarrierSetAssembler::copy_store_at_slow(MacroAssembler* masm,
|
||||
Register addr,
|
||||
Register tmp,
|
||||
Label& medium_path,
|
||||
Label& continuation,
|
||||
bool dest_uninitialized) const {
|
||||
if (!dest_uninitialized) {
|
||||
Label slow_path;
|
||||
__ align(32);
|
||||
__ bind(medium_path);
|
||||
store_barrier_medium(masm, addr, (intptr_t)0, tmp, false, continuation, slow_path);
|
||||
__ bind(slow_path);
|
||||
__ mfctr(tmp); // preserve loop counter
|
||||
{
|
||||
ZRuntimeCallSpill rcs(masm, noreg, MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS);
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), addr);
|
||||
}
|
||||
__ mtctr(tmp); // restore loop counter
|
||||
__ b(continuation);
|
||||
}
|
||||
}
|
||||
|
||||
// Arguments for generated stub:
|
||||
// from: R3_ARG1
|
||||
// to: R4_ARG2
|
||||
// count: R5_ARG3 (int >= 0)
|
||||
void ZBarrierSetAssembler::generate_disjoint_oop_copy(MacroAssembler* masm, bool dest_uninitialized) {
|
||||
const Register zpointer = R2, tmp = R9;
|
||||
Label done, loop, load_bad, load_good, store_bad, store_good;
|
||||
__ cmpdi(CCR0, R5_ARG3, 0);
|
||||
__ beq(CCR0, done);
|
||||
__ mtctr(R5_ARG3);
|
||||
|
||||
__ align(32);
|
||||
__ bind(loop);
|
||||
copy_load_at_fast(masm, zpointer, R3_ARG1, _load_bad_mask, load_bad, load_good);
|
||||
copy_store_at_fast(masm, zpointer, R4_ARG2, _store_bad_mask, _store_good_mask, store_bad, store_good, dest_uninitialized);
|
||||
__ addi(R3_ARG1, R3_ARG1, 8);
|
||||
__ addi(R4_ARG2, R4_ARG2, 8);
|
||||
__ bdnz(loop);
|
||||
|
||||
__ bind(done);
|
||||
__ li(R3_RET, 0);
|
||||
__ blr();
|
||||
|
||||
copy_load_at_slow(masm, zpointer, R3_ARG1, tmp, load_bad, load_good);
|
||||
copy_store_at_slow(masm, R4_ARG2, tmp, store_bad, store_good, dest_uninitialized);
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::generate_conjoint_oop_copy(MacroAssembler* masm, bool dest_uninitialized) {
|
||||
const Register zpointer = R2, tmp = R9;
|
||||
Label done, loop, load_bad, load_good, store_bad, store_good;
|
||||
__ sldi_(R0, R5_ARG3, 3);
|
||||
__ beq(CCR0, done);
|
||||
__ mtctr(R5_ARG3);
|
||||
// Point behind last elements and copy backwards.
|
||||
__ add(R3_ARG1, R3_ARG1, R0);
|
||||
__ add(R4_ARG2, R4_ARG2, R0);
|
||||
|
||||
__ align(32);
|
||||
__ bind(loop);
|
||||
__ addi(R3_ARG1, R3_ARG1, -8);
|
||||
__ addi(R4_ARG2, R4_ARG2, -8);
|
||||
copy_load_at_fast(masm, zpointer, R3_ARG1, _load_bad_mask, load_bad, load_good);
|
||||
copy_store_at_fast(masm, zpointer, R4_ARG2, _store_bad_mask, _store_good_mask, store_bad, store_good, dest_uninitialized);
|
||||
__ bdnz(loop);
|
||||
|
||||
__ bind(done);
|
||||
__ li(R3_RET, 0);
|
||||
__ blr();
|
||||
|
||||
copy_load_at_slow(masm, zpointer, R3_ARG1, tmp, load_bad, load_good);
|
||||
copy_store_at_slow(masm, R4_ARG2, tmp, store_bad, store_good, dest_uninitialized);
|
||||
}
|
||||
|
||||
|
||||
// Verify a colored pointer.
|
||||
void ZBarrierSetAssembler::check_oop(MacroAssembler *masm, Register obj, const char* msg) {
|
||||
if (!VerifyOops) {
|
||||
return;
|
||||
}
|
||||
Label done, skip_uncolor;
|
||||
// Skip (colored) null.
|
||||
__ srdi_(R0, obj, ZPointerLoadShift);
|
||||
__ beq(CCR0, done);
|
||||
|
||||
// Check if ZAddressHeapBase << ZPointerLoadShift is set. If so, we need to uncolor.
|
||||
__ rldicl_(R0, obj, 64 - ZAddressHeapBaseShift - ZPointerLoadShift, 63);
|
||||
__ mr(R0, obj);
|
||||
__ beq(CCR0, skip_uncolor);
|
||||
__ srdi(R0, obj, ZPointerLoadShift);
|
||||
__ bind(skip_uncolor);
|
||||
|
||||
__ verify_oop(R0, msg);
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
|
||||
void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
|
||||
Register obj, Register tmp, Label& slowpath) {
|
||||
__ block_comment("try_resolve_jobject_in_native (zgc) {");
|
||||
|
||||
assert_different_registers(jni_env, obj, tmp);
|
||||
Label done, tagged, weak_tagged, check_color;
|
||||
Address load_bad_mask = load_bad_mask_from_jni_env(jni_env),
|
||||
mark_bad_mask = mark_bad_mask_from_jni_env(jni_env);
|
||||
|
||||
// Resolve the pointer using the standard implementation for weak tag handling and pointer verification.
|
||||
BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath);
|
||||
// Test for tag
|
||||
__ andi_(tmp, obj, JNIHandles::tag_mask);
|
||||
__ bne(CCR0, tagged);
|
||||
|
||||
// Check whether pointer is dirty.
|
||||
__ ld(tmp,
|
||||
in_bytes(ZThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()),
|
||||
jni_env);
|
||||
// Resolve local handle
|
||||
__ ld(dst, 0, obj);
|
||||
__ b(done);
|
||||
|
||||
__ and_(tmp, obj, tmp);
|
||||
__ bind(tagged);
|
||||
|
||||
// Test for weak tag
|
||||
__ andi_(tmp, obj, JNIHandles::TypeTag::weak_global);
|
||||
__ clrrdi(dst, obj, JNIHandles::tag_size); // Untag.
|
||||
__ bne(CCR0, weak_tagged);
|
||||
|
||||
// Resolve global handle
|
||||
__ ld(dst, 0, dst);
|
||||
__ ld(tmp, load_bad_mask.disp(), load_bad_mask.base());
|
||||
__ b(check_color);
|
||||
|
||||
__ bind(weak_tagged);
|
||||
|
||||
// Resolve weak handle
|
||||
__ ld(dst, 0, dst);
|
||||
__ ld(tmp, mark_bad_mask.disp(), mark_bad_mask.base());
|
||||
|
||||
__ bind(check_color);
|
||||
__ and_(tmp, tmp, dst);
|
||||
__ bne(CCR0, slowpath);
|
||||
|
||||
// Uncolor
|
||||
__ srdi(dst, dst, ZPointerLoadShift);
|
||||
|
||||
__ bind(done);
|
||||
|
||||
__ block_comment("} try_resolve_jobject_in_native (zgc)");
|
||||
}
|
||||
|
||||
@ -289,17 +635,40 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R
|
||||
#ifdef COMPILER1
|
||||
#define __ ce->masm()->
|
||||
|
||||
// Code emitted by LIR node "LIR_OpZLoadBarrierTest" which in turn is emitted by ZBarrierSetC1::load_barrier.
|
||||
// The actual compare and branch instructions are represented as stand-alone LIR nodes.
|
||||
void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const {
|
||||
__ block_comment("load_barrier_test (zgc) {");
|
||||
static void z_uncolor(LIR_Assembler* ce, LIR_Opr ref) {
|
||||
Register r = ref->as_register();
|
||||
__ srdi(r, r, ZPointerLoadShift);
|
||||
}
|
||||
|
||||
__ ld(R0, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
|
||||
__ andr(R0, R0, ref->as_pointer_register());
|
||||
__ cmpdi(CCR5 /* as mandated by LIR node */, R0, 0);
|
||||
static void check_color(LIR_Assembler* ce, LIR_Opr ref, bool on_non_strong) {
|
||||
int relocFormat = on_non_strong ? ZBarrierRelocationFormatMarkBadMask
|
||||
: ZBarrierRelocationFormatLoadBadMask;
|
||||
__ relocate(barrier_Relocation::spec(), relocFormat);
|
||||
__ andi_(R0, ref->as_register(), barrier_Relocation::unpatched);
|
||||
}
|
||||
|
||||
__ block_comment("} load_barrier_test (zgc)");
|
||||
static void z_color(LIR_Assembler* ce, LIR_Opr ref) {
|
||||
__ sldi(ref->as_register(), ref->as_register(), ZPointerLoadShift);
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits);
|
||||
__ ori(ref->as_register(), ref->as_register(), barrier_Relocation::unpatched);
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const {
|
||||
z_uncolor(ce, ref);
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const {
|
||||
z_color(ce, ref);
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::generate_c1_load_barrier(LIR_Assembler* ce,
|
||||
LIR_Opr ref,
|
||||
ZLoadBarrierStubC1* stub,
|
||||
bool on_non_strong) const {
|
||||
check_color(ce, ref, on_non_strong);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *stub->entry());
|
||||
z_uncolor(ce, ref);
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
// Code emitted by code stub "ZLoadBarrierStubC1" which in turn is emitted by ZBarrierSetC1::load_barrier.
|
||||
@ -332,19 +701,77 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
|
||||
/* ==== Invoke stub ==== */
|
||||
// Pass arguments via stack. The stack pointer will be bumped by the stub.
|
||||
__ std(ref, (intptr_t) -1 * BytesPerWord, R1_SP);
|
||||
__ std(ref_addr, (intptr_t) -2 * BytesPerWord, R1_SP);
|
||||
__ std(ref, -1 * BytesPerWord, R1_SP);
|
||||
__ std(ref_addr, -2 * BytesPerWord, R1_SP);
|
||||
|
||||
__ load_const_optimized(R0, stub->runtime_stub());
|
||||
__ load_const_optimized(R0, stub->runtime_stub(), /* temp */ ref);
|
||||
__ call_stub(R0);
|
||||
|
||||
// The runtime stub passes the result via the R0 register, overriding the previously-loaded stub address.
|
||||
__ mr_if_needed(ref, R0);
|
||||
__ mr(ref, R0);
|
||||
__ b(*stub->continuation());
|
||||
|
||||
__ block_comment("} c1_load_barrier_stub (zgc)");
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::generate_c1_store_barrier(LIR_Assembler* ce,
|
||||
LIR_Address* addr,
|
||||
LIR_Opr new_zaddress,
|
||||
LIR_Opr new_zpointer,
|
||||
ZStoreBarrierStubC1* stub) const {
|
||||
Register rnew_zaddress = new_zaddress->as_register();
|
||||
Register rnew_zpointer = new_zpointer->as_register();
|
||||
|
||||
Register rbase = addr->base()->as_pointer_register();
|
||||
RegisterOrConstant ind_or_offs = (addr->index()->is_illegal())
|
||||
? (RegisterOrConstant)addr->disp()
|
||||
: (RegisterOrConstant)addr->index()->as_pointer_register();
|
||||
|
||||
store_barrier_fast(ce->masm(),
|
||||
rbase,
|
||||
ind_or_offs,
|
||||
rnew_zaddress,
|
||||
rnew_zpointer,
|
||||
true,
|
||||
stub->is_atomic(),
|
||||
*stub->entry(),
|
||||
*stub->continuation());
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::generate_c1_store_barrier_stub(LIR_Assembler* ce,
|
||||
ZStoreBarrierStubC1* stub) const {
|
||||
// Stub entry
|
||||
__ bind(*stub->entry());
|
||||
|
||||
Label slow;
|
||||
|
||||
LIR_Address* addr = stub->ref_addr()->as_address_ptr();
|
||||
assert(addr->index()->is_illegal() || addr->disp() == 0, "can't have both");
|
||||
Register rbase = addr->base()->as_pointer_register();
|
||||
RegisterOrConstant ind_or_offs = (addr->index()->is_illegal())
|
||||
? (RegisterOrConstant)addr->disp()
|
||||
: (RegisterOrConstant)addr->index()->as_pointer_register();
|
||||
Register new_zpointer = stub->new_zpointer()->as_register();
|
||||
|
||||
store_barrier_medium(ce->masm(),
|
||||
rbase,
|
||||
ind_or_offs,
|
||||
new_zpointer, // temp
|
||||
stub->is_atomic(),
|
||||
*stub->continuation(),
|
||||
slow);
|
||||
|
||||
__ bind(slow);
|
||||
|
||||
__ load_const_optimized(/*stub address*/ new_zpointer, stub->runtime_stub(), R0);
|
||||
__ add(R0, ind_or_offs, rbase); // pass store address in R0
|
||||
__ mtctr(new_zpointer);
|
||||
__ bctrl();
|
||||
|
||||
// Stub exit
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
#undef __
|
||||
#define __ sasm->
|
||||
|
||||
@ -360,8 +787,8 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler*
|
||||
__ save_LR_CR(R0);
|
||||
|
||||
// Load arguments back again from the stack.
|
||||
__ ld(R3_ARG1, (intptr_t) -1 * BytesPerWord, R1_SP); // ref
|
||||
__ ld(R4_ARG2, (intptr_t) -2 * BytesPerWord, R1_SP); // ref_addr
|
||||
__ ld(R3_ARG1, -1 * BytesPerWord, R1_SP); // ref
|
||||
__ ld(R4_ARG2, -2 * BytesPerWord, R1_SP); // ref_addr
|
||||
|
||||
__ push_frame_reg_args(nbytes_save, R0);
|
||||
|
||||
@ -379,6 +806,32 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler*
|
||||
__ block_comment("} c1_load_barrier_runtime_stub (zgc)");
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::generate_c1_store_barrier_runtime_stub(StubAssembler* sasm,
|
||||
bool self_healing) const {
|
||||
__ block_comment("c1_store_barrier_runtime_stub (zgc) {");
|
||||
|
||||
const int nbytes_save = MacroAssembler::num_volatile_regs * BytesPerWord;
|
||||
__ save_volatile_gprs(R1_SP, -nbytes_save);
|
||||
__ mr(R3_ARG1, R0); // store address
|
||||
|
||||
__ save_LR_CR(R0);
|
||||
__ push_frame_reg_args(nbytes_save, R0);
|
||||
|
||||
if (self_healing) {
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr());
|
||||
} else {
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr());
|
||||
}
|
||||
|
||||
__ pop_frame();
|
||||
__ restore_LR_CR(R3_RET);
|
||||
__ restore_volatile_gprs(R1_SP, -nbytes_save);
|
||||
|
||||
__ blr();
|
||||
|
||||
__ block_comment("} c1_store_barrier_runtime_stub (zgc)");
|
||||
}
|
||||
|
||||
#undef __
|
||||
#endif // COMPILER1
|
||||
|
||||
@ -406,8 +859,8 @@ class ZSaveLiveRegisters {
|
||||
int _frame_size;
|
||||
|
||||
public:
|
||||
ZSaveLiveRegisters(MacroAssembler *masm, ZLoadBarrierStubC2 *stub)
|
||||
: _masm(masm), _reg_mask(stub->live()), _result_reg(stub->ref()) {
|
||||
ZSaveLiveRegisters(MacroAssembler *masm, ZBarrierStubC2 *stub)
|
||||
: _masm(masm), _reg_mask(stub->live()), _result_reg(stub->result()) {
|
||||
|
||||
const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord;
|
||||
_frame_size = align_up(register_save_size, frame::alignment_in_bytes)
|
||||
@ -559,6 +1012,7 @@ class ZSetupArguments {
|
||||
#define __ masm->
|
||||
|
||||
void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
|
||||
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
|
||||
__ block_comment("generate_c2_load_barrier_stub (zgc) {");
|
||||
|
||||
__ bind(*stub->entry());
|
||||
@ -581,5 +1035,79 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
|
||||
__ block_comment("} generate_c2_load_barrier_stub (zgc)");
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const {
|
||||
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
|
||||
__ block_comment("ZStoreBarrierStubC2");
|
||||
|
||||
// Stub entry
|
||||
__ bind(*stub->entry());
|
||||
|
||||
Label slow;
|
||||
|
||||
Address addr = stub->ref_addr();
|
||||
Register rbase = addr.base();
|
||||
RegisterOrConstant ind_or_offs = (addr.index() == noreg)
|
||||
? (RegisterOrConstant)addr.disp()
|
||||
: (RegisterOrConstant)addr.index();
|
||||
|
||||
if (!stub->is_native()) {
|
||||
store_barrier_medium(masm,
|
||||
rbase,
|
||||
ind_or_offs,
|
||||
stub->new_zpointer(),
|
||||
stub->is_atomic(),
|
||||
*stub->continuation(),
|
||||
slow);
|
||||
}
|
||||
|
||||
__ bind(slow);
|
||||
{
|
||||
ZSaveLiveRegisters save_live_registers(masm, stub);
|
||||
__ add(R3_ARG1, ind_or_offs, rbase);
|
||||
if (stub->is_native()) {
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr(), R3_ARG1);
|
||||
} else if (stub->is_atomic()) {
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr(), R3_ARG1);
|
||||
} else {
|
||||
__ call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), R3_ARG1);
|
||||
}
|
||||
}
|
||||
|
||||
// Stub exit
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
#undef __
|
||||
#endif // COMPILER2
|
||||
|
||||
static uint16_t patch_barrier_relocation_value(int format) {
|
||||
switch (format) {
|
||||
case ZBarrierRelocationFormatLoadBadMask:
|
||||
return (uint16_t)ZPointerLoadBadMask;
|
||||
case ZBarrierRelocationFormatMarkBadMask:
|
||||
return (uint16_t)ZPointerMarkBadMask;
|
||||
case ZBarrierRelocationFormatStoreGoodBits:
|
||||
return (uint16_t)ZPointerStoreGoodMask;
|
||||
case ZBarrierRelocationFormatStoreBadMask:
|
||||
return (uint16_t)ZPointerStoreBadMask;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
|
||||
#ifdef ASSERT
|
||||
int inst = *(int*)addr;
|
||||
if (format == ZBarrierRelocationFormatStoreGoodBits) {
|
||||
assert(Assembler::is_li(inst) || Assembler::is_ori(inst) || Assembler::is_cmpli(inst),
|
||||
"unexpected instruction 0x%04x", inst);
|
||||
// Note: li uses sign extend, but these bits will get cleared by rldimi.
|
||||
} else {
|
||||
assert(Assembler::is_andi(inst), "unexpected instruction 0x%04x", inst);
|
||||
}
|
||||
#endif
|
||||
// Patch the signed/unsigned 16 bit immediate field of the instruction.
|
||||
*(uint16_t*)(addr BIG_ENDIAN_ONLY(+2)) = patch_barrier_relocation_value(format);
|
||||
ICache::ppc64_flush_icache_bytes(addr, BytesPerInstWord);
|
||||
}
|
||||
|
@ -32,17 +32,27 @@
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
class CodeStub;
|
||||
class LIR_Address;
|
||||
class LIR_Assembler;
|
||||
class LIR_Opr;
|
||||
class StubAssembler;
|
||||
class ZLoadBarrierStubC1;
|
||||
class ZStoreBarrierStubC1;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class MachNode;
|
||||
class Node;
|
||||
class ZLoadBarrierStubC2;
|
||||
class ZStoreBarrierStubC2;
|
||||
#endif // COMPILER2
|
||||
|
||||
const int ZBarrierRelocationFormatLoadBadMask = 0;
|
||||
const int ZBarrierRelocationFormatMarkBadMask = 1;
|
||||
const int ZBarrierRelocationFormatStoreGoodBits = 2;
|
||||
const int ZBarrierRelocationFormatStoreBadMask = 3;
|
||||
|
||||
class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
|
||||
public:
|
||||
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
@ -50,12 +60,10 @@ public:
|
||||
Register tmp1, Register tmp2,
|
||||
MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = nullptr);
|
||||
|
||||
#ifdef ASSERT
|
||||
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register base, RegisterOrConstant ind_or_offs, Register val,
|
||||
Register tmp1, Register tmp2, Register tmp3,
|
||||
MacroAssembler::PreservationLevel preservation_level);
|
||||
#endif // ASSERT
|
||||
|
||||
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register src, Register dst, Register count,
|
||||
@ -64,24 +72,102 @@ public:
|
||||
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
|
||||
Register obj, Register tmp, Label& slowpath);
|
||||
|
||||
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
|
||||
virtual void check_oop(MacroAssembler *masm, Register obj, const char* msg);
|
||||
|
||||
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_instruction_and_data_patch; }
|
||||
|
||||
#ifdef COMPILER1
|
||||
void generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const;
|
||||
|
||||
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
ZLoadBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const;
|
||||
|
||||
void generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const;
|
||||
void generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const;
|
||||
|
||||
void generate_c1_load_barrier(LIR_Assembler* ce,
|
||||
LIR_Opr ref,
|
||||
ZLoadBarrierStubC1* stub,
|
||||
bool on_non_strong) const;
|
||||
|
||||
void generate_c1_store_barrier(LIR_Assembler* ce,
|
||||
LIR_Address* addr,
|
||||
LIR_Opr new_zaddress,
|
||||
LIR_Opr new_zpointer,
|
||||
ZStoreBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_store_barrier_stub(LIR_Assembler* ce,
|
||||
ZStoreBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_store_barrier_runtime_stub(StubAssembler* sasm,
|
||||
bool self_healing) const;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const;
|
||||
|
||||
void generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const;
|
||||
|
||||
void generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const;
|
||||
#endif // COMPILER2
|
||||
|
||||
void store_barrier_fast(MacroAssembler* masm,
|
||||
Register ref_base,
|
||||
RegisterOrConstant ind_or_offset,
|
||||
Register rnew_persistent,
|
||||
Register rnew_transient,
|
||||
bool in_nmethod,
|
||||
bool is_atomic,
|
||||
Label& medium_path,
|
||||
Label& medium_path_continuation) const;
|
||||
|
||||
void store_barrier_medium(MacroAssembler* masm,
|
||||
Register ref_base,
|
||||
RegisterOrConstant ind_or_offs,
|
||||
Register tmp,
|
||||
bool is_atomic,
|
||||
Label& medium_path_continuation,
|
||||
Label& slow_path) const;
|
||||
|
||||
void load_copy_masks(MacroAssembler* masm,
|
||||
Register load_bad_mask,
|
||||
Register store_bad_mask,
|
||||
Register store_good_mask,
|
||||
bool dest_uninitialized) const;
|
||||
void copy_load_at_fast(MacroAssembler* masm,
|
||||
Register zpointer,
|
||||
Register addr,
|
||||
Register load_bad_mask,
|
||||
Label& slow_path,
|
||||
Label& continuation) const;
|
||||
void copy_load_at_slow(MacroAssembler* masm,
|
||||
Register zpointer,
|
||||
Register addr,
|
||||
Register tmp,
|
||||
Label& slow_path,
|
||||
Label& continuation) const;
|
||||
void copy_store_at_fast(MacroAssembler* masm,
|
||||
Register zpointer,
|
||||
Register addr,
|
||||
Register store_bad_mask,
|
||||
Register store_good_mask,
|
||||
Label& medium_path,
|
||||
Label& continuation,
|
||||
bool dest_uninitialized) const;
|
||||
void copy_store_at_slow(MacroAssembler* masm,
|
||||
Register addr,
|
||||
Register tmp,
|
||||
Label& medium_path,
|
||||
Label& continuation,
|
||||
bool dest_uninitialized) const;
|
||||
|
||||
void generate_disjoint_oop_copy(MacroAssembler* masm, bool dest_uninitialized);
|
||||
void generate_conjoint_oop_copy(MacroAssembler* masm, bool dest_uninitialized);
|
||||
|
||||
void patch_barrier_relocation(address addr, int format);
|
||||
|
||||
void patch_barriers() {}
|
||||
};
|
||||
|
||||
#endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
|
||||
#endif // CPU_PPC_GC_Z_ZBARRIERSETASSEMBLER_PPC_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -26,10 +26,7 @@
|
||||
#define CPU_PPC_GC_Z_ZGLOBALS_PPC_HPP
|
||||
|
||||
#include "globalDefinitions_ppc.hpp"
|
||||
const size_t ZPlatformHeapViews = 3;
|
||||
|
||||
const size_t ZPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE;
|
||||
|
||||
size_t ZPlatformAddressOffsetBits();
|
||||
size_t ZPlatformAddressMetadataShift();
|
||||
|
||||
#endif // CPU_PPC_GC_Z_ZGLOBALS_PPC_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
//
|
||||
// Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2021 SAP SE. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
@ -32,51 +32,73 @@ source_hpp %{
|
||||
|
||||
source %{
|
||||
|
||||
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref,
|
||||
Register tmp, uint8_t barrier_data) {
|
||||
if (barrier_data == ZLoadBarrierElided) {
|
||||
return;
|
||||
}
|
||||
#include "gc/z/zBarrierSetAssembler.hpp"
|
||||
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
|
||||
__ ld(tmp, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
|
||||
__ and_(tmp, tmp, ref);
|
||||
__ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
|
||||
__ bind(*stub->continuation());
|
||||
static void z_color(MacroAssembler& _masm, Register dst, Register src) {
|
||||
assert_different_registers(dst, src);
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits);
|
||||
__ li(dst, barrier_Relocation::unpatched); // Load color bits.
|
||||
if (src == noreg) { // noreg encodes null.
|
||||
if (ZPointerLoadShift >= 16) {
|
||||
__ rldicl(dst, dst, 0, 64 - ZPointerLoadShift); // Clear sign extension from li.
|
||||
}
|
||||
} else {
|
||||
__ rldimi(dst, src, ZPointerLoadShift, 0); // Insert shifted pointer.
|
||||
}
|
||||
}
|
||||
|
||||
static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref,
|
||||
Register tmp) {
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong);
|
||||
__ b(*stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
static void z_uncolor(MacroAssembler& _masm, Register ref) {
|
||||
__ srdi(ref, ref, ZPointerLoadShift);
|
||||
}
|
||||
|
||||
static void check_color(MacroAssembler& _masm, Register ref, bool on_non_strong) {
|
||||
int relocFormat = on_non_strong ? ZBarrierRelocationFormatMarkBadMask
|
||||
: ZBarrierRelocationFormatLoadBadMask;
|
||||
__ relocate(barrier_Relocation::spec(), relocFormat);
|
||||
__ andi_(R0, ref, barrier_Relocation::unpatched);
|
||||
}
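// Illustrative sketch (not part of the patch): the arithmetic z_color and
// z_uncolor emit above, written as plain C++. The shift and the "good" color
// bits below are made-up stand-ins; the real values come from
// ZPointerLoadShift and from the runtime-patched barrier relocation.
#include <cstdint>

constexpr unsigned kLoadShift     = 16;   // assumed stand-in for ZPointerLoadShift
constexpr uint64_t kStoreGoodBits = 0x5;  // assumed stand-in for the good color bits

uint64_t color(uint64_t address) {        // rldimi: insert the address above the color bits
  return (address << kLoadShift) | kStoreGoodBits;
}

uint64_t uncolor(uint64_t zpointer) {     // srdi: drop the color bits again
  return zpointer >> kLoadShift;
}
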
|
||||
|
||||
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) {
|
||||
Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
|
||||
if (node->barrier_data() == ZBarrierElided) {
|
||||
z_uncolor(_masm, ref);
|
||||
} else {
|
||||
const bool on_non_strong =
|
||||
((node->barrier_data() & ZBarrierWeak) != 0) ||
|
||||
((node->barrier_data() & ZBarrierPhantom) != 0);
|
||||
|
||||
check_color(_masm, ref, on_non_strong);
|
||||
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
|
||||
__ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
|
||||
|
||||
z_uncolor(_masm, ref);
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
}
|
||||
|
||||
static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Register ref_base, intptr_t disp, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
|
||||
Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
|
||||
if (node->barrier_data() == ZBarrierElided) {
|
||||
z_color(_masm, rnew_zpointer, rnew_zaddress);
|
||||
} else {
|
||||
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
|
||||
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, Address(ref_base, disp), rnew_zaddress, rnew_zpointer, is_native, is_atomic);
|
||||
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
|
||||
bs_asm->store_barrier_fast(&_masm, ref_base, disp, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
|
||||
}
|
||||
}
|
||||
|
||||
static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
|
||||
Register res, Register mem, Register oldval, Register newval,
|
||||
Register tmp_xchg, Register tmp_mask,
|
||||
bool weak, bool acquire) {
|
||||
// z-specific load barrier requires strong CAS operations.
|
||||
// Weak CAS operations are thus only emitted if the barrier is elided.
|
||||
__ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true,
|
||||
weak && node->barrier_data() == ZLoadBarrierElided);
|
||||
Register tmp1, Register tmp2, bool acquire) {
|
||||
|
||||
if (node->barrier_data() != ZLoadBarrierElided) {
|
||||
Label skip_barrier;
|
||||
|
||||
__ ld(tmp_mask, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
|
||||
__ and_(tmp_mask, tmp_mask, tmp_xchg);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
|
||||
// CAS must have failed because pointer in memory is bad.
|
||||
z_load_barrier_slow_path(_masm, node, Address(mem), tmp_xchg, res /* used as tmp */);
|
||||
|
||||
__ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, NULL, true, weak);
|
||||
|
||||
__ bind(skip_barrier);
|
||||
}
|
||||
Register rold_zpointer = tmp1, rnew_zpointer = tmp2;
|
||||
z_store_barrier(_masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
|
||||
z_color(_masm, rold_zpointer, oldval);
|
||||
__ cmpxchgd(CCR0, R0, rold_zpointer, rnew_zpointer, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true,
|
||||
false /* we could support weak, but benefit is questionable */);
|
||||
|
||||
if (acquire) {
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@ -90,27 +112,16 @@ static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
|
||||
}
|
||||
|
||||
static void z_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
|
||||
Register res, Register mem, Register oldval, Register newval, Register tmp,
|
||||
bool weak, bool acquire) {
|
||||
// z-specific load barrier requires strong CAS operations.
|
||||
// Weak CAS operations are thus only emitted if the barrier is elided.
|
||||
__ cmpxchgd(CCR0, res, oldval, newval, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true,
|
||||
weak && node->barrier_data() == ZLoadBarrierElided);
|
||||
Register res, Register mem, Register oldval, Register newval,
|
||||
Register tmp, bool acquire) {
|
||||
|
||||
if (node->barrier_data() != ZLoadBarrierElided) {
|
||||
Label skip_barrier;
|
||||
__ ld(tmp, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
|
||||
__ and_(tmp, tmp, res);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
|
||||
z_load_barrier_slow_path(_masm, node, Address(mem), res, tmp);
|
||||
|
||||
__ cmpxchgd(CCR0, res, oldval, newval, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true, weak);
|
||||
|
||||
__ bind(skip_barrier);
|
||||
}
|
||||
Register rold_zpointer = R0, rnew_zpointer = tmp;
|
||||
z_store_barrier(_masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
|
||||
z_color(_masm, rold_zpointer, oldval);
|
||||
__ cmpxchgd(CCR0, res, rold_zpointer, rnew_zpointer, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true,
|
||||
false /* we could support weak, but benefit is questionable */);
|
||||
z_uncolor(_masm, res);
|
||||
|
||||
if (acquire) {
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@ -125,114 +136,110 @@ static void z_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
|
||||
|
||||
%}
|
||||
|
||||
instruct zLoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
|
||||
effect(TEMP_DEF dst, KILL cr0);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
predicate((UseZGC && n->as_Load()->barrier_data() != 0)
|
||||
predicate((UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0)
|
||||
&& (n->as_Load()->is_unordered() || followed_by_acquire(n)));
|
||||
|
||||
format %{ "LD $dst, $mem" %}
|
||||
ins_encode %{
|
||||
assert($mem$$index == 0, "sanity");
|
||||
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
|
||||
z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
|
||||
z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
// Load Pointer Volatile
|
||||
instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
effect(TEMP_DEF dst, TEMP tmp, KILL cr0);
|
||||
effect(TEMP_DEF dst, KILL cr0);
|
||||
ins_cost(3 * MEMORY_REF_COST);
|
||||
|
||||
// Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation
|
||||
predicate(UseZGC && n->as_Load()->barrier_data() != 0);
|
||||
predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0);
|
||||
|
||||
format %{ "LD acq $dst, $mem" %}
|
||||
ins_encode %{
|
||||
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
|
||||
z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
|
||||
z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register);
|
||||
|
||||
// Uses the isync instruction as an acquire barrier.
|
||||
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
|
||||
if (barrier_data() == ZBarrierElided) __ twi_0($dst$$Register);
|
||||
__ isync();
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct zCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
|
||||
// Store Pointer
|
||||
instruct zStoreP(memoryAlg4 mem, iRegPsrc src, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreP mem src));
|
||||
effect(TEMP tmp, KILL cr0);
|
||||
ins_cost(2 * MEMORY_REF_COST);
|
||||
format %{ "std $mem, $src\t# ptr" %}
|
||||
ins_encode %{
|
||||
z_store_barrier(_masm, this, $mem$$base$$Register, $mem$$disp, $src$$Register, $tmp$$Register, false /* is_atomic */);
|
||||
__ std($tmp$$Register, $mem$$disp, $mem$$base$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
|
||||
instruct zStorePNull(memoryAlg4 mem, immP_0 zero, iRegPdst tmp, flagsRegCR0 cr0)
|
||||
%{
|
||||
predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreP mem zero));
|
||||
effect(TEMP tmp, KILL cr0);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
format %{ "std $mem, null\t# ptr" %}
|
||||
ins_encode %{
|
||||
z_store_barrier(_masm, this, $mem$$base$$Register, $mem$$disp, noreg, $tmp$$Register, false /* is_atomic */);
|
||||
__ std($tmp$$Register, $mem$$disp, $mem$$base$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct zCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
|
||||
predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
|
||||
&& (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst));
|
||||
|
||||
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
z_compare_and_swap(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
|
||||
$tmp_xchg$$Register, $tmp_mask$$Register,
|
||||
false /* weak */, false /* acquire */);
|
||||
$tmp1$$Register, $tmp2$$Register,
|
||||
false /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct zCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
|
||||
iRegPdst tmp1, iRegPdst tmp2, flagsRegCR0 cr0) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0);
|
||||
|
||||
predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
|
||||
predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
|
||||
&& (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst));
|
||||
|
||||
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
z_compare_and_swap(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
|
||||
$tmp_xchg$$Register, $tmp_mask$$Register,
|
||||
false /* weak */, true /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct zCompareAndSwapPWeak(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
|
||||
|
||||
predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
|
||||
&& ((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst);
|
||||
|
||||
format %{ "weak CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
z_compare_and_swap(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
|
||||
$tmp_xchg$$Register, $tmp_mask$$Register,
|
||||
true /* weak */, false /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
instruct zCompareAndSwapPWeak_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval,
|
||||
iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0);
|
||||
|
||||
predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
|
||||
&& (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst));
|
||||
|
||||
format %{ "weak CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
z_compare_and_swap(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
|
||||
$tmp_xchg$$Register, $tmp_mask$$Register,
|
||||
true /* weak */, true /* acquire */);
|
||||
$tmp1$$Register, $tmp2$$Register,
|
||||
true /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
@ -242,7 +249,7 @@ instruct zCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
|
||||
predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
|
||||
predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
|
||||
&& (
|
||||
((CompareAndSwapNode*)n)->order() != MemNode::acquire
|
||||
&& ((CompareAndSwapNode*)n)->order() != MemNode::seqcst
|
||||
@ -252,7 +259,7 @@ instruct zCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP
|
||||
ins_encode %{
|
||||
z_compare_and_exchange(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
|
||||
false /* weak */, false /* acquire */);
|
||||
false /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
@ -262,7 +269,7 @@ instruct zCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
|
||||
predicate((UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)
|
||||
predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0)
|
||||
&& (
|
||||
((CompareAndSwapNode*)n)->order() == MemNode::acquire
|
||||
|| ((CompareAndSwapNode*)n)->order() == MemNode::seqcst
|
||||
@ -272,7 +279,7 @@ instruct zCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i
|
||||
ins_encode %{
|
||||
z_compare_and_exchange(_masm, this,
|
||||
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
|
||||
false /* weak */, true /* acquire */);
|
||||
true /* acquire */);
|
||||
%}
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
@ -281,12 +288,14 @@ instruct zGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp,
|
||||
match(Set res (GetAndSetP mem newval));
|
||||
effect(TEMP_DEF res, TEMP tmp, KILL cr0);
|
||||
|
||||
predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
|
||||
predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
|
||||
|
||||
format %{ "GetAndSetP $res, $mem, $newval" %}
|
||||
ins_encode %{
|
||||
__ getandsetd($res$$Register, $newval$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
|
||||
z_load_barrier(_masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data());
|
||||
Register rnew_zpointer = $tmp$$Register, result = $res$$Register;
|
||||
z_store_barrier(_masm, this, $mem$$Register, 0, $newval$$Register, rnew_zpointer, true /* is_atomic */);
|
||||
__ getandsetd(result, rnew_zpointer, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
|
||||
z_uncolor(_masm, result);
|
||||
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ isync();
|
||||
|
@ -354,7 +354,7 @@ inline void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorat
|
||||
Register tmp1, Register tmp2, Register tmp3,
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
|
||||
ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
|
||||
ON_UNKNOWN_OOP_REF | IS_DEST_UNINITIALIZED)) == 0, "unsupported decorator");
|
||||
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
bool as_raw = (decorators & AS_RAW) != 0;
|
||||
decorators = AccessInternal::decorator_fixup(decorators, type);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -39,7 +39,8 @@
|
||||
format_width = 0
|
||||
#else
|
||||
// Except narrow oops in 64-bits VM.
|
||||
format_width = 1
|
||||
// Must be at least 2 for ZGC GC barrier patching.
|
||||
format_width = 2
|
||||
#endif
|
||||
};
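// Illustrative note (not part of the patch): with format_width = 2 the
// relocation format field can hold the values 0..3, which is exactly enough
// for the four ZBarrierRelocationFormat* constants used by the barrier
// patching code above.
static_assert(3 < (1 << 2), "four relocation formats fit in a 2-bit field");
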
|
||||
|
||||
|
@ -47,6 +47,10 @@
|
||||
#include "runtime/vm_version.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
#if INCLUDE_ZGC
|
||||
#include "gc/x/xBarrierSetAssembler.hpp"
|
||||
#include "gc/z/zBarrierSetAssembler.hpp"
|
||||
#endif
|
||||
|
||||
// Declaration and definition of StubGenerator (no .hpp file).
|
||||
// For a more detailed description of the stub routine structure
|
||||
@ -61,9 +65,9 @@
|
||||
#endif
|
||||
|
||||
#if defined(ABI_ELFv2)
|
||||
#define STUB_ENTRY(name) StubRoutines::name()
|
||||
#define STUB_ENTRY(name) StubRoutines::name
|
||||
#else
|
||||
#define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry()
|
||||
#define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name)->entry()
|
||||
#endif
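// Illustrative sketch (not part of the patch): the effect of moving the
// parentheses out of STUB_ENTRY. With the old definition the macro itself
// appended "()", so only zero-argument StubRoutines accessors could be named;
// with the new definition the caller writes the call, which also allows
// STUB_ENTRY(oop_disjoint_arraycopy(dest_uninitialized)) further down.
#define OLD_STUB_ENTRY(name) StubRoutines::name()   // macro forces "name()"
#define NEW_STUB_ENTRY(name) StubRoutines::name     // caller supplies the call
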
|
||||
|
||||
class StubGenerator: public StubCodeGenerator {
|
||||
@ -1182,8 +1186,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
Register tmp3 = R8_ARG6;
|
||||
|
||||
address nooverlap_target = aligned ?
|
||||
STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) :
|
||||
STUB_ENTRY(jbyte_disjoint_arraycopy);
|
||||
STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy()) :
|
||||
STUB_ENTRY(jbyte_disjoint_arraycopy());
|
||||
|
||||
array_overlap_test(nooverlap_target, 0);
|
||||
// Do reverse copy. We assume the case of actual overlap is rare enough
|
||||
@ -1454,8 +1458,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
Register tmp3 = R8_ARG6;
|
||||
|
||||
address nooverlap_target = aligned ?
|
||||
STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) :
|
||||
STUB_ENTRY(jshort_disjoint_arraycopy);
|
||||
STUB_ENTRY(arrayof_jshort_disjoint_arraycopy()) :
|
||||
STUB_ENTRY(jshort_disjoint_arraycopy());
|
||||
|
||||
array_overlap_test(nooverlap_target, 1);
|
||||
|
||||
@ -1767,8 +1771,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address start = __ function_entry();
|
||||
assert_positive_int(R5_ARG3);
|
||||
address nooverlap_target = aligned ?
|
||||
STUB_ENTRY(arrayof_jint_disjoint_arraycopy) :
|
||||
STUB_ENTRY(jint_disjoint_arraycopy);
|
||||
STUB_ENTRY(arrayof_jint_disjoint_arraycopy()) :
|
||||
STUB_ENTRY(jint_disjoint_arraycopy());
|
||||
|
||||
array_overlap_test(nooverlap_target, 2);
|
||||
{
|
||||
@ -2024,8 +2028,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address start = __ function_entry();
|
||||
assert_positive_int(R5_ARG3);
|
||||
address nooverlap_target = aligned ?
|
||||
STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) :
|
||||
STUB_ENTRY(jlong_disjoint_arraycopy);
|
||||
STUB_ENTRY(arrayof_jlong_disjoint_arraycopy()) :
|
||||
STUB_ENTRY(jlong_disjoint_arraycopy());
|
||||
|
||||
array_overlap_test(nooverlap_target, 3);
|
||||
{
|
||||
@ -2054,8 +2058,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address start = __ function_entry();
|
||||
assert_positive_int(R5_ARG3);
|
||||
address nooverlap_target = aligned ?
|
||||
STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
|
||||
STUB_ENTRY(oop_disjoint_arraycopy);
|
||||
STUB_ENTRY(arrayof_oop_disjoint_arraycopy(dest_uninitialized)) :
|
||||
STUB_ENTRY(oop_disjoint_arraycopy(dest_uninitialized));
|
||||
|
||||
array_overlap_test(nooverlap_target, UseCompressedOops ? 2 : 3);
|
||||
|
||||
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
|
||||
if (dest_uninitialized) {
|
||||
@ -2069,10 +2075,14 @@ class StubGenerator: public StubCodeGenerator {
|
||||
bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);
|
||||
|
||||
if (UseCompressedOops) {
|
||||
array_overlap_test(nooverlap_target, 2);
|
||||
generate_conjoint_int_copy_core(aligned);
|
||||
} else {
|
||||
array_overlap_test(nooverlap_target, 3);
|
||||
#if INCLUDE_ZGC
|
||||
if (UseZGC && ZGenerational) {
|
||||
ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)bs;
|
||||
zbs->generate_conjoint_oop_copy(_masm, dest_uninitialized);
|
||||
} else
|
||||
#endif
|
||||
generate_conjoint_long_copy_core(aligned);
|
||||
}
|
||||
|
||||
@ -2110,6 +2120,12 @@ class StubGenerator: public StubCodeGenerator {
|
||||
if (UseCompressedOops) {
|
||||
generate_disjoint_int_copy_core(aligned);
|
||||
} else {
|
||||
#if INCLUDE_ZGC
|
||||
if (UseZGC && ZGenerational) {
|
||||
ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)bs;
|
||||
zbs->generate_disjoint_oop_copy(_masm, dest_uninitialized);
|
||||
} else
|
||||
#endif
|
||||
generate_disjoint_long_copy_core(aligned);
|
||||
}
|
||||
|
||||
@ -2222,6 +2238,13 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ stw(R10_oop, R8_offset, R4_to);
|
||||
} else {
|
||||
__ bind(store_null);
|
||||
#if INCLUDE_ZGC
|
||||
if (UseZGC && ZGenerational) {
|
||||
__ store_heap_oop(R10_oop, R8_offset, R4_to, R11_scratch1, R12_tmp, noreg,
|
||||
MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS,
|
||||
dest_uninitialized ? IS_DEST_UNINITIALIZED : 0);
|
||||
} else
|
||||
#endif
|
||||
__ std(R10_oop, R8_offset, R4_to);
|
||||
}
|
||||
|
||||
@ -2231,6 +2254,14 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// ======== loop entry is here ========
|
||||
__ bind(load_element);
|
||||
#if INCLUDE_ZGC
|
||||
if (UseZGC && ZGenerational) {
|
||||
__ load_heap_oop(R10_oop, R8_offset, R3_from,
|
||||
R11_scratch1, R12_tmp,
|
||||
MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS,
|
||||
0, &store_null);
|
||||
} else
|
||||
#endif
|
||||
__ load_heap_oop(R10_oop, R8_offset, R3_from,
|
||||
R11_scratch1, R12_tmp,
|
||||
MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS,
|
||||
@ -3136,18 +3167,18 @@ class StubGenerator: public StubCodeGenerator {
|
||||
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true);
|
||||
|
||||
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
|
||||
STUB_ENTRY(jbyte_arraycopy),
|
||||
STUB_ENTRY(jshort_arraycopy),
|
||||
STUB_ENTRY(jint_arraycopy),
|
||||
STUB_ENTRY(jlong_arraycopy));
|
||||
STUB_ENTRY(jbyte_arraycopy()),
|
||||
STUB_ENTRY(jshort_arraycopy()),
|
||||
STUB_ENTRY(jint_arraycopy()),
|
||||
STUB_ENTRY(jlong_arraycopy()));
|
||||
StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
|
||||
STUB_ENTRY(jbyte_arraycopy),
|
||||
STUB_ENTRY(jshort_arraycopy),
|
||||
STUB_ENTRY(jint_arraycopy),
|
||||
STUB_ENTRY(oop_arraycopy),
|
||||
STUB_ENTRY(oop_disjoint_arraycopy),
|
||||
STUB_ENTRY(jlong_arraycopy),
|
||||
STUB_ENTRY(checkcast_arraycopy));
|
||||
STUB_ENTRY(jbyte_arraycopy()),
|
||||
STUB_ENTRY(jshort_arraycopy()),
|
||||
STUB_ENTRY(jint_arraycopy()),
|
||||
STUB_ENTRY(oop_arraycopy()),
|
||||
STUB_ENTRY(oop_disjoint_arraycopy()),
|
||||
STUB_ENTRY(jlong_arraycopy()),
|
||||
STUB_ENTRY(checkcast_arraycopy()));
|
||||
|
||||
// fill routines
|
||||
#ifdef COMPILER2
|
||||
|
@ -858,7 +858,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
__ decode_heap_oop(dest->as_register());
|
||||
}
|
||||
|
||||
if (!UseZGC) {
|
||||
if (!(UseZGC && !ZGenerational)) {
|
||||
// Load barrier has not yet been applied, so ZGC can't verify the oop here
|
||||
__ verify_oop(dest->as_register());
|
||||
}
|
||||
@ -1264,10 +1264,13 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
|
||||
if (UseCompressedOops) {
|
||||
Register tmp1 = op->tmp1()->as_register();
|
||||
assert(op->tmp1()->is_valid(), "must be");
|
||||
Register tmp2 = op->tmp2()->as_register();
|
||||
assert(op->tmp2()->is_valid(), "must be");
|
||||
|
||||
__ encode_heap_oop(tmp1, cmpval);
|
||||
cmpval = tmp1;
|
||||
__ encode_heap_oop(t1, newval);
|
||||
newval = t1;
|
||||
__ encode_heap_oop(tmp2, newval);
|
||||
newval = tmp2;
|
||||
caswu(addr, newval, cmpval);
|
||||
} else {
|
||||
casl(addr, newval, cmpval);
|
||||
@ -1277,6 +1280,11 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
|
||||
} else {
|
||||
casl(addr, newval, cmpval);
|
||||
}
|
||||
|
||||
if (op->result_opr()->is_valid()) {
|
||||
assert(op->result_opr()->is_register(), "need a register");
|
||||
__ mv(as_reg(op->result_opr()), t0); // cas result in t0, and 0 for success
|
||||
}
|
||||
}
|
||||
|
||||
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
|
||||
|
458
src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp
Normal file
@ -0,0 +1,458 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "code/codeBlob.hpp"
|
||||
#include "code/vmreg.inline.hpp"
|
||||
#include "gc/x/xBarrier.inline.hpp"
|
||||
#include "gc/x/xBarrierSet.hpp"
|
||||
#include "gc/x/xBarrierSetAssembler.hpp"
|
||||
#include "gc/x/xBarrierSetRuntime.hpp"
|
||||
#include "gc/x/xThreadLocalData.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_LIRAssembler.hpp"
|
||||
#include "c1/c1_MacroAssembler.hpp"
|
||||
#include "gc/x/c1/xBarrierSetC1.hpp"
|
||||
#endif // COMPILER1
|
||||
#ifdef COMPILER2
|
||||
#include "gc/x/c2/xBarrierSetC2.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) /* nothing */
|
||||
#else
|
||||
#define BLOCK_COMMENT(str) __ block_comment(str)
|
||||
#endif
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
void XBarrierSetAssembler::load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Register dst,
|
||||
Address src,
|
||||
Register tmp1,
|
||||
Register tmp2) {
|
||||
if (!XBarrierSet::barrier_needed(decorators, type)) {
|
||||
// Barrier not needed
|
||||
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
|
||||
return;
|
||||
}
|
||||
|
||||
assert_different_registers(t1, src.base());
|
||||
assert_different_registers(t0, t1, dst);
|
||||
|
||||
Label done;
|
||||
|
||||
// Load bad mask into temp register.
|
||||
__ la(t0, src);
|
||||
__ ld(t1, address_bad_mask_from_thread(xthread));
|
||||
__ ld(dst, Address(t0));
|
||||
|
||||
// Test reference against bad mask. If mask bad, then we need to fix it up.
|
||||
__ andr(t1, dst, t1);
|
||||
__ beqz(t1, done);
|
||||
|
||||
__ enter();
|
||||
|
||||
__ push_call_clobbered_registers_except(RegSet::of(dst));
|
||||
|
||||
if (c_rarg0 != dst) {
|
||||
__ mv(c_rarg0, dst);
|
||||
}
|
||||
|
||||
__ mv(c_rarg1, t0);
|
||||
|
||||
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
|
||||
|
||||
// Make sure dst has the return value.
|
||||
if (dst != x10) {
|
||||
__ mv(dst, x10);
|
||||
}
|
||||
|
||||
__ pop_call_clobbered_registers_except(RegSet::of(dst));
|
||||
__ leave();
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
|
||||
void XBarrierSetAssembler::store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Address dst,
|
||||
Register val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3) {
|
||||
// Verify value
|
||||
if (is_reference_type(type)) {
|
||||
// Note that src could be noreg, which means we
|
||||
// are storing null and can skip verification.
|
||||
if (val != noreg) {
|
||||
Label done;
|
||||
|
||||
// tmp1, tmp2 and tmp3 are often set to noreg.
|
||||
RegSet savedRegs = RegSet::of(t0);
|
||||
__ push_reg(savedRegs, sp);
|
||||
|
||||
__ ld(t0, address_bad_mask_from_thread(xthread));
|
||||
__ andr(t0, val, t0);
|
||||
__ beqz(t0, done);
|
||||
__ stop("Verify oop store failed");
|
||||
__ should_not_reach_here();
|
||||
__ bind(done);
|
||||
__ pop_reg(savedRegs, sp);
|
||||
}
|
||||
}
|
||||
|
||||
// Store value
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg);
|
||||
}
|
||||
|
||||
#endif // ASSERT
|
||||
|
||||
void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
bool is_oop,
|
||||
Register src,
|
||||
Register dst,
|
||||
Register count,
|
||||
RegSet saved_regs) {
|
||||
if (!is_oop) {
|
||||
// Barrier not needed
|
||||
return;
|
||||
}
|
||||
|
||||
BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {");
|
||||
|
||||
assert_different_registers(src, count, t0);
|
||||
|
||||
__ push_reg(saved_regs, sp);
|
||||
|
||||
if (count == c_rarg0 && src == c_rarg1) {
|
||||
// exactly backwards!!
|
||||
__ xorr(c_rarg0, c_rarg0, c_rarg1);
|
||||
__ xorr(c_rarg1, c_rarg0, c_rarg1);
|
||||
__ xorr(c_rarg0, c_rarg0, c_rarg1);
|
||||
} else {
|
||||
__ mv(c_rarg0, src);
|
||||
__ mv(c_rarg1, count);
|
||||
}
|
||||
|
||||
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2);
|
||||
|
||||
__ pop_reg(saved_regs, sp);
|
||||
|
||||
BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue");
|
||||
}
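// Illustrative sketch (not part of the patch): the in-place swap the three
// xorr instructions in arraycopy_prologue above perform when the incoming
// registers are "exactly backwards", i.e. count already sits in c_rarg0 and
// src in c_rarg1.
#include <cstdint>

void xor_swap(uint64_t& a, uint64_t& b) {
  a ^= b;  // a = a0 ^ b0
  b ^= a;  // b = b0 ^ (a0 ^ b0) = a0
  a ^= b;  // a = (a0 ^ b0) ^ a0 = b0
}
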
|
||||
|
||||
void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
|
||||
Register jni_env,
|
||||
Register robj,
|
||||
Register tmp,
|
||||
Label& slowpath) {
|
||||
BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {");
|
||||
|
||||
assert_different_registers(jni_env, robj, tmp);
|
||||
|
||||
// Resolve jobject
|
||||
BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);
|
||||
|
||||
// Compute the offset of address bad mask from the field of jni_environment
|
||||
long int bad_mask_relative_offset = (long int) (in_bytes(XThreadLocalData::address_bad_mask_offset()) -
|
||||
in_bytes(JavaThread::jni_environment_offset()));
|
||||
|
||||
// Load the address bad mask
|
||||
__ ld(tmp, Address(jni_env, bad_mask_relative_offset));
|
||||
|
||||
// Check address bad mask
|
||||
__ andr(tmp, robj, tmp);
|
||||
__ bnez(tmp, slowpath);
|
||||
|
||||
BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native");
|
||||
}
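// Illustrative sketch (not part of the patch): why the offset above is taken
// relative to jni_env. The JNIEnv* handed to native code points at a field
// inside the JavaThread, so the thread-local bad mask is reachable by adding
// (address_bad_mask_offset - jni_environment_offset) to it. The struct below
// is a made-up layout for illustration only.
#include <cstddef>
#include <cstdint>

struct FakeThread {
  uint64_t other_state[4];
  void*    jni_environment;   // the address of this field is the JNIEnv*
  uint64_t address_bad_mask;
};

uint64_t load_bad_mask(char* jni_env) {
  const ptrdiff_t rel = offsetof(FakeThread, address_bad_mask) -
                        offsetof(FakeThread, jni_environment);
  return *reinterpret_cast<uint64_t*>(jni_env + rel);
}
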
|
||||
|
||||
#ifdef COMPILER2
|
||||
|
||||
OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
|
||||
if (!OptoReg::is_reg(opto_reg)) {
|
||||
return OptoReg::Bad;
|
||||
}
|
||||
|
||||
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
|
||||
if (vm_reg->is_FloatRegister()) {
|
||||
return opto_reg & ~1;
|
||||
}
|
||||
|
||||
return opto_reg;
|
||||
}
|
||||
|
||||
#undef __
|
||||
#define __ _masm->
|
||||
|
||||
class XSaveLiveRegisters {
|
||||
private:
|
||||
MacroAssembler* const _masm;
|
||||
RegSet _gp_regs;
|
||||
FloatRegSet _fp_regs;
|
||||
VectorRegSet _vp_regs;
|
||||
|
||||
public:
|
||||
void initialize(XLoadBarrierStubC2* stub) {
|
||||
// Record registers that needs to be saved/restored
|
||||
RegMaskIterator rmi(stub->live());
|
||||
while (rmi.has_next()) {
|
||||
const OptoReg::Name opto_reg = rmi.next();
|
||||
if (OptoReg::is_reg(opto_reg)) {
|
||||
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
|
||||
if (vm_reg->is_Register()) {
|
||||
_gp_regs += RegSet::of(vm_reg->as_Register());
|
||||
} else if (vm_reg->is_FloatRegister()) {
|
||||
_fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
|
||||
} else if (vm_reg->is_VectorRegister()) {
|
||||
const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegister::max_slots_per_register - 1));
|
||||
_vp_regs += VectorRegSet::of(vm_reg_base->as_VectorRegister());
|
||||
} else {
|
||||
fatal("Unknown register type");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove C-ABI SOE registers, tmp regs and _ref register that will be updated
|
||||
_gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->ref());
|
||||
}
|
||||
|
||||
XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
|
||||
_masm(masm),
|
||||
_gp_regs(),
|
||||
_fp_regs(),
|
||||
_vp_regs() {
|
||||
// Figure out what registers to save/restore
|
||||
initialize(stub);
|
||||
|
||||
// Save registers
|
||||
__ push_reg(_gp_regs, sp);
|
||||
__ push_fp(_fp_regs, sp);
|
||||
__ push_v(_vp_regs, sp);
|
||||
}
|
||||
|
||||
~XSaveLiveRegisters() {
|
||||
// Restore registers
|
||||
__ pop_v(_vp_regs, sp);
|
||||
__ pop_fp(_fp_regs, sp);
|
||||
__ pop_reg(_gp_regs, sp);
|
||||
}
|
||||
};
|
||||
|
||||
class XSetupArguments {
|
||||
private:
|
||||
MacroAssembler* const _masm;
|
||||
const Register _ref;
|
||||
const Address _ref_addr;
|
||||
|
||||
public:
|
||||
XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
|
||||
_masm(masm),
|
||||
_ref(stub->ref()),
|
||||
_ref_addr(stub->ref_addr()) {
|
||||
|
||||
// Setup arguments
|
||||
if (_ref_addr.base() == noreg) {
|
||||
// No self healing
|
||||
if (_ref != c_rarg0) {
|
||||
__ mv(c_rarg0, _ref);
|
||||
}
|
||||
__ mv(c_rarg1, zr);
|
||||
} else {
|
||||
// Self healing
|
||||
if (_ref == c_rarg0) {
|
||||
// _ref is already at correct place
|
||||
__ la(c_rarg1, _ref_addr);
|
||||
} else if (_ref != c_rarg1) {
|
||||
// _ref is in wrong place, but not in c_rarg1, so fix it first
|
||||
__ la(c_rarg1, _ref_addr);
|
||||
__ mv(c_rarg0, _ref);
|
||||
} else if (_ref_addr.base() != c_rarg0) {
|
||||
assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0");
|
||||
__ mv(c_rarg0, _ref);
|
||||
__ la(c_rarg1, _ref_addr);
|
||||
} else {
|
||||
assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0");
|
||||
if (_ref_addr.base() == c_rarg0) {
|
||||
__ mv(t1, c_rarg1);
|
||||
__ la(c_rarg1, _ref_addr);
|
||||
__ mv(c_rarg0, t1);
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
~XSetupArguments() {
|
||||
// Transfer result
|
||||
if (_ref != x10) {
|
||||
__ mv(_ref, x10);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
|
||||
BLOCK_COMMENT("XLoadBarrierStubC2");
|
||||
|
||||
// Stub entry
|
||||
__ bind(*stub->entry());
|
||||
|
||||
{
|
||||
XSaveLiveRegisters save_live_registers(masm, stub);
|
||||
XSetupArguments setup_arguments(masm, stub);
|
||||
|
||||
Address target(stub->slow_path());
|
||||
__ relocate(target.rspec(), [&] {
|
||||
int32_t offset;
|
||||
__ la_patchable(t0, target, offset);
|
||||
__ jalr(x1, t0, offset);
|
||||
});
|
||||
}
|
||||
|
||||
// Stub exit
|
||||
__ j(*stub->continuation());
|
||||
}
|
||||
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
#undef __
|
||||
#define __ ce->masm()->
|
||||
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const {
|
||||
assert_different_registers(xthread, ref->as_register(), t1);
|
||||
__ ld(t1, address_bad_mask_from_thread(xthread));
|
||||
__ andr(t1, t1, ref->as_register());
|
||||
}
|
||||
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
XLoadBarrierStubC1* stub) const {
|
||||
// Stub entry
|
||||
__ bind(*stub->entry());
|
||||
|
||||
Register ref = stub->ref()->as_register();
|
||||
Register ref_addr = noreg;
|
||||
Register tmp = noreg;
|
||||
|
||||
if (stub->tmp()->is_valid()) {
|
||||
// Load address into tmp register
|
||||
ce->leal(stub->ref_addr(), stub->tmp());
|
||||
ref_addr = tmp = stub->tmp()->as_pointer_register();
|
||||
} else {
|
||||
// Address already in register
|
||||
ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
|
||||
}
|
||||
|
||||
assert_different_registers(ref, ref_addr, noreg);
|
||||
|
||||
// Save x10 unless it is the result or tmp register
|
||||
// Set up SP to accommodate parameters and maybe x10.
|
||||
if (ref != x10 && tmp != x10) {
|
||||
__ sub(sp, sp, 32);
|
||||
__ sd(x10, Address(sp, 16));
|
||||
} else {
|
||||
__ sub(sp, sp, 16);
|
||||
}
|
||||
|
||||
// Setup arguments and call runtime stub
|
||||
ce->store_parameter(ref_addr, 1);
|
||||
ce->store_parameter(ref, 0);
|
||||
|
||||
__ far_call(stub->runtime_stub());
|
||||
|
||||
// Verify result
|
||||
__ verify_oop(x10);
|
||||
|
||||
|
||||
// Move result into place
|
||||
if (ref != x10) {
|
||||
__ mv(ref, x10);
|
||||
}
|
||||
|
||||
// Restore x10 unless it is the result or tmp register
|
||||
if (ref != x10 && tmp != x10) {
|
||||
__ ld(x10, Address(sp, 16));
|
||||
__ add(sp, sp, 32);
|
||||
} else {
|
||||
__ add(sp, sp, 16);
|
||||
}
|
||||
|
||||
// Stub exit
|
||||
__ j(*stub->continuation());
|
||||
}
|
||||
|
||||
#undef __
|
||||
#define __ sasm->
|
||||
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const {
|
||||
__ prologue("zgc_load_barrier stub", false);
|
||||
|
||||
__ push_call_clobbered_registers_except(RegSet::of(x10));
|
||||
|
||||
// Setup arguments
|
||||
__ load_parameter(0, c_rarg0);
|
||||
__ load_parameter(1, c_rarg1);
|
||||
|
||||
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2);
|
||||
|
||||
__ pop_call_clobbered_registers_except(RegSet::of(x10));
|
||||
|
||||
__ epilogue();
|
||||
}
|
||||
|
||||
#endif // COMPILER1
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
|
||||
// Check if mask is good.
|
||||
// verifies that XAddressBadMask & obj == 0
|
||||
__ ld(tmp2, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(tmp1, obj, tmp2);
|
||||
__ bnez(tmp1, error);
|
||||
|
||||
BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
|
||||
}
|
||||
|
||||
#undef __
|
112
src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.hpp
Normal file
@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP
|
||||
#define CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP
|
||||
|
||||
#include "code/vmreg.hpp"
|
||||
#include "oops/accessDecorators.hpp"
|
||||
#ifdef COMPILER2
|
||||
#include "opto/optoreg.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
class LIR_Assembler;
|
||||
class LIR_Opr;
|
||||
class StubAssembler;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class Node;
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
class XLoadBarrierStubC1;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class XLoadBarrierStubC2;
|
||||
#endif // COMPILER2
|
||||
|
||||
class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
|
||||
public:
|
||||
virtual void load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Register dst,
|
||||
Address src,
|
||||
Register tmp1,
|
||||
Register tmp2);
|
||||
|
||||
#ifdef ASSERT
|
||||
virtual void store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Address dst,
|
||||
Register val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3);
|
||||
#endif // ASSERT
|
||||
|
||||
virtual void arraycopy_prologue(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
bool is_oop,
|
||||
Register src,
|
||||
Register dst,
|
||||
Register count,
|
||||
RegSet saved_regs);
|
||||
|
||||
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
|
||||
Register jni_env,
|
||||
Register robj,
|
||||
Register tmp,
|
||||
Label& slowpath);
|
||||
|
||||
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
|
||||
|
||||
#ifdef COMPILER1
|
||||
void generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const;
|
||||
|
||||
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
XLoadBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
OptoReg::Name refine_register(const Node* node,
|
||||
OptoReg::Name opto_reg);
|
||||
|
||||
void generate_c2_load_barrier_stub(MacroAssembler* masm,
|
||||
XLoadBarrierStubC2* stub) const;
|
||||
#endif // COMPILER2
|
||||
|
||||
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
|
||||
};
|
||||
|
||||
#endif // CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP
|
@ -26,7 +26,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/x/xGlobals.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
@ -198,15 +198,15 @@ static size_t probe_valid_max_address_bit() {
|
||||
#endif // LINUX
|
||||
}
|
||||
|
||||
size_t ZPlatformAddressOffsetBits() {
|
||||
size_t XPlatformAddressOffsetBits() {
|
||||
const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
|
||||
const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
|
||||
const size_t min_address_offset_bits = max_address_offset_bits - 2;
|
||||
const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
|
||||
const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
|
||||
const size_t address_offset_bits = log2i_exact(address_offset);
|
||||
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
|
||||
}
|
||||
|
||||
size_t ZPlatformAddressMetadataShift() {
|
||||
return ZPlatformAddressOffsetBits();
|
||||
size_t XPlatformAddressMetadataShift() {
|
||||
return XPlatformAddressOffsetBits();
|
||||
}
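// Illustrative worked example (not part of the patch), with assumed numbers:
// if MaxHeapSize is 32 GB and XVirtualToPhysicalRatio is 16, then
// round_up_power_of_2(32 GB * 16) = 512 GB = 2^39, so address_offset_bits is
// 39 before being clamped into
// [max_address_offset_bits - 2, max_address_offset_bits].
#include <algorithm>
#include <cstdint>

uint64_t clamped_offset_bits(uint64_t bits, uint64_t lo, uint64_t hi) {
  return std::min(std::max(bits, lo), hi);  // the same clamp as in the code above
}
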
|
35
src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.hpp
Normal file
@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP
|
||||
#define CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP
|
||||
|
||||
const size_t XPlatformHeapViews = 3;
|
||||
const size_t XPlatformCacheLineSize = 64;
|
||||
|
||||
size_t XPlatformAddressOffsetBits();
|
||||
size_t XPlatformAddressMetadataShift();
|
||||
|
||||
#endif // CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP
|
233
src/hotspot/cpu/riscv/gc/x/x_riscv64.ad
Normal file
@ -0,0 +1,233 @@
|
||||
//
|
||||
// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License version 2 only, as
|
||||
// published by the Free Software Foundation.
|
||||
//
|
||||
// This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
// version 2 for more details (a copy is included in the LICENSE file that
|
||||
// accompanied this code).
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License version
|
||||
// 2 along with this work; if not, write to the Free Software Foundation,
|
||||
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
//
|
||||
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
// or visit www.oracle.com if you need additional information or have any
|
||||
// questions.
|
||||
//
|
||||
|
||||
source_hpp %{
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/x/c2/xBarrierSetC2.hpp"
|
||||
#include "gc/x/xThreadLocalData.hpp"
|
||||
|
||||
%}
|
||||
|
||||
source %{
|
||||
|
||||
static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) {
|
||||
if (barrier_data == XLoadBarrierElided) {
|
||||
return;
|
||||
}
|
||||
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
|
||||
__ ld(tmp, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(tmp, tmp, ref);
|
||||
__ bnez(tmp, *stub->entry(), true /* far */);
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
|
||||
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
|
||||
__ j(*stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
%}
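
For illustration only, a minimal stand-alone C++ sketch of the check that x_load_barrier emits above: the loaded reference is ANDed with a thread-local bad mask and, if any bad bit is set, control goes to an out-of-line stub that heals the reference. The names and the healing step below are invented for the sketch; they are not HotSpot APIs.

#include <atomic>
#include <cstdint>

static std::atomic<uintptr_t> address_bad_mask{0};     // flipped by the GC between phases

// Stand-in for the out-of-line stub; the real stub heals the reference in place.
static uintptr_t slow_path_heal(uintptr_t* addr, uintptr_t ref) {
  uintptr_t healed = ref & ~address_bad_mask.load(std::memory_order_relaxed);
  *addr = healed;
  return healed;
}

static uintptr_t load_reference(uintptr_t* addr) {
  uintptr_t ref = *addr;                               // plain load, as in xLoadP
  if ((ref & address_bad_mask.load(std::memory_order_relaxed)) != 0) {
    ref = slow_path_heal(addr, ref);                   // only taken when a bad bit is set
  }
  return ref;
}

int main() {
  uintptr_t field = 0x1000;
  return load_reference(&field) == 0x1000 ? 0 : 1;     // bad mask is zero, fast path
}
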
|
||||
|
||||
// Load Pointer
|
||||
instruct xLoadP(iRegPNoSp dst, memory mem)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
predicate(UseZGC && !ZGenerational && (n->as_Load()->barrier_data() != 0));
|
||||
effect(TEMP dst);
|
||||
|
||||
ins_cost(4 * DEFAULT_COST);
|
||||
|
||||
format %{ "ld $dst, $mem, #@zLoadP" %}
|
||||
|
||||
ins_encode %{
|
||||
const Address ref_addr (as_Register($mem$$base), $mem$$disp);
|
||||
__ ld($dst$$Register, ref_addr);
|
||||
x_load_barrier(_masm, this, ref_addr, $dst$$Register, t0 /* tmp */, barrier_data());
|
||||
%}
|
||||
|
||||
ins_pipe(iload_reg_mem);
|
||||
%}
|
||||
|
||||
instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
|
||||
effect(KILL cr, TEMP_DEF res);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapP\n\t"
|
||||
"mv $res, $res == $oldval" %}
|
||||
|
||||
ins_encode %{
|
||||
Label failed;
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
|
||||
true /* result_as_bool */);
|
||||
__ beqz($res$$Register, failed);
|
||||
__ mv(t0, $oldval$$Register);
|
||||
__ bind(failed);
|
||||
if (barrier_data() != XLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ld(t1, Address(xthread, XThreadLocalData::address_bad_mask_offset()), t1 /* tmp */);
|
||||
__ andr(t1, t1, t0);
|
||||
__ beqz(t1, good);
|
||||
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
|
||||
true /* result_as_bool */);
|
||||
__ bind(good);
|
||||
}
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong));
|
||||
effect(KILL cr, TEMP_DEF res);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapPAcq\n\t"
|
||||
"mv $res, $res == $oldval" %}
|
||||
|
||||
ins_encode %{
|
||||
Label failed;
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
|
||||
true /* result_as_bool */);
|
||||
__ beqz($res$$Register, failed);
|
||||
__ mv(t0, $oldval$$Register);
|
||||
__ bind(failed);
|
||||
if (barrier_data() != XLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ld(t1, Address(xthread, XThreadLocalData::address_bad_mask_offset()), t1 /* tmp */);
|
||||
__ andr(t1, t1, t0);
|
||||
__ beqz(t1, good);
|
||||
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
|
||||
true /* result_as_bool */);
|
||||
__ bind(good);
|
||||
}
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
|
||||
effect(TEMP_DEF res);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangeP" %}
|
||||
|
||||
ins_encode %{
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
|
||||
if (barrier_data() != XLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(t0, t0, $res$$Register);
|
||||
__ beqz(t0, good);
|
||||
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
|
||||
__ bind(good);
|
||||
}
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
|
||||
effect(TEMP_DEF res);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangePAcq" %}
|
||||
|
||||
ins_encode %{
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
|
||||
if (barrier_data() != XLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(t0, t0, $res$$Register);
|
||||
__ beqz(t0, good);
|
||||
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
|
||||
__ bind(good);
|
||||
}
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
|
||||
match(Set prev (GetAndSetP mem newv));
|
||||
predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
|
||||
effect(TEMP_DEF prev, KILL cr);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %}
|
||||
|
||||
ins_encode %{
|
||||
__ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
|
||||
x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data());
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_serial);
|
||||
%}
|
||||
|
||||
instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
|
||||
match(Set prev (GetAndSetP mem newv));
|
||||
predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() != 0));
|
||||
effect(TEMP_DEF prev, KILL cr);
|
||||
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
|
||||
format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %}
|
||||
|
||||
ins_encode %{
|
||||
__ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
|
||||
x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data());
|
||||
%}
|
||||
ins_pipe(pipe_serial);
|
||||
%}
|
109
src/hotspot/cpu/riscv/gc/z/zAddress_riscv.cpp
Normal file
@ -0,0 +1,109 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zBarrierSetAssembler.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
#ifdef LINUX
|
||||
#include <sys/mman.h>
|
||||
#endif // LINUX
|
||||
|
||||
// Default value if probe is not implemented for a certain platform: 128TB
|
||||
static const size_t DEFAULT_MAX_ADDRESS_BIT = 47;
|
||||
// Minimum value returned, if probing fails: 64GB
|
||||
static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;
|
||||
|
||||
static size_t probe_valid_max_address_bit() {
|
||||
#ifdef LINUX
|
||||
size_t max_address_bit = 0;
|
||||
const size_t page_size = os::vm_page_size();
|
||||
for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) {
|
||||
const uintptr_t base_addr = ((uintptr_t) 1U) << i;
|
||||
if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) {
|
||||
// msync succeeded, the address is valid, and maybe even already mapped.
|
||||
max_address_bit = i;
|
||||
break;
|
||||
}
|
||||
if (errno != ENOMEM) {
|
||||
// Some error occurred. This should never happen, but msync
|
||||
// has some undefined behavior, hence ignore this bit.
|
||||
#ifdef ASSERT
|
||||
fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
|
||||
#else // ASSERT
|
||||
log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno));
|
||||
#endif // ASSERT
|
||||
continue;
|
||||
}
|
||||
// Since msync failed with ENOMEM, the page might not be mapped.
|
||||
// Try to map it, to see if the address is valid.
|
||||
void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
|
||||
if (result_addr != MAP_FAILED) {
|
||||
munmap(result_addr, page_size);
|
||||
}
|
||||
if ((uintptr_t) result_addr == base_addr) {
|
||||
// address is valid
|
||||
max_address_bit = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (max_address_bit == 0) {
|
||||
// probing failed, allocate a very high page and take that bit as the maximum
|
||||
const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT;
|
||||
void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
|
||||
if (result_addr != MAP_FAILED) {
|
||||
max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1;
|
||||
munmap(result_addr, page_size);
|
||||
}
|
||||
}
|
||||
log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit);
|
||||
return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT);
|
||||
#else // LINUX
|
||||
return DEFAULT_MAX_ADDRESS_BIT;
|
||||
#endif // LINUX
|
||||
}
|
||||
|
||||
size_t ZPlatformAddressOffsetBits() {
|
||||
const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
|
||||
const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
|
||||
const size_t min_address_offset_bits = max_address_offset_bits - 2;
|
||||
const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
|
||||
const size_t address_offset_bits = log2i_exact(address_offset);
|
||||
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
|
||||
}
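
A worked example of the arithmetic above, as a stand-alone sketch. The concrete inputs are assumptions for illustration: a probed maximum address bit of 47, a 32 GB MaxHeapSize, and a virtual-to-physical ratio of 16 (the assumed value of ZVirtualToPhysicalRatio); std::bit_ceil and std::countr_zero stand in for round_up_power_of_2 and log2i_exact.

#include <algorithm>
#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t valid_max_bits = 47 + 1;                        // probe_valid_max_address_bit() + 1
  const size_t max_bits       = valid_max_bits - 3;            // 45
  const size_t min_bits       = max_bits - 2;                  // 43
  const size_t max_heap       = 32ull * 1024 * 1024 * 1024;    // 32 GB heap (example)
  const size_t offset         = std::bit_ceil(max_heap * 16);  // round up to a power of two
  const size_t offset_bits    = (size_t)std::countr_zero(offset);
  const size_t result         = std::clamp(offset_bits, min_bits, max_bits);
  std::printf("address offset bits = %zu\n", result);          // prints 43: clamped up to min_bits
  return 0;
}
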
|
||||
|
||||
size_t ZPlatformAddressHeapBaseShift() {
|
||||
return ZPlatformAddressOffsetBits();
|
||||
}
|
||||
|
||||
void ZGlobalsPointers::pd_set_good_masks() {
|
||||
BarrierSetAssembler::clear_patching_epoch();
|
||||
}
|
34
src/hotspot/cpu/riscv/gc/z/zAddress_riscv.hpp
Normal file
@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_RISCV_GC_Z_ZADDRESS_RISCV_HPP
|
||||
#define CPU_RISCV_GC_Z_ZADDRESS_RISCV_HPP
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
const size_t ZPointerLoadShift = 16;
|
||||
|
||||
size_t ZPlatformAddressOffsetBits();
|
||||
size_t ZPlatformAddressHeapBaseShift();
|
||||
|
||||
#endif // CPU_RISCV_GC_Z_ZADDRESS_RISCV_HPP
|
38
src/hotspot/cpu/riscv/gc/z/zAddress_riscv.inline.hpp
Normal file
@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_RISCV_GC_Z_ZADDRESS_RISCV_INLINE_HPP
|
||||
#define CPU_RISCV_GC_Z_ZADDRESS_RISCV_INLINE_HPP
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
inline uintptr_t ZPointer::remap_bits(uintptr_t colored) {
|
||||
return colored & ZPointerRemappedMask;
|
||||
}
|
||||
|
||||
inline constexpr int ZPointer::load_shift_lookup(uintptr_t value) {
|
||||
return ZPointerLoadShift;
|
||||
}
|
||||
|
||||
#endif // CPU_RISCV_GC_Z_ZADDRESS_RISCV_INLINE_HPP
|
File diff suppressed because it is too large
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,22 +28,33 @@
|
||||
|
||||
#include "code/vmreg.hpp"
|
||||
#include "oops/accessDecorators.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_LIR.hpp"
|
||||
#endif // COMPILER1
|
||||
#ifdef COMPILER2
|
||||
#include "gc/z/c2/zBarrierSetC2.hpp"
|
||||
#include "opto/optoreg.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
class LIR_Address;
|
||||
class LIR_Assembler;
|
||||
class LIR_Opr;
|
||||
class StubAssembler;
|
||||
class ZLoadBarrierStubC1;
|
||||
class ZStoreBarrierStubC1;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class MachNode;
|
||||
class Node;
|
||||
class ZLoadBarrierStubC2;
|
||||
#endif // COMPILER2
|
||||
|
||||
const int ZBarrierRelocationFormatLoadBadMask = 0;
|
||||
const int ZBarrierRelocationFormatMarkBadMask = 1;
|
||||
const int ZBarrierRelocationFormatStoreGoodBits = 2;
|
||||
const int ZBarrierRelocationFormatStoreBadMask = 3;
|
||||
|
||||
class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
|
||||
public:
|
||||
virtual void load_at(MacroAssembler* masm,
|
||||
@ -54,7 +65,27 @@ public:
|
||||
Register tmp1,
|
||||
Register tmp2);
|
||||
|
||||
#ifdef ASSERT
|
||||
void store_barrier_fast(MacroAssembler* masm,
|
||||
Address ref_addr,
|
||||
Register rnew_zaddress,
|
||||
Register rnew_zpointer,
|
||||
Register rtmp,
|
||||
bool in_nmethod,
|
||||
bool is_atomic,
|
||||
Label& medium_path,
|
||||
Label& medium_path_continuation) const;
|
||||
|
||||
void store_barrier_medium(MacroAssembler* masm,
|
||||
Address ref_addr,
|
||||
Register rtmp1,
|
||||
Register rtmp2,
|
||||
Register rtmp3,
|
||||
bool is_native,
|
||||
bool is_atomic,
|
||||
Label& medium_path_continuation,
|
||||
Label& slow_path,
|
||||
Label& slow_path_continuation) const;
|
||||
|
||||
virtual void store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
@ -63,7 +94,6 @@ public:
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3);
|
||||
#endif // ASSERT
|
||||
|
||||
virtual void arraycopy_prologue(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
@ -73,23 +103,66 @@ public:
|
||||
Register count,
|
||||
RegSet saved_regs);
|
||||
|
||||
virtual void copy_load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
size_t bytes,
|
||||
Register dst,
|
||||
Address src,
|
||||
Register tmp);
|
||||
|
||||
virtual void copy_store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
size_t bytes,
|
||||
Address dst,
|
||||
Register src,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3);
|
||||
|
||||
virtual bool supports_rvv_arraycopy();
|
||||
|
||||
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
|
||||
Register jni_env,
|
||||
Register robj,
|
||||
Register tmp,
|
||||
Label& slowpath);
|
||||
|
||||
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; }
|
||||
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_instruction_and_data_patch; }
|
||||
|
||||
void patch_barrier_relocation(address addr, int format);
|
||||
|
||||
void patch_barriers() {}
|
||||
|
||||
#ifdef COMPILER1
|
||||
void generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const;
|
||||
void generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const;
|
||||
|
||||
void generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const;
|
||||
void generate_c1_load_barrier(LIR_Assembler* ce,
|
||||
LIR_Opr ref,
|
||||
ZLoadBarrierStubC1* stub,
|
||||
bool on_non_strong) const;
|
||||
|
||||
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
ZLoadBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const;
|
||||
|
||||
void generate_c1_store_barrier(LIR_Assembler* ce,
|
||||
LIR_Address* addr,
|
||||
LIR_Opr new_zaddress,
|
||||
LIR_Opr new_zpointer,
|
||||
ZStoreBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_store_barrier_stub(LIR_Assembler* ce,
|
||||
ZStoreBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_store_barrier_runtime_stub(StubAssembler* sasm,
|
||||
bool self_healing) const;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
@ -98,6 +171,8 @@ public:
|
||||
|
||||
void generate_c2_load_barrier_stub(MacroAssembler* masm,
|
||||
ZLoadBarrierStubC2* stub) const;
|
||||
void generate_c2_store_barrier_stub(MacroAssembler* masm,
|
||||
ZStoreBarrierStubC2* stub) const;
|
||||
#endif // COMPILER2
|
||||
|
||||
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,10 +26,8 @@
|
||||
#ifndef CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP
|
||||
#define CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP
|
||||
|
||||
const size_t ZPlatformHeapViews = 3;
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
const size_t ZPlatformCacheLineSize = 64;
|
||||
|
||||
size_t ZPlatformAddressOffsetBits();
|
||||
size_t ZPlatformAddressMetadataShift();
|
||||
|
||||
#endif // CPU_RISCV_GC_Z_ZGLOBALS_RISCV_HPP
|
||||
|
@ -1,6 +1,6 @@
|
||||
//
|
||||
// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
@ -31,31 +31,69 @@ source_hpp %{
|
||||
%}
|
||||
|
||||
source %{
|
||||
#include "gc/z/zBarrierSetAssembler.hpp"
|
||||
|
||||
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) {
|
||||
if (barrier_data == ZLoadBarrierElided) {
|
||||
static void z_color(MacroAssembler& _masm, const MachNode* node, Register dst, Register src, Register tmp) {
|
||||
assert_different_registers(dst, tmp);
|
||||
|
||||
__ relocate(barrier_Relocation::spec(), [&] {
|
||||
__ li16u(tmp, barrier_Relocation::unpatched);
|
||||
}, ZBarrierRelocationFormatStoreGoodBits);
|
||||
__ slli(dst, src, ZPointerLoadShift);
|
||||
__ orr(dst, dst, tmp);
|
||||
}
|
||||
|
||||
static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) {
|
||||
__ srli(ref, ref, ZPointerLoadShift);
|
||||
}
|
||||
|
||||
static void check_color(MacroAssembler& _masm, Register ref, bool on_non_strong, Register result) {
|
||||
int format = on_non_strong ? ZBarrierRelocationFormatMarkBadMask
|
||||
: ZBarrierRelocationFormatLoadBadMask;
|
||||
__ relocate(barrier_Relocation::spec(), [&] {
|
||||
__ li16u(result, barrier_Relocation::unpatched);
|
||||
}, format);
|
||||
__ andr(result, ref, result);
|
||||
}
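
Taken together, z_color, z_uncolor and check_color implement the colored-pointer scheme used by generational ZGC on riscv: a store shifts the raw address left by ZPointerLoadShift (16, per zAddress_riscv.hpp above) and ORs in the current store-good bits, a load shifts the color back out, and the barrier test ANDs the colored value against a bad mask that is patched into the instruction stream. A toy model of that arithmetic, with invented bit values (the real values are patched at runtime via barrier_Relocation):

#include <cassert>
#include <cstdint>

constexpr int      kLoadShift     = 16;       // ZPointerLoadShift on riscv
constexpr uint64_t kStoreGoodBits = 0x0004;   // example color, patched at runtime
constexpr uint64_t kLoadBadMask   = 0xfff3;   // example mask, patched at runtime

uint64_t color(uint64_t zaddress)   { return (zaddress << kLoadShift) | kStoreGoodBits; }
uint64_t uncolor(uint64_t zpointer) { return zpointer >> kLoadShift; }
bool needs_slow_path(uint64_t zpointer) { return (zpointer & kLoadBadMask) != 0; }

int main() {
  uint64_t p = color(0x7f0000001000);
  assert(!needs_slow_path(p));                // freshly colored pointers pass the check
  assert(uncolor(p) == 0x7f0000001000);
  return 0;
}
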
|
||||
|
||||
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
|
||||
const bool on_non_strong =
|
||||
((node->barrier_data() & ZBarrierWeak) != 0) ||
|
||||
((node->barrier_data() & ZBarrierPhantom) != 0);
|
||||
|
||||
if (node->barrier_data() == ZBarrierElided) {
|
||||
z_uncolor(_masm, node, ref);
|
||||
return;
|
||||
}
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
|
||||
__ ld(tmp, Address(xthread, ZThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(tmp, tmp, ref);
|
||||
__ bnez(tmp, *stub->entry(), true /* far */);
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong);
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
|
||||
Label good;
|
||||
check_color(_masm, ref, on_non_strong, tmp);
|
||||
__ beqz(tmp, good);
|
||||
__ j(*stub->entry());
|
||||
|
||||
__ bind(good);
|
||||
z_uncolor(_masm, node, ref);
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
|
||||
if (node->barrier_data() == ZBarrierElided) {
|
||||
z_color(_masm, node, rnew_zpointer, rnew_zaddress, t0);
|
||||
} else {
|
||||
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
|
||||
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
|
||||
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
|
||||
bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
|
||||
}
|
||||
}
|
||||
%}
|
||||
|
||||
// Load Pointer
|
||||
instruct zLoadP(iRegPNoSp dst, memory mem)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
predicate(UseZGC && (n->as_Load()->barrier_data() != 0));
|
||||
predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0);
|
||||
effect(TEMP dst);
|
||||
|
||||
ins_cost(4 * DEFAULT_COST);
|
||||
@ -63,19 +101,36 @@ instruct zLoadP(iRegPNoSp dst, memory mem)
|
||||
format %{ "ld $dst, $mem, #@zLoadP" %}
|
||||
|
||||
ins_encode %{
|
||||
const Address ref_addr (as_Register($mem$$base), $mem$$disp);
|
||||
const Address ref_addr(as_Register($mem$$base), $mem$$disp);
|
||||
__ ld($dst$$Register, ref_addr);
|
||||
z_load_barrier(_masm, this, ref_addr, $dst$$Register, t0 /* tmp */, barrier_data());
|
||||
z_load_barrier(_masm, this, ref_addr, $dst$$Register, t0);
|
||||
%}
|
||||
|
||||
ins_pipe(iload_reg_mem);
|
||||
%}
|
||||
|
||||
instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
|
||||
// Store Pointer
|
||||
instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreP mem src));
|
||||
effect(TEMP tmp, KILL cr);
|
||||
|
||||
ins_cost(125); // XXX
|
||||
format %{ "sd $mem, $src\t# ptr" %}
|
||||
ins_encode %{
|
||||
const Address ref_addr(as_Register($mem$$base), $mem$$disp);
|
||||
z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, t1, false /* is_atomic */);
|
||||
__ sd($tmp$$Register, ref_addr);
|
||||
%}
|
||||
ins_pipe(pipe_serial);
|
||||
%}
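
zStoreP itself only emits the plain sd; the interesting work is in z_store_barrier, which defers to ZBarrierSetAssembler::store_barrier_fast (declared in zBarrierSetAssembler_riscv.hpp above). Roughly, the fast path inspects the colored pointer already in the field: if no store-bad bits are set, the bookkeeping for this field is already done and the freshly colored value can be stored directly; otherwise the medium path performs the remembered-set and marking work first. A conceptual sketch with invented values; this is not the HotSpot implementation:

#include <cstdint>

constexpr uint64_t kStoreBadMask  = 0x00f2;   // example, patched at runtime in the VM
constexpr uint64_t kStoreGoodBits = 0x0004;   // example, patched at runtime in the VM

// Stand-in for the medium path (remembered-set / marking bookkeeping).
static void medium_path(uint64_t* field) {
  *field = (*field & ~kStoreBadMask) | kStoreGoodBits;
}

void store_field(uint64_t* field, uint64_t new_colored) {
  if ((*field & kStoreBadMask) != 0) {        // fast-path test, cf. store_barrier_fast
    medium_path(field);
  }
  *field = new_colored;                       // plain store of the colored pointer, as in zStoreP
}

int main() {
  uint64_t field = 0x00f2;                    // field currently holds a store-bad pointer
  store_field(&field, (0x1000ull << 16) | kStoreGoodBits);
  return (field >> 16) == 0x1000 ? 0 : 1;
}
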
|
||||
|
||||
instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
|
||||
effect(KILL cr, TEMP_DEF res);
|
||||
predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
|
||||
effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
@ -83,35 +138,21 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
|
||||
"mv $res, $res == $oldval" %}
|
||||
|
||||
ins_encode %{
|
||||
Label failed;
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
|
||||
true /* result_as_bool */);
|
||||
__ beqz($res$$Register, failed);
|
||||
__ mv(t0, $oldval$$Register);
|
||||
__ bind(failed);
|
||||
if (barrier_data() != ZLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ld(t1, Address(xthread, ZThreadLocalData::address_bad_mask_offset()), t1 /* tmp */);
|
||||
__ andr(t1, t1, t0);
|
||||
__ beqz(t1, good);
|
||||
z_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
|
||||
true /* result_as_bool */);
|
||||
__ bind(good);
|
||||
}
|
||||
guarantee($mem$$disp == 0, "impossible encoding");
|
||||
Address ref_addr($mem$$Register);
|
||||
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0);
|
||||
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */);
|
||||
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, true /* result_as_bool */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
|
||||
instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
|
||||
effect(KILL cr, TEMP_DEF res);
|
||||
predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
|
||||
effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
@ -119,81 +160,53 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
|
||||
"mv $res, $res == $oldval" %}
|
||||
|
||||
ins_encode %{
|
||||
Label failed;
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
|
||||
true /* result_as_bool */);
|
||||
__ beqz($res$$Register, failed);
|
||||
__ mv(t0, $oldval$$Register);
|
||||
__ bind(failed);
|
||||
if (barrier_data() != ZLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ld(t1, Address(xthread, ZThreadLocalData::address_bad_mask_offset()), t1 /* tmp */);
|
||||
__ andr(t1, t1, t0);
|
||||
__ beqz(t1, good);
|
||||
z_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
|
||||
true /* result_as_bool */);
|
||||
__ bind(good);
|
||||
}
|
||||
guarantee($mem$$disp == 0, "impossible encoding");
|
||||
Address ref_addr($mem$$Register);
|
||||
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0);
|
||||
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */);
|
||||
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, true /* result_as_bool */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{
|
||||
instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
|
||||
effect(TEMP_DEF res);
|
||||
predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
|
||||
effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangeP" %}
|
||||
|
||||
ins_encode %{
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
|
||||
if (barrier_data() != ZLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ld(t0, Address(xthread, ZThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(t0, t0, $res$$Register);
|
||||
__ beqz(t0, good);
|
||||
z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
|
||||
__ bind(good);
|
||||
}
|
||||
guarantee($mem$$disp == 0, "impossible encoding");
|
||||
Address ref_addr($mem$$Register);
|
||||
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0);
|
||||
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */);
|
||||
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
|
||||
z_uncolor(_masm, this, $res$$Register);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{
|
||||
instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
|
||||
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
|
||||
effect(TEMP_DEF res);
|
||||
predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
|
||||
effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangePAcq" %}
|
||||
|
||||
ins_encode %{
|
||||
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
|
||||
if (barrier_data() != ZLoadBarrierElided) {
|
||||
Label good;
|
||||
__ ld(t0, Address(xthread, ZThreadLocalData::address_bad_mask_offset()));
|
||||
__ andr(t0, t0, $res$$Register);
|
||||
__ beqz(t0, good);
|
||||
z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */);
|
||||
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
|
||||
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
|
||||
__ bind(good);
|
||||
}
|
||||
guarantee($mem$$disp == 0, "impossible encoding");
|
||||
Address ref_addr($mem$$Register);
|
||||
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0);
|
||||
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */);
|
||||
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
|
||||
z_uncolor(_masm, this, $res$$Register);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
@ -201,7 +214,7 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
|
||||
|
||||
instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
|
||||
match(Set prev (GetAndSetP mem newv));
|
||||
predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
|
||||
predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
|
||||
effect(TEMP_DEF prev, KILL cr);
|
||||
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
@ -209,8 +222,9 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
|
||||
format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %}
|
||||
|
||||
ins_encode %{
|
||||
__ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
|
||||
z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data());
|
||||
z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, t1, true /* is_atomic */);
|
||||
__ atomic_xchg($prev$$Register, $prev$$Register, $mem$$Register);
|
||||
z_uncolor(_masm, this, $prev$$Register);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_serial);
|
||||
@ -218,16 +232,17 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
|
||||
|
||||
instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
|
||||
match(Set prev (GetAndSetP mem newv));
|
||||
predicate(UseZGC && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() != 0));
|
||||
predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
|
||||
effect(TEMP_DEF prev, KILL cr);
|
||||
|
||||
ins_cost(VOLATILE_REF_COST);
|
||||
ins_cost(2 * VOLATILE_REF_COST);
|
||||
|
||||
format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %}
|
||||
|
||||
ins_encode %{
|
||||
__ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
|
||||
z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data());
|
||||
z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, t1, true /* is_atomic */);
|
||||
__ atomic_xchgal($prev$$Register, $prev$$Register, $mem$$Register);
|
||||
z_uncolor(_masm, this, $prev$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_serial);
|
||||
%}
|
||||
|
@ -563,8 +563,8 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp
|
||||
|
||||
beqz(value, done); // Use null as-is.
|
||||
// Test for tag.
|
||||
andi(t0, value, JNIHandles::tag_mask);
|
||||
bnez(t0, tagged);
|
||||
andi(tmp1, value, JNIHandles::tag_mask);
|
||||
bnez(tmp1, tagged);
|
||||
|
||||
// Resolve local handle
|
||||
access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2);
|
||||
@ -573,12 +573,14 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp
|
||||
|
||||
bind(tagged);
|
||||
// Test for jweak tag.
|
||||
test_bit(t0, value, exact_log2(JNIHandles::TypeTag::weak_global));
|
||||
bnez(t0, weak_tagged);
|
||||
STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1);
|
||||
test_bit(tmp1, value, exact_log2(JNIHandles::TypeTag::weak_global));
|
||||
bnez(tmp1, weak_tagged);
|
||||
|
||||
// Resolve global handle
|
||||
access_load_at(T_OBJECT, IN_NATIVE, value,
|
||||
Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
|
||||
verify_oop(value);
|
||||
j(done);
|
||||
|
||||
bind(weak_tagged);
|
||||
@ -598,9 +600,10 @@ void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Regis
|
||||
|
||||
#ifdef ASSERT
|
||||
{
|
||||
STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
|
||||
Label valid_global_tag;
|
||||
test_bit(t0, value, exact_log2(JNIHandles::TypeTag::global)); // Test for global tag.
|
||||
bnez(t0, valid_global_tag);
|
||||
test_bit(tmp1, value, exact_log2(JNIHandles::TypeTag::global)); // Test for global tag.
|
||||
bnez(tmp1, valid_global_tag);
|
||||
stop("non global jobject using resolve_global_jobject");
|
||||
bind(valid_global_tag);
|
||||
}
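
Both resolve_jobject and resolve_global_jobject decode the handle kind from the low bits of the jobject, per the STATIC_ASSERTs above: weak_global is 0b1 and global is 0b10, and the tag is subtracted before the handle is dereferenced. A rough stand-alone model of that decoding (tag_mask is assumed to be 0b11 here; this is not the JDK implementation):

#include <cstdint>

enum TypeTag : uintptr_t { weak_global = 0b01, global = 0b10 };  // values per the STATIC_ASSERTs
constexpr uintptr_t tag_mask = 0b11;                             // assumed value of JNIHandles::tag_mask

uintptr_t resolve(uintptr_t handle) {
  if (handle == 0) return 0;                                     // null is used as-is
  const uintptr_t tag = handle & tag_mask;
  if (tag == 0)           return *(uintptr_t*)handle;                  // local handle
  if (tag == weak_global) return *(uintptr_t*)(handle - weak_global);  // needs the weak barrier
  return *(uintptr_t*)(handle - global);                               // global handle
}

int main() {
  uintptr_t oop = 0x1234;
  uintptr_t storage = (uintptr_t)&oop;
  return resolve(storage + global) == 0x1234 ? 0 : 1;
}
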
|
||||
@ -755,6 +758,11 @@ void MacroAssembler::la(Register Rd, Label &label) {
|
||||
wrap_label(Rd, label, &MacroAssembler::la);
|
||||
}
|
||||
|
||||
void MacroAssembler::li16u(Register Rd, int32_t imm) {
|
||||
lui(Rd, imm << 12);
|
||||
srli(Rd, Rd, 12);
|
||||
}
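
The li16u sequence is worth spelling out: lui places the payload in bits 31:12 (which is why the immediate is pre-shifted by 12), and the srli shifts it back down, leaving a zero-extended constant that patch_imm_in_li16u below can rewrite by touching only the lui immediate. A small arithmetic model of the two instructions (a sketch, not the assembler itself):

#include <cassert>
#include <cstdint>

uint64_t li16u_model(uint32_t imm16) {
  uint64_t rd = (uint64_t)(int64_t)(int32_t)(imm16 << 12);  // lui: payload in bits 31:12, sign-extended
  rd >>= 12;                                                // srli by 12: zero-extended result
  return rd;
}

int main() {
  assert(li16u_model(0xffff) == 0xffff);    // the full 16-bit payload survives unchanged
  assert(li16u_model(0x0042) == 0x0042);
  return 0;
}
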
|
||||
|
||||
void MacroAssembler::li32(Register Rd, int32_t imm) {
|
||||
// int32_t is in range 0x8000 0000 ~ 0x7fff ffff, and imm[31] is the sign bit
|
||||
int64_t upper = imm, lower = imm;
|
||||
@ -1404,6 +1412,11 @@ static int patch_imm_in_li64(address branch, address target) {
|
||||
return LI64_INSTRUCTIONS_NUM * NativeInstruction::instruction_size;
|
||||
}
|
||||
|
||||
static int patch_imm_in_li16u(address branch, int32_t target) {
|
||||
Assembler::patch(branch, 31, 12, target & 0xfffff); // patch lui only
|
||||
return NativeInstruction::instruction_size;
|
||||
}
|
||||
|
||||
int MacroAssembler::patch_imm_in_li32(address branch, int32_t target) {
|
||||
const int LI32_INSTRUCTIONS_NUM = 2; // lui + addiw
|
||||
int64_t upper = (intptr_t)target;
|
||||
@ -1493,6 +1506,9 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
|
||||
} else if (NativeInstruction::is_li32_at(branch)) { // li32
|
||||
int64_t imm = (intptr_t)target;
|
||||
return patch_imm_in_li32(branch, (int32_t)imm);
|
||||
} else if (NativeInstruction::is_li16u_at(branch)) {
|
||||
int64_t imm = (intptr_t)target;
|
||||
return patch_imm_in_li16u(branch, (int32_t)imm);
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
tty->print_cr("pd_patch_instruction_size: instruction 0x%x at " INTPTR_FORMAT " could not be patched!\n",
|
||||
@ -2426,6 +2442,10 @@ void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acqui
|
||||
|
||||
void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
|
||||
Label &succeed, Label *fail) {
|
||||
assert_different_registers(addr, tmp);
|
||||
assert_different_registers(newv, tmp);
|
||||
assert_different_registers(oldv, tmp);
|
||||
|
||||
// oldv holds comparison value
|
||||
// newv holds value to write in exchange
|
||||
// addr identifies memory word to compare against/update
|
||||
@ -2612,6 +2632,9 @@ void MacroAssembler::cmpxchg(Register addr, Register expected,
|
||||
Assembler::Aqrl acquire, Assembler::Aqrl release,
|
||||
Register result, bool result_as_bool) {
|
||||
assert(size != int8 && size != int16, "unsupported operand size");
|
||||
assert_different_registers(addr, t0);
|
||||
assert_different_registers(expected, t0);
|
||||
assert_different_registers(new_val, t0);
|
||||
|
||||
Label retry_load, done, ne_done;
|
||||
bind(retry_load);
|
||||
@ -2644,6 +2667,10 @@ void MacroAssembler::cmpxchg_weak(Register addr, Register expected,
|
||||
enum operand_size size,
|
||||
Assembler::Aqrl acquire, Assembler::Aqrl release,
|
||||
Register result) {
|
||||
assert_different_registers(addr, t0);
|
||||
assert_different_registers(expected, t0);
|
||||
assert_different_registers(new_val, t0);
|
||||
|
||||
Label fail, done;
|
||||
load_reserved(addr, size, acquire);
|
||||
bne(t0, expected, fail);
|
||||
|
@ -689,6 +689,7 @@ public:
|
||||
void la(Register Rd, const address dest);
|
||||
void la(Register Rd, const Address &adr);
|
||||
|
||||
void li16u(Register Rd, int32_t imm);
|
||||
void li32(Register Rd, int32_t imm);
|
||||
void li64(Register Rd, int64_t imm);
|
||||
void li (Register Rd, int64_t imm); // optimized load immediate
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -97,6 +97,12 @@ bool NativeInstruction::is_movptr_at(address instr) {
|
||||
check_movptr_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool NativeInstruction::is_li16u_at(address instr) {
|
||||
return is_lui_at(instr) && // lui
|
||||
is_srli_at(instr + instruction_size) && // srli
|
||||
check_li16u_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool NativeInstruction::is_li32_at(address instr) {
|
||||
return is_lui_at(instr) && // lui
|
||||
is_addiw_at(instr + instruction_size) && // addiw
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -81,6 +81,14 @@ class NativeInstruction {
|
||||
static bool is_addiw_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_addiw_at(instr) && extract_rd(instr) == zr; }
|
||||
static bool is_lui_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110111; }
|
||||
static bool is_lui_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_lui_at(instr) && extract_rd(instr) == zr; }
|
||||
|
||||
static bool is_srli_at(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return extract_opcode(instr) == 0b0010011 &&
|
||||
extract_funct3(instr) == 0b101 &&
|
||||
Assembler::extract(((unsigned*)instr)[0], 31, 26) == 0b000000;
|
||||
}
|
||||
|
||||
static bool is_slli_shift_at(address instr, uint32_t shift) {
|
||||
assert_cond(instr != nullptr);
|
||||
return (extract_opcode(instr) == 0b0010011 && // opcode field
|
||||
@ -153,6 +161,17 @@ class NativeInstruction {
|
||||
extract_rs1(addi4) == extract_rd(addi4);
|
||||
}
|
||||
|
||||
// the instruction sequence of li16u is as below:
|
||||
// lui
|
||||
// srli
|
||||
static bool check_li16u_data_dependency(address instr) {
|
||||
address lui = instr;
|
||||
address srli = lui + instruction_size;
|
||||
|
||||
return extract_rs1(srli) == extract_rd(lui) &&
|
||||
extract_rs1(srli) == extract_rd(srli);
|
||||
}
|
||||
|
||||
// the instruction sequence of li32 is as below:
|
||||
// lui
|
||||
// addiw
|
||||
@ -186,6 +205,7 @@ class NativeInstruction {
|
||||
}
|
||||
|
||||
static bool is_movptr_at(address instr);
|
||||
static bool is_li16u_at(address instr);
|
||||
static bool is_li32_at(address instr);
|
||||
static bool is_li64_at(address instr);
|
||||
static bool is_pc_relative_at(address branch);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,7 +32,8 @@
|
||||
// Relocations are byte-aligned.
|
||||
offset_unit = 1,
|
||||
// Must be at least 1 for RelocInfo::narrow_oop_in_const.
|
||||
format_width = 1
|
||||
// Must be at least 2 for ZGC GC barrier patching.
|
||||
format_width = 2
|
||||
};
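
The bump from one format bit to two is needed because the riscv ZGC barrier patching distinguishes four relocation formats (ZBarrierRelocationFormatLoadBadMask through ZBarrierRelocationFormatStoreBadMask, values 0 to 3, declared in zBarrierSetAssembler_riscv.hpp above). A one-line sanity sketch of that constraint (not JDK code):

constexpr int format_width = 2;
constexpr int store_bad_mask_format = 3;    // ZBarrierRelocationFormatStoreBadMask
static_assert(store_bad_mask_format < (1 << format_width),
              "barrier relocation formats must fit in format_width bits");
int main() { return 0; }
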
|
||||
|
||||
public:
|
||||
|
@ -51,9 +51,6 @@
|
||||
#ifdef COMPILER2
|
||||
#include "opto/runtime.hpp"
|
||||
#endif
|
||||
#if INCLUDE_ZGC
|
||||
#include "gc/z/zThreadLocalData.hpp"
|
||||
#endif
|
||||
|
||||
// Declaration and definition of StubGenerator (no .hpp file).
|
||||
// For a more detailed description of the stub routine structure
|
||||
@ -957,7 +954,11 @@ class StubGenerator: public StubCodeGenerator {
|
||||
Label same_aligned;
|
||||
Label copy_big, copy32_loop, copy8_loop, copy_small, done;
|
||||
|
||||
__ beqz(count, done);
|
||||
// The size of copy32_loop body increases significantly with ZGC GC barriers.
|
||||
// Need conditional far branches to reach a point beyond the loop in this case.
|
||||
bool is_far = UseZGC && ZGenerational;
|
||||
|
||||
__ beqz(count, done, is_far);
|
||||
__ slli(cnt, count, exact_log2(granularity));
|
||||
if (is_backwards) {
|
||||
__ add(src, s, cnt);
|
||||
@ -971,15 +972,15 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ addi(t0, cnt, -32);
|
||||
__ bgez(t0, copy32_loop);
|
||||
__ addi(t0, cnt, -8);
|
||||
__ bgez(t0, copy8_loop);
|
||||
__ bgez(t0, copy8_loop, is_far);
|
||||
__ j(copy_small);
|
||||
} else {
|
||||
__ mv(t0, 16);
|
||||
__ blt(cnt, t0, copy_small);
|
||||
__ blt(cnt, t0, copy_small, is_far);
|
||||
|
||||
__ xorr(t0, src, dst);
|
||||
__ andi(t0, t0, 0b111);
|
||||
__ bnez(t0, copy_small);
|
||||
__ bnez(t0, copy_small, is_far);
|
||||
|
||||
__ bind(same_aligned);
|
||||
__ andi(t0, src, 0b111);
|
||||
@ -995,26 +996,27 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ addi(dst, dst, step);
|
||||
}
|
||||
__ addi(cnt, cnt, -granularity);
|
||||
__ beqz(cnt, done);
|
||||
__ beqz(cnt, done, is_far);
|
||||
__ j(same_aligned);
|
||||
|
||||
__ bind(copy_big);
|
||||
__ mv(t0, 32);
|
||||
__ blt(cnt, t0, copy8_loop);
|
||||
__ blt(cnt, t0, copy8_loop, is_far);
|
||||
}
|
||||
|
||||
__ bind(copy32_loop);
|
||||
if (is_backwards) {
|
||||
__ addi(src, src, -wordSize * 4);
|
||||
__ addi(dst, dst, -wordSize * 4);
|
||||
}
|
||||
// we first load 32 bytes, then write it, so the direction here doesn't matter
|
||||
bs_asm->copy_load_at(_masm, decorators, type, 8, tmp3, Address(src), gct1);
|
||||
bs_asm->copy_load_at(_masm, decorators, type, 8, tmp4, Address(src, 8), gct1);
|
||||
bs_asm->copy_load_at(_masm, decorators, type, 8, tmp3, Address(src), gct1);
|
||||
bs_asm->copy_load_at(_masm, decorators, type, 8, tmp4, Address(src, 8), gct1);
|
||||
bs_asm->copy_load_at(_masm, decorators, type, 8, tmp5, Address(src, 16), gct1);
|
||||
bs_asm->copy_load_at(_masm, decorators, type, 8, tmp6, Address(src, 24), gct1);
|
||||
|
||||
bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst), tmp3, gct1, gct2, gct3);
|
||||
bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst, 8), tmp4, gct1, gct2, gct3);
|
||||
bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst), tmp3, gct1, gct2, gct3);
|
||||
bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst, 8), tmp4, gct1, gct2, gct3);
|
||||
bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst, 16), tmp5, gct1, gct2, gct3);
|
||||
bs_asm->copy_store_at(_masm, decorators, type, 8, Address(dst, 24), tmp6, gct1, gct2, gct3);
|
||||
|
||||
@ -3731,7 +3733,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
framesize // inclusive of return address
|
||||
};
|
||||
|
||||
const int insts_size = 512;
|
||||
const int insts_size = 1024;
|
||||
const int locs_size = 64;
|
||||
|
||||
CodeBuffer code(name, insts_size, locs_size);
|
||||
@ -3970,7 +3972,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
framesize // inclusive of return address
|
||||
};
|
||||
|
||||
int insts_size = 512;
|
||||
int insts_size = 1024;
|
||||
int locs_size = 64;
|
||||
CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size);
|
||||
OopMapSet* oop_maps = new OopMapSet();
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -39,8 +39,8 @@ enum platform_dependent_constants {
|
||||
// simply increase sizes if too small (assembler will crash if too small)
|
||||
_initial_stubs_code_size = 19000,
|
||||
_continuation_stubs_code_size = 2000,
|
||||
_compiler_stubs_code_size = 28000,
|
||||
_final_stubs_code_size = 28000
|
||||
_compiler_stubs_code_size = 128000,
|
||||
_final_stubs_code_size = 128000
|
||||
};
|
||||
|
||||
class riscv {
|
||||
|
@ -822,6 +822,7 @@ private:
|
||||
|
||||
// These are all easily abused and hence protected
|
||||
|
||||
public:
|
||||
// 32BIT ONLY SECTION
|
||||
#ifndef _LP64
|
||||
// Make these disappear in 64bit mode since they would never be correct
|
||||
@ -843,6 +844,7 @@ private:
|
||||
void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
|
||||
#endif // _LP64
|
||||
|
||||
protected:
|
||||
// These are unique in that we are ensured by the caller that the 32bit
|
||||
// relative in these instructions will always be able to reach the potentially
|
||||
// 64bit address described by entry. Since they can take a 64bit address they
|
||||
|
@ -1350,8 +1350,8 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
}
|
||||
#endif
|
||||
|
||||
// Load barrier has not yet been applied, so ZGC can't verify the oop here
|
||||
if (!UseZGC) {
|
||||
if (!(UseZGC && !ZGenerational)) {
|
||||
// Load barrier has not yet been applied, so ZGC can't verify the oop here
|
||||
__ verify_oop(dest->as_register());
|
||||
}
|
||||
}
|
||||
|
713
src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp
Normal file
@ -0,0 +1,713 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "asm/macroAssembler.inline.hpp"
|
||||
#include "code/codeBlob.hpp"
|
||||
#include "code/vmreg.inline.hpp"
|
||||
#include "gc/x/xBarrier.inline.hpp"
|
||||
#include "gc/x/xBarrierSet.hpp"
|
||||
#include "gc/x/xBarrierSetAssembler.hpp"
|
||||
#include "gc/x/xBarrierSetRuntime.hpp"
|
||||
#include "gc/x/xThreadLocalData.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_LIRAssembler.hpp"
|
||||
#include "c1/c1_MacroAssembler.hpp"
|
||||
#include "gc/x/c1/xBarrierSetC1.hpp"
|
||||
#endif // COMPILER1
|
||||
#ifdef COMPILER2
|
||||
#include "gc/x/c2/xBarrierSetC2.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) /* nothing */
|
||||
#else
|
||||
#define BLOCK_COMMENT(str) __ block_comment(str)
|
||||
#endif
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
static void call_vm(MacroAssembler* masm,
|
||||
address entry_point,
|
||||
Register arg0,
|
||||
Register arg1) {
|
||||
// Setup arguments
|
||||
if (arg1 == c_rarg0) {
|
||||
if (arg0 == c_rarg1) {
|
||||
__ xchgptr(c_rarg1, c_rarg0);
|
||||
} else {
|
||||
__ movptr(c_rarg1, arg1);
|
||||
__ movptr(c_rarg0, arg0);
|
||||
}
|
||||
} else {
|
||||
if (arg0 != c_rarg0) {
|
||||
__ movptr(c_rarg0, arg0);
|
||||
}
|
||||
if (arg1 != c_rarg1) {
|
||||
__ movptr(c_rarg1, arg1);
|
||||
}
|
||||
}
|
||||
|
||||
// Call VM
|
||||
__ MacroAssembler::call_VM_leaf_base(entry_point, 2);
|
||||
}
|
||||
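The argument shuffling in call_vm above only has to guarantee that moving arg0 and arg1 into c_rarg0 and c_rarg1 never overwrites a value before it has been read. A minimal C++ sketch of that same decision logic, with registers modeled as array indices (the names and setup below are illustrative only, not part of the commit):

    #include <utility>

    // regs[] models the register file; c_rarg0/c_rarg1 are the ABI argument slots,
    // arg0/arg1 are wherever the two values currently live.
    static void setup_two_args(long regs[], int c_rarg0, int c_rarg1, int arg0, int arg1) {
      if (arg1 == c_rarg0) {
        if (arg0 == c_rarg1) {
          std::swap(regs[c_rarg0], regs[c_rarg1]);  // both values cross: a single swap
        } else {
          regs[c_rarg1] = regs[arg1];               // copy arg1 (sitting in c_rarg0) before it is overwritten
          regs[c_rarg0] = regs[arg0];
        }
      } else {
        if (arg0 != c_rarg0) { regs[c_rarg0] = regs[arg0]; }
        if (arg1 != c_rarg1) { regs[c_rarg1] = regs[arg1]; }
      }
    }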
|
||||
void XBarrierSetAssembler::load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Register dst,
|
||||
Address src,
|
||||
Register tmp1,
|
||||
Register tmp_thread) {
|
||||
if (!XBarrierSet::barrier_needed(decorators, type)) {
|
||||
// Barrier not needed
|
||||
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
|
||||
return;
|
||||
}
|
||||
|
||||
BLOCK_COMMENT("XBarrierSetAssembler::load_at {");
|
||||
|
||||
// Allocate scratch register
|
||||
Register scratch = tmp1;
|
||||
if (tmp1 == noreg) {
|
||||
scratch = r12;
|
||||
__ push(scratch);
|
||||
}
|
||||
|
||||
assert_different_registers(dst, scratch);
|
||||
|
||||
Label done;
|
||||
|
||||
//
|
||||
// Fast Path
|
||||
//
|
||||
|
||||
// Load address
|
||||
__ lea(scratch, src);
|
||||
|
||||
// Load oop at address
|
||||
__ movptr(dst, Address(scratch, 0));
|
||||
|
||||
// Test address bad mask
|
||||
__ testptr(dst, address_bad_mask_from_thread(r15_thread));
|
||||
__ jcc(Assembler::zero, done);
|
||||
|
||||
//
|
||||
// Slow path
|
||||
//
|
||||
|
||||
// Save registers
|
||||
__ push(rax);
|
||||
__ push(rcx);
|
||||
__ push(rdx);
|
||||
__ push(rdi);
|
||||
__ push(rsi);
|
||||
__ push(r8);
|
||||
__ push(r9);
|
||||
__ push(r10);
|
||||
__ push(r11);
|
||||
|
||||
// We may end up here from generate_native_wrapper, then the method may have
|
||||
// floats as arguments, and we must spill them before calling the VM runtime
|
||||
// leaf. From the interpreter all floats are passed on the stack.
|
||||
assert(Argument::n_float_register_parameters_j == 8, "Assumption");
|
||||
const int xmm_size = wordSize * 2;
|
||||
const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
|
||||
__ subptr(rsp, xmm_spill_size);
|
||||
__ movdqu(Address(rsp, xmm_size * 7), xmm7);
|
||||
__ movdqu(Address(rsp, xmm_size * 6), xmm6);
|
||||
__ movdqu(Address(rsp, xmm_size * 5), xmm5);
|
||||
__ movdqu(Address(rsp, xmm_size * 4), xmm4);
|
||||
__ movdqu(Address(rsp, xmm_size * 3), xmm3);
|
||||
__ movdqu(Address(rsp, xmm_size * 2), xmm2);
|
||||
__ movdqu(Address(rsp, xmm_size * 1), xmm1);
|
||||
__ movdqu(Address(rsp, xmm_size * 0), xmm0);
|
||||
|
||||
// Call VM
|
||||
call_vm(masm, XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);
|
||||
|
||||
__ movdqu(xmm0, Address(rsp, xmm_size * 0));
|
||||
__ movdqu(xmm1, Address(rsp, xmm_size * 1));
|
||||
__ movdqu(xmm2, Address(rsp, xmm_size * 2));
|
||||
__ movdqu(xmm3, Address(rsp, xmm_size * 3));
|
||||
__ movdqu(xmm4, Address(rsp, xmm_size * 4));
|
||||
__ movdqu(xmm5, Address(rsp, xmm_size * 5));
|
||||
__ movdqu(xmm6, Address(rsp, xmm_size * 6));
|
||||
__ movdqu(xmm7, Address(rsp, xmm_size * 7));
|
||||
__ addptr(rsp, xmm_spill_size);
|
||||
|
||||
__ pop(r11);
|
||||
__ pop(r10);
|
||||
__ pop(r9);
|
||||
__ pop(r8);
|
||||
__ pop(rsi);
|
||||
__ pop(rdi);
|
||||
__ pop(rdx);
|
||||
__ pop(rcx);
|
||||
|
||||
if (dst == rax) {
|
||||
__ addptr(rsp, wordSize);
|
||||
} else {
|
||||
__ movptr(dst, rax);
|
||||
__ pop(rax);
|
||||
}
|
||||
|
||||
__ bind(done);
|
||||
|
||||
// Restore scratch register
|
||||
if (tmp1 == noreg) {
|
||||
__ pop(scratch);
|
||||
}
|
||||
|
||||
BLOCK_COMMENT("} XBarrierSetAssembler::load_at");
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
|
||||
void XBarrierSetAssembler::store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Address dst,
|
||||
Register src,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3) {
|
||||
BLOCK_COMMENT("XBarrierSetAssembler::store_at {");
|
||||
|
||||
// Verify oop store
|
||||
if (is_reference_type(type)) {
|
||||
// Note that src could be noreg, which means we
|
||||
// are storing null and can skip verification.
|
||||
if (src != noreg) {
|
||||
Label done;
|
||||
__ testptr(src, address_bad_mask_from_thread(r15_thread));
|
||||
__ jcc(Assembler::zero, done);
|
||||
__ stop("Verify oop store failed");
|
||||
__ should_not_reach_here();
|
||||
__ bind(done);
|
||||
}
|
||||
}
|
||||
|
||||
// Store value
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3);
|
||||
|
||||
BLOCK_COMMENT("} XBarrierSetAssembler::store_at");
|
||||
}
|
||||
|
||||
#endif // ASSERT
|
||||
|
||||
void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Register src,
|
||||
Register dst,
|
||||
Register count) {
|
||||
if (!XBarrierSet::barrier_needed(decorators, type)) {
|
||||
// Barrier not needed
|
||||
return;
|
||||
}
|
||||
|
||||
BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {");
|
||||
|
||||
// Save registers
|
||||
__ pusha();
|
||||
|
||||
// Call VM
|
||||
call_vm(masm, XBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);
|
||||
|
||||
// Restore registers
|
||||
__ popa();
|
||||
|
||||
BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue");
|
||||
}
|
||||
|
||||
void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
|
||||
Register jni_env,
|
||||
Register obj,
|
||||
Register tmp,
|
||||
Label& slowpath) {
|
||||
BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {");
|
||||
|
||||
// Resolve jobject
|
||||
BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
|
||||
|
||||
// Test address bad mask
|
||||
__ testptr(obj, address_bad_mask_from_jni_env(jni_env));
|
||||
__ jcc(Assembler::notZero, slowpath);
|
||||
|
||||
BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native");
|
||||
}
|
||||
|
||||
#ifdef COMPILER1
|
||||
|
||||
#undef __
|
||||
#define __ ce->masm()->
|
||||
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const {
|
||||
__ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
|
||||
}
|
||||
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
XLoadBarrierStubC1* stub) const {
|
||||
// Stub entry
|
||||
__ bind(*stub->entry());
|
||||
|
||||
Register ref = stub->ref()->as_register();
|
||||
Register ref_addr = noreg;
|
||||
Register tmp = noreg;
|
||||
|
||||
if (stub->tmp()->is_valid()) {
|
||||
// Load address into tmp register
|
||||
ce->leal(stub->ref_addr(), stub->tmp());
|
||||
ref_addr = tmp = stub->tmp()->as_pointer_register();
|
||||
} else {
|
||||
// Address already in register
|
||||
ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
|
||||
}
|
||||
|
||||
assert_different_registers(ref, ref_addr, noreg);
|
||||
|
||||
// Save rax unless it is the result or tmp register
|
||||
if (ref != rax && tmp != rax) {
|
||||
__ push(rax);
|
||||
}
|
||||
|
||||
// Setup arguments and call runtime stub
|
||||
__ subptr(rsp, 2 * BytesPerWord);
|
||||
ce->store_parameter(ref_addr, 1);
|
||||
ce->store_parameter(ref, 0);
|
||||
__ call(RuntimeAddress(stub->runtime_stub()));
|
||||
__ addptr(rsp, 2 * BytesPerWord);
|
||||
|
||||
// Verify result
|
||||
__ verify_oop(rax);
|
||||
|
||||
// Move result into place
|
||||
if (ref != rax) {
|
||||
__ movptr(ref, rax);
|
||||
}
|
||||
|
||||
// Restore rax unless it is the result or tmp register
|
||||
if (ref != rax && tmp != rax) {
|
||||
__ pop(rax);
|
||||
}
|
||||
|
||||
// Stub exit
|
||||
__ jmp(*stub->continuation());
|
||||
}
|
||||
|
||||
#undef __
|
||||
#define __ sasm->
|
||||
|
||||
void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const {
|
||||
// Enter and save registers
|
||||
__ enter();
|
||||
__ save_live_registers_no_oop_map(true /* save_fpu_registers */);
|
||||
|
||||
// Setup arguments
|
||||
__ load_parameter(1, c_rarg1);
|
||||
__ load_parameter(0, c_rarg0);
|
||||
|
||||
// Call VM
|
||||
__ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
|
||||
|
||||
// Restore registers and return
|
||||
__ restore_live_registers_except_rax(true /* restore_fpu_registers */);
|
||||
__ leave();
|
||||
__ ret(0);
|
||||
}
|
||||
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
|
||||
OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
|
||||
if (!OptoReg::is_reg(opto_reg)) {
|
||||
return OptoReg::Bad;
|
||||
}
|
||||
|
||||
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
|
||||
if (vm_reg->is_XMMRegister()) {
|
||||
opto_reg &= ~15;
|
||||
switch (node->ideal_reg()) {
|
||||
case Op_VecX:
|
||||
opto_reg |= 2;
|
||||
break;
|
||||
case Op_VecY:
|
||||
opto_reg |= 4;
|
||||
break;
|
||||
case Op_VecZ:
|
||||
opto_reg |= 8;
|
||||
break;
|
||||
default:
|
||||
opto_reg |= 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return opto_reg;
|
||||
}
|
||||
|
||||
// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
|
||||
extern void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
|
||||
int stack_offset, int reg, uint ireg, outputStream* st);
|
||||
|
||||
#undef __
|
||||
#define __ _masm->
|
||||
|
||||
class XSaveLiveRegisters {
|
||||
private:
|
||||
struct XMMRegisterData {
|
||||
XMMRegister _reg;
|
||||
int _size;
|
||||
|
||||
// Used by GrowableArray::find()
|
||||
bool operator == (const XMMRegisterData& other) {
|
||||
return _reg == other._reg;
|
||||
}
|
||||
};
|
||||
|
||||
MacroAssembler* const _masm;
|
||||
GrowableArray<Register> _gp_registers;
|
||||
GrowableArray<KRegister> _opmask_registers;
|
||||
GrowableArray<XMMRegisterData> _xmm_registers;
|
||||
int _spill_size;
|
||||
int _spill_offset;
|
||||
|
||||
static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
|
||||
if (left->_size == right->_size) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return (left->_size < right->_size) ? -1 : 1;
|
||||
}
|
||||
|
||||
static int xmm_slot_size(OptoReg::Name opto_reg) {
|
||||
// The low order 4 bytes denote what size of the XMM register is live
|
||||
return (opto_reg & 15) << 3;
|
||||
}
|
||||
|
||||
static uint xmm_ideal_reg_for_size(int reg_size) {
|
||||
switch (reg_size) {
|
||||
case 8:
|
||||
return Op_VecD;
|
||||
case 16:
|
||||
return Op_VecX;
|
||||
case 32:
|
||||
return Op_VecY;
|
||||
case 64:
|
||||
return Op_VecZ;
|
||||
default:
|
||||
fatal("Invalid register size %d", reg_size);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
bool xmm_needs_vzeroupper() const {
|
||||
return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
|
||||
}
|
||||
|
||||
void xmm_register_save(const XMMRegisterData& reg_data) {
|
||||
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
|
||||
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
|
||||
_spill_offset -= reg_data._size;
|
||||
vec_spill_helper(__ code(), false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
|
||||
}
|
||||
|
||||
void xmm_register_restore(const XMMRegisterData& reg_data) {
|
||||
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
|
||||
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
|
||||
vec_spill_helper(__ code(), true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
|
||||
_spill_offset += reg_data._size;
|
||||
}
|
||||
|
||||
void gp_register_save(Register reg) {
|
||||
_spill_offset -= 8;
|
||||
__ movq(Address(rsp, _spill_offset), reg);
|
||||
}
|
||||
|
||||
void opmask_register_save(KRegister reg) {
|
||||
_spill_offset -= 8;
|
||||
__ kmov(Address(rsp, _spill_offset), reg);
|
||||
}
|
||||
|
||||
void gp_register_restore(Register reg) {
|
||||
__ movq(reg, Address(rsp, _spill_offset));
|
||||
_spill_offset += 8;
|
||||
}
|
||||
|
||||
void opmask_register_restore(KRegister reg) {
|
||||
__ kmov(reg, Address(rsp, _spill_offset));
|
||||
_spill_offset += 8;
|
||||
}
|
||||
|
||||
void initialize(XLoadBarrierStubC2* stub) {
|
||||
// Create mask of caller saved registers that need to
|
||||
// be saved/restored if live
|
||||
RegMask caller_saved;
|
||||
caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
|
||||
caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
|
||||
caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
|
||||
caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
|
||||
caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
|
||||
caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
|
||||
caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
|
||||
caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
|
||||
caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
|
||||
caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));
|
||||
|
||||
// Create mask of live registers
|
||||
RegMask live = stub->live();
|
||||
if (stub->tmp() != noreg) {
|
||||
live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
|
||||
}
|
||||
|
||||
int gp_spill_size = 0;
|
||||
int opmask_spill_size = 0;
|
||||
int xmm_spill_size = 0;
|
||||
|
||||
// Record registers that need to be saved/restored
|
||||
RegMaskIterator rmi(live);
|
||||
while (rmi.has_next()) {
|
||||
const OptoReg::Name opto_reg = rmi.next();
|
||||
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
|
||||
|
||||
if (vm_reg->is_Register()) {
|
||||
if (caller_saved.Member(opto_reg)) {
|
||||
_gp_registers.append(vm_reg->as_Register());
|
||||
gp_spill_size += 8;
|
||||
}
|
||||
} else if (vm_reg->is_KRegister()) {
|
||||
// All opmask registers are caller saved, thus spill the ones
|
||||
// which are live.
|
||||
if (_opmask_registers.find(vm_reg->as_KRegister()) == -1) {
|
||||
_opmask_registers.append(vm_reg->as_KRegister());
|
||||
opmask_spill_size += 8;
|
||||
}
|
||||
} else if (vm_reg->is_XMMRegister()) {
|
||||
// We encode in the low order 4 bits of the opto_reg, how large part of the register is live
|
||||
const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
|
||||
const int reg_size = xmm_slot_size(opto_reg);
|
||||
const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
|
||||
const int reg_index = _xmm_registers.find(reg_data);
|
||||
if (reg_index == -1) {
|
||||
// Not previously appended
|
||||
_xmm_registers.append(reg_data);
|
||||
xmm_spill_size += reg_size;
|
||||
} else {
|
||||
// Previously appended, update size
|
||||
const int reg_size_prev = _xmm_registers.at(reg_index)._size;
|
||||
if (reg_size > reg_size_prev) {
|
||||
_xmm_registers.at_put(reg_index, reg_data);
|
||||
xmm_spill_size += reg_size - reg_size_prev;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fatal("Unexpected register type");
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by size, largest first
|
||||
_xmm_registers.sort(xmm_compare_register_size);
|
||||
|
||||
// On Windows, the caller reserves stack space for spilling register arguments
|
||||
const int arg_spill_size = frame::arg_reg_save_area_bytes;
|
||||
|
||||
// Stack pointer must be 16 bytes aligned for the call
|
||||
_spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16);
|
||||
}
|
||||
|
||||
public:
|
||||
XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
|
||||
_masm(masm),
|
||||
_gp_registers(),
|
||||
_opmask_registers(),
|
||||
_xmm_registers(),
|
||||
_spill_size(0),
|
||||
_spill_offset(0) {
|
||||
|
||||
//
|
||||
// Stack layout after registers have been spilled:
|
||||
//
|
||||
// | ... | original rsp, 16 bytes aligned
|
||||
// ------------------
|
||||
// | zmm0 high |
|
||||
// | ... |
|
||||
// | zmm0 low | 16 bytes aligned
|
||||
// | ... |
|
||||
// | ymm1 high |
|
||||
// | ... |
|
||||
// | ymm1 low | 16 bytes aligned
|
||||
// | ... |
|
||||
// | xmmN high |
|
||||
// | ... |
|
||||
// | xmmN low | 8 bytes aligned
|
||||
// | reg0 | 8 bytes aligned
|
||||
// | reg1 |
|
||||
// | ... |
|
||||
// | regN | new rsp, if 16 bytes aligned
|
||||
// | <padding> | else new rsp, 16 bytes aligned
|
||||
// ------------------
|
||||
//
|
||||
|
||||
// Figure out what registers to save/restore
|
||||
initialize(stub);
|
||||
|
||||
// Allocate stack space
|
||||
if (_spill_size > 0) {
|
||||
__ subptr(rsp, _spill_size);
|
||||
}
|
||||
|
||||
// Save XMM/YMM/ZMM registers
|
||||
for (int i = 0; i < _xmm_registers.length(); i++) {
|
||||
xmm_register_save(_xmm_registers.at(i));
|
||||
}
|
||||
|
||||
if (xmm_needs_vzeroupper()) {
|
||||
__ vzeroupper();
|
||||
}
|
||||
|
||||
// Save general purpose registers
|
||||
for (int i = 0; i < _gp_registers.length(); i++) {
|
||||
gp_register_save(_gp_registers.at(i));
|
||||
}
|
||||
|
||||
// Save opmask registers
|
||||
for (int i = 0; i < _opmask_registers.length(); i++) {
|
||||
opmask_register_save(_opmask_registers.at(i));
|
||||
}
|
||||
}
|
||||
|
||||
~XSaveLiveRegisters() {
|
||||
// Restore opmask registers
|
||||
for (int i = _opmask_registers.length() - 1; i >= 0; i--) {
|
||||
opmask_register_restore(_opmask_registers.at(i));
|
||||
}
|
||||
|
||||
// Restore general purpose registers
|
||||
for (int i = _gp_registers.length() - 1; i >= 0; i--) {
|
||||
gp_register_restore(_gp_registers.at(i));
|
||||
}
|
||||
|
||||
__ vzeroupper();
|
||||
|
||||
// Restore XMM/YMM/ZMM registers
|
||||
for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
|
||||
xmm_register_restore(_xmm_registers.at(i));
|
||||
}
|
||||
|
||||
// Free stack space
|
||||
if (_spill_size > 0) {
|
||||
__ addptr(rsp, _spill_size);
|
||||
}
|
||||
}
|
||||
};
|
||||
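The xmm_slot_size and xmm_ideal_reg_for_size helpers in XSaveLiveRegisters above round-trip the live-width encoding that refine_register writes into the low four bits of the OptoReg name: the nibble shifted left by three gives the live byte count, and the byte count maps back to an ideal register kind. A small hypothetical check of that arithmetic:

    #include <cassert>

    static int xmm_slot_size(int opto_reg) {
      return (opto_reg & 15) << 3;   // low nibble * 8 bytes
    }

    int main() {
      assert(xmm_slot_size(0x41) == 8);    // nibble 1 -> 8 bytes  (Op_VecD)
      assert(xmm_slot_size(0x42) == 16);   // nibble 2 -> 16 bytes (Op_VecX)
      assert(xmm_slot_size(0x44) == 32);   // nibble 4 -> 32 bytes (Op_VecY)
      assert(xmm_slot_size(0x48) == 64);   // nibble 8 -> 64 bytes (Op_VecZ)
      return 0;
    }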
|
||||
class XSetupArguments {
|
||||
private:
|
||||
MacroAssembler* const _masm;
|
||||
const Register _ref;
|
||||
const Address _ref_addr;
|
||||
|
||||
public:
|
||||
XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) :
|
||||
_masm(masm),
|
||||
_ref(stub->ref()),
|
||||
_ref_addr(stub->ref_addr()) {
|
||||
|
||||
// Setup arguments
|
||||
if (_ref_addr.base() == noreg) {
|
||||
// No self healing
|
||||
if (_ref != c_rarg0) {
|
||||
__ movq(c_rarg0, _ref);
|
||||
}
|
||||
__ xorq(c_rarg1, c_rarg1);
|
||||
} else {
|
||||
// Self healing
|
||||
if (_ref == c_rarg0) {
|
||||
__ lea(c_rarg1, _ref_addr);
|
||||
} else if (_ref != c_rarg1) {
|
||||
__ lea(c_rarg1, _ref_addr);
|
||||
__ movq(c_rarg0, _ref);
|
||||
} else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
|
||||
__ movq(c_rarg0, _ref);
|
||||
__ lea(c_rarg1, _ref_addr);
|
||||
} else {
|
||||
__ xchgq(c_rarg0, c_rarg1);
|
||||
if (_ref_addr.base() == c_rarg0) {
|
||||
__ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp()));
|
||||
} else if (_ref_addr.index() == c_rarg0) {
|
||||
__ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp()));
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
~XSetupArguments() {
|
||||
// Transfer result
|
||||
if (_ref != rax) {
|
||||
__ movq(_ref, rax);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const {
|
||||
BLOCK_COMMENT("XLoadBarrierStubC2");
|
||||
|
||||
// Stub entry
|
||||
__ bind(*stub->entry());
|
||||
|
||||
{
|
||||
XSaveLiveRegisters save_live_registers(masm, stub);
|
||||
XSetupArguments setup_arguments(masm, stub);
|
||||
__ call(RuntimeAddress(stub->slow_path()));
|
||||
}
|
||||
|
||||
// Stub exit
|
||||
__ jmp(*stub->continuation());
|
||||
}
|
||||
|
||||
#endif // COMPILER2
|
||||
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
|
||||
// Check if metadata bits indicate a bad oop
|
||||
__ testptr(obj, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ jcc(Assembler::notZero, error);
|
||||
BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
|
||||
}
|
||||
|
||||
#undef __
|
109
src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.hpp
Normal file
@ -0,0 +1,109 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP
|
||||
#define CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP
|
||||
|
||||
#include "code/vmreg.hpp"
|
||||
#include "oops/accessDecorators.hpp"
|
||||
#ifdef COMPILER2
|
||||
#include "opto/optoreg.hpp"
|
||||
#endif // COMPILER2
|
||||
|
||||
class MacroAssembler;
|
||||
|
||||
#ifdef COMPILER1
|
||||
class LIR_Assembler;
|
||||
class LIR_Opr;
|
||||
class StubAssembler;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class Node;
|
||||
#endif // COMPILER2
|
||||
|
||||
#ifdef COMPILER1
|
||||
class XLoadBarrierStubC1;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class XLoadBarrierStubC2;
|
||||
#endif // COMPILER2
|
||||
|
||||
class XBarrierSetAssembler : public XBarrierSetAssemblerBase {
|
||||
public:
|
||||
virtual void load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Register dst,
|
||||
Address src,
|
||||
Register tmp1,
|
||||
Register tmp_thread);
|
||||
|
||||
#ifdef ASSERT
|
||||
virtual void store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Address dst,
|
||||
Register src,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3);
|
||||
#endif // ASSERT
|
||||
|
||||
virtual void arraycopy_prologue(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
Register src,
|
||||
Register dst,
|
||||
Register count);
|
||||
|
||||
virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
|
||||
Register jni_env,
|
||||
Register obj,
|
||||
Register tmp,
|
||||
Label& slowpath);
|
||||
|
||||
#ifdef COMPILER1
|
||||
void generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const;
|
||||
|
||||
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
XLoadBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
|
||||
DecoratorSet decorators) const;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
OptoReg::Name refine_register(const Node* node,
|
||||
OptoReg::Name opto_reg);
|
||||
|
||||
void generate_c2_load_barrier_stub(MacroAssembler* masm,
|
||||
XLoadBarrierStubC2* stub) const;
|
||||
#endif // COMPILER2
|
||||
|
||||
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
|
||||
};
|
||||
|
||||
#endif // CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP
|
@ -23,7 +23,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/x/xGlobals.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
@ -136,14 +136,14 @@
|
||||
// * 63-48 Fixed (16-bits, always zero)
|
||||
//
|
||||
|
||||
size_t ZPlatformAddressOffsetBits() {
|
||||
size_t XPlatformAddressOffsetBits() {
|
||||
const size_t min_address_offset_bits = 42; // 4TB
|
||||
const size_t max_address_offset_bits = 44; // 16TB
|
||||
const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
|
||||
const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio);
|
||||
const size_t address_offset_bits = log2i_exact(address_offset);
|
||||
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
|
||||
}
|
||||
|
||||
size_t ZPlatformAddressMetadataShift() {
|
||||
return ZPlatformAddressOffsetBits();
|
||||
size_t XPlatformAddressMetadataShift() {
|
||||
return XPlatformAddressOffsetBits();
|
||||
}
|
33
src/hotspot/cpu/x86/gc/x/xGlobals_x86.hpp
Normal file
@ -0,0 +1,33 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_X86_GC_X_XGLOBALS_X86_HPP
|
||||
#define CPU_X86_GC_X_XGLOBALS_X86_HPP
|
||||
|
||||
const size_t XPlatformHeapViews = 3;
|
||||
const size_t XPlatformCacheLineSize = 64;
|
||||
|
||||
size_t XPlatformAddressOffsetBits();
|
||||
size_t XPlatformAddressMetadataShift();
|
||||
|
||||
#endif // CPU_X86_GC_X_XGLOBALS_X86_HPP
|
158
src/hotspot/cpu/x86/gc/x/x_x86_64.ad
Normal file
@ -0,0 +1,158 @@
|
||||
//
|
||||
// Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License version 2 only, as
|
||||
// published by the Free Software Foundation.
|
||||
//
|
||||
// This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
// version 2 for more details (a copy is included in the LICENSE file that
|
||||
// accompanied this code).
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License version
|
||||
// 2 along with this work; if not, write to the Free Software Foundation,
|
||||
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
//
|
||||
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
// or visit www.oracle.com if you need additional information or have any
|
||||
// questions.
|
||||
//
|
||||
|
||||
source_hpp %{
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/x/c2/xBarrierSetC2.hpp"
|
||||
#include "gc/x/xThreadLocalData.hpp"
|
||||
|
||||
%}
|
||||
|
||||
source %{
|
||||
|
||||
#include "c2_intelJccErratum_x86.hpp"
|
||||
|
||||
static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
|
||||
if (barrier_data == XLoadBarrierElided) {
|
||||
return;
|
||||
}
|
||||
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
|
||||
{
|
||||
IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */);
|
||||
__ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ jcc(Assembler::notZero, *stub->entry());
|
||||
}
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void x_load_barrier_cmpxchg(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) {
|
||||
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
|
||||
{
|
||||
IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */);
|
||||
__ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
|
||||
__ jcc(Assembler::zero, good);
|
||||
}
|
||||
{
|
||||
IntelJccErratumAlignment intel_alignment(_masm, 5 /* jcc_size */);
|
||||
__ jmp(*stub->entry());
|
||||
}
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void x_cmpxchg_common(MacroAssembler& _masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) {
|
||||
// Compare value (oldval) is in rax
|
||||
const Address mem = Address(mem_reg, 0);
|
||||
|
||||
if (node->barrier_data() != XLoadBarrierElided) {
|
||||
__ movptr(tmp, rax);
|
||||
}
|
||||
|
||||
__ lock();
|
||||
__ cmpxchgptr(newval, mem);
|
||||
|
||||
if (node->barrier_data() != XLoadBarrierElided) {
|
||||
Label good;
|
||||
x_load_barrier_cmpxchg(_masm, node, mem, rax, tmp, good);
|
||||
__ movptr(rax, tmp);
|
||||
__ lock();
|
||||
__ cmpxchgptr(newval, mem);
|
||||
__ bind(good);
|
||||
}
|
||||
}
|
||||
|
||||
%}
|
||||
|
||||
// Load Pointer
|
||||
instruct xLoadP(rRegP dst, memory mem, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0);
|
||||
match(Set dst (LoadP mem));
|
||||
effect(KILL cr, TEMP dst);
|
||||
|
||||
ins_cost(125);
|
||||
|
||||
format %{ "movq $dst, $mem" %}
|
||||
|
||||
ins_encode %{
|
||||
__ movptr($dst$$Register, $mem$$Address);
|
||||
x_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data());
|
||||
%}
|
||||
|
||||
ins_pipe(ialu_reg_mem);
|
||||
%}
|
||||
|
||||
instruct xCompareAndExchangeP(indirect mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{
|
||||
match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
|
||||
effect(KILL cr, TEMP tmp);
|
||||
|
||||
format %{ "lock\n\t"
|
||||
"cmpxchgq $newval, $mem" %}
|
||||
|
||||
ins_encode %{
|
||||
precond($oldval$$Register == rax);
|
||||
x_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_cmpxchg);
|
||||
%}
|
||||
|
||||
instruct xCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{
|
||||
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
|
||||
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
|
||||
effect(KILL cr, KILL oldval, TEMP tmp);
|
||||
|
||||
format %{ "lock\n\t"
|
||||
"cmpxchgq $newval, $mem\n\t"
|
||||
"sete $res\n\t"
|
||||
"movzbl $res, $res" %}
|
||||
|
||||
ins_encode %{
|
||||
precond($oldval$$Register == rax);
|
||||
x_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
|
||||
if (barrier_data() != XLoadBarrierElided) {
|
||||
__ cmpptr($tmp$$Register, rax);
|
||||
}
|
||||
__ setb(Assembler::equal, $res$$Register);
|
||||
__ movzbl($res$$Register, $res$$Register);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_cmpxchg);
|
||||
%}
|
||||
|
||||
instruct xXChgP(indirect mem, rRegP newval, rFlagsReg cr) %{
|
||||
match(Set newval (GetAndSetP mem newval));
|
||||
predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() != 0);
|
||||
effect(KILL cr);
|
||||
|
||||
format %{ "xchgq $newval, $mem" %}
|
||||
|
||||
ins_encode %{
|
||||
__ xchgptr($newval$$Register, Address($mem$$Register, 0));
|
||||
x_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data());
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_cmpxchg);
|
||||
%}
|
47
src/hotspot/cpu/x86/gc/z/zAddress_x86.cpp
Normal file
@ -0,0 +1,47 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
size_t ZPointerLoadShift;
|
||||
|
||||
size_t ZPlatformAddressOffsetBits() {
|
||||
const size_t min_address_offset_bits = 42; // 4TB
|
||||
const size_t max_address_offset_bits = 44; // 16TB
|
||||
const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
|
||||
const size_t address_offset_bits = log2i_exact(address_offset);
|
||||
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
|
||||
}
|
||||
|
||||
size_t ZPlatformAddressHeapBaseShift() {
|
||||
return ZPlatformAddressOffsetBits();
|
||||
}
|
||||
|
||||
void ZGlobalsPointers::pd_set_good_masks() {
|
||||
ZPointerLoadShift = ZPointer::load_shift_lookup(ZPointerLoadGoodMask);
|
||||
}
|
34
src/hotspot/cpu/x86/gc/z/zAddress_x86.hpp
Normal file
@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_X86_GC_Z_ZADDRESS_X86_HPP
|
||||
#define CPU_X86_GC_Z_ZADDRESS_X86_HPP
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
extern size_t ZPointerLoadShift;
|
||||
|
||||
size_t ZPlatformAddressOffsetBits();
|
||||
size_t ZPlatformAddressHeapBaseShift();
|
||||
|
||||
#endif // CPU_X86_GC_Z_ZADDRESS_X86_HPP
|
39
src/hotspot/cpu/x86/gc/z/zAddress_x86.inline.hpp
Normal file
@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef CPU_X86_GC_Z_ZADDRESS_X86_INLINE_HPP
|
||||
#define CPU_X86_GC_Z_ZADDRESS_X86_INLINE_HPP
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
inline uintptr_t ZPointer::remap_bits(uintptr_t colored) {
|
||||
return colored & ZPointerRemappedMask;
|
||||
}
|
||||
|
||||
inline constexpr int ZPointer::load_shift_lookup(uintptr_t value) {
|
||||
const size_t index = load_shift_lookup_index(value);
|
||||
assert(index == 0 || is_power_of_2(index), "Incorrect load shift: " SIZE_FORMAT, index);
|
||||
return ZPointerLoadShiftTable[index];
|
||||
}
|
||||
|
||||
#endif // CPU_X86_GC_Z_ZADDRESS_X86_INLINE_HPP
|
File diff suppressed because it is too large
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -33,19 +33,41 @@
|
||||
class MacroAssembler;
|
||||
|
||||
#ifdef COMPILER1
|
||||
class CodeStub;
|
||||
class LIR_Address;
|
||||
class LIR_Assembler;
|
||||
class LIR_Opr;
|
||||
class StubAssembler;
|
||||
class ZLoadBarrierStubC1;
|
||||
class ZStoreBarrierStubC1;
|
||||
#endif // COMPILER1
|
||||
|
||||
#ifdef COMPILER2
|
||||
class MachNode;
|
||||
class Node;
|
||||
class ZLoadBarrierStubC2;
|
||||
class ZStoreBarrierStubC2;
|
||||
#endif // COMPILER2
|
||||
|
||||
const int ZBarrierRelocationFormatLoadGoodBeforeShl = 0;
|
||||
const int ZBarrierRelocationFormatLoadBadAfterTest = 1;
|
||||
const int ZBarrierRelocationFormatMarkBadAfterTest = 2;
|
||||
const int ZBarrierRelocationFormatStoreGoodAfterCmp = 3;
|
||||
const int ZBarrierRelocationFormatStoreBadAfterTest = 4;
|
||||
const int ZBarrierRelocationFormatStoreGoodAfterOr = 5;
|
||||
const int ZBarrierRelocationFormatStoreGoodAfterMov = 6;
|
||||
|
||||
class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
|
||||
private:
|
||||
GrowableArrayCHeap<address, mtGC> _load_bad_relocations;
|
||||
GrowableArrayCHeap<address, mtGC> _store_bad_relocations;
|
||||
GrowableArrayCHeap<address, mtGC> _store_good_relocations;
|
||||
|
||||
public:
|
||||
static const int32_t _zpointer_address_mask = 0xFFFF0000;
|
||||
|
||||
ZBarrierSetAssembler();
|
||||
|
||||
virtual void load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
@ -54,7 +76,6 @@ public:
|
||||
Register tmp1,
|
||||
Register tmp_thread);
|
||||
|
||||
#ifdef ASSERT
|
||||
virtual void store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
@ -63,7 +84,43 @@ public:
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3);
|
||||
#endif // ASSERT
|
||||
|
||||
virtual bool supports_avx3_masked_arraycopy();
|
||||
|
||||
virtual void copy_load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
size_t bytes,
|
||||
Register dst,
|
||||
Address src,
|
||||
Register tmp);
|
||||
|
||||
virtual void copy_store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
size_t bytes,
|
||||
Address dst,
|
||||
Register src,
|
||||
Register tmp);
|
||||
|
||||
virtual void copy_load_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
size_t bytes,
|
||||
XMMRegister dst,
|
||||
Address src,
|
||||
Register tmp,
|
||||
XMMRegister xmm_tmp);
|
||||
|
||||
virtual void copy_store_at(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
BasicType type,
|
||||
size_t bytes,
|
||||
Address dst,
|
||||
XMMRegister src,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
XMMRegister xmm_tmp);
|
||||
|
||||
virtual void arraycopy_prologue(MacroAssembler* masm,
|
||||
DecoratorSet decorators,
|
||||
@ -79,8 +136,25 @@ public:
|
||||
Label& slowpath);
|
||||
|
||||
#ifdef COMPILER1
|
||||
void generate_c1_load_barrier_test(LIR_Assembler* ce,
|
||||
LIR_Opr ref) const;
|
||||
void generate_c1_color(LIR_Assembler* ce, LIR_Opr ref) const;
|
||||
void generate_c1_uncolor(LIR_Assembler* ce, LIR_Opr ref) const;
|
||||
|
||||
void generate_c1_store_barrier(LIR_Assembler* ce,
|
||||
LIR_Address* addr,
|
||||
LIR_Opr new_zaddress,
|
||||
LIR_Opr new_zpointer,
|
||||
ZStoreBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_store_barrier_stub(LIR_Assembler* ce,
|
||||
ZStoreBarrierStubC1* stub) const;
|
||||
|
||||
void generate_c1_store_barrier_runtime_stub(StubAssembler* sasm,
|
||||
bool self_healing) const;
|
||||
|
||||
void generate_c1_load_barrier(LIR_Assembler* ce,
|
||||
LIR_Opr ref,
|
||||
ZLoadBarrierStubC1* stub,
|
||||
bool on_non_strong) const;
|
||||
|
||||
void generate_c1_load_barrier_stub(LIR_Assembler* ce,
|
||||
ZLoadBarrierStubC1* stub) const;
|
||||
@ -95,8 +169,32 @@ public:
|
||||
|
||||
void generate_c2_load_barrier_stub(MacroAssembler* masm,
|
||||
ZLoadBarrierStubC2* stub) const;
|
||||
void generate_c2_store_barrier_stub(MacroAssembler* masm,
|
||||
ZStoreBarrierStubC2* stub) const;
|
||||
#endif // COMPILER2
|
||||
|
||||
void store_barrier_fast(MacroAssembler* masm,
|
||||
Address ref_addr,
|
||||
Register rnew_persistent,
|
||||
Register rnew_transient,
|
||||
bool in_nmethod,
|
||||
bool is_atomic,
|
||||
Label& medium_path,
|
||||
Label& medium_path_continuation) const;
|
||||
|
||||
void store_barrier_medium(MacroAssembler* masm,
|
||||
Address ref_addr,
|
||||
Register tmp,
|
||||
bool is_native,
|
||||
bool is_atomic,
|
||||
Label& medium_path_continuation,
|
||||
Label& slow_path,
|
||||
Label& slow_path_continuation) const;
|
||||
|
||||
void patch_barrier_relocation(address addr, int format);
|
||||
|
||||
void patch_barriers();
|
||||
|
||||
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
|
||||
};
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,10 +24,6 @@
|
||||
#ifndef CPU_X86_GC_Z_ZGLOBALS_X86_HPP
|
||||
#define CPU_X86_GC_Z_ZGLOBALS_X86_HPP
|
||||
|
||||
const size_t ZPlatformHeapViews = 3;
|
||||
const size_t ZPlatformCacheLineSize = 64;
|
||||
|
||||
size_t ZPlatformAddressOffsetBits();
|
||||
size_t ZPlatformAddressMetadataShift();
|
||||
|
||||
#endif // CPU_X86_GC_Z_ZGLOBALS_X86_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
//
|
||||
// Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,52 +32,66 @@ source_hpp %{
|
||||
source %{
|
||||
|
||||
#include "c2_intelJccErratum_x86.hpp"
|
||||
#include "gc/z/zBarrierSetAssembler.hpp"
|
||||
|
||||
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
|
||||
if (barrier_data == ZLoadBarrierElided) {
|
||||
static void z_color(MacroAssembler& _masm, const MachNode* node, Register ref) {
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl);
|
||||
__ shlq(ref, barrier_Relocation::unpatched);
|
||||
__ orq_imm32(ref, barrier_Relocation::unpatched);
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterOr);
|
||||
}
|
||||
|
||||
static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) {
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl);
|
||||
__ shrq(ref, barrier_Relocation::unpatched);
|
||||
}
|
||||
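z_color and z_uncolor above are the generational ZGC pointer-coloring primitives: coloring shifts the raw address left and ORs in the currently-good store color, uncoloring shifts right again, and both the shift amount and the color bits are patched into the instruction stream later through the ZBarrierRelocationFormat* relocations. A rough C++ model, with the shift and color values picked purely for illustration:

    #include <cstdint>

    // Illustrative constants only: the real values are patched at runtime via relocations.
    constexpr unsigned kLoadShift     = 16;       // assumed number of low color bits
    constexpr uint64_t kStoreGoodBits = 0x0021;   // assumed currently-good color

    constexpr uint64_t color(uint64_t zaddress)   { return (zaddress << kLoadShift) | kStoreGoodBits; }
    constexpr uint64_t uncolor(uint64_t zpointer) { return zpointer >> kLoadShift; }

    static_assert(uncolor(color(0x7f00dead0000ull)) == 0x7f00dead0000ull,
                  "coloring and uncoloring round-trip the address");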
|
||||
static void z_keep_alive_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) {
|
||||
__ Assembler::testl(ref, barrier_Relocation::unpatched);
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadAfterTest);
|
||||
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
|
||||
__ jcc(Assembler::notEqual, *stub->entry());
|
||||
|
||||
z_uncolor(_masm, node, ref);
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) {
|
||||
Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
|
||||
const bool on_non_strong =
|
||||
((node->barrier_data() & ZBarrierWeak) != 0) ||
|
||||
((node->barrier_data() & ZBarrierPhantom) != 0);
|
||||
|
||||
if (on_non_strong) {
|
||||
z_keep_alive_load_barrier(_masm, node, ref_addr, ref);
|
||||
return;
|
||||
}
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
|
||||
{
|
||||
IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */);
|
||||
__ testptr(ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
|
||||
__ jcc(Assembler::notZero, *stub->entry());
|
||||
|
||||
z_uncolor(_masm, node, ref);
|
||||
if (node->barrier_data() == ZBarrierElided) {
|
||||
return;
|
||||
}
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
|
||||
IntelJccErratumAlignment(_masm, 6);
|
||||
__ jcc(Assembler::above, *stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void z_load_barrier_cmpxchg(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) {
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, ZLoadBarrierStrong);
|
||||
{
|
||||
IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */);
|
||||
__ testptr(ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
|
||||
__ jcc(Assembler::zero, good);
|
||||
}
|
||||
{
|
||||
IntelJccErratumAlignment intel_alignment(_masm, 5 /* jcc_size */);
|
||||
__ jmp(*stub->entry());
|
||||
}
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
static void z_cmpxchg_common(MacroAssembler& _masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) {
|
||||
// Compare value (oldval) is in rax
|
||||
const Address mem = Address(mem_reg, 0);
|
||||
|
||||
if (node->barrier_data() != ZLoadBarrierElided) {
|
||||
__ movptr(tmp, rax);
|
||||
}
|
||||
|
||||
__ lock();
|
||||
__ cmpxchgptr(newval, mem);
|
||||
|
||||
if (node->barrier_data() != ZLoadBarrierElided) {
|
||||
Label good;
|
||||
z_load_barrier_cmpxchg(_masm, node, mem, rax, tmp, good);
|
||||
__ movptr(rax, tmp);
|
||||
__ lock();
|
||||
__ cmpxchgptr(newval, mem);
|
||||
__ bind(good);
|
||||
static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
|
||||
Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
|
||||
if (node->barrier_data() == ZBarrierElided) {
|
||||
if (rnew_zaddress != noreg) {
|
||||
// noreg means null; no need to color
|
||||
__ movptr(rnew_zpointer, rnew_zaddress);
|
||||
z_color(_masm, node, rnew_zpointer);
|
||||
}
|
||||
} else {
|
||||
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
|
||||
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
|
||||
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
|
||||
bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
|
||||
}
|
||||
}
|
||||
|
||||
@ -86,9 +100,9 @@ static void z_cmpxchg_common(MacroAssembler& _masm, const MachNode* node, Regist
|
||||
// Load Pointer
|
||||
instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseZGC && n->as_Load()->barrier_data() != 0);
|
||||
predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0);
|
||||
match(Set dst (LoadP mem));
|
||||
effect(KILL cr, TEMP dst);
|
||||
effect(TEMP dst, KILL cr);
|
||||
|
||||
ins_cost(125);
|
||||
|
||||
@ -96,33 +110,91 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
|
||||
|
||||
ins_encode %{
|
||||
__ movptr($dst$$Register, $mem$$Address);
|
||||
z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data());
|
||||
z_load_barrier(_masm, this, $mem$$Address, $dst$$Register);
|
||||
%}
|
||||
|
||||
ins_pipe(ialu_reg_mem);
|
||||
%}
|
||||
|
||||
instruct zCompareAndExchangeP(indirect mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{
|
||||
// Load Pointer and Null Check
|
||||
instruct zLoadPNullCheck(rFlagsReg cr, memory op, immP0 zero)
|
||||
%{
|
||||
predicate(UseZGC && ZGenerational && n->in(1)->as_Load()->barrier_data() != 0);
|
||||
match(Set cr (CmpP (LoadP op) zero));
|
||||
|
||||
ins_cost(500); // XXX
|
||||
format %{ "testq $op, 0xffffffffffff0000\t# ptr" %}
|
||||
ins_encode %{
|
||||
// A null pointer will have all address bits 0. This mask sign extends
|
||||
// all address bits, so we can test if the address is 0.
|
||||
__ testq($op$$Address, ZBarrierSetAssembler::_zpointer_address_mask);
|
||||
%}
|
||||
ins_pipe(ialu_cr_reg_imm);
|
||||
%}
|
||||
|
||||
// Store Pointer
|
||||
instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreP mem src));
|
||||
effect(TEMP tmp, KILL cr);
|
||||
|
||||
ins_cost(125); // XXX
|
||||
format %{ "movq $mem, $src\t# ptr" %}
|
||||
ins_encode %{
|
||||
z_store_barrier(_masm, this, $mem$$Address, $src$$Register, $tmp$$Register, false /* is_atomic */);
|
||||
__ movq($mem$$Address, $tmp$$Register);
|
||||
%}
|
||||
ins_pipe(ialu_mem_reg);
|
||||
%}
|
||||
|
||||
// Store Null Pointer
|
||||
instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
|
||||
match(Set mem (StoreP mem zero));
|
||||
effect(TEMP tmp, KILL cr);
|
||||
|
||||
ins_cost(125); // XXX
|
||||
format %{ "movq $mem, 0\t# ptr" %}
|
||||
ins_encode %{
|
||||
z_store_barrier(_masm, this, $mem$$Address, noreg, $tmp$$Register, false /* is_atomic */);
|
||||
// Store a colored null - barrier code above does not need to color
|
||||
__ movq($mem$$Address, barrier_Relocation::unpatched);
|
||||
// The relocation can't be fully after the mov, as that is the beginning of a random subsequent
|
||||
// instruction, which violates assumptions made by unrelated code. Hence the end() - 1
|
||||
__ code_section()->relocate(__ code_section()->end() - 1, barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterMov);
|
||||
%}
|
||||
ins_pipe(ialu_mem_reg);
|
||||
%}
|
||||
|
||||
instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{
|
||||
match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
|
||||
predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
|
||||
effect(KILL cr, TEMP tmp);
|
||||
predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
|
||||
effect(TEMP tmp, KILL cr);
|
||||
|
||||
format %{ "lock\n\t"
|
||||
"cmpxchgq $newval, $mem" %}
|
||||
|
||||
ins_encode %{
|
||||
precond($oldval$$Register == rax);
|
||||
z_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
|
||||
assert_different_registers($oldval$$Register, $mem$$Register);
|
||||
assert_different_registers($oldval$$Register, $newval$$Register);
|
||||
const Address mem_addr = Address($mem$$Register, 0);
|
||||
z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
|
||||
z_color(_masm, this, $oldval$$Register);
|
||||
__ lock();
|
||||
__ cmpxchgptr($tmp$$Register, mem_addr);
|
||||
z_uncolor(_masm, this, $oldval$$Register);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_cmpxchg);
|
||||
%}
|
||||
|
||||

instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{
instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
  effect(KILL cr, KILL oldval, TEMP tmp);
  predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP tmp, KILL oldval, KILL cr);

  format %{ "lock\n\t"
            "cmpxchgq $newval, $mem\n\t"
@ -130,11 +202,12 @@ instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFla
            "movzbl $res, $res" %}

  ins_encode %{
    precond($oldval$$Register == rax);
    z_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
    if (barrier_data() != ZLoadBarrierElided) {
      __ cmpptr($tmp$$Register, rax);
    }
    assert_different_registers($oldval$$Register, $mem$$Register);
    const Address mem_addr = Address($mem$$Register, 0);
    z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
    z_color(_masm, this, $oldval$$Register);
    __ lock();
    __ cmpxchgptr($tmp$$Register, mem_addr);
    __ setb(Assembler::equal, $res$$Register);
    __ movzbl($res$$Register, $res$$Register);
  %}
@ -142,16 +215,20 @@ instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFla
  ins_pipe(pipe_cmpxchg);
%}

instruct zXChgP(indirect mem, rRegP newval, rFlagsReg cr) %{
instruct zXChgP(indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr) %{
  match(Set newval (GetAndSetP mem newval));
  predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
  effect(KILL cr);
  predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP tmp, KILL cr);

  format %{ "xchgq $newval, $mem" %}

  ins_encode %{
    __ xchgptr($newval$$Register, Address($mem$$Register, 0));
    z_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data());
    assert_different_registers($mem$$Register, $newval$$Register);
    const Address mem_addr = Address($mem$$Register, 0);
    z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
    __ movptr($newval$$Register, $tmp$$Register);
    __ xchgptr($newval$$Register, mem_addr);
    z_uncolor(_masm, this, $newval$$Register);
  %}

  ins_pipe(pipe_cmpxchg);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -35,8 +35,8 @@
#ifndef AMD64
  format_width = 1
#else
  // vs Assembler::narrow_oop_operand.
  format_width = 2
  // vs Assembler::narrow_oop_operand and ZGC barrier encodings.
  format_width = 3
#endif
};

@ -44,9 +44,6 @@
#if INCLUDE_JVMCI
#include "jvmci/jvmci_globals.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/support/jfrIntrinsics.hpp"
#endif

@ -38,7 +38,7 @@ enum platform_dependent_constants {
  // AVX512 intrinsics add more code in 64-bit VM,
  // Windows have more code to save/restore registers
  _compiler_stubs_code_size = 20000 LP64_ONLY(+30000) WINDOWS_ONLY(+2000),
  _final_stubs_code_size = 10000 LP64_ONLY(+20000) WINDOWS_ONLY(+2000)
  _final_stubs_code_size = 10000 LP64_ONLY(+20000) WINDOWS_ONLY(+2000) ZGC_ONLY(+20000)
};

class x86 {

@ -26,6 +26,7 @@
#include "asm/macroAssembler.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"

34
src/hotspot/os/bsd/gc/x/xLargePages_bsd.cpp
Normal file
@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/x/xLargePages.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
|
||||
void XLargePages::pd_initialize() {
|
||||
if (UseLargePages) {
|
||||
_state = Explicit;
|
||||
} else {
|
||||
_state = Disabled;
|
||||
}
|
||||
}
|
43
src/hotspot/os/bsd/gc/x/xNUMA_bsd.cpp
Normal file
@ -0,0 +1,43 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/x/xNUMA.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
void XNUMA::pd_initialize() {
|
||||
_enabled = false;
|
||||
}
|
||||
|
||||
uint32_t XNUMA::count() {
|
||||
return 1;
|
||||
}
|
||||
|
||||
uint32_t XNUMA::id() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t XNUMA::memory_id(uintptr_t addr) {
|
||||
// NUMA support not enabled, assume everything belongs to node zero
|
||||
return 0;
|
||||
}
|
181
src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp
Normal file
@ -0,0 +1,181 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/x/xErrno.hpp"
|
||||
#include "gc/x/xGlobals.hpp"
|
||||
#include "gc/x/xLargePages.inline.hpp"
|
||||
#include "gc/x/xPhysicalMemory.inline.hpp"
|
||||
#include "gc/x/xPhysicalMemoryBacking_bsd.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
#include <mach/mach.h>
|
||||
#include <mach/mach_vm.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
// The backing is represented by a reserved virtual address space, in which
|
||||
// we commit and uncommit physical memory. Multi-mapping the different heap
|
||||
// views is done by simply remapping the backing memory using mach_vm_remap().
|
||||
|
||||
static int vm_flags_superpage() {
|
||||
if (!XLargePages::is_explicit()) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const int page_size_in_megabytes = XGranuleSize >> 20;
|
||||
return page_size_in_megabytes << VM_FLAGS_SUPERPAGE_SHIFT;
|
||||
}
|
||||
|
||||
static XErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
|
||||
mach_vm_address_t remap_addr = to_addr;
|
||||
vm_prot_t remap_cur_prot;
|
||||
vm_prot_t remap_max_prot;
|
||||
|
||||
// Remap memory to an additional location
|
||||
const kern_return_t res = mach_vm_remap(mach_task_self(),
|
||||
&remap_addr,
|
||||
size,
|
||||
0 /* mask */,
|
||||
VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | vm_flags_superpage(),
|
||||
mach_task_self(),
|
||||
from_addr,
|
||||
FALSE /* copy */,
|
||||
&remap_cur_prot,
|
||||
&remap_max_prot,
|
||||
VM_INHERIT_COPY);
|
||||
|
||||
return (res == KERN_SUCCESS) ? XErrno(0) : XErrno(EINVAL);
|
||||
}
|
||||
|
||||
XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
|
||||
_base(0),
|
||||
_initialized(false) {
|
||||
|
||||
// Reserve address space for backing memory
|
||||
_base = (uintptr_t)os::reserve_memory(max_capacity);
|
||||
if (_base == 0) {
|
||||
// Failed
|
||||
log_error_pd(gc)("Failed to reserve address space for backing memory");
|
||||
return;
|
||||
}
|
||||
|
||||
// Successfully initialized
|
||||
_initialized = true;
|
||||
}
|
||||
|
||||
bool XPhysicalMemoryBacking::is_initialized() const {
|
||||
return _initialized;
|
||||
}
|
||||
|
||||
void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
|
||||
assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
|
||||
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
|
||||
|
||||
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
|
||||
offset / M, (offset + length) / M, length / M);
|
||||
|
||||
const uintptr_t addr = _base + offset;
|
||||
const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
|
||||
if (res == MAP_FAILED) {
|
||||
XErrno err;
|
||||
log_error(gc)("Failed to commit memory (%s)", err.to_string());
|
||||
return false;
|
||||
}
|
||||
|
||||
// Success
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
  // Try to commit the whole region
  if (commit_inner(offset, length)) {
    // Success
    return length;
  }

  // Failed, try to commit as much as possible
  size_t start = offset;
  size_t end = offset + length;

  for (;;) {
    length = align_down((end - start) / 2, XGranuleSize);
    if (length == 0) {
      // Done, don't commit more
      return start - offset;
    }

    if (commit_inner(start, length)) {
      // Success, try commit more
      start += length;
    } else {
      // Failed, try commit less
      end -= length;
    }
  }
}
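When the initial commit fails, the loop above backs off by halving the remaining range (aligned down to the granule size) and keeps a moving [start, end) window, so it still commits as much of the request as the system will allow. A standalone sketch of the same back-off strategy, with a stubbed commit function standing in for commit_inner() and a 2M granule value that is only an assumption for the demo:

#include <cstddef>
#include <cstdio>

const size_t kGranule = 2 * 1024 * 1024;   // stand-in for XGranuleSize

size_t align_down_to_granule(size_t n) { return n - (n % kGranule); }

// Pretend the OS can only back memory up to budget_end.
bool stub_commit(size_t offset, size_t length, size_t budget_end) {
  return offset + length <= budget_end;
}

// Same shape as XPhysicalMemoryBacking::commit(): try it all, then halve.
size_t commit_as_much_as_possible(size_t offset, size_t length, size_t budget_end) {
  if (stub_commit(offset, length, budget_end)) {
    return length;
  }
  size_t start = offset;
  size_t end = offset + length;
  for (;;) {
    length = align_down_to_granule((end - start) / 2);
    if (length == 0) {
      return start - offset;        // committed prefix
    }
    if (stub_commit(start, length, budget_end)) {
      start += length;              // success, try to commit more
    } else {
      end -= length;                // failure, try a smaller piece
    }
  }
}

int main() {
  // Ask for 8 granules (16M) when only 3 granules (6M) can be committed.
  const size_t committed = commit_as_much_as_possible(0, 8 * kGranule, 3 * kGranule);
  printf("%zuM\n", committed / (1024 * 1024));   // prints 6M
}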
|
||||
|
||||
size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
|
||||
assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
|
||||
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
|
||||
|
||||
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
|
||||
offset / M, (offset + length) / M, length / M);
|
||||
|
||||
const uintptr_t start = _base + offset;
|
||||
const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
|
||||
if (res == MAP_FAILED) {
|
||||
XErrno err;
|
||||
log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
|
||||
return 0;
|
||||
}
|
||||
|
||||
return length;
|
||||
}
|
||||
|
||||
void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
|
||||
const XErrno err = mremap(_base + offset, addr, size);
|
||||
if (err) {
|
||||
fatal("Failed to remap memory (%s)", err.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
|
||||
// Note that we must keep the address space reservation intact and just detach
|
||||
// the backing memory. For this reason we map a new anonymous, non-accessible
|
||||
// and non-reserved page over the mapping instead of actually unmapping.
|
||||
const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
|
||||
if (res == MAP_FAILED) {
|
||||
XErrno err;
|
||||
fatal("Failed to map memory (%s)", err.to_string());
|
||||
}
|
||||
}
|
48
src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.hpp
Normal file
@ -0,0 +1,48 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP
|
||||
#define OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP
|
||||
|
||||
class XPhysicalMemoryBacking {
|
||||
private:
|
||||
uintptr_t _base;
|
||||
bool _initialized;
|
||||
|
||||
bool commit_inner(size_t offset, size_t length) const;
|
||||
|
||||
public:
|
||||
XPhysicalMemoryBacking(size_t max_capacity);
|
||||
|
||||
bool is_initialized() const;
|
||||
|
||||
void warn_commit_limits(size_t max_capacity) const;
|
||||
|
||||
size_t commit(size_t offset, size_t length) const;
|
||||
size_t uncommit(size_t offset, size_t length) const;
|
||||
|
||||
void map(uintptr_t addr, size_t size, uintptr_t offset) const;
|
||||
void unmap(uintptr_t addr, size_t size) const;
|
||||
};
|
||||
|
||||
#endif // OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -23,6 +23,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zErrno.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zLargePages.inline.hpp"
|
||||
@ -97,14 +98,14 @@ void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
|
||||
assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
|
||||
bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const {
|
||||
assert(is_aligned(untype(offset), os::vm_page_size()), "Invalid offset");
|
||||
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
|
||||
|
||||
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
|
||||
offset / M, (offset + length) / M, length / M);
|
||||
untype(offset) / M, untype(offset) + length / M, length / M);
|
||||
|
||||
const uintptr_t addr = _base + offset;
|
||||
const uintptr_t addr = _base + untype(offset);
|
||||
const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
|
||||
if (res == MAP_FAILED) {
|
||||
ZErrno err;
|
||||
@ -116,7 +117,7 @@ bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
|
||||
size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const {
|
||||
// Try to commit the whole region
|
||||
if (commit_inner(offset, length)) {
|
||||
// Success
|
||||
@ -124,8 +125,8 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
|
||||
}
|
||||
|
||||
// Failed, try to commit as much as possible
|
||||
size_t start = offset;
|
||||
size_t end = offset + length;
|
||||
zoffset start = offset;
|
||||
zoffset end = offset + length;
|
||||
|
||||
for (;;) {
|
||||
length = align_down((end - start) / 2, ZGranuleSize);
|
||||
@ -144,14 +145,14 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
|
||||
}
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
|
||||
assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
|
||||
size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const {
|
||||
assert(is_aligned(untype(offset), os::vm_page_size()), "Invalid offset");
|
||||
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
|
||||
|
||||
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
|
||||
offset / M, (offset + length) / M, length / M);
|
||||
untype(offset) / M, untype(offset) + length / M, length / M);
|
||||
|
||||
const uintptr_t start = _base + offset;
|
||||
const uintptr_t start = _base + untype(offset);
|
||||
const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
|
||||
if (res == MAP_FAILED) {
|
||||
ZErrno err;
|
||||
@ -162,18 +163,18 @@ size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
|
||||
return length;
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
|
||||
const ZErrno err = mremap(_base + offset, addr, size);
|
||||
void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const {
|
||||
const ZErrno err = mremap(_base + untype(offset), untype(addr), size);
|
||||
if (err) {
|
||||
fatal("Failed to remap memory (%s)", err.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
|
||||
void ZPhysicalMemoryBacking::unmap(zaddress_unsafe addr, size_t size) const {
|
||||
// Note that we must keep the address space reservation intact and just detach
|
||||
// the backing memory. For this reason we map a new anonymous, non-accessible
|
||||
// and non-reserved page over the mapping instead of actually unmapping.
|
||||
const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
|
||||
const void* const res = mmap((void*)untype(addr), size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
|
||||
if (res == MAP_FAILED) {
|
||||
ZErrno err;
|
||||
fatal("Failed to map memory (%s)", err.to_string());
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,12 +24,14 @@
|
||||
#ifndef OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
|
||||
#define OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
|
||||
class ZPhysicalMemoryBacking {
|
||||
private:
|
||||
uintptr_t _base;
|
||||
bool _initialized;
|
||||
|
||||
bool commit_inner(size_t offset, size_t length) const;
|
||||
bool commit_inner(zoffset offset, size_t length) const;
|
||||
|
||||
public:
|
||||
ZPhysicalMemoryBacking(size_t max_capacity);
|
||||
@ -38,11 +40,11 @@ public:
|
||||
|
||||
void warn_commit_limits(size_t max_capacity) const;
|
||||
|
||||
size_t commit(size_t offset, size_t length) const;
|
||||
size_t uncommit(size_t offset, size_t length) const;
|
||||
size_t commit(zoffset offset, size_t length) const;
|
||||
size_t uncommit(zoffset offset, size_t length) const;
|
||||
|
||||
void map(uintptr_t addr, size_t size, uintptr_t offset) const;
|
||||
void unmap(uintptr_t addr, size_t size) const;
|
||||
void map(zaddress_unsafe addr, size_t size, zoffset offset) const;
|
||||
void unmap(zaddress_unsafe addr, size_t size) const;
|
||||
};
|
||||
|
||||
#endif // OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
|
||||
|
38
src/hotspot/os/linux/gc/x/xLargePages_linux.cpp
Normal file
@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/x/xLargePages.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
|
||||
void XLargePages::pd_initialize() {
|
||||
if (UseLargePages) {
|
||||
if (UseTransparentHugePages) {
|
||||
_state = Transparent;
|
||||
} else {
|
||||
_state = Explicit;
|
||||
}
|
||||
} else {
|
||||
_state = Disabled;
|
||||
}
|
||||
}
|
154
src/hotspot/os/linux/gc/x/xMountPoint_linux.cpp
Normal file
@ -0,0 +1,154 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/x/xArray.inline.hpp"
|
||||
#include "gc/x/xErrno.hpp"
|
||||
#include "gc/x/xMountPoint_linux.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
|
||||
// Mount information, see proc(5) for more details.
|
||||
#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo"
|
||||
|
||||
XMountPoint::XMountPoint(const char* filesystem, const char** preferred_mountpoints) {
|
||||
if (AllocateHeapAt != nullptr) {
|
||||
// Use specified path
|
||||
_path = os::strdup(AllocateHeapAt, mtGC);
|
||||
} else {
|
||||
// Find suitable path
|
||||
_path = find_mountpoint(filesystem, preferred_mountpoints);
|
||||
}
|
||||
}
|
||||
|
||||
XMountPoint::~XMountPoint() {
|
||||
os::free(_path);
|
||||
_path = nullptr;
|
||||
}
|
||||
|
||||
char* XMountPoint::get_mountpoint(const char* line, const char* filesystem) const {
  char* line_mountpoint = nullptr;
  char* line_filesystem = nullptr;

  // Parse line and return a newly allocated string containing the mount point if
  // the line contains a matching filesystem and the mount point is accessible by
  // the current user.
  // sscanf, using %m, will return malloced memory. Need raw ::free, not os::free.
  if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
      strcmp(line_filesystem, filesystem) != 0 ||
      access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
    // Not a matching or accessible filesystem
    ALLOW_C_FUNCTION(::free, ::free(line_mountpoint);)
    line_mountpoint = nullptr;
  }

  ALLOW_C_FUNCTION(::free, ::free(line_filesystem);)

  return line_mountpoint;
}
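get_mountpoint() above leans on two glibc/POSIX conveniences: the %ms conversion makes sscanf allocate the matched strings with malloc (hence the raw ::free), and the %*[^-]- part skips the variable-length optional fields up to the " - " separator in each /proc/self/mountinfo line. A small standalone reader using the same format string (Linux-only sketch; the "tmpfs" filter is just example data):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main() {
  FILE* f = fopen("/proc/self/mountinfo", "r");
  if (f == nullptr) {
    return 1;
  }
  char* line = nullptr;
  size_t len = 0;
  while (getline(&line, &len, f) != -1) {
    char* mountpoint = nullptr;
    char* filesystem = nullptr;
    // Same format as XMountPoint::get_mountpoint(): field 5 is the mount
    // point, the field after the " - " separator is the filesystem type.
    if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &mountpoint, &filesystem) == 2) {
      if (strcmp(filesystem, "tmpfs") == 0) {
        printf("tmpfs mounted at %s\n", mountpoint);
      }
    }
    free(mountpoint);   // %ms memory comes from malloc, so plain free()
    free(filesystem);
  }
  free(line);
  fclose(f);
}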
|
||||
|
||||
void XMountPoint::get_mountpoints(const char* filesystem, XArray<char*>* mountpoints) const {
|
||||
FILE* fd = os::fopen(PROC_SELF_MOUNTINFO, "r");
|
||||
if (fd == nullptr) {
|
||||
XErrno err;
|
||||
log_error_p(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
|
||||
return;
|
||||
}
|
||||
|
||||
char* line = nullptr;
|
||||
size_t length = 0;
|
||||
|
||||
while (getline(&line, &length, fd) != -1) {
|
||||
char* const mountpoint = get_mountpoint(line, filesystem);
|
||||
if (mountpoint != nullptr) {
|
||||
mountpoints->append(mountpoint);
|
||||
}
|
||||
}
|
||||
|
||||
// getline will return malloced memory. Need raw ::free, not os::free.
|
||||
ALLOW_C_FUNCTION(::free, ::free(line);)
|
||||
fclose(fd);
|
||||
}
|
||||
|
||||
void XMountPoint::free_mountpoints(XArray<char*>* mountpoints) const {
|
||||
XArrayIterator<char*> iter(mountpoints);
|
||||
for (char* mountpoint; iter.next(&mountpoint);) {
|
||||
ALLOW_C_FUNCTION(::free, ::free(mountpoint);) // *not* os::free
|
||||
}
|
||||
mountpoints->clear();
|
||||
}
|
||||
|
||||
char* XMountPoint::find_preferred_mountpoint(const char* filesystem,
|
||||
XArray<char*>* mountpoints,
|
||||
const char** preferred_mountpoints) const {
|
||||
// Find preferred mount point
|
||||
XArrayIterator<char*> iter1(mountpoints);
|
||||
for (char* mountpoint; iter1.next(&mountpoint);) {
|
||||
for (const char** preferred = preferred_mountpoints; *preferred != nullptr; preferred++) {
|
||||
if (!strcmp(mountpoint, *preferred)) {
|
||||
// Preferred mount point found
|
||||
return os::strdup(mountpoint, mtGC);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Preferred mount point not found
|
||||
log_error_p(gc)("More than one %s filesystem found:", filesystem);
|
||||
XArrayIterator<char*> iter2(mountpoints);
|
||||
for (char* mountpoint; iter2.next(&mountpoint);) {
|
||||
log_error_p(gc)(" %s", mountpoint);
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
char* XMountPoint::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const {
|
||||
char* path = nullptr;
|
||||
XArray<char*> mountpoints;
|
||||
|
||||
get_mountpoints(filesystem, &mountpoints);
|
||||
|
||||
if (mountpoints.length() == 0) {
|
||||
// No mount point found
|
||||
log_error_p(gc)("Failed to find an accessible %s filesystem", filesystem);
|
||||
} else if (mountpoints.length() == 1) {
|
||||
// One mount point found
|
||||
path = os::strdup(mountpoints.at(0), mtGC);
|
||||
} else {
|
||||
// More than one mount point found
|
||||
path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints);
|
||||
}
|
||||
|
||||
free_mountpoints(&mountpoints);
|
||||
|
||||
return path;
|
||||
}
|
||||
|
||||
const char* XMountPoint::get() const {
|
||||
return _path;
|
||||
}
|
52
src/hotspot/os/linux/gc/x/xMountPoint_linux.hpp
Normal file
@ -0,0 +1,52 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP
|
||||
#define OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP
|
||||
|
||||
#include "gc/x/xArray.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
class XMountPoint : public StackObj {
|
||||
private:
|
||||
char* _path;
|
||||
|
||||
char* get_mountpoint(const char* line,
|
||||
const char* filesystem) const;
|
||||
void get_mountpoints(const char* filesystem,
|
||||
XArray<char*>* mountpoints) const;
|
||||
void free_mountpoints(XArray<char*>* mountpoints) const;
|
||||
char* find_preferred_mountpoint(const char* filesystem,
|
||||
XArray<char*>* mountpoints,
|
||||
const char** preferred_mountpoints) const;
|
||||
char* find_mountpoint(const char* filesystem,
|
||||
const char** preferred_mountpoints) const;
|
||||
|
||||
public:
|
||||
XMountPoint(const char* filesystem, const char** preferred_mountpoints);
|
||||
~XMountPoint();
|
||||
|
||||
const char* get() const;
|
||||
};
|
||||
|
||||
#endif // OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP
|
71
src/hotspot/os/linux/gc/x/xNUMA_linux.cpp
Normal file
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/x/xCPU.inline.hpp"
|
||||
#include "gc/x/xErrno.hpp"
|
||||
#include "gc/x/xNUMA.hpp"
|
||||
#include "gc/x/xSyscall_linux.hpp"
|
||||
#include "os_linux.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
void XNUMA::pd_initialize() {
|
||||
_enabled = UseNUMA;
|
||||
}
|
||||
|
||||
uint32_t XNUMA::count() {
|
||||
if (!_enabled) {
|
||||
// NUMA support not enabled
|
||||
return 1;
|
||||
}
|
||||
|
||||
return os::Linux::numa_max_node() + 1;
|
||||
}
|
||||
|
||||
uint32_t XNUMA::id() {
|
||||
if (!_enabled) {
|
||||
// NUMA support not enabled
|
||||
return 0;
|
||||
}
|
||||
|
||||
return os::Linux::get_node_by_cpu(XCPU::id());
|
||||
}
|
||||
|
||||
uint32_t XNUMA::memory_id(uintptr_t addr) {
|
||||
if (!_enabled) {
|
||||
// NUMA support not enabled, assume everything belongs to node zero
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t id = (uint32_t)-1;
|
||||
|
||||
if (XSyscall::get_mempolicy((int*)&id, nullptr, 0, (void*)addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
|
||||
XErrno err;
|
||||
fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
|
||||
}
|
||||
|
||||
assert(id < count(), "Invalid NUMA id");
|
||||
|
||||
return id;
|
||||
}
|
724
src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp
Normal file
@ -0,0 +1,724 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/x/xArray.inline.hpp"
|
||||
#include "gc/x/xErrno.hpp"
|
||||
#include "gc/x/xGlobals.hpp"
|
||||
#include "gc/x/xLargePages.inline.hpp"
|
||||
#include "gc/x/xMountPoint_linux.hpp"
|
||||
#include "gc/x/xNUMA.inline.hpp"
|
||||
#include "gc/x/xPhysicalMemoryBacking_linux.hpp"
|
||||
#include "gc/x/xSyscall_linux.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "os_linux.hpp"
|
||||
#include "runtime/init.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/safefetch.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
#include <fcntl.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/statfs.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
//
|
||||
// Support for building on older Linux systems
|
||||
//
|
||||
|
||||
// memfd_create(2) flags
|
||||
#ifndef MFD_CLOEXEC
|
||||
#define MFD_CLOEXEC 0x0001U
|
||||
#endif
|
||||
#ifndef MFD_HUGETLB
|
||||
#define MFD_HUGETLB 0x0004U
|
||||
#endif
|
||||
#ifndef MFD_HUGE_2MB
|
||||
#define MFD_HUGE_2MB 0x54000000U
|
||||
#endif
|
||||
|
||||
// open(2) flags
|
||||
#ifndef O_CLOEXEC
|
||||
#define O_CLOEXEC 02000000
|
||||
#endif
|
||||
#ifndef O_TMPFILE
|
||||
#define O_TMPFILE (020000000 | O_DIRECTORY)
|
||||
#endif
|
||||
|
||||
// fallocate(2) flags
|
||||
#ifndef FALLOC_FL_KEEP_SIZE
|
||||
#define FALLOC_FL_KEEP_SIZE 0x01
|
||||
#endif
|
||||
#ifndef FALLOC_FL_PUNCH_HOLE
|
||||
#define FALLOC_FL_PUNCH_HOLE 0x02
|
||||
#endif
|
||||
|
||||
// Filesystem types, see statfs(2)
|
||||
#ifndef TMPFS_MAGIC
|
||||
#define TMPFS_MAGIC 0x01021994
|
||||
#endif
|
||||
#ifndef HUGETLBFS_MAGIC
|
||||
#define HUGETLBFS_MAGIC 0x958458f6
|
||||
#endif
|
||||
|
||||
// Filesystem names
|
||||
#define XFILESYSTEM_TMPFS "tmpfs"
|
||||
#define XFILESYSTEM_HUGETLBFS "hugetlbfs"
|
||||
|
||||
// Proc file entry for max map count
|
||||
#define XFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"
|
||||
|
||||
// Sysfs file for transparent huge page on tmpfs
|
||||
#define XFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
|
||||
|
||||
// Java heap filename
|
||||
#define XFILENAME_HEAP "java_heap"
|
||||
|
||||
// Preferred tmpfs mount points, ordered by priority
|
||||
static const char* z_preferred_tmpfs_mountpoints[] = {
|
||||
"/dev/shm",
|
||||
"/run/shm",
|
||||
nullptr
|
||||
};
|
||||
|
||||
// Preferred hugetlbfs mount points, ordered by priority
|
||||
static const char* z_preferred_hugetlbfs_mountpoints[] = {
|
||||
"/dev/hugepages",
|
||||
"/hugepages",
|
||||
nullptr
|
||||
};
|
||||
|
||||
static int z_fallocate_hugetlbfs_attempts = 3;
|
||||
static bool z_fallocate_supported = true;
|
||||
|
||||
XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
|
||||
_fd(-1),
|
||||
_filesystem(0),
|
||||
_block_size(0),
|
||||
_available(0),
|
||||
_initialized(false) {
|
||||
|
||||
// Create backing file
|
||||
_fd = create_fd(XFILENAME_HEAP);
|
||||
if (_fd == -1) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Truncate backing file
|
||||
while (ftruncate(_fd, max_capacity) == -1) {
|
||||
if (errno != EINTR) {
|
||||
XErrno err;
|
||||
log_error_p(gc)("Failed to truncate backing file (%s)", err.to_string());
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Get filesystem statistics
|
||||
struct statfs buf;
|
||||
if (fstatfs(_fd, &buf) == -1) {
|
||||
XErrno err;
|
||||
log_error_p(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
|
||||
return;
|
||||
}
|
||||
|
||||
_filesystem = buf.f_type;
|
||||
_block_size = buf.f_bsize;
|
||||
_available = buf.f_bavail * _block_size;
|
||||
|
||||
log_info_p(gc, init)("Heap Backing Filesystem: %s (" UINT64_FORMAT_X ")",
|
||||
is_tmpfs() ? XFILESYSTEM_TMPFS : is_hugetlbfs() ? XFILESYSTEM_HUGETLBFS : "other", _filesystem);
|
||||
|
||||
// Make sure the filesystem type matches requested large page type
|
||||
if (XLargePages::is_transparent() && !is_tmpfs()) {
|
||||
log_error_p(gc)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem",
|
||||
XFILESYSTEM_TMPFS);
|
||||
return;
|
||||
}
|
||||
|
||||
if (XLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
|
||||
log_error_p(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
|
||||
XFILESYSTEM_TMPFS);
|
||||
return;
|
||||
}
|
||||
|
||||
if (XLargePages::is_explicit() && !is_hugetlbfs()) {
|
||||
log_error_p(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
|
||||
"when using a %s filesystem", XFILESYSTEM_HUGETLBFS);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!XLargePages::is_explicit() && is_hugetlbfs()) {
|
||||
log_error_p(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
|
||||
XFILESYSTEM_HUGETLBFS);
|
||||
return;
|
||||
}
|
||||
|
||||
// Make sure the filesystem block size is compatible
|
||||
if (XGranuleSize % _block_size != 0) {
|
||||
log_error_p(gc)("Filesystem backing the heap has incompatible block size (" SIZE_FORMAT ")",
|
||||
_block_size);
|
||||
return;
|
||||
}
|
||||
|
||||
if (is_hugetlbfs() && _block_size != XGranuleSize) {
|
||||
log_error_p(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
|
||||
XFILESYSTEM_HUGETLBFS, _block_size, XGranuleSize);
|
||||
return;
|
||||
}
|
||||
|
||||
// Successfully initialized
|
||||
_initialized = true;
|
||||
}
|
||||
|
||||
int XPhysicalMemoryBacking::create_mem_fd(const char* name) const {
|
||||
assert(XGranuleSize == 2 * M, "Granule size must match MFD_HUGE_2MB");
|
||||
|
||||
// Create file name
|
||||
char filename[PATH_MAX];
|
||||
snprintf(filename, sizeof(filename), "%s%s", name, XLargePages::is_explicit() ? ".hugetlb" : "");
|
||||
|
||||
// Create file
|
||||
const int extra_flags = XLargePages::is_explicit() ? (MFD_HUGETLB | MFD_HUGE_2MB) : 0;
|
||||
const int fd = XSyscall::memfd_create(filename, MFD_CLOEXEC | extra_flags);
|
||||
if (fd == -1) {
|
||||
XErrno err;
|
||||
log_debug_p(gc, init)("Failed to create memfd file (%s)",
|
||||
(XLargePages::is_explicit() && (err == EINVAL || err == ENODEV)) ?
|
||||
"Hugepages (2M) not available" : err.to_string());
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_info_p(gc, init)("Heap Backing File: /memfd:%s", filename);
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
int XPhysicalMemoryBacking::create_file_fd(const char* name) const {
|
||||
const char* const filesystem = XLargePages::is_explicit()
|
||||
? XFILESYSTEM_HUGETLBFS
|
||||
: XFILESYSTEM_TMPFS;
|
||||
const char** const preferred_mountpoints = XLargePages::is_explicit()
|
||||
? z_preferred_hugetlbfs_mountpoints
|
||||
: z_preferred_tmpfs_mountpoints;
|
||||
|
||||
// Find mountpoint
|
||||
XMountPoint mountpoint(filesystem, preferred_mountpoints);
|
||||
if (mountpoint.get() == nullptr) {
|
||||
log_error_p(gc)("Use -XX:AllocateHeapAt to specify the path to a %s filesystem", filesystem);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Try to create an anonymous file using the O_TMPFILE flag. Note that this
|
||||
// flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
|
||||
const int fd_anon = os::open(mountpoint.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
|
||||
if (fd_anon == -1) {
|
||||
XErrno err;
|
||||
log_debug_p(gc, init)("Failed to create anonymous file in %s (%s)", mountpoint.get(),
|
||||
(err == EINVAL ? "Not supported" : err.to_string()));
|
||||
} else {
|
||||
// Get inode number for anonymous file
|
||||
struct stat stat_buf;
|
||||
if (fstat(fd_anon, &stat_buf) == -1) {
|
||||
XErrno err;
|
||||
log_error_pd(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_info_p(gc, init)("Heap Backing File: %s/#" UINT64_FORMAT, mountpoint.get(), (uint64_t)stat_buf.st_ino);
|
||||
|
||||
return fd_anon;
|
||||
}
|
||||
|
||||
log_debug_p(gc, init)("Falling back to open/unlink");
|
||||
|
||||
// Create file name
|
||||
char filename[PATH_MAX];
|
||||
snprintf(filename, sizeof(filename), "%s/%s.%d", mountpoint.get(), name, os::current_process_id());
|
||||
|
||||
// Create file
|
||||
const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
|
||||
if (fd == -1) {
|
||||
XErrno err;
|
||||
log_error_p(gc)("Failed to create file %s (%s)", filename, err.to_string());
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Unlink file
|
||||
if (unlink(filename) == -1) {
|
||||
XErrno err;
|
||||
log_error_p(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_info_p(gc, init)("Heap Backing File: %s", filename);
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
int XPhysicalMemoryBacking::create_fd(const char* name) const {
|
||||
if (AllocateHeapAt == nullptr) {
|
||||
// If the path is not explicitly specified, then we first try to create a memfd file
|
||||
// instead of looking for a tmpfs/hugetlbfs mount point. Note that memfd_create() might
|
||||
// not be supported at all (requires kernel >= 3.17), or it might not support large
|
||||
// pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
|
||||
// file on an accessible tmpfs or hugetlbfs mount point.
|
||||
const int fd = create_mem_fd(name);
|
||||
if (fd != -1) {
|
||||
return fd;
|
||||
}
|
||||
|
||||
log_debug_p(gc)("Falling back to searching for an accessible mount point");
|
||||
}
|
||||
|
||||
return create_file_fd(name);
|
||||
}
|
||||
|
||||
bool XPhysicalMemoryBacking::is_initialized() const {
|
||||
return _initialized;
|
||||
}
|
||||
|
||||
void XPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const {
|
||||
// Note that the available space on a tmpfs or a hugetlbfs filesystem
|
||||
// will be zero if no size limit was specified when it was mounted.
|
||||
if (_available == 0) {
|
||||
// No size limit set, skip check
|
||||
log_info_p(gc, init)("Available space on backing filesystem: N/A");
|
||||
return;
|
||||
}
|
||||
|
||||
log_info_p(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", _available / M);
|
||||
|
||||
// Warn if the filesystem doesn't currently have enough space available to hold
|
||||
// the max heap size. The max heap size will be capped if we later hit this limit
|
||||
// when trying to expand the heap.
|
||||
if (_available < max_capacity) {
|
||||
log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
|
||||
log_warning_p(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
|
||||
log_warning_p(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
|
||||
"(available", max_capacity / M);
|
||||
log_warning_p(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
|
||||
"size could", _available / M);
|
||||
log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory.");
|
||||
}
|
||||
}
|
||||
|
||||
void XPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const {
|
||||
const char* const filename = XFILENAME_PROC_MAX_MAP_COUNT;
|
||||
FILE* const file = os::fopen(filename, "r");
|
||||
if (file == nullptr) {
|
||||
// Failed to open file, skip check
|
||||
log_debug_p(gc, init)("Failed to open %s", filename);
|
||||
return;
|
||||
}
|
||||
|
||||
size_t actual_max_map_count = 0;
|
||||
const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
|
||||
fclose(file);
|
||||
if (result != 1) {
|
||||
// Failed to read file, skip check
|
||||
log_debug_p(gc, init)("Failed to read %s", filename);
|
||||
return;
|
||||
}
|
||||
|
||||
  // The required max map count is impossible to calculate exactly since subsystems
  // other than ZGC are also creating memory mappings, and we have no control over that.
  // However, ZGC tends to create the most mappings and dominate the total count.
  // In the worst case, ZGC will map each granule three times, i.e. once per heap view.
  // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
  const size_t required_max_map_count = (max_capacity / XGranuleSize) * 3 * 1.2;
  if (actual_max_map_count < required_max_map_count) {
    log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
    log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given");
    log_warning_p(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
                      max_capacity / M, filename);
    log_warning_p(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
                      "with the current", required_max_map_count, actual_max_map_count);
    log_warning_p(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
  }
}
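The estimate above (three mappings per granule, plus 20% slack for everything that is not ZGC) is easy to evaluate for a concrete heap size. For example, with the 2M granule size assumed here, a 512G max heap comes out at roughly 943718 required mappings, far above the common vm.max_map_count default of 65530:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t granule      = 2 * 1024 * 1024;               // assumed granule size (2M)
  const size_t max_capacity = 512UL * 1024 * 1024 * 1024;    // example 512G max heap
  // Same formula as warn_max_map_count() above: 3 mappings per granule + 20% slack.
  const size_t required = (max_capacity / granule) * 3 * 1.2;
  printf("required max_map_count: %zu\n", required);          // prints 943718
}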
|
||||
|
||||
void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
|
||||
// Warn if available space is too low
|
||||
warn_available_space(max_capacity);
|
||||
|
||||
// Warn if max map count is too low
|
||||
warn_max_map_count(max_capacity);
|
||||
}
|
||||
|
||||
bool XPhysicalMemoryBacking::is_tmpfs() const {
|
||||
return _filesystem == TMPFS_MAGIC;
|
||||
}
|
||||
|
||||
bool XPhysicalMemoryBacking::is_hugetlbfs() const {
|
||||
return _filesystem == HUGETLBFS_MAGIC;
|
||||
}
|
||||
|
||||
bool XPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const {
|
||||
// If the shmem_enabled file exists and is readable then we
|
||||
// know the kernel supports transparent huge pages for tmpfs.
|
||||
return access(XFILENAME_SHMEM_ENABLED, R_OK) == 0;
|
||||
}
|
||||
|
||||
XErrno XPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const {
|
||||
// On hugetlbfs, mapping a file segment will fail immediately, without
|
||||
// the need to touch the mapped pages first, if there aren't enough huge
|
||||
// pages available to back the mapping.
|
||||
void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
|
||||
if (addr == MAP_FAILED) {
|
||||
// Failed
|
||||
return errno;
|
||||
}
|
||||
|
||||
// Once mapped, the huge pages are only reserved. We need to touch them
|
||||
// to associate them with the file segment. Note that we can not punch
|
||||
// hole in file segments which only have reserved pages.
|
||||
if (touch) {
|
||||
char* const start = (char*)addr;
|
||||
char* const end = start + length;
|
||||
os::pretouch_memory(start, end, _block_size);
|
||||
}
|
||||
|
||||
// Unmap again. From now on, the huge pages that were mapped are allocated
|
||||
// to this file. There's no risk of getting a SIGBUS when mapping and
|
||||
// touching these pages again.
|
||||
if (munmap(addr, length) == -1) {
|
||||
// Failed
|
||||
return errno;
|
||||
}
|
||||
|
||||
// Success
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool safe_touch_mapping(void* addr, size_t length, size_t page_size) {
|
||||
char* const start = (char*)addr;
|
||||
char* const end = start + length;
|
||||
|
||||
// Touching a mapping that can't be backed by memory will generate a
|
||||
// SIGBUS. By using SafeFetch32 any SIGBUS will be safely caught and
|
||||
// handled. On tmpfs, doing a fetch (rather than a store) is enough
|
||||
// to cause backing pages to be allocated (there's no zero-page to
|
||||
// worry about).
|
||||
for (char *p = start; p < end; p += page_size) {
|
||||
if (SafeFetch32((int*)p, -1) == -1) {
|
||||
// Failed
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Success
|
||||
return true;
|
||||
}
|
||||
|
||||
XErrno XPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const {
|
||||
// On tmpfs, we need to touch the mapped pages to figure out
|
||||
// if there are enough pages available to back the mapping.
|
||||
void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
|
||||
if (addr == MAP_FAILED) {
|
||||
// Failed
|
||||
return errno;
|
||||
}
|
||||
|
||||
// Advise mapping to use transparent huge pages
|
||||
os::realign_memory((char*)addr, length, XGranuleSize);
|
||||
|
||||
// Touch the mapping (safely) to make sure it's backed by memory
|
||||
const bool backed = safe_touch_mapping(addr, length, _block_size);
|
||||
|
||||
// Unmap again. If successfully touched, the backing memory will
|
||||
// be allocated to this file. There's no risk of getting a SIGBUS
|
||||
// when mapping and touching these pages again.
|
||||
if (munmap(addr, length) == -1) {
|
||||
// Failed
|
||||
return errno;
|
||||
}
|
||||
|
||||
// Success
|
||||
return backed ? 0 : ENOMEM;
|
||||
}
|
||||
|
||||
XErrno XPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const {
|
||||
uint8_t data = 0;
|
||||
|
||||
// Allocate backing memory by writing to each block
|
||||
for (size_t pos = offset; pos < offset + length; pos += _block_size) {
|
||||
if (pwrite(_fd, &data, sizeof(data), pos) == -1) {
|
||||
// Failed
|
||||
return errno;
|
||||
}
|
||||
}
|
||||
|
||||
// Success
|
||||
return 0;
|
||||
}
|
||||
|
||||
XErrno XPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const {
|
||||
// fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
|
||||
// since Linux 4.3. When fallocate(2) is not supported we emulate it using
|
||||
// mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
|
||||
// (for tmpfs without transparent huge pages and other filesystem types).
|
||||
if (XLargePages::is_explicit()) {
|
||||
return fallocate_compat_mmap_hugetlbfs(offset, length, false /* touch */);
|
||||
} else if (XLargePages::is_transparent()) {
|
||||
return fallocate_compat_mmap_tmpfs(offset, length);
|
||||
} else {
|
||||
return fallocate_compat_pwrite(offset, length);
|
||||
}
|
||||
}

XErrno XPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const {
  const int mode = 0; // Allocate
  const int res = XSyscall::fallocate(_fd, mode, offset, length);
  if (res == -1) {
    // Failed
    return errno;
  }

  // Success
  return 0;
}

XErrno XPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const {
  // Using compat mode is more efficient when allocating space on hugetlbfs.
  // Note that allocating huge pages this way will only reserve them, and not
  // associate them with segments of the file. We must guarantee that we at
  // some point touch these segments, otherwise we cannot punch holes in them.
  // Also note that we need to use compat mode when using transparent huge pages,
  // since we need to use madvise(2) on the mapping before the page is allocated.
  if (z_fallocate_supported && !XLargePages::is_enabled()) {
    const XErrno err = fallocate_fill_hole_syscall(offset, length);
    if (!err) {
      // Success
      return 0;
    }

    if (err != ENOSYS && err != EOPNOTSUPP) {
      // Failed
      return err;
    }

    // Not supported
    log_debug_p(gc)("Falling back to fallocate() compatibility mode");
    z_fallocate_supported = false;
  }

  return fallocate_fill_hole_compat(offset, length);
}

XErrno XPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const {
  if (XLargePages::is_explicit()) {
    // We can only punch holes in pages that have been touched. Non-touched
    // pages are only reserved, and not associated with any specific file
    // segment. We don't know which pages have been previously touched, so
    // we always touch them here to guarantee that we can punch holes.
    const XErrno err = fallocate_compat_mmap_hugetlbfs(offset, length, true /* touch */);
    if (err) {
      // Failed
      return err;
    }
  }

  const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE;
  if (XSyscall::fallocate(_fd, mode, offset, length) == -1) {
    // Failed
    return errno;
  }

  // Success
  return 0;
}

XErrno XPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const {
  // Try first half
  const size_t offset0 = offset;
  const size_t length0 = align_up(length / 2, _block_size);
  const XErrno err0 = fallocate(punch_hole, offset0, length0);
  if (err0) {
    return err0;
  }

  // Try second half
  const size_t offset1 = offset0 + length0;
  const size_t length1 = length - length0;
  const XErrno err1 = fallocate(punch_hole, offset1, length1);
  if (err1) {
    return err1;
  }

  // Success
  return 0;
}

XErrno XPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const {
  assert(is_aligned(offset, _block_size), "Invalid offset");
  assert(is_aligned(length, _block_size), "Invalid length");

  const XErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length);
  if (err == EINTR && length > _block_size) {
    // Calling fallocate(2) with a large length can take a long time to
    // complete. When running profilers, such as VTune, this syscall will
    // be constantly interrupted by signals. Expanding the file in smaller
    // steps avoids this problem.
    return split_and_fallocate(punch_hole, offset, length);
  }

  return err;
}

bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                      offset / M, (offset + length) / M, length / M);

retry:
  const XErrno err = fallocate(false /* punch_hole */, offset, length);
  if (err) {
    if (err == ENOSPC && !is_init_completed() && XLargePages::is_explicit() && z_fallocate_hugetlbfs_attempts-- > 0) {
      // If we fail to allocate during initialization, due to lack of space on
      // the hugetlbfs filesystem, then we wait and retry a few times before
      // giving up. Otherwise there is a risk that running JVMs back-to-back
      // will fail, since there is a delay between process termination and the
      // huge pages owned by that process being returned to the huge page pool
      // and made available for new allocations.
      log_debug_p(gc, init)("Failed to commit memory (%s), retrying", err.to_string());

      // Wait and retry in one second, in the hope that huge pages will be
      // available by then.
      sleep(1);
      goto retry;
    }

    // Failed
    log_error_p(gc)("Failed to commit memory (%s)", err.to_string());
    return false;
  }

  // Success
  return true;
}

static int offset_to_node(size_t offset) {
  const GrowableArray<int>* mapping = os::Linux::numa_nindex_to_node();
  const size_t nindex = (offset >> XGranuleSizeShift) % mapping->length();
  return mapping->at((int)nindex);
}

size_t XPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const {
  size_t committed = 0;

  // Commit one granule at a time, so that each granule
  // can be allocated from a different preferred node.
  while (committed < length) {
    const size_t granule_offset = offset + committed;

    // Setup NUMA policy to allocate memory from a preferred node
    os::Linux::numa_set_preferred(offset_to_node(granule_offset));

    if (!commit_inner(granule_offset, XGranuleSize)) {
      // Failed
      break;
    }

    committed += XGranuleSize;
  }

  // Restore NUMA policy
  os::Linux::numa_set_preferred(-1);

  return committed;
}
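
To make the interleaving concrete, here is a small self-contained illustration (not part of the patch) of how consecutive granule offsets cycle round-robin over a hypothetical node mapping; this is the same arithmetic that offset_to_node() performs. The node mapping and granule size are made up for the example.

#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical nindex-to-node mapping and a 2M granule, for illustration
  const int node_mapping[] = {0, 1, 2, 3};
  const size_t num_nodes = sizeof(node_mapping) / sizeof(node_mapping[0]);
  const size_t granule_shift = 21;

  for (size_t offset = 0; offset < (size_t(8) << granule_shift); offset += size_t(1) << granule_shift) {
    const size_t nindex = (offset >> granule_shift) % num_nodes;
    printf("granule at offset %zuM -> preferred node %d\n", offset / (1024 * 1024), node_mapping[nindex]);
  }
  return 0;
}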

size_t XPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const {
  // Try to commit the whole region
  if (commit_inner(offset, length)) {
    // Success
    return length;
  }

  // Failed, try to commit as much as possible
  size_t start = offset;
  size_t end = offset + length;

  for (;;) {
    length = align_down((end - start) / 2, XGranuleSize);
    if (length < XGranuleSize) {
      // Done, don't commit more
      return start - offset;
    }

    if (commit_inner(start, length)) {
      // Success, try commit more
      start += length;
    } else {
      // Failed, try commit less
      end -= length;
    }
  }
}
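
As a worked example of the halving fallback above (illustrative only), the following stand-alone snippet replaces commit_inner() with a fake limit of 6 committable granules out of 16 and shows that the loop converges on exactly that amount.

#include <cstddef>
#include <cstdio>

static const size_t granule = 2 * 1024 * 1024; // 2M, illustrative

// Stand-in for commit_inner(): pretend only the first 'available' bytes commit
static bool fake_commit(size_t start, size_t length, size_t available) {
  return start + length <= available;
}

int main() {
  const size_t offset = 0;
  const size_t total = 16 * granule;
  const size_t available = 6 * granule;

  size_t committed = 0;
  if (fake_commit(offset, total, available)) {
    committed = total; // Whole region fit
  } else {
    // Failed, commit as much as possible by halving the probe size
    size_t start = offset;
    size_t end = offset + total;
    for (;;) {
      const size_t length = ((end - start) / 2) / granule * granule; // align down
      if (length < granule) {
        break; // Done, [offset, start) is committed
      }
      if (fake_commit(start, length, available)) {
        start += length; // Success, try to commit more
      } else {
        end -= length;   // Failure, try to commit less
      }
    }
    committed = start - offset;
  }
  printf("Committed %zu of %zu granules\n", committed / granule, total / granule);
  return 0;
}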

size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
  if (XNUMA::is_enabled() && !XLargePages::is_explicit()) {
    // To get granule-level NUMA interleaving when using non-large pages,
    // we must explicitly interleave the memory at commit/fallocate time.
    return commit_numa_interleaved(offset, length);
  }

  return commit_default(offset, length);
}

size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                      offset / M, (offset + length) / M, length / M);

  const XErrno err = fallocate(true /* punch_hole */, offset, length);
  if (err) {
    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
    return 0;
  }

  return length;
}

void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
  const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset);
  if (res == MAP_FAILED) {
    XErrno err;
    fatal("Failed to map memory (%s)", err.to_string());
  }
}

void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
  // Note that we must keep the address space reservation intact and just detach
  // the backing memory. For this reason we map a new anonymous, non-accessible
  // and non-reserved page over the mapping instead of actually unmapping.
  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
  if (res == MAP_FAILED) {
    XErrno err;
    fatal("Failed to map memory (%s)", err.to_string());
  }
}
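
The map/unmap pair above relies on an address space reservation that is never handed back to the OS while the heap lives. A minimal stand-alone sketch of that pattern on Linux follows; fd is assumed to be a pre-sized tmpfs, hugetlbfs or memfd file descriptor, and error handling is reduced to booleans.

#include <sys/mman.h>
#include <cstddef>

// Reserve address space without backing it with memory
static void* reserve(size_t size) {
  return mmap(nullptr, size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
}

// Attach backing by mapping a file segment over part of the reservation
static bool map_backing(void* addr, size_t size, int fd, size_t offset) {
  return mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, fd, offset) != MAP_FAILED;
}

// Detach the backing but keep the range reserved, by overlaying an anonymous,
// inaccessible, non-reserved mapping instead of calling munmap()
static bool detach_backing(void* addr, size_t size) {
  return mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0) != MAP_FAILED;
}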
|
77
src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.hpp
Normal file
@ -0,0 +1,77 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP
|
||||
#define OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP
|
||||
|
||||
class XErrno;
|
||||
|
||||
class XPhysicalMemoryBacking {
|
||||
private:
|
||||
int _fd;
|
||||
size_t _size;
|
||||
uint64_t _filesystem;
|
||||
size_t _block_size;
|
||||
size_t _available;
|
||||
bool _initialized;
|
||||
|
||||
void warn_available_space(size_t max_capacity) const;
|
||||
void warn_max_map_count(size_t max_capacity) const;
|
||||
|
||||
int create_mem_fd(const char* name) const;
|
||||
int create_file_fd(const char* name) const;
|
||||
int create_fd(const char* name) const;
|
||||
|
||||
bool is_tmpfs() const;
|
||||
bool is_hugetlbfs() const;
|
||||
bool tmpfs_supports_transparent_huge_pages() const;
|
||||
|
||||
XErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const;
|
||||
XErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const;
|
||||
XErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
|
||||
XErrno fallocate_fill_hole_compat(size_t offset, size_t length) const;
|
||||
XErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const;
|
||||
XErrno fallocate_fill_hole(size_t offset, size_t length) const;
|
||||
XErrno fallocate_punch_hole(size_t offset, size_t length) const;
|
||||
XErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const;
|
||||
XErrno fallocate(bool punch_hole, size_t offset, size_t length) const;
|
||||
|
||||
bool commit_inner(size_t offset, size_t length) const;
|
||||
size_t commit_numa_interleaved(size_t offset, size_t length) const;
|
||||
size_t commit_default(size_t offset, size_t length) const;
|
||||
|
||||
public:
|
||||
XPhysicalMemoryBacking(size_t max_capacity);
|
||||
|
||||
bool is_initialized() const;
|
||||
|
||||
void warn_commit_limits(size_t max_capacity) const;
|
||||
|
||||
size_t commit(size_t offset, size_t length) const;
|
||||
size_t uncommit(size_t offset, size_t length) const;
|
||||
|
||||
void map(uintptr_t addr, size_t size, uintptr_t offset) const;
|
||||
void unmap(uintptr_t addr, size_t size) const;
|
||||
};
|
||||
|
||||
#endif // OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP
|
40
src/hotspot/os/linux/gc/x/xSyscall_linux.cpp
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
 */

#include "precompiled.hpp"
#include "gc/x/xSyscall_linux.hpp"
#include OS_CPU_HEADER(gc/x/xSyscall)

#include <unistd.h>

int XSyscall::memfd_create(const char *name, unsigned int flags) {
  return syscall(SYS_memfd_create, name, flags);
}

int XSyscall::fallocate(int fd, int mode, size_t offset, size_t length) {
  return syscall(SYS_fallocate, fd, mode, offset, length);
}

long XSyscall::get_mempolicy(int* mode, unsigned long* nodemask, unsigned long maxnode, void* addr, unsigned long flags) {
  return syscall(SYS_get_mempolicy, mode, nodemask, maxnode, addr, flags);
}
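
A hedged usage sketch of these wrappers (the file name and sizes are made up for illustration): create an anonymous memory file, give it backing with fallocate(), and map the same physical memory at two addresses — the multi-mapping that the heap backing builds on. MFD_CLOEXEC is defined locally in case the libc headers predate memfd_create.

#include "gc/x/xSyscall_linux.hpp"

#include <sys/mman.h>

#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif

// Create an anonymous memory file, allocate 'size' bytes of backing, and map
// the same physical range at two different virtual addresses.
static bool create_and_double_map(size_t size, void** view0, void** view1) {
  const int fd = XSyscall::memfd_create("java_heap_example", MFD_CLOEXEC);
  if (fd == -1) {
    return false;
  }

  if (XSyscall::fallocate(fd, 0 /* mode: allocate */, 0 /* offset */, size) == -1) {
    return false;
  }

  *view0 = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  *view1 = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  return *view0 != MAP_FAILED && *view1 != MAP_FAILED;
}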
|
45
src/hotspot/os/linux/gc/x/xSyscall_linux.hpp
Normal file
@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef OS_LINUX_GC_X_XSYSCALL_LINUX_HPP
|
||||
#define OS_LINUX_GC_X_XSYSCALL_LINUX_HPP
|
||||
|
||||
#include "memory/allStatic.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
// Flags for get_mempolicy()
|
||||
#ifndef MPOL_F_NODE
|
||||
#define MPOL_F_NODE (1<<0)
|
||||
#endif
|
||||
#ifndef MPOL_F_ADDR
|
||||
#define MPOL_F_ADDR (1<<1)
|
||||
#endif
|
||||
|
||||
class XSyscall : public AllStatic {
|
||||
public:
|
||||
static int memfd_create(const char* name, unsigned int flags);
|
||||
static int fallocate(int fd, int mode, size_t offset, size_t length);
|
||||
static long get_mempolicy(int* mode, unsigned long* nodemask, unsigned long maxnode, void* addr, unsigned long flags);
|
||||
};
|
||||
|
||||
#endif // OS_LINUX_GC_X_XSYSCALL_LINUX_HPP
|
@ -23,6 +23,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zArray.inline.hpp"
|
||||
#include "gc/z/zErrno.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
@ -385,11 +386,11 @@ bool ZPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const {
|
||||
return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(zoffset offset, size_t length, bool touch) const {
|
||||
// On hugetlbfs, mapping a file segment will fail immediately, without
|
||||
// the need to touch the mapped pages first, if there aren't enough huge
|
||||
// pages available to back the mapping.
|
||||
void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
|
||||
void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, untype(offset));
|
||||
if (addr == MAP_FAILED) {
|
||||
// Failed
|
||||
return errno;
|
||||
@ -436,10 +437,10 @@ static bool safe_touch_mapping(void* addr, size_t length, size_t page_size) {
|
||||
return true;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(zoffset offset, size_t length) const {
|
||||
// On tmpfs, we need to touch the mapped pages to figure out
|
||||
// if there are enough pages available to back the mapping.
|
||||
void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
|
||||
void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, untype(offset));
|
||||
if (addr == MAP_FAILED) {
|
||||
// Failed
|
||||
return errno;
|
||||
@ -463,12 +464,12 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t
|
||||
return backed ? 0 : ENOMEM;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(zoffset offset, size_t length) const {
|
||||
uint8_t data = 0;
|
||||
|
||||
// Allocate backing memory by writing to each block
|
||||
for (size_t pos = offset; pos < offset + length; pos += _block_size) {
|
||||
if (pwrite(_fd, &data, sizeof(data), pos) == -1) {
|
||||
for (zoffset pos = offset; pos < offset + length; pos += _block_size) {
|
||||
if (pwrite(_fd, &data, sizeof(data), untype(pos)) == -1) {
|
||||
// Failed
|
||||
return errno;
|
||||
}
|
||||
@ -478,7 +479,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t len
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(zoffset offset, size_t length) const {
|
||||
// fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
|
||||
// since Linux 4.3. When fallocate(2) is not supported we emulate it using
|
||||
// mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
|
||||
@ -492,9 +493,9 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t
|
||||
}
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(zoffset offset, size_t length) const {
|
||||
const int mode = 0; // Allocate
|
||||
const int res = ZSyscall::fallocate(_fd, mode, offset, length);
|
||||
const int res = ZSyscall::fallocate(_fd, mode, untype(offset), length);
|
||||
if (res == -1) {
|
||||
// Failed
|
||||
return errno;
|
||||
@ -504,7 +505,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(zoffset offset, size_t length) const {
|
||||
// Using compat mode is more efficient when allocating space on hugetlbfs.
|
||||
// Note that allocating huge pages this way will only reserve them, and not
|
||||
// associate them with segments of the file. We must guarantee that we at
|
||||
@ -531,7 +532,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length)
|
||||
return fallocate_fill_hole_compat(offset, length);
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(zoffset offset, size_t length) const {
|
||||
if (ZLargePages::is_explicit()) {
|
||||
// We can only punch hole in pages that have been touched. Non-touched
|
||||
// pages are only reserved, and not associated with any specific file
|
||||
@ -545,7 +546,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length
|
||||
}
|
||||
|
||||
const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE;
|
||||
if (ZSyscall::fallocate(_fd, mode, offset, length) == -1) {
|
||||
if (ZSyscall::fallocate(_fd, mode, untype(offset), length) == -1) {
|
||||
// Failed
|
||||
return errno;
|
||||
}
|
||||
@ -554,9 +555,9 @@ ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, zoffset offset, size_t length) const {
|
||||
// Try first half
|
||||
const size_t offset0 = offset;
|
||||
const zoffset offset0 = offset;
|
||||
const size_t length0 = align_up(length / 2, _block_size);
|
||||
const ZErrno err0 = fallocate(punch_hole, offset0, length0);
|
||||
if (err0) {
|
||||
@ -564,7 +565,7 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offse
|
||||
}
|
||||
|
||||
// Try second half
|
||||
const size_t offset1 = offset0 + length0;
|
||||
const zoffset offset1 = offset0 + length0;
|
||||
const size_t length1 = length - length0;
|
||||
const ZErrno err1 = fallocate(punch_hole, offset1, length1);
|
||||
if (err1) {
|
||||
@ -575,8 +576,8 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offse
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const {
|
||||
assert(is_aligned(offset, _block_size), "Invalid offset");
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, zoffset offset, size_t length) const {
|
||||
assert(is_aligned(untype(offset), _block_size), "Invalid offset");
|
||||
assert(is_aligned(length, _block_size), "Invalid length");
|
||||
|
||||
const ZErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length);
|
||||
@ -591,9 +592,9 @@ ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t
|
||||
return err;
|
||||
}
|
||||
|
||||
bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
|
||||
bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const {
|
||||
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
|
||||
offset / M, (offset + length) / M, length / M);
|
||||
untype(offset) / M, untype(offset + length) / M, length / M);
|
||||
|
||||
retry:
|
||||
const ZErrno err = fallocate(false /* punch_hole */, offset, length);
|
||||
@ -622,19 +623,19 @@ retry:
|
||||
return true;
|
||||
}
|
||||
|
||||
static int offset_to_node(size_t offset) {
|
||||
static int offset_to_node(zoffset offset) {
|
||||
const GrowableArray<int>* mapping = os::Linux::numa_nindex_to_node();
|
||||
const size_t nindex = (offset >> ZGranuleSizeShift) % mapping->length();
|
||||
const size_t nindex = (untype(offset) >> ZGranuleSizeShift) % mapping->length();
|
||||
return mapping->at((int)nindex);
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const {
|
||||
size_t ZPhysicalMemoryBacking::commit_numa_interleaved(zoffset offset, size_t length) const {
|
||||
size_t committed = 0;
|
||||
|
||||
// Commit one granule at a time, so that each granule
|
||||
// can be allocated from a different preferred node.
|
||||
while (committed < length) {
|
||||
const size_t granule_offset = offset + committed;
|
||||
const zoffset granule_offset = offset + committed;
|
||||
|
||||
// Setup NUMA policy to allocate memory from a preferred node
|
||||
os::Linux::numa_set_preferred(offset_to_node(granule_offset));
|
||||
@ -653,7 +654,7 @@ size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t len
|
||||
return committed;
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const {
|
||||
size_t ZPhysicalMemoryBacking::commit_default(zoffset offset, size_t length) const {
|
||||
// Try to commit the whole region
|
||||
if (commit_inner(offset, length)) {
|
||||
// Success
|
||||
@ -661,8 +662,8 @@ size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) cons
|
||||
}
|
||||
|
||||
// Failed, try to commit as much as possible
|
||||
size_t start = offset;
|
||||
size_t end = offset + length;
|
||||
zoffset start = offset;
|
||||
zoffset end = offset + length;
|
||||
|
||||
for (;;) {
|
||||
length = align_down((end - start) / 2, ZGranuleSize);
|
||||
@ -681,7 +682,7 @@ size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) cons
|
||||
}
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
|
||||
size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const {
|
||||
if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) {
|
||||
// To get granule-level NUMA interleaving when using non-large pages,
|
||||
// we must explicitly interleave the memory at commit/fallocate time.
|
||||
@ -691,9 +692,9 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
|
||||
return commit_default(offset, length);
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
|
||||
size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const {
|
||||
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
|
||||
offset / M, (offset + length) / M, length / M);
|
||||
untype(offset) / M, untype(offset + length) / M, length / M);
|
||||
|
||||
const ZErrno err = fallocate(true /* punch_hole */, offset, length);
|
||||
if (err) {
|
||||
@ -704,19 +705,19 @@ size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
|
||||
return length;
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
|
||||
const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset);
|
||||
void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const {
|
||||
const void* const res = mmap((void*)untype(addr), size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, untype(offset));
|
||||
if (res == MAP_FAILED) {
|
||||
ZErrno err;
|
||||
fatal("Failed to map memory (%s)", err.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
|
||||
void ZPhysicalMemoryBacking::unmap(zaddress_unsafe addr, size_t size) const {
|
||||
// Note that we must keep the address space reservation intact and just detach
|
||||
// the backing memory. For this reason we map a new anonymous, non-accessible
|
||||
// and non-reserved page over the mapping instead of actually unmapping.
|
||||
const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
|
||||
const void* const res = mmap((void*)untype(addr), size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
|
||||
if (res == MAP_FAILED) {
|
||||
ZErrno err;
|
||||
fatal("Failed to map memory (%s)", err.to_string());
|
||||
|
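
The hunks above replace raw size_t offsets with the strongly typed zoffset and only unwrap them with untype() at the syscall boundary. The real zoffset and untype() live in zAddress.hpp and are richer than this, but a minimal sketch of the idea looks roughly like the following; the _sketch suffix marks it as illustrative.

#include <cstdint>
#include <cstddef>

// Illustrative only: a thin wrapper that keeps heap offsets from being mixed
// up with arbitrary integers at compile time.
enum class zoffset_sketch : uintptr_t {};

constexpr uintptr_t untype(zoffset_sketch offset) {
  return static_cast<uintptr_t>(offset);
}

constexpr zoffset_sketch operator+(zoffset_sketch offset, size_t length) {
  return static_cast<zoffset_sketch>(untype(offset) + length);
}

// Typed offsets flow through the GC-internal code:
//   size_t commit(zoffset_sketch offset, size_t length);
// and are only unwrapped where the OS interface needs a plain integer:
//   fallocate(fd, 0, untype(offset), length);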
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,6 +24,8 @@
|
||||
#ifndef OS_LINUX_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_HPP
|
||||
#define OS_LINUX_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_HPP
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
|
||||
class ZErrno;
|
||||
|
||||
class ZPhysicalMemoryBacking {
|
||||
@ -46,19 +48,19 @@ private:
|
||||
bool is_hugetlbfs() const;
|
||||
bool tmpfs_supports_transparent_huge_pages() const;
|
||||
|
||||
ZErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const;
|
||||
ZErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const;
|
||||
ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole_compat(size_t offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole(size_t offset, size_t length) const;
|
||||
ZErrno fallocate_punch_hole(size_t offset, size_t length) const;
|
||||
ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const;
|
||||
ZErrno fallocate(bool punch_hole, size_t offset, size_t length) const;
|
||||
ZErrno fallocate_compat_mmap_hugetlbfs(zoffset offset, size_t length, bool touch) const;
|
||||
ZErrno fallocate_compat_mmap_tmpfs(zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_compat_pwrite(zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole_compat(zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole_syscall(zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole(zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_punch_hole(zoffset offset, size_t length) const;
|
||||
ZErrno split_and_fallocate(bool punch_hole, zoffset offset, size_t length) const;
|
||||
ZErrno fallocate(bool punch_hole, zoffset offset, size_t length) const;
|
||||
|
||||
bool commit_inner(size_t offset, size_t length) const;
|
||||
size_t commit_numa_interleaved(size_t offset, size_t length) const;
|
||||
size_t commit_default(size_t offset, size_t length) const;
|
||||
bool commit_inner(zoffset offset, size_t length) const;
|
||||
size_t commit_numa_interleaved(zoffset offset, size_t length) const;
|
||||
size_t commit_default(zoffset offset, size_t length) const;
|
||||
|
||||
public:
|
||||
ZPhysicalMemoryBacking(size_t max_capacity);
|
||||
@ -67,11 +69,11 @@ public:
|
||||
|
||||
void warn_commit_limits(size_t max_capacity) const;
|
||||
|
||||
size_t commit(size_t offset, size_t length) const;
|
||||
size_t uncommit(size_t offset, size_t length) const;
|
||||
size_t commit(zoffset offset, size_t length) const;
|
||||
size_t uncommit(zoffset offset, size_t length) const;
|
||||
|
||||
void map(uintptr_t addr, size_t size, uintptr_t offset) const;
|
||||
void unmap(uintptr_t addr, size_t size) const;
|
||||
void map(zaddress_unsafe addr, size_t size, zoffset offset) const;
|
||||
void unmap(zaddress_unsafe addr, size_t size) const;
|
||||
};
|
||||
|
||||
#endif // OS_LINUX_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_HPP
|
||||
|
29
src/hotspot/os/posix/gc/x/xArguments_posix.cpp
Normal file
@ -0,0 +1,29 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/x/xArguments.hpp"
|
||||
|
||||
bool XArguments::is_os_supported() {
|
||||
return true;
|
||||
}
|
29
src/hotspot/os/posix/gc/x/xInitialize_posix.cpp
Normal file
@ -0,0 +1,29 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/x/xInitialize.hpp"
|
||||
|
||||
void XInitialize::pd_initialize() {
|
||||
// Does nothing
|
||||
}
|
43
src/hotspot/os/posix/gc/x/xUtils_posix.cpp
Normal file
@ -0,0 +1,43 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/x/xUtils.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
uintptr_t XUtils::alloc_aligned(size_t alignment, size_t size) {
|
||||
void* res = nullptr;
|
||||
|
||||
// Use raw posix_memalign as long as we have no wrapper for it
|
||||
ALLOW_C_FUNCTION(::posix_memalign, int rc = posix_memalign(&res, alignment, size);)
|
||||
if (rc != 0) {
|
||||
fatal("posix_memalign() failed");
|
||||
}
|
||||
|
||||
memset(res, 0, size);
|
||||
|
||||
return (uintptr_t)res;
|
||||
}
|
60
src/hotspot/os/posix/gc/x/xVirtualMemory_posix.cpp
Normal file
@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/x/xAddress.inline.hpp"
|
||||
#include "gc/x/xVirtualMemory.hpp"
|
||||
#include "logging/log.hpp"
|
||||
|
||||
#include <sys/mman.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
void XVirtualMemoryManager::pd_initialize_before_reserve() {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
void XVirtualMemoryManager::pd_initialize_after_reserve() {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
bool XVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) {
|
||||
const uintptr_t res = (uintptr_t)mmap((void*)addr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
|
||||
if (res == (uintptr_t)MAP_FAILED) {
|
||||
// Failed to reserve memory
|
||||
return false;
|
||||
}
|
||||
|
||||
if (res != addr) {
|
||||
// Failed to reserve memory at the requested address
|
||||
munmap((void*)res, size);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Success
|
||||
return true;
|
||||
}
|
||||
|
||||
void XVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) {
|
||||
const int res = munmap((void*)addr, size);
|
||||
assert(res == 0, "Failed to unmap memory");
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,6 +24,6 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/z/zArguments.hpp"
|
||||
|
||||
bool ZArguments::is_os_supported() const {
|
||||
bool ZArguments::is_os_supported() {
|
||||
return true;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -37,16 +37,16 @@ void ZVirtualMemoryManager::pd_initialize_after_reserve() {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) {
|
||||
const uintptr_t res = (uintptr_t)mmap((void*)addr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
|
||||
if (res == (uintptr_t)MAP_FAILED) {
|
||||
bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) {
|
||||
void* const res = mmap((void*)untype(addr), size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
|
||||
if (res == MAP_FAILED) {
|
||||
// Failed to reserve memory
|
||||
return false;
|
||||
}
|
||||
|
||||
if (res != addr) {
|
||||
if (res != (void*)untype(addr)) {
|
||||
// Failed to reserve memory at the requested address
|
||||
munmap((void*)res, size);
|
||||
munmap(res, size);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -54,7 +54,7 @@ bool ZVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) {
|
||||
return true;
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) {
|
||||
const int res = munmap((void*)addr, size);
|
||||
void ZVirtualMemoryManager::pd_unreserve(zaddress_unsafe addr, size_t size) {
|
||||
const int res = munmap((void*)untype(addr), size);
|
||||
assert(res == 0, "Failed to unmap memory");
|
||||
}
|
||||
|
30
src/hotspot/os/windows/gc/x/xArguments_windows.cpp
Normal file
@ -0,0 +1,30 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/x/xArguments.hpp"
|
||||
#include "gc/x/xSyscall_windows.hpp"
|
||||
|
||||
bool XArguments::is_os_supported() {
|
||||
return XSyscall::is_supported();
|
||||
}
|
30
src/hotspot/os/windows/gc/x/xInitialize_windows.cpp
Normal file
@ -0,0 +1,30 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/x/xInitialize.hpp"
|
||||
#include "gc/x/xSyscall_windows.hpp"
|
||||
|
||||
void XInitialize::pd_initialize() {
|
||||
XSyscall::initialize();
|
||||
}
|
40
src/hotspot/os/windows/gc/x/xLargePages_windows.cpp
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/x/xLargePages.hpp"
|
||||
#include "gc/x/xSyscall_windows.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
|
||||
void XLargePages::pd_initialize() {
|
||||
if (UseLargePages) {
|
||||
if (XSyscall::is_large_pages_supported()) {
|
||||
_state = Explicit;
|
||||
return;
|
||||
}
|
||||
log_info_p(gc, init)("Shared large pages not supported on this OS version");
|
||||
}
|
||||
|
||||
_state = Disabled;
|
||||
}
|
310
src/hotspot/os/windows/gc/x/xMapper_windows.cpp
Normal file
@ -0,0 +1,310 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/x/xMapper_windows.hpp"
|
||||
#include "gc/x/xSyscall_windows.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
#include <Windows.h>
|
||||
|
||||
// Memory reservation, commit, views, and placeholders.
//
// To be able to up-front reserve address space for the heap views, and later
// multi-map the heap views to the same physical memory, without ever losing the
// reservation of the reserved address space, we use "placeholders".
//
// These placeholders block out the address space from being used by other parts
// of the process. To commit memory in this address space, the placeholder must
// be replaced by anonymous memory, or replaced by mapping a view against a
// paging file mapping. We use the latter to support multi-mapping.
//
// We want to be able to dynamically commit and uncommit the physical memory of
// the heap (and also unmap ZPages), in granules of ZGranuleSize bytes. There is
// no way to grow and shrink the committed memory of a paging file mapping.
// Therefore, we create multiple granule-sized page file mappings. The memory is
// committed by creating a page file mapping, mapping a view against it, committing
// the memory, and then unmapping the view. The memory will stay committed until
// all views are unmapped, and the paging file mapping handle is closed.
//
// When replacing a placeholder address space reservation with a mapped view
// against a paging file mapping, the virtual address space must exactly match
// an existing placeholder's address and size. Therefore we only deal with
// granule-sized placeholders at this layer. Higher layers that keep track of
// reserved available address space can (and will) coalesce placeholders, but
// they will be split before being used.
|
||||
|
||||
#define fatal_error(msg, addr, size) \
|
||||
fatal(msg ": " PTR_FORMAT " " SIZE_FORMAT "M (%d)", \
|
||||
(addr), (size) / M, GetLastError())
|
||||
|
||||
uintptr_t XMapper::reserve(uintptr_t addr, size_t size) {
|
||||
void* const res = XSyscall::VirtualAlloc2(
|
||||
GetCurrentProcess(), // Process
|
||||
(void*)addr, // BaseAddress
|
||||
size, // Size
|
||||
MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, // AllocationType
|
||||
PAGE_NOACCESS, // PageProtection
|
||||
nullptr, // ExtendedParameters
|
||||
0 // ParameterCount
|
||||
);
|
||||
|
||||
// Caller responsible for error handling
|
||||
return (uintptr_t)res;
|
||||
}
|
||||
|
||||
void XMapper::unreserve(uintptr_t addr, size_t size) {
|
||||
const bool res = XSyscall::VirtualFreeEx(
|
||||
GetCurrentProcess(), // hProcess
|
||||
(void*)addr, // lpAddress
|
||||
size, // dwSize
|
||||
MEM_RELEASE // dwFreeType
|
||||
);
|
||||
|
||||
if (!res) {
|
||||
fatal_error("Failed to unreserve memory", addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
HANDLE XMapper::create_paging_file_mapping(size_t size) {
|
||||
  // Create mapping with SEC_RESERVE instead of SEC_COMMIT.
  //
  // We use MapViewOfFile3 for two different reasons:
  // 1) When committing memory for the created paging file
  // 2) When mapping a view of the memory created in (1)
  //
  // The non-platform code is only set up to deal with out-of-memory
  // errors in (1). By using SEC_RESERVE, we prevent MapViewOfFile3
  // from failing because of "commit limit" checks. To actually commit
  // memory in (1), a call to VirtualAlloc2 is done.
|
||||
|
||||
HANDLE const res = XSyscall::CreateFileMappingW(
|
||||
INVALID_HANDLE_VALUE, // hFile
|
||||
nullptr, // lpFileMappingAttribute
|
||||
PAGE_READWRITE | SEC_RESERVE, // flProtect
|
||||
size >> 32, // dwMaximumSizeHigh
|
||||
size & 0xFFFFFFFF, // dwMaximumSizeLow
|
||||
nullptr // lpName
|
||||
);
|
||||
|
||||
// Caller responsible for error handling
|
||||
return res;
|
||||
}
|
||||
|
||||
bool XMapper::commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size) {
|
||||
const uintptr_t addr = map_view_no_placeholder(file_handle, file_offset, size);
|
||||
if (addr == 0) {
|
||||
log_error(gc)("Failed to map view of paging file mapping (%d)", GetLastError());
|
||||
return false;
|
||||
}
|
||||
|
||||
const uintptr_t res = commit(addr, size);
|
||||
if (res != addr) {
|
||||
log_error(gc)("Failed to commit memory (%d)", GetLastError());
|
||||
}
|
||||
|
||||
unmap_view_no_placeholder(addr, size);
|
||||
|
||||
return res == addr;
|
||||
}
|
||||
|
||||
uintptr_t XMapper::map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size) {
|
||||
void* const res = XSyscall::MapViewOfFile3(
|
||||
file_handle, // FileMapping
|
||||
GetCurrentProcess(), // ProcessHandle
|
||||
nullptr, // BaseAddress
|
||||
file_offset, // Offset
|
||||
size, // ViewSize
|
||||
0, // AllocationType
|
||||
PAGE_NOACCESS, // PageProtection
|
||||
nullptr, // ExtendedParameters
|
||||
0 // ParameterCount
|
||||
);
|
||||
|
||||
// Caller responsible for error handling
|
||||
return (uintptr_t)res;
|
||||
}
|
||||
|
||||
void XMapper::unmap_view_no_placeholder(uintptr_t addr, size_t size) {
|
||||
const bool res = XSyscall::UnmapViewOfFile2(
|
||||
GetCurrentProcess(), // ProcessHandle
|
||||
(void*)addr, // BaseAddress
|
||||
0 // UnmapFlags
|
||||
);
|
||||
|
||||
if (!res) {
|
||||
fatal_error("Failed to unmap memory", addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
uintptr_t XMapper::commit(uintptr_t addr, size_t size) {
|
||||
void* const res = XSyscall::VirtualAlloc2(
|
||||
GetCurrentProcess(), // Process
|
||||
(void*)addr, // BaseAddress
|
||||
size, // Size
|
||||
MEM_COMMIT, // AllocationType
|
||||
PAGE_NOACCESS, // PageProtection
|
||||
nullptr, // ExtendedParameters
|
||||
0 // ParameterCount
|
||||
);
|
||||
|
||||
// Caller responsible for error handling
|
||||
return (uintptr_t)res;
|
||||
}
|
||||
|
||||
HANDLE XMapper::create_and_commit_paging_file_mapping(size_t size) {
|
||||
  HANDLE const file_handle = create_paging_file_mapping(size);
  if (file_handle == 0) {
    log_error(gc)("Failed to create paging file mapping (%d)", GetLastError());
    return 0;
  }

  const bool res = commit_paging_file_mapping(file_handle, 0 /* file_offset */, size);
  if (!res) {
    close_paging_file_mapping(file_handle);
    return 0;
  }

  return file_handle;
}

void XMapper::close_paging_file_mapping(HANDLE file_handle) {
  const bool res = CloseHandle(
    file_handle // hObject
  );

  if (!res) {
    fatal("Failed to close paging file handle (%d)", GetLastError());
  }
}

HANDLE XMapper::create_shared_awe_section() {
  MEM_EXTENDED_PARAMETER parameter = { 0 };
  parameter.Type = MemSectionExtendedParameterUserPhysicalFlags;
  parameter.ULong64 = 0;

  HANDLE section = XSyscall::CreateFileMapping2(
    INVALID_HANDLE_VALUE,                 // File
    nullptr,                              // SecurityAttributes
    SECTION_MAP_READ | SECTION_MAP_WRITE, // DesiredAccess
    PAGE_READWRITE,                       // PageProtection
    SEC_RESERVE | SEC_LARGE_PAGES,        // AllocationAttributes
    0,                                    // MaximumSize
    nullptr,                              // Name
    &parameter,                           // ExtendedParameters
    1                                     // ParameterCount
  );

  if (section == nullptr) {
    fatal("Could not create shared AWE section (%d)", GetLastError());
  }

  return section;
}

uintptr_t XMapper::reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size) {
  MEM_EXTENDED_PARAMETER parameter = { 0 };
  parameter.Type = MemExtendedParameterUserPhysicalHandle;
  parameter.Handle = awe_section;

  void* const res = XSyscall::VirtualAlloc2(
    GetCurrentProcess(),        // Process
    (void*)addr,                // BaseAddress
    size,                       // Size
    MEM_RESERVE | MEM_PHYSICAL, // AllocationType
    PAGE_READWRITE,             // PageProtection
    &parameter,                 // ExtendedParameters
    1                           // ParameterCount
  );

  // Caller responsible for error handling
  return (uintptr_t)res;
}

void XMapper::unreserve_for_shared_awe(uintptr_t addr, size_t size) {
  bool res = VirtualFree(
    (void*)addr, // lpAddress
    0,           // dwSize
    MEM_RELEASE  // dwFreeType
  );

  if (!res) {
    fatal("Failed to unreserve memory: " PTR_FORMAT " " SIZE_FORMAT "M (%d)",
          addr, size / M, GetLastError());
  }
}

void XMapper::split_placeholder(uintptr_t addr, size_t size) {
  const bool res = VirtualFree(
    (void*)addr,                           // lpAddress
    size,                                  // dwSize
    MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER // dwFreeType
  );

  if (!res) {
    fatal_error("Failed to split placeholder", addr, size);
  }
}

void XMapper::coalesce_placeholders(uintptr_t addr, size_t size) {
  const bool res = VirtualFree(
    (void*)addr,                            // lpAddress
    size,                                   // dwSize
    MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS // dwFreeType
  );

  if (!res) {
    fatal_error("Failed to coalesce placeholders", addr, size);
  }
}

void XMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size) {
  void* const res = XSyscall::MapViewOfFile3(
    file_handle,             // FileMapping
    GetCurrentProcess(),     // ProcessHandle
    (void*)addr,             // BaseAddress
    file_offset,             // Offset
    size,                    // ViewSize
    MEM_REPLACE_PLACEHOLDER, // AllocationType
    PAGE_READWRITE,          // PageProtection
    nullptr,                 // ExtendedParameters
    0                        // ParameterCount
  );

  if (res == nullptr) {
    fatal_error("Failed to map memory", addr, size);
  }
}

void XMapper::unmap_view_preserve_placeholder(uintptr_t addr, size_t size) {
  const bool res = XSyscall::UnmapViewOfFile2(
    GetCurrentProcess(),     // ProcessHandle
    (void*)addr,             // BaseAddress
    MEM_PRESERVE_PLACEHOLDER // UnmapFlags
  );

  if (!res) {
    fatal_error("Failed to unmap memory", addr, size);
  }
}
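The AWE helpers above (create_shared_awe_section, reserve_for_shared_awe, unreserve_for_shared_awe) deliberately leave error handling of the reservation to the caller. The sketch below is illustrative only and is not part of this change: the helper name, the policy of letting the OS pick the base address, and the logging call are assumptions about how a caller might drive these functions.

// Illustrative sketch, not part of this commit: one way a caller might use
// the AWE helpers declared in xMapper_windows.hpp. The helper name and the
// choice to let the OS pick the base address are made up for the example.
#include "gc/x/xMapper_windows.hpp"
#include "logging/log.hpp"

static uintptr_t reserve_awe_backed_range(size_t size) {
  // One shared AWE section provides the physical backing
  HANDLE const section = XMapper::create_shared_awe_section();

  // Passing addr == 0 lets the OS choose the base address; the reservation
  // is attached to the AWE section via MEM_RESERVE | MEM_PHYSICAL
  const uintptr_t addr = XMapper::reserve_for_shared_awe(section, 0 /* addr */, size);
  if (addr == 0) {
    // reserve_for_shared_awe leaves error handling to the caller
    log_error(gc)("Failed to reserve " SIZE_FORMAT "M of AWE-backed address space", size / M);
  }

  return addr;
}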
94
src/hotspot/os/windows/gc/x/xMapper_windows.hpp
Normal file
@ -0,0 +1,94 @@
/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP
#define OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP

#include "memory/allStatic.hpp"
#include "utilities/globalDefinitions.hpp"

#include <Windows.h>

class XMapper : public AllStatic {
private:
  // Create paging file mapping
  static HANDLE create_paging_file_mapping(size_t size);

  // Commit paging file mapping
  static bool commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size);

  // Map a view anywhere without a placeholder
  static uintptr_t map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size);

  // Unmap a view without preserving a placeholder
  static void unmap_view_no_placeholder(uintptr_t addr, size_t size);

  // Commit memory covering the given virtual address range
  static uintptr_t commit(uintptr_t addr, size_t size);

public:
  // Reserve memory with a placeholder
  static uintptr_t reserve(uintptr_t addr, size_t size);

  // Unreserve memory
  static void unreserve(uintptr_t addr, size_t size);

  // Create and commit paging file mapping
  static HANDLE create_and_commit_paging_file_mapping(size_t size);

  // Close paging file mapping
  static void close_paging_file_mapping(HANDLE file_handle);

  // Create a shared AWE section
  static HANDLE create_shared_awe_section();

  // Reserve memory attached to the shared AWE section
  static uintptr_t reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size);

  // Unreserve memory attached to a shared AWE section
  static void unreserve_for_shared_awe(uintptr_t addr, size_t size);

  // Split a placeholder
  //
  // A view can only replace an entire placeholder, so placeholders need to be
  // split and coalesced to be the exact size of the new views.
  // [addr, addr + size) needs to be a proper sub-placeholder of an existing
  // placeholder.
  static void split_placeholder(uintptr_t addr, size_t size);

  // Coalesce a placeholder
  //
  // [addr, addr + size) is the new placeholder. A sub-placeholder needs to
  // exist within that range.
  static void coalesce_placeholders(uintptr_t addr, size_t size);

  // Map a view of the file handle and replace the placeholder covering the
  // given virtual address range
  static void map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size);

  // Unmap the view and reinstate a placeholder covering the given virtual
  // address range
  static void unmap_view_preserve_placeholder(uintptr_t addr, size_t size);
};

#endif // OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP
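The placeholder comments in XMapper above describe a protocol: a view can only replace an entire placeholder, so the reservation has to be split before mapping and coalesced again after unmapping. The sketch below is a hypothetical usage example, not code from this patch; the function name and the chosen layout (mapping the middle of a reservation, with all offsets and sizes assumed to be allocation-granularity aligned and view strictly smaller than total) are invented for illustration.

// Hypothetical usage sketch, not part of this commit: exercises the
// split/map/unmap/coalesce protocol described in the comments above.
// Assumes base was reserved via XMapper::reserve, view < total, and that
// base, total, view and the computed middle are granularity aligned.
#include "gc/x/xMapper_windows.hpp"

static void remap_middle_of_reservation(uintptr_t base, size_t total, size_t view) {
  const uintptr_t middle = base + (total - view) / 2;

  // Physical backing: a committed paging-file segment of the view's size
  HANDLE const backing = XMapper::create_and_commit_paging_file_mapping(view);
  if (backing == 0) {
    return;
  }

  // A view can only replace a whole placeholder, so carve the middle part
  // of the single reserved placeholder out into its own placeholder
  XMapper::split_placeholder(base, middle - base);
  XMapper::split_placeholder(middle, view);

  // Replace the middle placeholder with a view of the backing segment
  XMapper::map_view_replace_placeholder(backing, 0 /* file_offset */, middle, view);

  // ... the mapped range [middle, middle + view) is now usable ...

  // Unmap while keeping the placeholder, then merge the three placeholders
  // back into one so the whole reservation can later be released in one call
  XMapper::unmap_view_preserve_placeholder(middle, view);
  XMapper::coalesce_placeholders(base, total);

  XMapper::close_paging_file_mapping(backing);
}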
42
src/hotspot/os/windows/gc/x/xNUMA_windows.cpp
Normal file
@ -0,0 +1,42 @@
/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/x/xNUMA.hpp"

void XNUMA::pd_initialize() {
  _enabled = false;
}

uint32_t XNUMA::count() {
  return 1;
}

uint32_t XNUMA::id() {
  return 0;
}

uint32_t XNUMA::memory_id(uintptr_t addr) {
  // NUMA support not enabled, assume everything belongs to node zero
  return 0;
}